content stringlengths 5 1.05M |
|---|
import math
def collision(state, n):
    """Return True if the boulder row overlaps the alien's cell.

    The first n characters of `state` encode the alien row and the last n
    characters encode the incoming boulder row; a collision is any column
    where both rows hold '1'.

    Fixes: removes the dead local `p=False` and the per-iteration re-slicing
    of `state` from the original implementation.
    """
    alien_row = state[:n]
    boulder_row = state[-n:]
    # A collision needs a '1' in the same column of both rows.
    return any(alien_row[i] == '1' and boulder_row[i] == '1' for i in range(n))
def bottom_row_rewards(full_states, n):
    """Reward each reduced state: -1000 on collision, 1 otherwise.

    A reduced state is the alien row plus the second-to-last boulder row of a
    full state. Returns the unique reduced states, the colliding subset, and
    the reward map.
    """
    # Reduce each full state and de-duplicate.
    next_row_only = list(set(a[:n] + a[-n - n:-n] for a in full_states))
    collisions = [s for s in next_row_only if collision(s, n)]
    state_reward = {s: -1000 if s in collisions else 1 for s in next_row_only}
    return next_row_only, collisions, state_reward
def bottom_row_movement(next_row_only, n):
    """Compute the alien row after each of the three actions: Right, Left, Up.

    Returns a dict mapping each reduced state to its three successor states in
    RLU order; a move off either edge leaves the alien where it is, and Up
    leaves the state unchanged.
    """
    def _row_with_alien_at(idx):
        cells = ['0'] * n
        cells[idx] = '1'
        return ''.join(cells)

    movement_states = {}
    for state in next_row_only:
        alien_row, rest = state[:n], state[n:]
        pos = alien_row.index('1')
        if pos == 0:
            right_row = _row_with_alien_at(1)
            left_row = alien_row  # cannot move further left
        elif pos == n - 1:
            right_row = alien_row  # cannot move further right
            left_row = _row_with_alien_at(pos - 1)
        else:
            right_row = _row_with_alien_at(pos + 1)
            left_row = _row_with_alien_at(pos - 1)
        movement_states[state] = [right_row + rest, left_row + rest, state]  # RLU
    return movement_states
def bottom_row_movement_rewards(movement_states, state_reward, n):
    """Look up the reward of each action outcome (R, L, U) per state.

    `n` is accepted for call-site symmetry with the sibling helpers but is
    not needed for the lookup itself.
    """
    return {state: [state_reward[outcome] for outcome in outcomes]
            for state, outcomes in movement_states.items()}
def gen_full_movement_rewards(full_states, movement_rewards_partial, n):
    """Expand the per-reduced-state action rewards to every full state.

    Each full state maps to the rewards of its reduction (alien row plus
    second-to-last boulder row).
    """
    return {state: movement_rewards_partial[state[:n] + state[-2 * n:-n]]
            for state in full_states}
def main(n, full_states):
    """Return the action rewards (R, L, U) for every full state."""
    reduced_states, _collisions, rewards = bottom_row_rewards(full_states, n)
    transitions = bottom_row_movement(reduced_states, n)
    partial_rewards = bottom_row_movement_rewards(transitions, rewards, n)
    return gen_full_movement_rewards(full_states, partial_rewards, n)
|
from __future__ import absolute_import
from builtins import object
import concurrent
import grpc
from . import bridge_pb2
from . import bridge_message
class PsBridgeServer(object):
    """gRPC server that exposes a parameter server via the Bridge service.

    :param bind: (host, port) tuple the insecure port is opened on.
    :param ps_factory: zero-argument callable that builds the parameter server;
        invoked lazily by Servicer on the Init RPC.
    """
    def __init__(self, bind, ps_factory):
        # The file-level `import concurrent` does not guarantee that the
        # `concurrent.futures` submodule is loaded; import it explicitly.
        import concurrent.futures
        # Single worker: the PS session is driven by one request at a time.
        self.server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=1))
        bridge_pb2.add_BridgeServicer_to_server(Servicer(ps_factory), self.server)
        self.server.add_insecure_port('%s:%d' % bind)

    def start(self):
        """Start serving (non-blocking)."""
        self.server.start()
class Servicer(bridge_pb2.BridgeServicer):
    """Implements the Bridge gRPC service by delegating to a parameter server."""
    def __init__(self, ps_factory):
        # The factory is stored, not called: the PS is created on the Init RPC.
        self.ps_factory = ps_factory
    def Init(self, request, context):
        # Instantiate the parameter server on first contact.
        self.ps = self.ps_factory()
        return bridge_pb2.NullMessage()
    def Run(self, request_iterator, context):
        # Deserialize (attribute path, args, kwargs), walk the attribute chain
        # starting at the PS session, call the resolved target, and stream the
        # serialized result back.
        names, args, kwargs = bridge_message.BridgeMessage.deserialize(request_iterator)
        last = self.ps.session
        for name in names:
            last = getattr(last, name)
        result = last(*args, **kwargs)
        return bridge_message.BridgeMessage.serialize(result)
    def GetX(self, request, context):
        # NOTE(review): X appears to carry the PS step counter (n_step) —
        # confirm against the proto definition.
        return bridge_pb2.X(value=self.ps.n_step())
    def StoreMetric(self, request_iterator, context):
        # Dispatch a metrics method by name with the provided kwargs.
        data = bridge_message.BridgeMessage.deserialize(request_iterator)
        getattr(self.ps.metrics, data['method'])(**data['kwargs'])
        return bridge_pb2.NullMessage()
|
from __future__ import absolute_import, division, print_function, unicode_literals
import textwrap
import pkg_resources
from mock import MagicMock, patch
from pies.overrides import *
from frosted import checker
PyCF_ONLY_AST = 1024  # flag for compile(): return the AST instead of a code object (== ast.PyCF_ONLY_AST)
@patch.object(checker.Checker, 'report')
def test_plugins(m_report):
""" Plugins should be invoked by their "check" method
"""
tree = compile(textwrap.dedent(""), "<test>", "exec", PyCF_ONLY_AST)
m_report.return_value = None
m_check = MagicMock(name="check", return_value=[(MagicMock(), None, (), {})])
m_checker = MagicMock(name="checker", check=m_check)
m_load = MagicMock(name="load", return_value=m_checker)
m_plugin = MagicMock(name="plugin", load=m_load)
with patch.object(pkg_resources, 'iter_entry_points', return_value=[m_plugin]) as mock_ep:
checker.Checker(tree, "")
assert m_check.assert_called()
assert m_report.assert_called()
|
"""
Show a Pareto curve plot for individuals in a given generation.
"""
from __future__ import division
from __future__ import print_function
import pandas as pd
import plotly.graph_objs as go
import cea.plots.optimization
from cea.plots.variable_naming import NAMING
__author__ = "Bhargava Srepathi"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Bhargava Srepathi", "Daren Thomas"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
class ParetoCurveForOneGenerationPlot(cea.plots.optimization.GenerationPlotBase):
    """Show a pareto curve for a single generation"""
    name = "Pareto curve of costs, emissions and primary energy"
    expected_parameters = {
        'generation': 'plots-optimization:generation',
        'normalization': 'plots-optimization:normalization',
        'multicriteria': 'plots-optimization:multicriteria',
        'scenario-name': 'general:scenario-name',
    }
    def __init__(self, project, parameters, cache):
        super(ParetoCurveForOneGenerationPlot, self).__init__(project, parameters, cache)
        # Columns read from the generation's total-performance results.
        self.analysis_fields = ['individual_name',
                                'TAC_sys_USD',
                                'GHG_sys_tonCO2',
                                'PEN_sys_MJoil',
                                'Capex_total_sys_USD',
                                'Opex_a_sys_USD']
        # Plot objectives: x = costs, y = emissions, z (marker color) = primary energy.
        self.objectives = ['TAC_sys_USD', 'GHG_sys_tonCO2', 'PEN_sys_MJoil']
        self.normalization = self.parameters['normalization']
        self.input_files = [(self.locator.get_optimization_generation_total_performance, [self.generation])]
        self.multi_criteria = self.parameters['multicriteria']
        self.titlex, self.titley, self.titlez = self.calc_titles()
    def calc_titles(self):
        """Return (x, y, z) axis titles matching the configured normalization."""
        # NOTE(review): the three floor-area normalizations produce identical
        # titles (all /m2.yr); confirm this duplication is intentional before
        # merging the branches.
        if self.normalization == "gross floor area":
            titlex = 'Total annualized costs [USD$(2015)/m2.yr]'
            titley = 'GHG emissions [ton CO2-eq/m2.yr]'
            titlez = 'Primary Energy <br>[MJ Oil-eq/m2.yr]'
        elif self.normalization == "net floor area":
            titlex = 'Total annualized costs [USD$(2015)/m2.yr]'
            titley = 'GHG emissions [ton CO2-eq/m2.yr]'
            titlez = 'Primary Energy <br>[MJ Oil-eq/m2.yr]'
        elif self.normalization == "air conditioned floor area":
            titlex = 'Total annualized costs [USD$(2015)/m2.yr]'
            titley = 'GHG emissions [ton CO2-eq/m2.yr]'
            titlez = 'Primary Energy <br>[MJ Oil-eq/m2.yr]'
        elif self.normalization == "building occupancy":
            titlex = 'Total annualized costs [USD$(2015)/pax.yr]'
            titley = 'GHG emissions [ton CO2-eq/pax.yr]'
            titlez = 'Primary Energy <br>[MJ Oil-eq/pax.yr]'
        else:
            titlex = 'Total annualized costs [USD$(2015)/yr]'
            titley = 'GHG emissions [ton CO2-eq/yr]'
            titlez = 'Primary Energy <br>[MJ Oil-eq/yr]'
        return titlex, titley, titlez
    @property
    def layout(self):
        # The third objective is rendered as marker color, so only x/y axes
        # appear in the layout.
        return go.Layout(legend=dict(orientation="v", x=0.8, y=0.95),
                         xaxis=dict(title=self.titlex),
                         yaxis=dict(title=self.titley))
    @property
    def title(self):
        if self.normalization != "none":
            return "Pareto curve for generation {generation} normalized to {normalized}".format(generation=self.generation, normalized=self.normalization)
        else:
            return "Pareto curve for generation {generation}".format(generation=self.generation)
    @property
    def output_path(self):
        return self.locator.get_timeseries_plots_file('gen{generation}_pareto_curve'.format(generation=self.generation),
                                                      self.category_name)
    def calc_graph(self):
        """Build the plotly traces: the pareto scatter and, when multi-criteria
        is enabled, the individuals selected by the MCDA assessment."""
        graph = []
        # #PUT THE HALL OF FAME INSIDE
        # data_HOF = self.process_generation_total_performance_halloffame()
        # data_HOF = self.normalize_data(data_HOF, self.normalization, self.objectives)
        # xs_HOF = data_HOF[self.objectives[0]].values
        # ys_HOF = data_HOF[self.objectives[1]].values
        # individual_names = data_HOF['individual_name'].values
        # trace = go.Scattergl(x=xs_HOF, y=ys_HOF, mode='markers', name='Hall of fame', text=individual_names,
        #                      marker=dict(size='12', color='grey_light'))
        # graph.append(trace)
        # PUT THE PARETO CURVE INSIDE
        data = self.process_generation_total_performance_pareto()
        data = self.normalize_data(data, self.normalization, self.objectives)
        xs = data[self.objectives[0]].values
        ys = data[self.objectives[1]].values
        zs = data[self.objectives[2]].values
        individual_names = data['individual_name'].values
        trace = go.Scattergl(x=xs, y=ys, mode='markers', name='Pareto curve', text=individual_names,
                             marker=dict(size='12', color=zs,
                                         colorbar=go.ColorBar(title=self.titlez, titleside='bottom'),
                                         colorscale='Jet', showscale=True, opacity=0.8))
        graph.append(trace)
        # This includes the points of the multicriteria assessment in here
        if self.multi_criteria:
            # Insert scatter points of MCDA assessment.
            final_dataframe = calc_final_dataframe(data)
            xs = final_dataframe[self.objectives[0]].values
            ys = final_dataframe[self.objectives[1]].values
            name = final_dataframe["Attribute"].values
            trace = go.Scattergl(x=xs, y=ys, mode='markers', name="Selected by Multi-criteria", text=name,
                                 marker=dict(size='20', color='white', line=dict(
                                     color='black',
                                     width=2)))
            graph.append(trace)
        return graph
# def calc_table(self):
# final_dataframe = calc_final_dataframe(self.process_generation_total_performance())
#
# # transform data into currency
# for column in final_dataframe.columns:
# if '_USD' in column or '_MJoil' in column or '_tonCO2' in column:
# final_dataframe[column] = final_dataframe[column].apply(lambda x: '{:20,.2f}'.format(x))
#
# columns = ["Attribute"] + self.analysis_fields
# values = []
# for field in columns:
# if field in ["Attribute", "individual_name"]:
# values.append(final_dataframe[field].values)
# else:
# values.append(final_dataframe[field].values)
#
# columns = ["Attribute"] + [NAMING[field] for field in self.analysis_fields]
# table_df = pd.DataFrame({cn: cv for cn, cv in zip(columns, values)}, columns=columns)
# return table_df
def calc_final_dataframe(individual_data):
    """Extract the four highlighted individuals of a generation as one dataframe.

    Selects the individual ranked best (rank < 2) for each criterion — total
    annualized cost, emissions, primary energy and the user-defined MCDA — and
    stacks them with an "Attribute" label per row.

    :param individual_data: dataframe with columns TAC_rank, GHG_rank,
        PEN_rank, user_MCDA_rank and individual_name (plus the value columns).
    :return: dataframe with one row per criterion and an "Attribute" column.
    """
    def _best(rank_column):
        # Rank < 2 selects the winner; MCDA reports 1.5 when two individuals tie.
        best = individual_data.loc[individual_data[rank_column] < 2]
        if best.shape[0] > 1:
            # Several individuals tied: keep a single row but record all names.
            tied_names = str(best["individual_name"].values)
            best = best.reset_index(drop=True).iloc[:1].copy()
            best["System option"] = tied_names
        return best

    # The original mixed DataFrame slices with Series (.iloc[0].T) for tied
    # ranks; treat all four criteria uniformly and use pd.concat instead of
    # the deprecated DataFrame.append.
    final_dataframe = pd.concat([_best("TAC_rank"), _best("GHG_rank"),
                                 _best("PEN_rank"), _best("user_MCDA_rank")])
    final_dataframe.reset_index(drop=True, inplace=True)
    final_dataframe["Attribute"] = ["least annualized costs", "least emissions", "least primary energy",
                                    "user defined MCDA"]
    return final_dataframe
def main():
    """Test this plot"""
    import cea.config
    import cea.plots.cache
    config = cea.config.Configuration()
    # NullPlotCache disables caching so the plot is always recomputed.
    cache = cea.plots.cache.NullPlotCache()
    ParetoCurveForOneGenerationPlot(config.project,
                                    {'scenario-name': config.scenario_name,
                                     'generation': config.plots_optimization.generation,
                                     'multicriteria': config.plots_optimization.multicriteria,
                                     'normalization': config.plots_optimization.normalization},
                                    cache).plot(auto_open=True)
if __name__ == '__main__':
    main()
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2018
# --------------------------------------------------------------------------
# gendoc: ignore
# This file is generated !
# OPL version shipped with this build; the string mirrors the numeric parts.
opl_version_major = 12
opl_version_minor = 9
opl_version_micro = 0
opl_version_string = '12.9.0'
import os
import torch
class Multiview_Dataset(torch.utils.data.Dataset):
    """
    Dataset of bird instances that have multiview matches; each index yields
    one instance together with the annotations from every matched view.
    """
    def __init__(self, root='data/cowbird/images',
                 annfile='data/cowbird/annotations/multiview_instance.pth'):
        self.root = root
        self.anns = torch.load(annfile)

    def __len__(self):
        return len(self.anns)

    def __getitem__(self, index):
        ann = self.anns[index]
        full_masks = self.get_fullsize_masks(ann['masks'], ann['bboxes'])
        return {
            'img_ids': ann['img_ids'],
            'imgpaths': [os.path.join(self.root, file) for file in ann['img_filenames']],
            'frames': ann['frames'],
            'bboxes': ann['bboxes'],
            'keypoints': ann['keypoints'].float(),
            'masks': full_masks,
        }

    def get_fullsize_masks(self, masks, bboxes, h=1200, w=1920):
        """Paste each cropped mask onto a blank (h, w) canvas at its bbox."""
        canvases = []
        for mask, box in zip(masks, bboxes):
            canvas = torch.zeros([h, w], dtype=torch.bool)
            # box is (x, y, width, height); the crop includes the far edge,
            # hence the +1 on both slice bounds.
            canvas[box[1]:box[1] + box[3] + 1, box[0]:box[0] + box[2] + 1] = mask
            canvases.append(canvas)
        return torch.stack(canvases)
|
# Copyright (C) 2018-2020 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import operator
import os
import shutil
import sys
import tempfile
from buildbot.process import remotetransfer
from buildbot.process.results import Results, SUCCESS, FAILURE, WARNINGS, SKIPPED, EXCEPTION, RETRY
from buildbot.test.fake.remotecommand import Expect, ExpectRemoteRef, ExpectShell
from buildbot.test.util.steps import BuildStepMixin
from buildbot.util import identifiers as buildbot_identifiers
from mock import call
from twisted.internet import error, reactor
from twisted.python import failure, log
from twisted.trial import unittest
from steps import (AnalyzeAPITestsResults, AnalyzeCompileWebKitResults, AnalyzeJSCTestsResults,
AnalyzeLayoutTestsResults, ApplyPatch, ApplyWatchList, ArchiveBuiltProduct, ArchiveTestResults,
CheckOutSource, CheckOutSpecificRevision, CheckPatchRelevance, CheckPatchStatusOnEWSQueues, CheckStyle,
CleanBuild, CleanUpGitIndexLock, CleanWorkingDirectory, CompileJSC, CompileJSCWithoutPatch, CompileWebKit,
CompileWebKitWithoutPatch, ConfigureBuild, CreateLocalGITCommit,
DownloadBuiltProduct, DownloadBuiltProductFromMaster, ExtractBuiltProduct, ExtractTestResults,
FindModifiedChangeLogs, InstallGtkDependencies, InstallWpeDependencies, KillOldProcesses,
PrintConfiguration, PushCommitToWebKitRepo, ReRunAPITests, ReRunJavaScriptCoreTests, ReRunWebKitPerlTests,
ReRunWebKitTests, RunAPITests, RunAPITestsWithoutPatch, RunBindingsTests, RunBuildWebKitOrgUnitTests,
RunEWSBuildbotCheckConfig, RunEWSUnitTests, RunResultsdbpyTests, RunJavaScriptCoreTests,
RunJSCTestsWithoutPatch, RunWebKit1Tests, RunWebKitPerlTests, RunWebKitPyPython2Tests,
RunWebKitPyPython3Tests, RunWebKitTests, RunWebKitTestsWithoutPatch, TestWithFailureCount,
Trigger, TransferToS3, UnApplyPatchIfRequired, UpdateWorkingDirectory, UploadBuiltProduct,
UploadTestResults, ValidateCommiterAndReviewer, ValidatePatch)
import send_email
# Tests must never email real bot watchers.
send_email.BOT_WATCHERS_EMAILS = []
# Workaround for https://github.com/buildbot/buildbot/issues/4669
from buildbot.test.fake.fakebuild import FakeBuild
FakeBuild.addStepsAfterCurrentStep = lambda FakeBuild, step_factories: None
def mock_step(step, logs='', results=SUCCESS, stopped=False, properties=None):
    """Attach canned logs/results/stopped attributes to a step for tests.

    `properties` is accepted for call-site compatibility but is not used.
    """
    step.logs = logs
    step.stopped = stopped
    step.results = results
    return step
class ExpectMasterShellCommand(object):
    """Records an expected master-side (local) shell command.

    Build expectations fluently: `expectation + rc` sets the exit code and
    `expectation + ExpectMasterShellCommand.log(name, value)` queues output.
    """
    def __init__(self, command, workdir=None, env=None, usePTY=0):
        self.args = command
        self.usePTY = usePTY
        self.rc = None
        self.logs = []
        # Default to the real environment when no explicit env is expected.
        self.env = os.environ if env is None else env
        self.path = os.path.join(os.getcwd(), workdir) if workdir else None

    @classmethod
    def log(cls, name, value):
        """Build a ('log', name, value) tuple for attaching via `+`."""
        return ('log', name, value)

    def __add__(self, other):
        # Integers set the return code; ('log', ...) tuples queue output.
        if isinstance(other, int):
            self.rc = other
        elif isinstance(other, tuple) and other[0] == 'log':
            self.logs.append((other[1], other[2]))
        return self

    def __repr__(self):
        return 'ExpectMasterShellCommand({0})'.format(repr(self.args))
class BuildStepMixinAdditions(BuildStepMixin):
    """Extends buildbot's BuildStepMixin with checking of master-side (local)
    commands, uploaded-file tracking and source-stamp expectations."""
    def setUpBuildStep(self):
        # Intercept spawnProcess so local commands are verified against
        # recorded expectations instead of actually executing.
        self.patch(reactor, 'spawnProcess', lambda *args, **kwargs: self._checkSpawnProcess(*args, **kwargs))
        self._expected_local_commands = []
        # Fresh temp cwd per test so _added_files() only sees files created
        # by the step under test.
        self._temp_directory = tempfile.mkdtemp()
        os.chdir(self._temp_directory)
        self._expected_uploaded_files = []
        super(BuildStepMixinAdditions, self).setUpBuildStep()
    def tearDownBuildStep(self):
        shutil.rmtree(self._temp_directory)
        super(BuildStepMixinAdditions, self).tearDownBuildStep()
    def fakeBuildFinished(self, text, results):
        # Stand-in for Build.buildFinished that just records the outcome.
        self.build.text = text
        self.build.results = results
    def setupStep(self, step, *args, **kwargs):
        # 'previous_steps' is our own kwarg; strip it before delegating.
        self.previous_steps = kwargs.get('previous_steps') or []
        if self.previous_steps:
            del kwargs['previous_steps']
        super(BuildStepMixinAdditions, self).setupStep(step, *args, **kwargs)
        self.build.terminate = False
        self.build.stopped = False
        self.build.executedSteps = self.executedSteps
        self.build.buildFinished = self.fakeBuildFinished
        self._expected_added_urls = []
        self._expected_sources = None
    @property
    def executedSteps(self):
        # NOTE(review): returns a lazy filter object, not a list — confirm
        # consumers only iterate it once.
        return filter(lambda step: not step.stopped, self.previous_steps)
    def setProperty(self, name, value, source='Unknown'):
        self.properties.setProperty(name, value, source)
    def getProperty(self, name):
        return self.properties.getProperty(name)
    def expectAddedURLs(self, added_urls):
        self._expected_added_urls = added_urls
    def expectUploadedFile(self, path):
        self._expected_uploaded_files.append(path)
    def expectLocalCommands(self, *expected_commands):
        self._expected_local_commands.extend(expected_commands)
    def expectRemoteCommands(self, *expected_commands):
        self.expectCommands(*expected_commands)
    def expectSources(self, expected_sources):
        self._expected_sources = expected_sources
    def _checkSpawnProcess(self, processProtocol, executable, args, env, path, usePTY, **kwargs):
        # Compare the spawned command against the next expected local command,
        # then replay its canned logs and exit status on the protocol.
        got = (executable, args, env, path, usePTY)
        if not self._expected_local_commands:
            self.fail('got local command {0} when no further commands were expected'.format(got))
        local_command = self._expected_local_commands.pop(0)
        try:
            self.assertEqual(got, (local_command.args[0], local_command.args, local_command.env, local_command.path, local_command.usePTY))
        except AssertionError:
            log.err()
            raise
        for name, value in local_command.logs:
            if name == 'stdout':
                processProtocol.outReceived(value)
            elif name == 'stderr':
                processProtocol.errReceived(value)
        if local_command.rc != 0:
            value = error.ProcessTerminated(exitCode=local_command.rc)
        else:
            value = error.ProcessDone(None)
        processProtocol.processEnded(failure.Failure(value))
    def _added_files(self):
        # Walk the temp cwd and return the relative paths of all files the
        # step created.
        results = []
        for dirpath, dirnames, filenames in os.walk(self._temp_directory):
            relative_root_path = os.path.relpath(dirpath, start=self._temp_directory)
            if relative_root_path == '.':
                relative_root_path = ''
            for name in filenames:
                results.append(os.path.join(relative_root_path, name))
        return results
    def runStep(self):
        def check(result):
            # After the step runs, verify all expectations were consumed.
            self.assertEqual(self._expected_local_commands, [], 'assert all expected local commands were run')
            self.expectAddedURLs(self._expected_added_urls)
            self.assertEqual(self._added_files(), self._expected_uploaded_files)
            if self._expected_sources is not None:
                # Convert to dictionaries because assertEqual() only knows how to diff Python built-in types.
                actual_sources = sorted([source.asDict() for source in self.build.sources], key=operator.itemgetter('codebase'))
                expected_sources = sorted([source.asDict() for source in self._expected_sources], key=operator.itemgetter('codebase'))
                self.assertEqual(actual_sources, expected_sources)
        deferred_result = super(BuildStepMixinAdditions, self).runStep()
        deferred_result.addCallback(check)
        return deferred_result
def uploadFileWithContentsOfString(string, timestamp=None):
    """Return a command behavior that feeds `string` (newline-terminated) to
    the command's remote file writer, optionally stamping a mtime."""
    payload = string + '\n'

    def behavior(command):
        writer = command.args['writer']
        writer.remote_write(payload)
        writer.remote_close()
        if timestamp:
            writer.remote_utime(timestamp)
    return behavior
class TestStepNameShouldBeValidIdentifier(BuildStepMixinAdditions, unittest.TestCase):
    """Sanity-checks the `name` attribute of every step class in steps.py."""
    def test_step_names_are_valid(self):
        import steps
        build_step_classes = inspect.getmembers(steps, inspect.isclass)
        for build_step in build_step_classes:
            # Only check classes that define `name` themselves (vars() skips
            # inherited attributes).
            if 'name' in vars(build_step[1]):
                name = build_step[1].name
                self.assertFalse(' ' in name, 'step name "{}" contain space.'.format(name))
                self.assertTrue(buildbot_identifiers.ident_re.match(name), 'step name "{}" is not a valid buildbot identifier.'.format(name))
class TestCheckStyle(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the CheckStyle step, which runs check-webkit-style."""
    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()
    def tearDown(self):
        return self.tearDownBuildStep()
    def test_success_internal(self):
        # Exit code 0: step succeeds with its default state string.
        self.setupStep(CheckStyle())
        self.setProperty('try-codebase', 'internal')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/check-webkit-style'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='check-webkit-style')
        return self.runStep()
    def test_failure_unknown_try_codebase(self):
        # Non-zero exit with no parseable output: generic failure string.
        self.setupStep(CheckStyle())
        self.setProperty('try-codebase', 'foo')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/check-webkit-style'],
                        )
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='check-webkit-style (failure)')
        return self.runStep()
    def test_failures_with_style_issues(self):
        # Non-zero exit with style errors on stdout: state string reports the count.
        self.setupStep(CheckStyle())
        self.setProperty('try-codebase', 'internal')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/check-webkit-style'],
                        )
            + ExpectShell.log('stdio', stdout='''ERROR: Source/WebCore/layout/FloatingContext.cpp:36: Code inside a namespace should not be indented. [whitespace/indent] [4]
ERROR: Source/WebCore/layout/FormattingContext.h:94: Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]
ERROR: Source/WebCore/layout/LayoutContext.cpp:52: Place brace on its own line for function definitions. [whitespace/braces] [4]
ERROR: Source/WebCore/layout/LayoutContext.cpp:55: Extra space before last semicolon. If this should be an empty statement, use { } instead. [whitespace/semicolon] [5]
ERROR: Source/WebCore/layout/LayoutContext.cpp:60: Tab found; better to use spaces [whitespace/tab] [1]
ERROR: Source/WebCore/layout/Verification.cpp:88: Missing space before ( in while( [whitespace/parens] [5]
Total errors found: 8 in 48 files''')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='8 style errors')
        return self.runStep()
    def test_failures_no_style_issues(self):
        # Non-zero exit but zero reported errors: still counts as success.
        self.setupStep(CheckStyle())
        self.setProperty('try-codebase', 'internal')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/check-webkit-style'],
                        )
            + ExpectShell.log('stdio', stdout='Total errors found: 0 in 6 files')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='check-webkit-style')
        return self.runStep()
    def test_failures_no_changes(self):
        # Non-zero exit with zero files checked: treated as a failure.
        self.setupStep(CheckStyle())
        self.setProperty('try-codebase', 'internal')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/check-webkit-style'],
                        )
            + ExpectShell.log('stdio', stdout='Total errors found: 0 in 0 files')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='check-webkit-style (failure)')
        return self.runStep()
class TestApplyWatchList(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the ApplyWatchList step (webkit-patch apply-watchlist-local)."""
    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()
    def tearDown(self):
        return self.tearDownBuildStep()
    def test_success(self):
        self.setupStep(ApplyWatchList())
        self.setProperty('bug_id', '1234')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/webkit-patch', 'apply-watchlist-local', '1234'])
            + ExpectShell.log('stdio', stdout='Result of watchlist: cc "" messages ""')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Applied WatchList')
        return self.runStep()
    def test_failure(self):
        self.setupStep(ApplyWatchList())
        self.setProperty('bug_id', '1234')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/webkit-patch', 'apply-watchlist-local', '1234'])
            + ExpectShell.log('stdio', stdout='Unexpected failure')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to apply watchlist')
        return self.runStep()
class TestRunBindingsTests(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the RunBindingsTests step (run-bindings-tests with JSON output)."""
    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'bindings_test_results.json'
        return self.setUpBuildStep()
    def tearDown(self):
        return self.tearDownBuildStep()
    def test_success(self):
        self.setupStep(RunBindingsTests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-bindings-tests', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed bindings tests')
        return self.runStep()
    def test_failure(self):
        self.setupStep(RunBindingsTests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-bindings-tests', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='FAIL: (JS) JSTestInterface.cpp')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='bindings-tests (failure)')
        return self.runStep()
class TestRunWebKitPerlTests(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the RunWebKitPerlTests step (test-webkitperl)."""
    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()
    def tearDown(self):
        return self.tearDownBuildStep()
    def configureStep(self):
        self.setupStep(RunWebKitPerlTests())
    def test_success(self):
        self.configureStep()
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/test-webkitperl'],
                        timeout=120,
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed webkitperl tests')
        return self.runStep()
    def test_failure(self):
        self.configureStep()
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/test-webkitperl'],
                        timeout=120,
                        )
            + ExpectShell.log('stdio', stdout='''Failed tests: 1-3, 5-7, 9, 11-13
Files=40, Tests=630, 4 wallclock secs ( 0.16 usr 0.09 sys + 2.78 cusr 0.64 csys = 3.67 CPU)
Result: FAIL
Failed 1/40 test programs. 10/630 subtests failed.''')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed webkitperl tests')
        return self.runStep()
class TestWebKitPyPython2Tests(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the RunWebKitPyPython2Tests step (test-webkitpy under python2)."""
    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'webkitpy_test_python2_results.json'
        self.json_with_failure = '''{"failures": [{"name": "webkitpy.port.wpe_unittest.WPEPortTest.test_diff_image"}]}\n'''
        # NOTE(review): 'errros' typo is kept as-is; renaming would be a code change.
        self.json_with_errros = '''{"failures": [],
"errors": [{"name": "webkitpy.style.checkers.cpp_unittest.WebKitStyleTest.test_os_version_checks"}, {"name": "webkitpy.port.win_unittest.WinPortTest.test_diff_image__missing_actual"}]}\n'''
        return self.setUpBuildStep()
    def tearDown(self):
        return self.tearDownBuildStep()
    def test_success(self):
        self.setupStep(RunWebKitPyPython2Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed webkitpy python2 tests')
        return self.runStep()
    def test_unexpected_failure(self):
        # Failure with no JSON log: generic failure state string.
        self.setupStep(RunWebKitPyPython2Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        )
            + ExpectShell.log('stdio', stdout='''Ran 1744 tests in 5.913s
FAILED (failures=1, errors=0)''')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='webkitpy-tests (failure)')
        return self.runStep()
    def test_failure(self):
        # JSON log lists one failure: state string names it.
        self.setupStep(RunWebKitPyPython2Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        ) +
            ExpectShell.log('json', stdout=self.json_with_failure) +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Found 1 webkitpy python2 test failure: webkitpy.port.wpe_unittest.WPEPortTest.test_diff_image')
        return self.runStep()
    def test_errors(self):
        # JSON log lists two errors: state string names both.
        self.setupStep(RunWebKitPyPython2Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        ) +
            ExpectShell.log('json', stdout=self.json_with_errros) +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Found 2 webkitpy python2 test failures: webkitpy.style.checkers.cpp_unittest.WebKitStyleTest.test_os_version_checks, webkitpy.port.win_unittest.WinPortTest.test_diff_image__missing_actual')
        return self.runStep()
class TestWebKitPyPython3Tests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunWebKitPyPython3Tests build step.

    Each test drives the step against a fake remote shell and checks the
    resulting build status and state string.
    """
    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'webkitpy_test_python3_results.json'
        # Canned --json-output content the step parses into its summary.
        self.json_with_failure = '''{"failures": [{"name": "webkitpy.port.wpe_unittest.WPEPortTest.test_diff_image"}]}\n'''
        # Fixed the misspelled attribute name 'json_with_errros' (defined and
        # used only within this class).
        self.json_with_errors = '''{"failures": [],
            "errors": [{"name": "webkitpy.style.checkers.cpp_unittest.WebKitStyleTest.test_os_version_checks"}, {"name": "webkitpy.port.win_unittest.WinPortTest.test_diff_image__missing_actual"}]}\n'''
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(RunWebKitPyPython3Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python3', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed webkitpy python3 tests')
        return self.runStep()

    def test_unexpected_failure(self):
        """A failing run with no usable json log falls back to the generic state string."""
        self.setupStep(RunWebKitPyPython3Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python3', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        )
            + ExpectShell.log('stdio', stdout='''Ran 1744 tests in 5.913s
FAILED (failures=1, errors=0)''')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='webkitpy-tests (failure)')
        return self.runStep()

    def test_failure(self):
        """A single failure from the json log is named in the state string."""
        self.setupStep(RunWebKitPyPython3Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python3', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        ) +
            ExpectShell.log('json', stdout=self.json_with_failure) +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Found 1 webkitpy python3 test failure: webkitpy.port.wpe_unittest.WPEPortTest.test_diff_image')
        return self.runStep()

    def test_errors(self):
        """Errors from the json log are counted and listed in the state string."""
        self.setupStep(RunWebKitPyPython3Tests())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python3', 'Tools/Scripts/test-webkitpy', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        timeout=120,
                        ) +
            ExpectShell.log('json', stdout=self.json_with_errors) +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Found 2 webkitpy python3 test failures: webkitpy.style.checkers.cpp_unittest.WebKitStyleTest.test_os_version_checks, webkitpy.port.win_unittest.WinPortTest.test_diff_image__missing_actual')
        return self.runStep()
class TestRunEWSBuildbotCheckConfig(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunEWSBuildbotCheckConfig build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_checkconfig(self):
        """Return the expected remote shell invocation for buildbot checkconfig."""
        return ExpectShell(
            workdir='build/Tools/BuildSlaveSupport/ews-build',
            command=['buildbot', 'checkconfig'],
            logEnviron=False,
            timeout=120,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(RunEWSBuildbotCheckConfig())
        self.expectRemoteCommands(self._expect_checkconfig() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Passed buildbot checkconfig')
        return self.runStep()

    def test_failure(self):
        """A configuration error (non-zero exit) maps to FAILURE."""
        self.setupStep(RunEWSBuildbotCheckConfig())
        self.expectRemoteCommands(
            self._expect_checkconfig()
            + ExpectShell.log('stdio', stdout='Configuration Errors: builder(s) iOS-12-Debug-Build-EWS have no schedulers to drive them')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed buildbot checkconfig')
        return self.runStep()
class TestRunEWSUnitTests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunEWSUnitTests build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_unittests(self):
        """Return the expected remote shell invocation for the EWS unit tests."""
        return ExpectShell(
            workdir='wkdir',
            command=['python', 'Tools/BuildSlaveSupport/ews-build/runUnittests.py'],
            logEnviron=False,
            timeout=120,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(RunEWSUnitTests())
        self.expectRemoteCommands(self._expect_unittests() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Passed EWS unit tests')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(RunEWSUnitTests())
        self.expectRemoteCommands(
            self._expect_unittests()
            + ExpectShell.log('stdio', stdout='Unhandled Error. Traceback (most recent call last): Keys in cmd missing from expectation: [logfiles.json]')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed EWS unit tests')
        return self.runStep()
class TestRunResultsdbpyTests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunResultsdbpyTests build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_resultsdbpy(self):
        """Return the expected remote shell invocation for the resultsdbpy tests."""
        return ExpectShell(
            workdir='wkdir',
            command=['python3', 'Tools/resultsdbpy/resultsdbpy/run-tests', '--verbose', '--no-selenium', '--fast-tests'],
            logEnviron=False,
            timeout=120,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(RunResultsdbpyTests())
        self.expectRemoteCommands(self._expect_resultsdbpy() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Passed resultsdbpy unit tests')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(RunResultsdbpyTests())
        self.expectRemoteCommands(
            self._expect_resultsdbpy()
            + ExpectShell.log('stdio', stdout='FAILED (errors=5, skipped=224)')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed resultsdbpy unit tests')
        return self.runStep()
class TestRunBuildWebKitOrgUnitTests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunBuildWebKitOrgUnitTests build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_steps_unittest(self):
        """Return the expected remote shell invocation for steps_unittest.py."""
        return ExpectShell(
            workdir='build/Tools/BuildSlaveSupport/build.webkit.org-config',
            command=['python', 'steps_unittest.py'],
            logEnviron=False,
            timeout=120,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(RunBuildWebKitOrgUnitTests())
        self.expectRemoteCommands(self._expect_steps_unittest() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Passed build.webkit.org unit tests')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(RunBuildWebKitOrgUnitTests())
        self.expectRemoteCommands(
            self._expect_steps_unittest()
            + ExpectShell.log('stdio', stdout='Unhandled Error. Traceback (most recent call last): Keys in cmd missing from expectation: [logfiles.json]')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed build.webkit.org unit tests')
        return self.runStep()
class TestKillOldProcesses(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the KillOldProcesses build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_kill(self):
        """Return the expected remote shell invocation for kill-old-processes."""
        return ExpectShell(
            workdir='wkdir',
            command=['python', 'Tools/BuildSlaveSupport/kill-old-processes', 'buildbot'],
            logEnviron=False,
            timeout=60,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.setupStep(KillOldProcesses())
        self.expectRemoteCommands(self._expect_kill() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Killed old processes')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(KillOldProcesses())
        self.expectRemoteCommands(
            self._expect_kill()
            + ExpectShell.log('stdio', stdout='Unexpected error.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to kill old processes')
        return self.runStep()
class TestCleanBuild(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CleanBuild build step.

    The clean-build script's command line is derived from the 'fullPlatform'
    and 'configuration' build properties.
    """

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """A release clean on ios-11 that exits 0 maps to SUCCESS."""
        self.setupStep(CleanBuild())
        self.setProperty('fullPlatform', 'ios-11')
        self.setProperty('configuration', 'release')
        shell = ExpectShell(
            workdir='wkdir',
            command=['python', 'Tools/BuildSlaveSupport/clean-build', '--platform=ios-11', '--release'],
        )
        self.expectRemoteCommands(shell + 0)
        self.expectOutcome(result=SUCCESS, state_string='Deleted WebKitBuild directory')
        return self.runStep()

    def test_failure(self):
        """A debug clean on ios-simulator-11 that exits non-zero maps to FAILURE."""
        self.setupStep(CleanBuild())
        self.setProperty('fullPlatform', 'ios-simulator-11')
        self.setProperty('configuration', 'debug')
        shell = ExpectShell(
            workdir='wkdir',
            command=['python', 'Tools/BuildSlaveSupport/clean-build', '--platform=ios-simulator-11', '--debug'],
        )
        self.expectRemoteCommands(shell + ExpectShell.log('stdio', stdout='Unexpected error.') + 2)
        self.expectOutcome(result=FAILURE, state_string='Deleted WebKitBuild directory (failure)')
        return self.runStep()
class TestCleanUpGitIndexLock(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CleanUpGitIndexLock build step.

    The step removes a stale .git/index.lock; on WinCairo it uses the Windows
    'del' command with a backslash path instead of 'rm'.
    """
    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """Default platform uses 'rm -f' and exit code 0 maps to SUCCESS."""
        self.setupStep(CleanUpGitIndexLock())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        command=['rm', '-f', '.git/index.lock'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Deleted .git/index.lock')
        return self.runStep()

    def test_success_windows(self):
        """Plain 'win' platform still uses the POSIX 'rm' form."""
        self.setupStep(CleanUpGitIndexLock())
        self.setProperty('platform', 'win')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        command=['rm', '-f', '.git/index.lock'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Deleted .git/index.lock')
        return self.runStep()

    def test_success_wincairo(self):
        """WinCairo uses Windows 'del' with a backslash path."""
        self.setupStep(CleanUpGitIndexLock())
        self.setProperty('platform', 'wincairo')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        # '\\' spells a single literal backslash; the original '\i'
                        # relied on an invalid escape sequence (deprecated in
                        # Python 3, same runtime bytes).
                        command=['del', '.git\\index.lock'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Deleted .git/index.lock')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(CleanUpGitIndexLock())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=120,
                        logEnviron=False,
                        command=['rm', '-f', '.git/index.lock'],
                        )
            + ExpectShell.log('stdio', stdout='Unexpected error.')
            + 1,
        )
        self.expectOutcome(result=FAILURE, state_string='Deleted .git/index.lock (failure)')
        return self.runStep()
class TestInstallGtkDependencies(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the InstallGtkDependencies build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_update_libs(self):
        """Return the expected remote shell invocation for update-webkitgtk-libs."""
        return ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/update-webkitgtk-libs', '--release'],
            logEnviron=False,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS; the step must halt the build on failure."""
        self.setupStep(InstallGtkDependencies())
        self.setProperty('configuration', 'release')
        self.assertEqual(InstallGtkDependencies.haltOnFailure, True)
        self.expectRemoteCommands(self._expect_update_libs() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Updated gtk dependencies')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(InstallGtkDependencies())
        self.setProperty('configuration', 'release')
        self.assertEqual(InstallGtkDependencies.haltOnFailure, True)
        self.expectRemoteCommands(
            self._expect_update_libs()
            + ExpectShell.log('stdio', stdout='Unexpected error.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Updated gtk dependencies (failure)')
        return self.runStep()
class TestInstallWpeDependencies(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the InstallWpeDependencies build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_update_libs(self):
        """Return the expected remote shell invocation for update-webkitwpe-libs."""
        return ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/update-webkitwpe-libs', '--release'],
            logEnviron=False,
        )

    def test_success(self):
        """Exit code 0 maps to SUCCESS; the step must halt the build on failure."""
        self.setupStep(InstallWpeDependencies())
        self.setProperty('configuration', 'release')
        self.assertEqual(InstallWpeDependencies.haltOnFailure, True)
        self.expectRemoteCommands(self._expect_update_libs() + 0)
        self.expectOutcome(result=SUCCESS, state_string='Updated wpe dependencies')
        return self.runStep()

    def test_failure(self):
        """A non-zero exit code maps to FAILURE."""
        self.setupStep(InstallWpeDependencies())
        self.setProperty('configuration', 'release')
        self.assertEqual(InstallWpeDependencies.haltOnFailure, True)
        self.expectRemoteCommands(
            self._expect_update_libs()
            + ExpectShell.log('stdio', stdout='Unexpected error.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Updated wpe dependencies (failure)')
        return self.runStep()
class TestCompileWebKit(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CompileWebKit build step.

    The build-webkit command line is derived from the 'configuration' and
    'platform' build properties.
    """

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_build(self, command):
        """Return the expected remote shell invocation for the given build command."""
        return ExpectShell(workdir='wkdir', logEnviron=False, command=command)

    def test_success(self):
        """Default (Apple) platform builds with only the configuration flag."""
        self.setupStep(CompileWebKit())
        self.setProperty('fullPlatform', 'ios-simulator-11')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            self._expect_build(['perl', 'Tools/Scripts/build-webkit', '--release']) + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Compiled WebKit')
        return self.runStep()

    def test_success_gtk(self):
        """GTK platform appends --gtk to the build command."""
        self.setupStep(CompileWebKit())
        self.setProperty('platform', 'gtk')
        self.setProperty('fullPlatform', 'gtk')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            self._expect_build(['perl', 'Tools/Scripts/build-webkit', '--release', '--gtk']) + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Compiled WebKit')
        return self.runStep()

    def test_success_wpe(self):
        """WPE platform appends --wpe to the build command."""
        self.setupStep(CompileWebKit())
        self.setProperty('platform', 'wpe')
        self.setProperty('fullPlatform', 'wpe')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            self._expect_build(['perl', 'Tools/Scripts/build-webkit', '--release', '--wpe']) + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Compiled WebKit')
        return self.runStep()

    def test_failure(self):
        """A compile error (non-zero exit) maps to FAILURE."""
        self.setupStep(CompileWebKit())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            self._expect_build(['perl', 'Tools/Scripts/build-webkit', '--debug'])
            + ExpectShell.log('stdio', stdout='1 error generated.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to compile WebKit')
        return self.runStep()

    def test_skip_for_revert_patches_on_commit_queue(self):
        """Revert patches on the commit queue skip compilation entirely."""
        self.setupStep(CompileWebKit())
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('configuration', 'debug')
        self.setProperty('revert', True)
        self.expectOutcome(result=SKIPPED, state_string='Compiled WebKit (skipped)')
        return self.runStep()
class TestCompileWebKitWithoutPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CompileWebKitWithoutPatch build step.

    The step only runs when a prior patched build or test run failed;
    otherwise it is hidden and skipped.
    """

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """Clean-tree build runs after a patched build failure and exit 0 maps to SUCCESS."""
        self.setupStep(CompileWebKitWithoutPatch())
        self.setProperty('fullPlatform', 'ios-simulator-11')
        self.setProperty('configuration', 'release')
        self.setProperty('patchFailedToBuild', True)
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/build-webkit', '--release'],
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + 0)
        self.expectOutcome(result=SUCCESS, state_string='Compiled WebKit')
        return self.runStep()

    def test_failure(self):
        """Clean-tree build runs after patched tests failed; a compile error maps to FAILURE."""
        self.setupStep(CompileWebKitWithoutPatch())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        self.setProperty('patchFailedTests', True)
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/build-webkit', '--debug'],
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + ExpectShell.log('stdio', stdout='1 error generated.') + 2)
        self.expectOutcome(result=FAILURE, state_string='Failed to compile WebKit')
        return self.runStep()

    def test_skip(self):
        """Without a prior patch failure the step is hidden and skipped."""
        self.setupStep(CompileWebKitWithoutPatch())
        self.setProperty('fullPlatform', 'ios-simulator-11')
        self.setProperty('configuration', 'release')
        self.expectHidden(True)
        self.expectOutcome(result=SKIPPED, state_string='Compiled WebKit (skipped)')
        return self.runStep()
class TestAnalyzeCompileWebKitResults(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the AnalyzeCompileWebKitResults build step."""
    def setUp(self):
        self.longMessage = True
        # Stub out email sending so tests never touch mail infrastructure.
        # Save the original first and restore it in tearDown: the previous code
        # patched the class permanently, leaking the stub into every later test
        # in the process that uses AnalyzeCompileWebKitResults.
        self._saved_send_email = AnalyzeCompileWebKitResults.send_email_for_build_failure
        AnalyzeCompileWebKitResults.send_email_for_build_failure = lambda self: None
        return self.setUpBuildStep()

    def tearDown(self):
        AnalyzeCompileWebKitResults.send_email_for_build_failure = self._saved_send_email
        return self.tearDownBuildStep()

    def test_patch_with_build_failure(self):
        """Patch fails to build while clean tree builds: the patch is blamed."""
        previous_steps = [
            mock_step(CompileWebKit(), results=FAILURE),
            mock_step(CompileWebKitWithoutPatch(), results=SUCCESS),
        ]
        self.setupStep(AnalyzeCompileWebKitResults(), previous_steps=previous_steps)
        self.setProperty('patch_id', '1234')
        self.expectOutcome(result=FAILURE, state_string='Patch 1234 does not build (failure)')
        rc = self.runStep()
        # Outside the commit queue no bugzilla comment or summary is produced.
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_patch_with_build_failure_on_commit_queue(self):
        """On the commit queue a build failure also sets the bugzilla comment and summary."""
        previous_steps = [
            mock_step(CompileWebKit(), results=FAILURE),
            mock_step(CompileWebKitWithoutPatch(), results=SUCCESS),
        ]
        self.setupStep(AnalyzeCompileWebKitResults(), previous_steps=previous_steps)
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'commit-queue')
        self.expectOutcome(result=FAILURE, state_string='Patch 1234 does not build (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), 'Patch 1234 does not build')
        self.assertEqual(self.getProperty('build_finish_summary'), 'Patch 1234 does not build')
        return rc

    def test_patch_with_trunk_failure(self):
        """Clean tree also fails to build: the patch is not blamed; build retries."""
        previous_steps = [
            mock_step(CompileWebKit(), results=FAILURE),
            mock_step(CompileWebKitWithoutPatch(), results=FAILURE),
        ]
        self.setupStep(AnalyzeCompileWebKitResults(), previous_steps=previous_steps)
        self.expectOutcome(result=FAILURE, state_string='Unable to build WebKit without patch, retrying build (failure)')
        return self.runStep()

    def test_filter_logs_containing_error(self):
        """Only lines containing 'error' survive the filter."""
        logs = 'In file included from WebCore/unified-sources/UnifiedSource263.cpp:4:\nImageBufferIOSurfaceBackend.cpp:108:30: error: definition of implicitly declared destructor'
        expected_output = 'ImageBufferIOSurfaceBackend.cpp:108:30: error: definition of implicitly declared destructor'
        output = AnalyzeCompileWebKitResults().filter_logs_containing_error(logs)
        self.assertEqual(expected_output, output)

    def test_filter_logs_containing_error_with_too_many_errors(self):
        """The filter caps the number of reported error lines."""
        logs = 'Error:1\nError:2\nerror:3\nerror:4\nerror:5\nrandom-string\nerror:6\nerror:7\nerror8\nerror:9\nerror:10\nerror:11\nerror:12\nerror:13'
        expected_output = 'error:3\nerror:4\nerror:5\nerror:6\nerror:7\nerror:9\nerror:10\nerror:11\nerror:12\nerror:13'
        output = AnalyzeCompileWebKitResults().filter_logs_containing_error(logs)
        self.assertEqual(expected_output, output)

    def test_filter_logs_containing_error_with_no_error(self):
        """Logs without any error lines filter down to an empty string."""
        logs = 'CompileC /Volumes/Data/worker/macOS-Mojave-Release-Build-EWS'
        expected_output = ''
        output = AnalyzeCompileWebKitResults().filter_logs_containing_error(logs)
        self.assertEqual(expected_output, output)
class TestCompileJSC(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CompileJSC build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """A release JSC build that exits 0 maps to SUCCESS."""
        self.setupStep(CompileJSC())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'release')
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/build-jsc', '--release'],
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + 0)
        self.expectOutcome(result=SUCCESS, state_string='Compiled JSC')
        return self.runStep()

    def test_failure(self):
        """A compile error in a debug JSC build maps to FAILURE."""
        self.setupStep(CompileJSC())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'debug')
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/build-jsc', '--debug'],
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + ExpectShell.log('stdio', stdout='1 error generated.') + 2)
        self.expectOutcome(result=FAILURE, state_string='Failed to compile JSC')
        return self.runStep()
class TestCompileJSCWithoutPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CompileJSCWithoutPatch build step."""
    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        """Clean-tree JSC build runs after a patched build failure; exit 0 maps to SUCCESS."""
        self.setupStep(CompileJSCWithoutPatch())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'release')
        # Use a boolean, consistent with TestCompileWebKitWithoutPatch; the
        # original passed the string 'True', which only worked by truthiness.
        self.setProperty('patchFailedToBuild', True)
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/build-jsc', '--release'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Compiled JSC')
        return self.runStep()

    def test_failure(self):
        """A compile error in the clean-tree JSC build maps to FAILURE."""
        self.setupStep(CompileJSCWithoutPatch())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/build-jsc', '--debug'],
                        )
            + ExpectShell.log('stdio', stdout='1 error generated.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to compile JSC')
        return self.runStep()
class TestRunJavaScriptCoreTests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunJavaScriptCoreTests build step.

    The canned fixtures below mimic the --json-output file written by
    run-javascriptcore-tests; the step parses them into the
    '<prefix>stress_test_failures' and '<prefix>binary_failures' properties,
    where prefix comes from the step class (so the re-run subclass can reuse
    these tests with its own property names).
    """
    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'jsc_results.json'
        # One binary failure (testmasm), no stress-test failures.
        self.jsc_masm_failure = '''{"allDFGTestsPassed":true,"allMasmTestsPassed":false,"allB3TestsPassed":true,"allAirTestsPassed":true,"stressTestFailures":[],"allApiTestsPassed":true}\n'''
        # One binary failure (testb3) plus one stress-test failure.
        self.jsc_b3_and_stress_test_failure = '''{"allDFGTestsPassed":true,"allMasmTestsPassed":true,"allB3TestsPassed":false,"allAirTestsPassed":true,"allApiTestsPassed":true,"stressTestFailures":["stress/weakset-gc.js"]}\n'''
        # Two binary failures (testdfg, testair) plus one stress-test failure.
        self.jsc_dfg_air_and_stress_test_failure = '''{"allDFGTestsPassed":false,"allMasmTestsPassed":true,"allB3TestsPassed":true,"allAirTestsPassed":false,"allApiTestsPassed":true,"stressTestFailures":["stress/weakset-gc.js"]}\n'''
        # A single stress-test failure, all binaries passing.
        self.jsc_single_stress_test_failure = '''{"allDFGTestsPassed":true,"allMasmTestsPassed":true,"allB3TestsPassed":true,"allAirTestsPassed":true,"stressTestFailures":["stress/switch-on-char-llint-rope.js.dfg-eager"],"allApiTestsPassed":true}\n'''
        # Fourteen stress-test failures; used to test state-string truncation.
        self.jsc_multiple_stress_test_failures = '''{"allDFGTestsPassed":true,"allMasmTestsPassed":true,"allB3TestsPassed":true,"allAirTestsPassed":true,"stressTestFailures":["stress/switch-on-char-llint-rope.js.dfg-eager","stress/switch-on-char-llint-rope.js.dfg-eager-no-cjit-validate","stress/switch-on-char-llint-rope.js.eager-jettison-no-cjit","stress/switch-on-char-llint-rope.js.ftl-eager","stress/switch-on-char-llint-rope.js.ftl-eager-no-cjit","stress/switch-on-char-llint-rope.js.ftl-eager-no-cjit-b3o1","stress/switch-on-char-llint-rope.js.ftl-no-cjit-b3o0","stress/switch-on-char-llint-rope.js.ftl-no-cjit-no-inline-validate","stress/switch-on-char-llint-rope.js.ftl-no-cjit-no-put-stack-validate","stress/switch-on-char-llint-rope.js.ftl-no-cjit-small-pool","stress/switch-on-char-llint-rope.js.ftl-no-cjit-validate-sampling-profiler","stress/switch-on-char-llint-rope.js.no-cjit-collect-continuously","stress/switch-on-char-llint-rope.js.no-cjit-validate-phases","stress/switch-on-char-llint-rope.js.no-ftl"],"allApiTestsPassed":true}\n'''
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def configureStep(self, platform=None, fullPlatform=None, configuration=None):
        """Install the step and optional build properties.

        Overridden by TestReRunJavaScriptCoreTests so every test below also
        exercises the re-run step with its own property prefix.
        """
        self.setupStep(RunJavaScriptCoreTests())
        self.prefix = RunJavaScriptCoreTests.prefix
        if platform:
            self.setProperty('platform', platform)
        if fullPlatform:
            self.setProperty('fullPlatform', fullPlatform)
        if configuration:
            self.setProperty('configuration', configuration)

    def test_success(self):
        """Exit code 0 maps to SUCCESS."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release'],
                        logfiles={'json': self.jsonFileName},
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_remote_success(self):
        """The 'remotes' property adds remote-config and disables binary test suites."""
        self.configureStep(platform='jsc-only', fullPlatform='jsc-only', configuration='release')
        self.setProperty('remotes', 'remote-machines.json')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release', '--remote-config-file=remote-machines.json', '--no-testmasm', '--no-testair', '--no-testb3', '--no-testdfg', '--no-testapi', '--memory-limited', '--jsc-only'],
                        logfiles={'json': self.jsonFileName},
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_failure(self):
        """A failing run with no json log falls back to the generic state string."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--debug'],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='9 failures found.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='jscore-tests (failure)')
        return self.runStep()

    def test_single_stress_test_failure(self):
        """One stress failure is named in the state string and recorded as a property."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        logfiles={'json': self.jsonFileName},
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--debug'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.jsc_single_stress_test_failure),
        )
        self.expectOutcome(result=FAILURE, state_string='Found 1 jsc stress test failure: stress/switch-on-char-llint-rope.js.dfg-eager')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.prefix + 'stress_test_failures'), ['stress/switch-on-char-llint-rope.js.dfg-eager'])
        self.assertEqual(self.getProperty(self.prefix + 'binary_failures'), None)
        return rc

    def test_lot_of_stress_test_failure(self):
        """Many stress failures: the state string is truncated with ' ...'."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        logfiles={'json': self.jsonFileName},
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--debug'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.jsc_multiple_stress_test_failures),
        )
        self.expectOutcome(result=FAILURE, state_string='Found 14 jsc stress test failures: stress/switch-on-char-llint-rope.js.dfg-eager, stress/switch-on-char-llint-rope.js.dfg-eager-no-cjit-validate, stress/switch-on-char-llint-rope.js.eager-jettison-no-cjit, stress/switch-on-char-llint-rope.js.ftl-eager, stress/switch-on-char-llint-rope.js.ftl-eager-no-cjit ...')
        rc = self.runStep()
        # The property keeps the full, untruncated failure list.
        self.assertEqual(self.getProperty(self.prefix + 'stress_test_failures'), ["stress/switch-on-char-llint-rope.js.dfg-eager", "stress/switch-on-char-llint-rope.js.dfg-eager-no-cjit-validate", "stress/switch-on-char-llint-rope.js.eager-jettison-no-cjit", "stress/switch-on-char-llint-rope.js.ftl-eager", "stress/switch-on-char-llint-rope.js.ftl-eager-no-cjit", "stress/switch-on-char-llint-rope.js.ftl-eager-no-cjit-b3o1", "stress/switch-on-char-llint-rope.js.ftl-no-cjit-b3o0", "stress/switch-on-char-llint-rope.js.ftl-no-cjit-no-inline-validate", "stress/switch-on-char-llint-rope.js.ftl-no-cjit-no-put-stack-validate", "stress/switch-on-char-llint-rope.js.ftl-no-cjit-small-pool", "stress/switch-on-char-llint-rope.js.ftl-no-cjit-validate-sampling-profiler", "stress/switch-on-char-llint-rope.js.no-cjit-collect-continuously", "stress/switch-on-char-llint-rope.js.no-cjit-validate-phases", "stress/switch-on-char-llint-rope.js.no-ftl"])
        self.assertEqual(self.getProperty(self.prefix + 'binary_failures'), None)
        return rc

    def test_masm_failure(self):
        """A testmasm binary failure is reported without stress failures."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        logfiles={'json': self.jsonFileName},
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--debug'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.jsc_masm_failure),
        )
        self.expectOutcome(result=FAILURE, state_string='JSC test binary failure: testmasm')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.prefix + 'stress_test_failures'), None)
        self.assertEqual(self.getProperty(self.prefix + 'binary_failures'), ['testmasm'])
        return rc

    def test_b3_and_stress_test_failure(self):
        """Both a binary failure and a stress failure appear in the state string."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        logfiles={'json': self.jsonFileName},
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.jsc_b3_and_stress_test_failure),
        )
        self.expectOutcome(result=FAILURE, state_string='Found 1 jsc stress test failure: stress/weakset-gc.js, JSC test binary failure: testb3')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.prefix + 'stress_test_failures'), ['stress/weakset-gc.js'])
        self.assertEqual(self.getProperty(self.prefix + 'binary_failures'), ['testb3'])
        return rc

    def test_dfg_air_and_stress_test_failure(self):
        """Multiple binary failures plus a stress failure are all reported."""
        self.configureStep(platform='jsc-only', fullPlatform='jsc-only', configuration='release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        logfiles={'json': self.jsonFileName},
                        command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release', '--jsc-only'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.jsc_dfg_air_and_stress_test_failure),
        )
        self.expectOutcome(result=FAILURE, state_string='Found 1 jsc stress test failure: stress/weakset-gc.js, JSC test binary failures: testair, testdfg')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.prefix + 'stress_test_failures'), ['stress/weakset-gc.js'])
        self.assertEqual(self.getProperty(self.prefix + 'binary_failures'), ['testair', 'testdfg'])
        return rc
class TestReRunJavaScriptCoreTests(TestRunJavaScriptCoreTests):
    """Runs the parent class's cases against ReRunJavaScriptCoreTests.

    Only configureStep and the two success cases (which report previously
    failing tests as flaky) are overridden.
    """

    def configureStep(self, platform=None, fullPlatform=None, configuration=None):
        """Install a ReRunJavaScriptCoreTests step and optional build properties."""
        self.setupStep(ReRunJavaScriptCoreTests())
        self.prefix = ReRunJavaScriptCoreTests.prefix
        for name, value in (('platform', platform), ('fullPlatform', fullPlatform), ('configuration', configuration)):
            if value:
                self.setProperty(name, value)

    def test_success(self):
        """A clean re-run marks the first run's failures as flaky."""
        self.configureStep(platform='mac', fullPlatform='mac-highsierra', configuration='release')
        self.setProperty('jsc_stress_test_failures', ['test1', 'test2'])
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release'],
            logfiles={'json': self.jsonFileName},
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + 0)
        self.expectOutcome(result=SUCCESS, state_string='Found flaky tests: test1, test2')
        return self.runStep()

    def test_remote_success(self):
        """A clean remote re-run marks the first run's binary failure as flaky."""
        self.configureStep(platform='jsc-only', fullPlatform='jsc-only', configuration='release')
        self.setProperty('remotes', 'remote-machines.json')
        self.setProperty('jsc_binary_failures', ['testmasm'])
        shell = ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), '--release', '--remote-config-file=remote-machines.json', '--no-testmasm', '--no-testair', '--no-testb3', '--no-testdfg', '--no-testapi', '--memory-limited', '--jsc-only'],
            logfiles={'json': self.jsonFileName},
            logEnviron=False,
        )
        self.expectRemoteCommands(shell + 0)
        self.expectOutcome(result=SUCCESS, state_string='Found flaky test: testmasm')
        return self.runStep()
class TestRunJSCTestsWithoutPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunJSCTestsWithoutPatch build step."""

    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'jsc_results.json'
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _expect_jsc_tests(self, configuration_flag):
        """Return the expected remote shell invocation for the given configuration flag."""
        return ExpectShell(
            workdir='wkdir',
            command=['perl', 'Tools/Scripts/run-javascriptcore-tests', '--no-build', '--no-fail-fast', '--json-output={0}'.format(self.jsonFileName), configuration_flag],
            logfiles={'json': self.jsonFileName},
            logEnviron=False,
        )

    def test_success(self):
        """A clean-tree run that exits 0 maps to SUCCESS."""
        self.setupStep(RunJSCTestsWithoutPatch())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(self._expect_jsc_tests('--release') + 0)
        self.expectOutcome(result=SUCCESS, state_string='jscore-tests')
        return self.runStep()

    def test_failure(self):
        """A clean-tree run with failures maps to FAILURE."""
        self.setupStep(RunJSCTestsWithoutPatch())
        self.setProperty('fullPlatform', 'jsc-only')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            self._expect_jsc_tests('--debug')
            + ExpectShell.log('stdio', stdout='9 failures found.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='jscore-tests (failure)')
        return self.runStep()
class TestAnalyzeJSCTestsResults(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the AnalyzeJSCTestsResults build step.

    The step compares first-run, re-run and clean-tree JSC failures to decide
    whether failures are new (FAILURE), pre-existing or flaky (SUCCESS).
    """

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def configureStep(self):
        # Start every case with all failure lists empty; individual tests
        # override only the properties they care about.
        self.setupStep(AnalyzeJSCTestsResults())
        self.setProperty('jsc_stress_test_failures', [])
        self.setProperty('jsc_binary_failures', [])
        self.setProperty('jsc_rerun_stress_test_failures', [])
        self.setProperty('jsc_rerun_binary_failures', [])
        self.setProperty('jsc_clean_tree_stress_test_failures', [])
        self.setProperty('jsc_clean_tree_binary_failures', [])

    def test_single_new_stress_failure(self):
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['stress/force-error.js.bytecode-cache'])
        self.setProperty('jsc_rerun_stress_test_failures', ['stress/force-error.js.bytecode-cache'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new JSC stress test failure: stress/force-error.js.bytecode-cache (failure)')
        return self.runStep()

    def test_single_new_binary_failure(self):
        self.configureStep()
        self.setProperty('jsc_binary_failures', ['testmasm'])
        self.setProperty('jsc_rerun_binary_failures', ['testmasm'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new JSC binary failure: testmasm (failure)')
        return self.runStep()

    def test_multiple_new_stress_failure(self):
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('jsc_rerun_stress_test_failures', ['test{}'.format(i) for i in range(0, 30)])
        # The message lists only the first 10 names; their ordering reflects
        # the step's internal set iteration and is pinned as-is here.
        self.expectOutcome(result=FAILURE, state_string='Found 30 new JSC stress test failures: test1, test0, test3, test2, test5, test4, test7, test6, test9, test8 ... (failure)')
        return self.runStep()

    def test_multiple_new_binary_failure(self):
        self.configureStep()
        self.setProperty('jsc_binary_failures', ['testmasm', 'testair', 'testb3', 'testdfg', 'testapi'])
        self.setProperty('jsc_rerun_binary_failures', ['testmasm', 'testair', 'testb3', 'testdfg', 'testapi'])
        # Ordering pinned from the step's set iteration (see note above in
        # test_multiple_new_stress_failure).
        self.expectOutcome(result=FAILURE, state_string='Found 5 new JSC binary failures: testb3, testmasm, testapi, testdfg, testair (failure)')
        return self.runStep()

    def test_new_stress_and_binary_failure(self):
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['es6.yaml/es6/Set_iterator_closing.js.default'])
        self.setProperty('jsc_binary_failures', ['testmasm'])
        self.setProperty('jsc_rerun_stress_test_failures', ['es6.yaml/es6/Set_iterator_closing.js.default'])
        self.setProperty('jsc_rerun_binary_failures', ['testmasm'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new JSC binary failure: testmasm, Found 1 new JSC stress test failure: es6.yaml/es6/Set_iterator_closing.js.default (failure)')
        return self.runStep()

    def test_stress_failure_on_clean_tree(self):
        # Same failure on the clean tree: pre-existing, so the step passes.
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['stress/force-error.js.default'])
        self.setProperty('jsc_rerun_stress_test_failures', ['stress/force-error.js.default'])
        self.setProperty('jsc_clean_tree_stress_test_failures', ['stress/force-error.js.default'])
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_binary_failure_on_clean_tree(self):
        self.configureStep()
        self.setProperty('jsc_binary_failures', ['testdfg'])
        self.setProperty('jsc_rerun_binary_failures', ['testdfg'])
        self.setProperty('jsc_clean_tree_binary_failures', ['testdfg'])
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_stress_and_binary_failure_on_clean_tree(self):
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['es6.yaml/es6/Set_iterator_closing.js.default'])
        self.setProperty('jsc_binary_failures', ['testair'])
        self.setProperty('jsc_rerun_stress_test_failures', ['es6.yaml/es6/Set_iterator_closing.js.default'])
        self.setProperty('jsc_rerun_binary_failures', ['testair'])
        self.setProperty('jsc_clean_tree_stress_test_failures', ['es6.yaml/es6/Set_iterator_closing.js.default'])
        self.setProperty('jsc_clean_tree_binary_failures', ['testair'])
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_flaky_stress_and_binary_failures(self):
        # Failures that disappear on re-run are treated as flaky -> SUCCESS.
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['stress/force-error.js.default'])
        self.setProperty('jsc_binary_failures', ['testapi'])
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()

    def test_flaky_and_consistent_stress_failures(self):
        # test1 is flaky (not in re-run); test2 is consistent -> new failure.
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['test1', 'test2'])
        self.setProperty('jsc_rerun_stress_test_failures', ['test2'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new JSC stress test failure: test2 (failure)')
        return self.runStep()

    def test_flaky_and_consistent_failures_with_clean_tree_failures(self):
        self.configureStep()
        self.setProperty('jsc_stress_test_failures', ['test1', 'test2'])
        self.setProperty('jsc_rerun_stress_test_failures', ['test1'])
        self.setProperty('jsc_clean_tree_stress_test_failures', ['test1', 'test2'])
        self.expectOutcome(result=SUCCESS, state_string='Passed JSC tests')
        return self.runStep()
class TestRunWebKitTests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunWebKitTests (first layout-test run) build step.

    setUp() builds several canned ADD_RESULTS(...) JSON payloads used to
    exercise the step's results-JSON parsing (regressions, flakes, a mix,
    and a payload broken across lines as emitted by some log transports).
    """

    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'layout-test-results/full_results.json'
        # 10 regressions, interrupted run (exceeds the failure limit).
        self.results_json_regressions = '''ADD_RESULTS({"tests":{"imported":{"w3c":{"web-platform-tests":{"IndexedDB":{"interleaved-cursors-large.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}},"wasm":{"jsapi":{"interface.any.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"},"instance":{"constructor-bad-imports.any.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}},"global":{"constructor.any.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"},"constructor.any.worker.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"},"toString.any.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}},"constructor":{"instantiate-bad-imports.any.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"},"instantiate-bad-imports.any.worker.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}},"interface.any.worker.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}}}}},"blink":{"storage":{"indexeddb":{"blob-valid-before-commit.html":{"report":"REGRESSION","expected":"PASS","actual":"TIMEOUT","has_stderr":true}}}}}},"skipped":23256,"num_regressions":10,"other_crashes":{},"interrupted":true,"num_missing":0,"layout_tests_dir":"/Volumes/Data/worker/iOS-12-Simulator-WK2-Tests-EWS/build/LayoutTests","version":4,"num_passes":32056,"pixel_tests_enabled":false,"date":"06:21AM on July 15, 2019","has_pretty_patch":true,"fixable":23267,"num_flaky":0,"uses_expectations_file":true});
'''
        # Only flaky results, no regressions.
        self.results_json_flakes = '''ADD_RESULTS({"tests":{"http":{"tests":{"workers":{"service":{"service-worker-resource-timing.https.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true}}},"xmlhttprequest":{"post-content-type-document.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS"}}}},"imported":{"blink":{"storage":{"indexeddb":{"blob-valid-before-commit.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true}}},"fast":{"text":{"international":{"repaint-glyph-bounds.html":{"report":"FLAKY","expected":"PASS","actual":"IMAGE PASS","reftest_type":["=="],"image_diff_percent":0.08}}}}}}},"skipped":13176,"num_regressions":0,"other_crashes":{},"interrupted":false,"num_missing":0,"layout_tests_dir":"/Volumes/Data/worker/iOS-12-Simulator-WK2-Tests-EWS/build/LayoutTests","version":4,"num_passes":42185,"pixel_tests_enabled":false,"date":"06:54AM on July 17, 2019","has_pretty_patch":true,"fixable":55356,"num_flaky":4,"uses_expectations_file":true});
'''
        # Flakes plus a single genuine regression.
        self.results_json_mix_flakes_and_regression = '''ADD_RESULTS({"tests":{"http":{"tests":{"IndexedDB":{"collect-IDB-objects.https.html":{"report":"FLAKY","expected":"PASS","actual":"TEXT PASS"}},"xmlhttprequest":{"on-network-timeout-error-during-preflight.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS"}}}},"transitions":{"lengthsize-transition-to-from-auto.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS"}},"imported":{"blink":{"storage":{"indexeddb":{"blob-valid-before-commit.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true}}}}},"fast":{"text":{"font-weight-fallback.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true,"reftest_type":["=="]}},"scrolling":{"ios":{"reconcile-layer-position-recursive.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}}}}},"skipped":13174,"num_regressions":1,"other_crashes":{},"interrupted":false,"num_missing":0,"layout_tests_dir":"/Volumes/Data/worker/iOS-12-Simulator-WK2-Tests-EWS/build/LayoutTests","version":4,"num_passes":42158,"pixel_tests_enabled":false,"date":"11:28AM on July 16, 2019","has_pretty_patch":true,"fixable":55329,"num_flaky":5,"uses_expectations_file":true});
'''
        # Same payload as above, but deliberately split mid-token across lines
        # to verify the parser tolerates embedded newlines in the log.
        self.results_json_with_newlines = '''ADD_RESULTS({"tests":{"http":{"tests":{"IndexedDB":{"collect-IDB-objects.https.html":{"report":"FLAKY","expected":"PASS","actual":"TEXT PASS"}},"xmlhttprequest":{"on-network-timeout-error-during-preflight.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS"}}}},"transitions":{"lengthsize-trans
ition-to-from-auto.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS"}},"imported":{"blink":{"storage":{"indexeddb":{"blob-valid-before-commit.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true}}}}},"fast":{"text":{"font-weight-fallback.html":{"report":"FLAKY","expected":"PASS","actual":"TIMEOUT PASS","has_stderr":true,"reftest_type":["=="]}},"scrolling":{"ios":{"reconcile-layer-position-recursive.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT"}}}}},"skipped":13174,"num_regressions":1,"other_crashes":{},"interrupted":false,"num_missing":0,"layout_tests_dir":"/Volumes/Data/worker/iOS-12-Simulator-WK2-Tests-EWS/build/LayoutTes
ts","version":4,"num_passes":42158,"pixel_tests_enabled":false,"date":"11:28AM on July 16, 2019","has_pretty_patch":true,"fixable":55329,"num_flaky":5,"uses_expectations_file":true});
'''
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def configureStep(self):
        # Hook overridden by subclasses (e.g. TestReRunWebKitTests) to swap
        # in a different step and its corresponding property names.
        self.setupStep(RunWebKitTests())
        self.property_exceed_failure_limit = 'first_results_exceed_failure_limit'
        self.property_failures = 'first_run_failures'

    def test_success(self):
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        return self.runStep()

    def test_warnings(self):
        # Flaky tests reported in stdio yield WARNINGS with a flake count.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 0
            + ExpectShell.log('stdio', stdout='''Unexpected flakiness: timeouts (2)
imported/blink/storage/indexeddb/blob-valid-before-commit.html [ Timeout Pass ]
storage/indexeddb/modern/deleteindex-2.html [ Timeout Pass ]'''),
        )
        self.expectOutcome(result=WARNINGS, state_string='2 flakes')
        return self.runStep()

    def test_skip_for_revert_patches_on_commit_queue(self):
        self.configureStep()
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('fullPlatform', 'mac')
        self.setProperty('configuration', 'debug')
        self.setProperty('revert', True)
        self.expectOutcome(result=SKIPPED, state_string='layout-tests (skipped)')
        return self.runStep()

    def test_skip_for_mac_wk2_passed_patch_on_commit_queue(self):
        self.configureStep()
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('fullPlatform', 'mac')
        self.setProperty('configuration', 'debug')
        self.setProperty('passed_mac_wk2', True)
        self.expectOutcome(result=SKIPPED, state_string='layout-tests (skipped)')
        return self.runStep()

    def test_parse_results_json_regression(self):
        # Interrupted run with 10 regressions: failure-limit flag must be set
        # and the failures list extracted from the JSON log in order.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.results_json_regressions),
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.property_exceed_failure_limit), True)
        self.assertEqual(self.getProperty(self.property_failures),
                         ["imported/w3c/web-platform-tests/IndexedDB/interleaved-cursors-large.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/interface.any.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/instance/constructor-bad-imports.any.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/global/constructor.any.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/global/constructor.any.worker.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/global/toString.any.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/interface.any.worker.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/constructor/instantiate-bad-imports.any.html",
                          "imported/w3c/web-platform-tests/wasm/jsapi/constructor/instantiate-bad-imports.any.worker.html",
                          "imported/blink/storage/indexeddb/blob-valid-before-commit.html"])
        return rc

    def test_parse_results_json_flakes(self):
        # Flakes only: step succeeds and records no failures.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 0
            + ExpectShell.log('json', stdout=self.results_json_flakes),
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.property_exceed_failure_limit), False)
        self.assertEqual(self.getProperty(self.property_failures), [])
        return rc

    def test_parse_results_json_flakes_and_regressions(self):
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.results_json_mix_flakes_and_regression),
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.property_exceed_failure_limit), False)
        self.assertEqual(self.getProperty(self.property_failures), ['fast/scrolling/ios/reconcile-layer-position-recursive.html'])
        return rc

    def test_parse_results_json_with_newlines(self):
        # Parsing must cope with a JSON payload split across log lines.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 2
            + ExpectShell.log('json', stdout=self.results_json_with_newlines),
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty(self.property_exceed_failure_limit), False)
        self.assertEqual(self.getProperty(self.property_failures), ['fast/scrolling/ios/reconcile-layer-position-recursive.html'])
        return rc

    def test_unexpected_error(self):
        # Exit code 254 signals an infrastructure error -> RETRY, not FAILURE.
        self.configureStep()
        self.setProperty('fullPlatform', 'mac-highsierra')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--debug', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + ExpectShell.log('stdio', stdout='Unexpected error.')
            + 254,
        )
        self.expectOutcome(result=RETRY, state_string='layout-tests (retry)')
        return self.runStep()

    def test_failure(self):
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + ExpectShell.log('stdio', stdout='9 failures found.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        return self.runStep()

    def test_success_wpt_import_bot(self):
        # WPT import-bot patches run only the imported WPT suite, without the
        # --exit-after-n-failures/--skip-failing-tests flags.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.setProperty('patch_author', 'webkit-wpt-import-bot@igalia.com')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', 'imported/w3c/web-platform-tests'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        return self.runStep()
class TestReRunWebKitTests(TestRunWebKitTests):
    """Unit tests for the ReRunWebKitTests build step.

    Reuses TestRunWebKitTests' cases via configureStep() override, pointing
    the shared assertions at the second-run property names.
    """

    def configureStep(self):
        self.setupStep(ReRunWebKitTests())
        self.property_exceed_failure_limit = 'second_results_exceed_failure_limit'
        self.property_failures = 'second_run_failures'
        # Stub out email sending so tests don't touch the mail path.
        # NOTE(review): this patches the class attribute and is never
        # restored, so it leaks into later tests using this class — confirm
        # whether that is intentional (the sibling layout-test analyzer
        # tests use the same pattern).
        ReRunWebKitTests.send_email_for_flaky_failure = lambda self, test: None

    def test_flaky_failures_in_first_run(self):
        # First-run failures that pass on re-run are summarized as flaky.
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty('build_summary'), 'Found flaky tests: test1, test2')
        return rc
class TestRunWebKitTestsWithoutPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunWebKitTestsWithoutPatch (clean-tree) build step."""

    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'layout-test-results/full_results.json'
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def configureStep(self):
        self.setupStep(RunWebKitTestsWithoutPatch())
        # Clean-tree runs write to their own property names.
        self.property_exceed_failure_limit = 'clean_tree_results_exceed_failure_limit'
        self.property_failures = 'clean_tree_run_failures'
        self.setProperty('buildername', 'iOS-13-Simulator-WK2-Tests-EWS')
        self.setProperty('buildnumber', '123')
        self.setProperty('workername', 'ews126')

    def test_success(self):
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python',
                                 'Tools/Scripts/run-webkit-tests',
                                 '--no-build',
                                 '--no-show-results',
                                 '--no-new-test-results',
                                 '--clobber-old-results',
                                 '--release',
                                 '--results-directory', 'layout-test-results',
                                 '--debug-rwt-logging',
                                 '--exit-after-n-failures', '30',
                                 '--skip-failing-tests'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='layout-tests')
        return self.runStep()

    def test_failure(self):
        self.configureStep()
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python',
                                 'Tools/Scripts/run-webkit-tests',
                                 '--no-build',
                                 '--no-show-results',
                                 '--no-new-test-results',
                                 '--clobber-old-results',
                                 '--release',
                                 '--results-directory', 'layout-test-results',
                                 '--debug-rwt-logging',
                                 '--exit-after-n-failures', '30',
                                 '--skip-failing-tests'],
                        )
            + ExpectShell.log('stdio', stdout='9 failures found.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        return self.runStep()
class TestRunWebKit1Tests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunWebKit1Tests build step.

    The WK1 variant is distinguished by the extra '--dump-render-tree' flag
    in the expected run-webkit-tests command line.
    """

    def setUp(self):
        self.longMessage = True
        self.jsonFileName = 'layout-test-results/full_results.json'
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(RunWebKit1Tests())
        self.setProperty('fullPlatform', 'ios-11')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--debug', '--dump-render-tree', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        return self.runStep()

    def test_failure(self):
        self.setupStep(RunWebKit1Tests())
        self.setProperty('fullPlatform', 'ios-11')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logfiles={'json': self.jsonFileName},
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-webkit-tests', '--no-build', '--no-show-results', '--no-new-test-results', '--clobber-old-results', '--release', '--dump-render-tree', '--results-directory', 'layout-test-results', '--debug-rwt-logging', '--exit-after-n-failures', '30', '--skip-failing-tests'],
                        )
            + ExpectShell.log('stdio', stdout='9 failures found.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='layout-tests (failure)')
        return self.runStep()

    def test_skip_for_revert_patches_on_commit_queue(self):
        self.setupStep(RunWebKit1Tests())
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('fullPlatform', 'mac')
        self.setProperty('configuration', 'debug')
        self.setProperty('revert', True)
        self.expectOutcome(result=SKIPPED, state_string='layout-tests (skipped)')
        return self.runStep()
class TestAnalyzeLayoutTestsResults(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the AnalyzeLayoutTestsResults build step.

    The step cross-references first-run, second-run and clean-tree layout
    test failures to classify them as new (FAILURE), pre-existing or flaky
    (SUCCESS), or to RETRY when failure limits prevent a determination.
    """

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def configureStep(self):
        # Stub out the email notification paths so tests stay offline.
        # NOTE(review): these patch class attributes and are never restored,
        # leaking into any later test of this step class — confirm intended.
        AnalyzeLayoutTestsResults.send_email_for_flaky_failure = lambda self, test: None
        AnalyzeLayoutTestsResults.send_email_for_pre_existing_failure = lambda self, test: None
        self.setupStep(AnalyzeLayoutTestsResults())
        self.setProperty('first_results_exceed_failure_limit', False)
        self.setProperty('second_results_exceed_failure_limit', False)
        self.setProperty('clean_tree_results_exceed_failure_limit', False)
        self.setProperty('clean_tree_run_failures', [])

    def test_failure_introduced_by_patch(self):
        self.configureStep()
        self.setProperty('first_run_failures', ["jquery/offset.html"])
        self.setProperty('second_run_failures', ["jquery/offset.html"])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new test failure: jquery/offset.html (failure)')
        return self.runStep()

    def test_failure_on_clean_tree(self):
        self.configureStep()
        self.setProperty('first_run_failures', ["jquery/offset.html"])
        self.setProperty('second_run_failures', ["jquery/offset.html"])
        self.setProperty('clean_tree_run_failures', ["jquery/offset.html"])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty('build_summary'), 'Found 1 pre-existing test failure: jquery/offset.html')
        return rc

    def test_flaky_and_consistent_failures_without_clean_tree_failures(self):
        # On EWS queues (non-commit-queue) no bugzilla comment is produced.
        self.configureStep()
        self.setProperty('buildername', 'iOS-13-Simulator-WK2-Tests-EWS')
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.setProperty('second_run_failures', ['test1'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new test failure: test1 (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_consistent_failure_without_clean_tree_failures_commit_queue(self):
        # On Commit-Queue the failure is also surfaced via bugzilla comment
        # and the build-finish summary.
        self.configureStep()
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('first_run_failures', ['test1'])
        self.setProperty('second_run_failures', ['test1'])
        self.expectOutcome(result=FAILURE, state_string='Found 1 new test failure: test1 (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), 'Found 1 new test failure: test1')
        self.assertEqual(self.getProperty('build_finish_summary'), 'Found 1 new test failure: test1')
        return rc

    def test_flaky_and_inconsistent_failures_without_clean_tree_failures(self):
        # No overlap between the two runs: everything is treated as flaky.
        self.configureStep()
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.setProperty('second_run_failures', ['test3'])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        return self.runStep()

    def test_flaky_failures_in_first_run(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.setProperty('second_run_failures', [])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        # The leading space pins the step's current summary concatenation
        # (empty pre-existing part + separator) — intentional as written.
        self.assertEqual(self.getProperty('build_summary'), ' Found flaky tests: test1, test2')
        return rc

    def test_flaky_and_inconsistent_failures_with_clean_tree_failures(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.setProperty('second_run_failures', ['test3'])
        self.setProperty('clean_tree_run_failures', ['test1', 'test2', 'test3'])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty('build_summary'), 'Found 3 pre-existing test failures: test1, test2, test3 Found flaky tests: test1, test2, test3')
        return rc

    def test_flaky_and_consistent_failures_with_clean_tree_failures(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1', 'test2'])
        self.setProperty('second_run_failures', ['test1'])
        self.setProperty('clean_tree_run_failures', ['test1', 'test2'])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        return self.runStep()

    def test_mildly_flaky_patch_with_some_tree_redness_and_flakiness(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1', 'test2', 'test3'])
        self.setProperty('second_run_failures', ['test1', 'test2'])
        self.setProperty('clean_tree_run_failures', ['test1', 'test2', 'test4'])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty('build_summary'), 'Found 3 pre-existing test failures: test1, test2, test4 Found flaky test: test3')
        return rc

    def test_first_run_exceed_failure_limit(self):
        # A truncated first run can't be compared reliably -> RETRY.
        self.configureStep()
        self.setProperty('first_results_exceed_failure_limit', True)
        self.setProperty('first_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('second_run_failures', [])
        self.expectOutcome(result=RETRY, state_string='Unable to confirm if test failures are introduced by patch, retrying build (retry)')
        return self.runStep()

    def test_second_run_exceed_failure_limit(self):
        self.configureStep()
        self.setProperty('first_run_failures', [])
        self.setProperty('second_results_exceed_failure_limit', True)
        self.setProperty('second_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.expectOutcome(result=RETRY, state_string='Unable to confirm if test failures are introduced by patch, retrying build (retry)')
        return self.runStep()

    def test_clean_tree_exceed_failure_limit(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1'])
        self.setProperty('second_run_failures', ['test1'])
        self.setProperty('clean_tree_results_exceed_failure_limit', True)
        self.setProperty('clean_tree_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.expectOutcome(result=RETRY, state_string='Unable to confirm if test failures are introduced by patch, retrying build (retry)')
        return self.runStep()

    def test_clean_tree_has_lot_of_failures(self):
        # Clean tree almost as red as the patched runs -> inconclusive, RETRY.
        self.configureStep()
        self.setProperty('first_results_exceed_failure_limit', True)
        self.setProperty('first_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('second_results_exceed_failure_limit', True)
        self.setProperty('second_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('clean_tree_run_failures', ['test{}'.format(i) for i in range(0, 27)])
        self.expectOutcome(result=RETRY, state_string='Unable to confirm if test failures are introduced by patch, retrying build (retry)')
        return self.runStep()

    def test_clean_tree_has_some_failures(self):
        # Clean tree notably healthier than the patched runs -> blame patch.
        self.configureStep()
        self.setProperty('first_results_exceed_failure_limit', True)
        self.setProperty('first_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('second_results_exceed_failure_limit', True)
        self.setProperty('second_run_failures', ['test{}'.format(i) for i in range(0, 30)])
        self.setProperty('clean_tree_run_failures', ['test{}'.format(i) for i in range(0, 10)])
        self.expectOutcome(result=FAILURE, state_string='Found 30 new test failures: test0, test1, test10, test11, test12, test13, test14, test15, test16, test17 ... (failure)')
        return self.runStep()

    def test_clean_tree_has_lot_of_failures_and_no_new_failure(self):
        self.configureStep()
        self.setProperty('first_run_failures', ['test1'])
        self.setProperty('second_run_failures', ['test1'])
        self.setProperty('clean_tree_run_failures', ['test{}'.format(i) for i in range(0, 20)])
        self.expectOutcome(result=SUCCESS, state_string='Passed layout tests')
        rc = self.runStep()
        self.assertEqual(self.getProperty('build_summary'), 'Found 20 pre-existing test failures: test0, test1, test10, test11, test12, test13, test14, test15, test16, test17 ...')
        return rc

    def test_patch_introduces_lot_of_failures(self):
        self.configureStep()
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('first_results_exceed_failure_limit', True)
        self.setProperty('first_run_failures', ['test{}'.format(i) for i in range(0, 300)])
        self.setProperty('second_results_exceed_failure_limit', True)
        self.setProperty('second_run_failures', ['test{}'.format(i) for i in range(0, 300)])
        # Listed names are lexicographically sorted, truncated to 10.
        failure_message = 'Found 300 new test failures: test0, test1, test10, test100, test101, test102, test103, test104, test105, test106 ...'
        self.expectOutcome(result=FAILURE, state_string=failure_message + ' (failure)')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), failure_message)
        self.assertEqual(self.getProperty('build_finish_summary'), failure_message)
        return rc
class TestCheckOutSpecificRevision(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CheckOutSpecificRevision build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # A zero exit status from `git checkout` marks the step SUCCESS.
        self.setupStep(CheckOutSpecificRevision())
        self.setProperty('ews_revision', '1a3425cb92dbcbca12a10aa9514f1b77c76dc26')
        self.expectHidden(False)
        checkout = ExpectShell(workdir='wkdir',
                               timeout=1200,
                               logEnviron=False,
                               command=['git', 'checkout', '1a3425cb92dbcbca12a10aa9514f1b77c76dc26'])
        self.expectRemoteCommands(checkout + 0)
        self.expectOutcome(result=SUCCESS, state_string='Checked out required revision')
        return self.runStep()

    def test_failure(self):
        # A non-zero exit status from `git checkout` marks the step FAILURE.
        self.setupStep(CheckOutSpecificRevision())
        self.setProperty('ews_revision', '1a3425cb92dbcbca12a10aa9514f1b77c76dc26')
        self.expectHidden(False)
        checkout = ExpectShell(workdir='wkdir',
                               timeout=1200,
                               logEnviron=False,
                               command=['git', 'checkout', '1a3425cb92dbcbca12a10aa9514f1b77c76dc26'])
        self.expectRemoteCommands(
            checkout
            + ExpectShell.log('stdio', stdout='Unexpected failure')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Checked out required revision (failure)')
        return self.runStep()

    def test_skip(self):
        # Without an ews_revision property the step is hidden and skipped.
        self.setupStep(CheckOutSpecificRevision())
        self.expectHidden(True)
        self.expectOutcome(result=SKIPPED, state_string='Checked out required revision (skipped)')
        return self.runStep()
class TestCleanWorkingDirectory(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CleanWorkingDirectory build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # clean-webkit exiting 0 marks the step SUCCESS.
        self.setupStep(CleanWorkingDirectory())
        clean = ExpectShell(workdir='wkdir',
                            logEnviron=False,
                            command=['python', 'Tools/Scripts/clean-webkit'])
        self.expectRemoteCommands(clean + 0)
        self.expectOutcome(result=SUCCESS, state_string='Cleaned working directory')
        return self.runStep()

    def test_failure(self):
        # clean-webkit exiting non-zero marks the step FAILURE.
        self.setupStep(CleanWorkingDirectory())
        clean = ExpectShell(workdir='wkdir',
                            logEnviron=False,
                            command=['python', 'Tools/Scripts/clean-webkit'])
        self.expectRemoteCommands(
            clean
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Cleaned working directory (failure)')
        return self.runStep()
class TestUpdateWorkingDirectory(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the UpdateWorkingDirectory build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # update-webkit exiting 0 marks the step SUCCESS.
        self.setupStep(UpdateWorkingDirectory())
        update = ExpectShell(workdir='wkdir',
                             logEnviron=False,
                             command=['perl', 'Tools/Scripts/update-webkit'])
        self.expectRemoteCommands(update + 0)
        self.expectOutcome(result=SUCCESS, state_string='Updated working directory')
        return self.runStep()

    def test_failure(self):
        # update-webkit exiting non-zero marks the step FAILURE.
        self.setupStep(UpdateWorkingDirectory())
        update = ExpectShell(workdir='wkdir',
                             logEnviron=False,
                             command=['perl', 'Tools/Scripts/update-webkit'])
        self.expectRemoteCommands(
            update
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Updated working directory (failure)')
        return self.runStep()
class TestApplyPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the ApplyPatch build step.

    setUp() monkey-patches ApplyPatch.start so the step runs as a plain
    ShellCommand during the test; tearDown() restores the original so the
    patch cannot leak into other test cases in this module.
    """

    def setUp(self):
        self.longMessage = True

        def mock_start(cls, *args, **kwargs):
            # Bypass ApplyPatch's own start() and drive the underlying
            # ShellCommand directly.
            from buildbot.steps import shell
            return shell.ShellCommand.start(cls)

        # Save the real start() before patching so tearDown can undo the
        # class-level mutation (previously it leaked for the rest of the run).
        self._original_apply_patch_start = ApplyPatch.start
        ApplyPatch.start = mock_start
        return self.setUpBuildStep()

    def tearDown(self):
        # Restore the un-patched ApplyPatch.start for subsequent test cases.
        ApplyPatch.start = self._original_apply_patch_start
        return self.tearDownBuildStep()

    def test_success(self):
        """svn-apply exiting 0 marks the step SUCCESS; no Bugzilla/build summaries are set."""
        self.setupStep(ApplyPatch())
        self.assertEqual(ApplyPatch.flunkOnFailure, True)
        self.assertEqual(ApplyPatch.haltOnFailure, False)
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=600,
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/svn-apply', '--force', '.buildbot-diff'],
                        ) +
            0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Applied patch')
        return self.runStep()

    def test_failure(self):
        """svn-apply failing on a non-Commit-Queue builder fails the step without commenting on the bug."""
        self.setupStep(ApplyPatch())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=600,
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/svn-apply', '--force', '.buildbot-diff'],
                        ) +
            ExpectShell.log('stdio', stdout='Unexpected failure.') +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='svn-apply failed to apply patch to trunk')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_failure_on_commit_queue(self):
        """svn-apply failing on the Commit-Queue also sets the Bugzilla comment and build summary."""
        self.setupStep(ApplyPatch())
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('patch_id', '1234')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=600,
                        logEnviron=False,
                        command=['perl', 'Tools/Scripts/svn-apply', '--force', '.buildbot-diff'],
                        ) +
            ExpectShell.log('stdio', stdout='Unexpected failure.') +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='svn-apply failed to apply patch to trunk')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), 'Tools/Scripts/svn-apply failed to apply attachment 1234 to trunk.\nPlease resolve the conflicts and upload a new patch.')
        self.assertEqual(self.getProperty('build_finish_summary'), 'Tools/Scripts/svn-apply failed to apply patch 1234 to trunk')
        return rc
class TestUnApplyPatchIfRequired(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the UnApplyPatchIfRequired build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # With patchFailedToBuild set, the step runs clean-webkit and succeeds.
        self.setupStep(UnApplyPatchIfRequired())
        self.setProperty('patchFailedToBuild', True)
        self.expectHidden(False)
        clean = ExpectShell(workdir='wkdir',
                            logEnviron=False,
                            command=['python', 'Tools/Scripts/clean-webkit'])
        self.expectRemoteCommands(clean + 0)
        self.expectOutcome(result=SUCCESS, state_string='Unapplied patch')
        return self.runStep()

    def test_failure(self):
        # With patchFailedTests set, a failing clean-webkit fails the step.
        self.setupStep(UnApplyPatchIfRequired())
        self.setProperty('patchFailedTests', True)
        self.expectHidden(False)
        clean = ExpectShell(workdir='wkdir',
                            logEnviron=False,
                            command=['python', 'Tools/Scripts/clean-webkit'])
        self.expectRemoteCommands(
            clean
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Unapplied patch (failure)')
        return self.runStep()

    def test_skip(self):
        # With neither failure property set, the step is hidden and skipped.
        self.setupStep(UnApplyPatchIfRequired())
        self.expectHidden(True)
        self.expectOutcome(result=SKIPPED, state_string='Unapplied patch (skipped)')
        return self.runStep()
class TestCheckPatchRelevance(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the CheckPatchRelevance build step.

    Each test replaces the class attribute CheckPatchRelevance._get_patch
    with a lambda returning a canned patch body. setUp() snapshots the real
    implementation and tearDown() restores it, so the monkey-patch no longer
    leaks into other test cases in this module.
    """

    def setUp(self):
        self.longMessage = True
        # Snapshot the real _get_patch; the tests below overwrite it on the
        # class and previously never restored it.
        self._original_get_patch = CheckPatchRelevance._get_patch
        return self.setUpBuildStep()

    def tearDown(self):
        # Undo the per-test monkey-patching of CheckPatchRelevance._get_patch.
        CheckPatchRelevance._get_patch = self._original_get_patch
        return self.tearDownBuildStep()

    def test_relevant_jsc_patch(self):
        """A patch touching JSTests/ is relevant to the JSC queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: JSTests/'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'JSC-Tests-EWS')
        self.assertEqual(CheckPatchRelevance.haltOnFailure, True)
        self.assertEqual(CheckPatchRelevance.flunkOnFailure, True)
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_relevant_wk1_patch(self):
        """A patch touching Source/WebKitLegacy is relevant to a WK1 test queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: Source/WebKitLegacy'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'macOS-Mojave-Release-WK1-Tests-EWS')
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_relevant_bigsur_builder_patch(self):
        """A patch touching Source/ is relevant to the Big Sur build queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: Source/xyz'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'macOS-BigSur-Release-Build-EWS')
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_relevant_windows_wk1_patch(self):
        """A patch touching Source/WebKitLegacy is relevant to the Windows queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: Source/WebKitLegacy'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'Windows-EWS')
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_relevant_webkitpy_patch(self):
        """A patch touching Tools/Scripts/webkitpy is relevant to the WebKitPy queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: Tools/Scripts/webkitpy'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'WebKitPy-Tests-EWS')
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_relevant_libraries_patch(self):
        """A patch touching Tools/Scripts/libraries is relevant to the WebKitPy queue."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch; file: Tools/Scripts/libraries'
        self.setupStep(CheckPatchRelevance())
        self.setProperty('buildername', 'WebKitPy-Tests-EWS')
        self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
        return self.runStep()

    def test_queues_without_relevance_info(self):
        """Queues with no relevance rules treat every patch as relevant."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch'
        queues = ['Commit-Queue', 'Style-EWS', 'Apply-WatchList-EWS', 'GTK-Build-EWS', 'GTK-WK2-Tests-EWS',
                  'iOS-13-Build-EWS', 'iOS-13-Simulator-Build-EWS', 'iOS-13-Simulator-WK2-Tests-EWS',
                  'macOS-Mojave-Release-Build-EWS', 'macOS-Mojave-Release-WK2-Tests-EWS', 'macOS-Mojave-Debug-Build-EWS',
                  'WinCairo-EWS', 'WPE-EWS', 'WebKitPerl-Tests-EWS']
        for queue in queues:
            self.setupStep(CheckPatchRelevance())
            self.setProperty('buildername', queue)
            self.expectOutcome(result=SUCCESS, state_string='Patch contains relevant changes')
            rc = self.runStep()
        return rc

    def test_non_relevant_patch_on_various_queues(self):
        """Queues with relevance rules reject a patch touching none of their paths."""
        CheckPatchRelevance._get_patch = lambda x: 'Sample patch'
        queues = ['Bindings-Tests-EWS', 'JSC-Tests-EWS', 'macOS-BigSur-Release-Build-EWS',
                  'macOS-Mojave-Debug-WK1-Tests-EWS', 'Services-EWS', 'WebKitPy-Tests-EWS']
        for queue in queues:
            self.setupStep(CheckPatchRelevance())
            self.setProperty('buildername', queue)
            self.expectOutcome(result=FAILURE, state_string='Patch doesn\'t have relevant changes')
            rc = self.runStep()
        return rc
class TestArchiveBuiltProduct(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the ArchiveBuiltProduct build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # The archive script gets platform/configuration from the build properties.
        self.setupStep(ArchiveBuiltProduct())
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        archive = ExpectShell(workdir='wkdir',
                              logEnviron=False,
                              command=['python', 'Tools/BuildSlaveSupport/built-product-archive', '--platform=ios-simulator', '--release', 'archive'])
        self.expectRemoteCommands(archive + 0)
        self.expectOutcome(result=SUCCESS, state_string='Archived built product')
        return self.runStep()

    def test_failure(self):
        # A non-zero exit from the archive script fails the step.
        self.setupStep(ArchiveBuiltProduct())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        archive = ExpectShell(workdir='wkdir',
                              logEnviron=False,
                              command=['python', 'Tools/BuildSlaveSupport/built-product-archive', '--platform=mac-sierra', '--debug', 'archive'])
        self.expectRemoteCommands(
            archive
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Archived built product (failure)')
        return self.runStep()
class TestUploadBuiltProduct(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the UploadBuiltProduct build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def _setup_properties(self):
        # Common properties: destination path is derived from these.
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')

    def test_success(self):
        self.setupStep(UploadBuiltProduct())
        self._setup_properties()
        self.expectHidden(False)
        upload = Expect('uploadFile', dict(
            workersrc='WebKitBuild/release.zip', workdir='wkdir',
            blocksize=1024 * 256, maxsize=None, keepstamp=False,
            writer=ExpectRemoteRef(remotetransfer.FileWriter),
        ))
        self.expectRemoteCommands(
            upload
            + Expect.behavior(uploadFileWithContentsOfString('Dummy zip file content.'))
            + 0)
        self.expectUploadedFile('public_html/archives/mac-sierra-x86_64-release/1234.zip')
        self.expectOutcome(result=SUCCESS, state_string='Uploaded built product')
        return self.runStep()

    def test_failure(self):
        self.setupStep(UploadBuiltProduct())
        self._setup_properties()
        self.expectHidden(False)
        upload = Expect('uploadFile', dict(
            workersrc='WebKitBuild/release.zip', workdir='wkdir',
            blocksize=1024 * 256, maxsize=None, keepstamp=False,
            writer=ExpectRemoteRef(remotetransfer.FileWriter),
        ))
        self.expectRemoteCommands(
            upload
            + Expect.behavior(uploadFileWithContentsOfString('Dummy zip file content.'))
            + 1)
        self.expectUploadedFile('public_html/archives/mac-sierra-x86_64-release/1234.zip')
        self.expectOutcome(result=FAILURE, state_string='Failed to upload built product')
        return self.runStep()
class TestDownloadBuiltProduct(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the DownloadBuiltProduct build step (fetches from S3)."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(DownloadBuiltProduct())
        self.setProperty('fullPlatform', 'ios-simulator-12')
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')
        # The S3 URL is built from platform, architecture, configuration and patch id.
        download = ExpectShell(workdir='wkdir',
                               logEnviron=False,
                               command=['python', 'Tools/BuildSlaveSupport/download-built-product', '--release', 'https://s3-us-west-2.amazonaws.com/ews-archives.webkit.org/ios-simulator-12-x86_64-release/1234.zip'])
        self.expectRemoteCommands(download + 0)
        self.expectOutcome(result=SUCCESS, state_string='Downloaded built product')
        return self.runStep()

    def test_failure(self):
        self.setupStep(DownloadBuiltProduct())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '123456')
        download = ExpectShell(workdir='wkdir',
                               logEnviron=False,
                               command=['python', 'Tools/BuildSlaveSupport/download-built-product', '--debug', 'https://s3-us-west-2.amazonaws.com/ews-archives.webkit.org/mac-sierra-x86_64-debug/123456.zip'])
        self.expectRemoteCommands(
            download
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Failed to download built product from S3')
        return self.runStep()
class TestDownloadBuiltProductFromMaster(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the DownloadBuiltProductFromMaster build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(DownloadBuiltProductFromMaster())
        self.setProperty('fullPlatform', 'ios-simulator-12')
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')
        # Unlike DownloadBuiltProduct, the archive comes from the build master.
        download = ExpectShell(workdir='wkdir',
                               logEnviron=False,
                               command=['python', 'Tools/BuildSlaveSupport/download-built-product', '--release', 'https://ews-build.webkit.org/archives/ios-simulator-12-x86_64-release/1234.zip'])
        self.expectRemoteCommands(download + 0)
        self.expectOutcome(result=SUCCESS, state_string='Downloaded built product')
        return self.runStep()

    def test_failure(self):
        self.setupStep(DownloadBuiltProductFromMaster())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '123456')
        download = ExpectShell(workdir='wkdir',
                               logEnviron=False,
                               command=['python', 'Tools/BuildSlaveSupport/download-built-product', '--debug', 'https://ews-build.webkit.org/archives/mac-sierra-x86_64-debug/123456.zip'])
        self.expectRemoteCommands(
            download
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Failed to download built product from build master')
        return self.runStep()
class TestExtractBuiltProduct(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the ExtractBuiltProduct build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(ExtractBuiltProduct())
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        extract = ExpectShell(workdir='wkdir',
                              logEnviron=False,
                              command=['python', 'Tools/BuildSlaveSupport/built-product-archive', '--platform=ios-simulator', '--release', 'extract'])
        self.expectRemoteCommands(extract + 0)
        self.expectOutcome(result=SUCCESS, state_string='Extracted built product')
        return self.runStep()

    def test_failure(self):
        self.setupStep(ExtractBuiltProduct())
        self.setProperty('fullPlatform', 'mac-sierra')
        self.setProperty('configuration', 'debug')
        extract = ExpectShell(workdir='wkdir',
                              logEnviron=False,
                              command=['python', 'Tools/BuildSlaveSupport/built-product-archive', '--platform=mac-sierra', '--debug', 'extract'])
        self.expectRemoteCommands(
            extract
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2)
        self.expectOutcome(result=FAILURE, state_string='Extracted built product (failure)')
        return self.runStep()
class TestTransferToS3(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the TransferToS3 master-side build step."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(TransferToS3())
        self.setProperty('fullPlatform', 'mac-highsierra')
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')
        # The transfer script runs on the master, not the worker.
        command = ['python',
                   '../Shared/transfer-archive-to-s3',
                   '--patch_id', '1234',
                   '--identifier', 'mac-highsierra-x86_64-release',
                   '--archive', 'public_html/archives/mac-highsierra-x86_64-release/1234.zip',
                   ]
        self.expectLocalCommands(ExpectMasterShellCommand(command=command) + 0)
        self.expectOutcome(result=SUCCESS, state_string='Transferred archive to S3')
        return self.runStep()

    def test_failure(self):
        self.setupStep(TransferToS3())
        self.setProperty('fullPlatform', 'ios-simulator-12')
        self.setProperty('configuration', 'debug')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')
        command = ['python',
                   '../Shared/transfer-archive-to-s3',
                   '--patch_id', '1234',
                   '--identifier', 'ios-simulator-12-x86_64-debug',
                   '--archive', 'public_html/archives/ios-simulator-12-x86_64-debug/1234.zip',
                   ]
        self.expectLocalCommands(ExpectMasterShellCommand(command=command) + 2)
        self.expectOutcome(result=FAILURE, state_string='Failed to transfer archive to S3')
        return self.runStep()
class TestRunAPITests(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunAPITests build step.

    Each test feeds canned run-api-tests / run-gtk-tests stdout to the step
    and checks the step outcome and state string derived from it.
    """
    def setUp(self):
        self.longMessage = True
        # Name of the JSON results file passed to the script via --json-output.
        self.jsonFileName = 'api_test_results.json'
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success_mac(self):
        # All tests pass on mac; exit status 0 -> SUCCESS.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--release', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
Ran 1888 tests of 1888 with 1888 successful
------------------------------
All tests successfully passed!
''')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='run-api-tests')
        return self.runStep()

    def test_success_ios_simulator(self):
        # On the ios platform the command gains an --ios-simulator flag.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'ios-simulator-11')
        self.setProperty('platform', 'ios')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--debug', '--verbose', '--json-output={0}'.format(self.jsonFileName), '--ios-simulator'],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
Ran 1888 tests of 1888 with 1888 successful
------------------------------
All tests successfully passed!
''')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='run-api-tests')
        return self.runStep()

    def test_success_gtk(self):
        # GTK uses a different driver script (run-gtk-tests) and output format.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'gtk')
        self.setProperty('platform', 'gtk')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-gtk-tests', '--release', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
**PASS** TransformationMatrix.Blend
**PASS** TransformationMatrix.Blend2
**PASS** TransformationMatrix.Blend4
**PASS** TransformationMatrix.Equality
**PASS** TransformationMatrix.Casting
**PASS** TransformationMatrix.MakeMapBetweenRects
**PASS** URLParserTextEncodingTest.QueryEncoding
**PASS** GStreamerTest.mappedBufferBasics
**PASS** GStreamerTest.mappedBufferReadSanity
**PASS** GStreamerTest.mappedBufferWriteSanity
**PASS** GStreamerTest.mappedBufferCachesSharedBuffers
**PASS** GStreamerTest.mappedBufferDoesNotAddExtraRefs
Ran 1316 tests of 1318 with 1316 successful
''')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='run-api-tests')
        return self.runStep()

    def test_one_failure(self):
        # A single crashed test -> FAILURE with a count of 1 in the state string.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--debug', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1887 successful
------------------------------
Test suite failed
Crashed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
        Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
        Value of: makeString('hello ', static_cast<unsigned short>(42) , ' world')
        Actual: hello 42 world
        Expected: 'hello * world'
        Which is: 74B00C9C
Testing completed, Exit status: 3
''')
            + 1,
        )
        self.expectOutcome(result=FAILURE, state_string='1 api test failed or timed out')
        return self.runStep()

    def test_multiple_failures_and_timeouts(self):
        # Two failed plus two timed-out tests -> FAILURE counting all four.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--debug', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1884 successful
------------------------------
Test suite failed
Failed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
        Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
        Value of: makeString('hello ', static_cast<unsigned short>(42) , ' world')
        Actual: hello 42 world
        Expected: 'hello * world'
        Which is: 74B00C9C
    TestWTF.WTF_Expected.Unexpected
        **FAIL** WTF_Expected.Unexpected
        Tools\TestWebKitAPI\Tests\WTF\Expected.cpp:96
        Value of: s1
        Actual: oops
        Expected: s0
        Which is: oops
Timeout
    TestWTF.WTF_PoisonedUniquePtrForTriviallyDestructibleArrays.Assignment
    TestWTF.WTF_Lock.ContendedShortSection
Testing completed, Exit status: 3
''')
            + 4,
        )
        self.expectOutcome(result=FAILURE, state_string='4 api tests failed or timed out')
        return self.runStep()

    def test_unexpected_failure(self):
        # Output that cannot be parsed falls back to the generic failure string.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--debug', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='Unexpected failure. Failed to run api tests.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='run-api-tests (failure)')
        return self.runStep()

    def test_no_failures_or_timeouts_with_disabled(self):
        # Fewer tests ran than exist (some disabled), but all that ran passed -> SUCCESS.
        self.setupStep(RunAPITests())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/Scripts/run-api-tests', '--no-build', '--debug', '--verbose', '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1881 tests of 1888 with 1881 successful
------------------------------
All tests successfully passed!
''')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='run-api-tests')
        return self.runStep()
class TestRunAPITestsWithoutPatch(BuildStepMixinAdditions, unittest.TestCase):
    """Unit tests for the RunAPITestsWithoutPatch build step.

    Mirrors TestRunAPITests, but for the clean-tree (no patch) run; the
    builder/worker properties are set because this step uses them.
    """
    def setUp(self):
        self.longMessage = True
        # Name of the JSON results file passed to the script via --json-output.
        self.jsonFileName = 'api_test_results.json'
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success_mac(self):
        # All tests pass on mac; exit status 0 -> SUCCESS.
        self.setupStep(RunAPITestsWithoutPatch())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'release')
        self.setProperty('buildername', 'API-Tests-macOS-EWS')
        self.setProperty('buildnumber', '11525')
        self.setProperty('workername', 'ews155')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python',
                                 'Tools/Scripts/run-api-tests',
                                 '--no-build',
                                 '--release',
                                 '--verbose',
                                 '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''...
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
Ran 1888 tests of 1888 with 1888 successful
------------------------------
All tests successfully passed!
''')
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='run-api-tests-without-patch')
        return self.runStep()

    def test_one_failure(self):
        # A single crashed test -> FAILURE with a count of 1 in the state string.
        self.setupStep(RunAPITestsWithoutPatch())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'ios-simulator')
        self.setProperty('configuration', 'debug')
        self.setProperty('buildername', 'API-Tests-iOS-EWS')
        self.setProperty('buildnumber', '123')
        self.setProperty('workername', 'ews156')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python',
                                 'Tools/Scripts/run-api-tests',
                                 '--no-build',
                                 '--debug',
                                 '--verbose',
                                 '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''
worker/0 TestWTF.WTF_Variant.OperatorAmpersand Passed
worker/0 TestWTF.WTF_Variant.Ref Passed
worker/0 TestWTF.WTF_Variant.RefPtr Passed
worker/0 TestWTF.WTF_Variant.RetainPtr Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingMakeVisitor Passed
worker/0 TestWTF.WTF_Variant.VisitorUsingSwitchOn Passed
worker/0 exiting
Ran 1888 tests of 1888 with 1887 successful
------------------------------
Test suite failed
Crashed
    TestWTF.WTF.StringConcatenate_Unsigned
        **FAIL** WTF.StringConcatenate_Unsigned
        Tools\\TestWebKitAPI\\Tests\\WTF\\StringConcatenate.cpp:84
        Value of: makeString('hello ', static_cast<unsigned short>(42) , ' world')
        Actual: hello 42 world
        Expected: 'hello * world'
        Which is: 74B00C9C
Testing completed, Exit status: 3
''')
            + 1,
        )
        self.expectOutcome(result=FAILURE, state_string='1 api test failed or timed out')
        return self.runStep()

    def test_multiple_failures_gtk(self):
        # GTK-format output with three unexpected failures -> FAILURE counting 3.
        self.setupStep(RunAPITestsWithoutPatch())
        self.setProperty('fullPlatform', 'gtk')
        self.setProperty('platform', 'gtk')
        self.setProperty('configuration', 'debug')
        self.setProperty('buildername', 'API-Tests-GTK-EWS')
        self.setProperty('buildnumber', '13529')
        self.setProperty('workername', 'igalia4-gtk-wk2-ews')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python',
                                 'Tools/Scripts/run-gtk-tests',
                                 '--debug',
                                 '--json-output={0}'.format(self.jsonFileName)],
                        logfiles={'json': self.jsonFileName},
                        )
            + ExpectShell.log('stdio', stdout='''
**PASS** GStreamerTest.mappedBufferBasics
**PASS** GStreamerTest.mappedBufferReadSanity
**PASS** GStreamerTest.mappedBufferWriteSanity
**PASS** GStreamerTest.mappedBufferCachesSharedBuffers
**PASS** GStreamerTest.mappedBufferDoesNotAddExtraRefs
Unexpected failures (3)
    /TestWTF
        WTF_DateMath.calculateLocalTimeOffset
    /WebKit2Gtk/TestPrinting
        /webkit/WebKitPrintOperation/close-after-print
    /WebKit2Gtk/TestWebsiteData
        /webkit/WebKitWebsiteData/databases
Unexpected passes (1)
    /WebKit2Gtk/TestUIClient
        /webkit/WebKitWebView/usermedia-enumeratedevices-permission-check
Ran 1296 tests of 1298 with 1293 successful
''')
            + 3,
        )
        self.expectOutcome(result=FAILURE, state_string='3 api tests failed or timed out')
        return self.runStep()
class TestArchiveTestResults(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for the ArchiveTestResults step, which zips up test results via
    Tools/BuildSlaveSupport/test-result-archive on the worker."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # rc 0 from the archive script -> SUCCESS.
        self.setupStep(ArchiveTestResults())
        self.setProperty('fullPlatform', 'ios-simulator')
        self.setProperty('platform', 'ios-simulator')
        self.setProperty('configuration', 'release')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/BuildSlaveSupport/test-result-archive', '--platform=ios-simulator', '--release', 'archive'],
                        )
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Archived test results')
        return self.runStep()

    def test_failure(self):
        # rc 2 -> FAILURE, with the '(failure)' suffix appended to the state string.
        self.setupStep(ArchiveTestResults())
        self.setProperty('fullPlatform', 'mac-mojave')
        self.setProperty('platform', 'mac')
        self.setProperty('configuration', 'debug')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        logEnviron=False,
                        command=['python', 'Tools/BuildSlaveSupport/test-result-archive', '--platform=mac', '--debug', 'archive'],
                        )
            + ExpectShell.log('stdio', stdout='Unexpected failure.')
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='Archived test results (failure)')
        return self.runStep()
class TestUploadTestResults(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for UploadTestResults: uploads layout-test-results.zip to the
    master as public_html/results/<builder>/r<patch>-<build>[-<identifier>].zip."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(UploadTestResults())
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'macOS-Sierra-Release-WK2-Tests-EWS')
        self.setProperty('buildnumber', '12')
        self.expectHidden(False)
        self.expectRemoteCommands(
            Expect('uploadFile', dict(
                workersrc='layout-test-results.zip', workdir='wkdir',
                blocksize=1024 * 256, maxsize=None, keepstamp=False,
                writer=ExpectRemoteRef(remotetransfer.FileWriter),
            ))
            + Expect.behavior(uploadFileWithContentsOfString('Dummy zip file content.'))
            + 0,
        )
        # Destination name is derived from patch_id and buildnumber.
        self.expectUploadedFile('public_html/results/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12.zip')
        self.expectOutcome(result=SUCCESS, state_string='Uploaded test results')
        return self.runStep()

    def test_success_with_identifier(self):
        # An identifier ('clean-tree') is appended to the destination file name.
        self.setupStep(UploadTestResults(identifier='clean-tree'))
        self.setProperty('configuration', 'release')
        self.setProperty('architecture', 'x86_64')
        self.setProperty('patch_id', '271211')
        self.setProperty('buildername', 'iOS-12-Simulator-WK2-Tests-EWS')
        self.setProperty('buildnumber', '120')
        self.expectHidden(False)
        self.expectRemoteCommands(
            Expect('uploadFile', dict(
                workersrc='layout-test-results.zip', workdir='wkdir',
                blocksize=1024 * 256, maxsize=None, keepstamp=False,
                writer=ExpectRemoteRef(remotetransfer.FileWriter),
            ))
            + Expect.behavior(uploadFileWithContentsOfString('Dummy zip file content.'))
            + 0,
        )
        self.expectUploadedFile('public_html/results/iOS-12-Simulator-WK2-Tests-EWS/r271211-120-clean-tree.zip')
        self.expectOutcome(result=SUCCESS, state_string='Uploaded test results')
        return self.runStep()
class TestExtractTestResults(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for ExtractTestResults: unzips an uploaded results archive on the
    master and adds a 'view layout test results' URL to the build."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(ExtractTestResults())
        self.setProperty('configuration', 'release')
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'macOS-Sierra-Release-WK2-Tests-EWS')
        self.setProperty('buildnumber', '12')
        self.expectLocalCommands(
            ExpectMasterShellCommand(command=['unzip',
                                              '-q',
                                              'public_html/results/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12.zip',
                                              '-d',
                                              'public_html/results/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12',
                                              ])
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Extracted test results')
        # NOTE(review): the expected URL says r2468-12 although patch_id is 1234
        # (and the unzip paths use r1234-12) — confirm against the step's URL
        # construction whether this expectation is intentional or stale.
        self.expectAddedURLs([call('view layout test results', 'https://ews-build.s3-us-west-2.amazonaws.com/macOS-Sierra-Release-WK2-Tests-EWS/r2468-12/results.html')])
        return self.runStep()

    def test_success_with_identifier(self):
        self.setupStep(ExtractTestResults(identifier='rerun'))
        self.setProperty('configuration', 'release')
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'iOS-12-Simulator-WK2-Tests-EWS')
        self.setProperty('buildnumber', '12')
        self.expectLocalCommands(
            ExpectMasterShellCommand(command=['unzip',
                                              '-q',
                                              'public_html/results/iOS-12-Simulator-WK2-Tests-EWS/r1234-12-rerun.zip',
                                              '-d',
                                              'public_html/results/iOS-12-Simulator-WK2-Tests-EWS/r1234-12-rerun',
                                              ])
            + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Extracted test results')
        # NOTE(review): the URL lacks the '-rerun' suffix used in the unzip
        # paths above — verify this matches how the step builds the results URL.
        self.expectAddedURLs([call('view layout test results', 'https://ews-build.s3-us-west-2.amazonaws.com/iOS-12-Simulator-WK2-Tests-EWS/r1234-12/results.html')])
        return self.runStep()

    def test_failure(self):
        # unzip exiting 2 -> FAILURE, but the results URL is still added.
        self.setupStep(ExtractTestResults())
        self.setProperty('configuration', 'debug')
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'macOS-Sierra-Release-WK2-Tests-EWS')
        self.setProperty('buildnumber', '12')
        self.expectLocalCommands(
            ExpectMasterShellCommand(command=['unzip',
                                              '-q',
                                              'public_html/results/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12.zip',
                                              '-d',
                                              'public_html/results/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12',
                                              ])
            + 2,
        )
        self.expectOutcome(result=FAILURE, state_string='failed (2) (failure)')
        self.expectAddedURLs([call('view layout test results', 'https://ews-build.s3-us-west-2.amazonaws.com/macOS-Sierra-Release-WK2-Tests-EWS/r1234-12/results.html')])
        return self.runStep()
class TestPrintConfiguration(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for PrintConfiguration: which diagnostic commands run on each
    platform and how their output is summarized in the step's state string."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success_mac(self):
        # macOS: hostname/df/date/sw_vers/xcodebuild/uptime; the summary shows
        # the marketing OS name plus the Xcode version parsed from the logs.
        self.setupStep(PrintConfiguration())
        self.setProperty('buildername', 'macOS-High-Sierra-Release-WK2-Tests-EWS')
        self.setProperty('platform', 'mac-highsierra')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='ews150.apple.com'),
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
/dev/disk1s1 119Gi 95Gi 23Gi 81% 937959 9223372036853837848 0% /
/dev/disk1s4 119Gi 20Ki 23Gi 1% 0 9223372036854775807 0% /private/var/vm
/dev/disk0s3 119Gi 22Gi 97Gi 19% 337595 4294629684 0% /Volumes/Data'''),
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='Tue Apr 9 15:30:52 PDT 2019'),
            ExpectShell(command=['sw_vers'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''ProductName: Mac OS X
ProductVersion: 10.13.4
BuildVersion: 17E199'''),
            ExpectShell(command=['xcodebuild', '-sdk', '-version'], workdir='wkdir', timeout=60, logEnviron=False)
            + ExpectShell.log('stdio', stdout='''MacOSX10.13.sdk - macOS 10.13 (macosx10.13)
SDKVersion: 10.13
Path: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk
PlatformVersion: 1.1
PlatformPath: /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform
ProductBuildVersion: 17E189
ProductCopyright: 1983-2018 Apple Inc.
ProductName: Mac OS X
ProductUserVisibleVersion: 10.13.4
ProductVersion: 10.13.4
Xcode 9.4.1
Build version 9F2000''')
            + 0,
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout=' 6:31 up 1 day, 19:05, 24 users, load averages: 4.17 7.23 5.45'),
        )
        self.expectOutcome(result=SUCCESS, state_string='OS: High Sierra (10.13.4), Xcode: 9.4.1')
        return self.runStep()

    def test_success_ios_simulator(self):
        # iOS simulator queues run the same command set as mac.
        self.setupStep(PrintConfiguration())
        self.setProperty('buildername', 'macOS-Sierra-Release-WK2-Tests-EWS')
        self.setProperty('platform', 'ios-simulator-12')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='ews152.apple.com'),
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
/dev/disk1s1 119Gi 95Gi 23Gi 81% 937959 9223372036853837848 0% /
/dev/disk1s4 119Gi 20Ki 23Gi 1% 0 9223372036854775807 0% /private/var/vm
/dev/disk0s3 119Gi 22Gi 97Gi 19% 337595 4294629684 0% /Volumes/Data'''),
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='Tue Apr 9 15:30:52 PDT 2019'),
            ExpectShell(command=['sw_vers'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''ProductName: Mac OS X
ProductVersion: 10.14.5
BuildVersion: 18F132'''),
            ExpectShell(command=['xcodebuild', '-sdk', '-version'], workdir='wkdir', timeout=60, logEnviron=False)
            + ExpectShell.log('stdio', stdout='''iPhoneSimulator12.2.sdk - Simulator - iOS 12.2 (iphonesimulator12.2)
SDKVersion: 12.2
Path: /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator12.2.sdk
PlatformVersion: 12.2
PlatformPath: /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform
BuildID: 15C4BAF8-4632-11E9-86EB-BA47F1FFAC3C
ProductBuildVersion: 16E226
ProductCopyright: 1983-2019 Apple Inc.
ProductName: iPhone OS
ProductVersion: 12.2
Xcode 10.2
Build version 10E125''')
            + 0,
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout=' 6:31 up 1 day, 19:05, 24 users, load averages: 4.17 7.23 5.45'),
        )
        self.expectOutcome(result=SUCCESS, state_string='OS: Mojave (10.14.5), Xcode: 10.2')
        return self.runStep()

    def test_success_webkitpy(self):
        # Generic ('*') platform: commands may produce no logged output.
        self.setupStep(PrintConfiguration())
        self.setProperty('platform', '*')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['sw_vers'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''ProductName: Mac OS X
ProductVersion: 10.13.6
BuildVersion: 17G7024'''),
            ExpectShell(command=['xcodebuild', '-sdk', '-version'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''Xcode 10.2\nBuild version 10E125'''),
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout=' 6:31 up 22 seconds, 12:05, 2 users, load averages: 3.17 7.23 5.45'),
        )
        self.expectOutcome(result=SUCCESS, state_string='OS: High Sierra (10.13.6), Xcode: 10.2')
        return self.runStep()

    def test_success_linux_wpe(self):
        # Linux platforms swap sw_vers/xcodebuild for uname -a.
        self.setupStep(PrintConfiguration())
        self.setProperty('platform', 'wpe')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='ews190'),
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
/dev/disk0s3 119Gi 22Gi 97Gi 19% 337595 4294629684 0% /'''),
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='Tue Apr 9 15:30:52 PDT 2019'),
            ExpectShell(command=['uname', '-a'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout='''Linux kodama-ews 5.0.4-arch1-1-ARCH #1 SMP PREEMPT Sat Mar 23 21:00:33 UTC 2019 x86_64 GNU/Linux'''),
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0
            + ExpectShell.log('stdio', stdout=' 6:31 up 22 seconds, 12:05, 2 users, load averages: 3.17 7.23 5.45'),
        )
        self.expectOutcome(result=SUCCESS, state_string='Printed configuration')
        return self.runStep()

    def test_success_linux_gtk(self):
        self.setupStep(PrintConfiguration())
        self.setProperty('platform', 'gtk')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['uname', '-a'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Printed configuration')
        return self.runStep()

    def test_success_win(self):
        # Windows only runs hostname and df.
        self.setupStep(PrintConfiguration())
        self.setProperty('platform', 'win')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Printed configuration')
        return self.runStep()

    def test_failure(self):
        # sw_vers/xcodebuild missing on the worker (rc 1) -> step FAILURE.
        self.setupStep(PrintConfiguration())
        self.setProperty('platform', 'ios-12')
        self.expectRemoteCommands(
            ExpectShell(command=['hostname'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['df', '-hl'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['date'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
            ExpectShell(command=['sw_vers'], workdir='wkdir', timeout=60, logEnviron=False) + 1
            + ExpectShell.log('stdio', stdout='''Upon execvpe sw_vers ['sw_vers'] in environment id 7696545650400
:Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/twisted/internet/process.py", line 445, in _fork
environment)
File "/usr/lib/python2.7/site-packages/twisted/internet/process.py", line 523, in _execChild
os.execvpe(executable, args, environment)
File "/usr/lib/python2.7/os.py", line 355, in execvpe
_execvpe(file, args, env)
File "/usr/lib/python2.7/os.py", line 382, in _execvpe
func(fullname, *argrest)
OSError: [Errno 2] No such file or directory'''),
            ExpectShell(command=['xcodebuild', '-sdk', '-version'], workdir='wkdir', timeout=60, logEnviron=False)
            + ExpectShell.log('stdio', stdout='''Upon execvpe xcodebuild ['xcodebuild', '-sdk', '-version'] in environment id 7696545612416
:Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/twisted/internet/process.py", line 445, in _fork
environment)
File "/usr/lib/python2.7/site-packages/twisted/internet/process.py", line 523, in _execChild
os.execvpe(executable, args, environment)
File "/usr/lib/python2.7/os.py", line 355, in execvpe
_execvpe(file, args, env)
File "/usr/lib/python2.7/os.py", line 382, in _execvpe
func(fullname, *argrest)
OSError: [Errno 2] No such file or directory''')
            + 1,
            ExpectShell(command=['uptime'], workdir='wkdir', timeout=60, logEnviron=False) + 0,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to print configuration')
        return self.runStep()
class TestFindModifiedChangeLogs(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for FindModifiedChangeLogs, which parses `git diff --name-status`
    output and collects the modified/added ChangeLog paths into a property."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_modified_changelogs(self):
        self.setupStep(FindModifiedChangeLogs())
        self.assertEqual(FindModifiedChangeLogs.haltOnFailure, False)
        self.setProperty('buildername', 'Commit-Queue')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=180,
                        logEnviron=False,
                        command=['git', 'diff', '-r', '--name-status', '--no-renames', '--no-ext-diff', '--full-index']) +
            ExpectShell.log('stdio', stdout='''M Source/WebCore/ChangeLog
M Source/WebCore/layout/blockformatting/BlockFormattingContext.h
M Source/WebCore/layout/blockformatting/BlockMarginCollapse.cpp
M Tools/ChangeLog
M Tools/TestWebKitAPI/CMakeLists.txt''') +
            0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Found modified ChangeLogs')
        rc = self.runStep()
        # Only the ChangeLog entries are kept; other modified files are ignored.
        self.assertEqual(self.getProperty('modified_changelogs'), ['Source/WebCore/ChangeLog', 'Tools/ChangeLog'])
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_success_added_changelog(self):
        # Newly added ('A') ChangeLogs count as modified too.
        self.setupStep(FindModifiedChangeLogs())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=180,
                        logEnviron=False,
                        command=['git', 'diff', '-r', '--name-status', '--no-renames', '--no-ext-diff', '--full-index']) +
            ExpectShell.log('stdio', stdout='''A Tools/Scripts/ChangeLog
M Tools/Scripts/run-api-tests''') +
            0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Found modified ChangeLogs')
        rc = self.runStep()
        self.assertEqual(self.getProperty('modified_changelogs'), ['Tools/Scripts/ChangeLog'])
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_failure(self):
        # git diff failing (rc 2) sets the bugzilla comment and finish summary.
        self.setupStep(FindModifiedChangeLogs())
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'Commit-Queue')
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=180,
                        logEnviron=False,
                        command=['git', 'diff', '-r', '--name-status', '--no-renames', '--no-ext-diff', '--full-index']) +
            ExpectShell.log('stdio', stdout='Unexpected failure') +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to find any modified ChangeLog in Patch 1234')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), 'Unable to find any modified ChangeLog in Attachment 1234')
        self.assertEqual(self.getProperty('build_finish_summary'), 'Unable to find any modified ChangeLog in Patch 1234')
        return rc
class TestCreateLocalGITCommit(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for CreateLocalGITCommit, which pipes commit-log-editor output
    into `git commit --all -F -` for the previously discovered ChangeLogs."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        self.setupStep(CreateLocalGITCommit())
        self.assertEqual(CreateLocalGITCommit.haltOnFailure, False)
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('modified_changelogs', ['Tools/Scripts/ChangeLog', 'Source/WebCore/ChangeLog'])
        self.expectRemoteCommands(
            # Command is a shell string (not a list) because of the pipe.
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command='perl Tools/Scripts/commit-log-editor --print-log Tools/Scripts/ChangeLog Source/WebCore/ChangeLog | git commit --all -F -') +
            0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Created local git commit')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), None)
        self.assertEqual(self.getProperty('build_finish_summary'), None)
        return rc

    def test_failure_no_changelog(self):
        # Without the modified_changelogs property the step fails immediately,
        # running no remote commands.
        self.setupStep(CreateLocalGITCommit())
        self.setProperty('patch_id', '1234')
        self.expectOutcome(result=FAILURE, state_string='No modified ChangeLog file found for Patch 1234')
        return self.runStep()

    def test_failure(self):
        self.setupStep(CreateLocalGITCommit())
        self.setProperty('patch_id', '1234')
        self.setProperty('buildername', 'Commit-Queue')
        self.setProperty('modified_changelogs', ['Tools/Scripts/ChangeLog'])
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command='perl Tools/Scripts/commit-log-editor --print-log Tools/Scripts/ChangeLog | git commit --all -F -') +
            ExpectShell.log('stdio', stdout='Unexpected failure') +
            2,
        )
        self.expectOutcome(result=FAILURE, state_string='Failed to create git commit')
        rc = self.runStep()
        self.assertEqual(self.getProperty('bugzilla_comment_text'), 'Failed to create git commit for Attachment 1234')
        self.assertEqual(self.getProperty('build_finish_summary'), 'Failed to create git commit for Patch 1234')
        return rc
class TestValidateCommiterAndReviewer(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for ValidateCommiterAndReviewer, with contributor lookup stubbed.

    setUp monkey-patches the class-level ``load_contributors``; the original
    attribute is saved and restored in tearDown so the patch cannot leak into
    other test classes run in the same process (the original code never undid
    it, and test_failure_load_contributors re-patched it with yet another stub).
    """

    def setUp(self):
        self.longMessage = True

        def mock_load_contributors(cls, *args, **kwargs):
            return {'aakash_jain@apple.com': {'name': 'Aakash Jain', 'status': 'reviewer'},
                    'committer@webkit.org': {'name': 'WebKit Committer', 'status': 'committer'}}
        # Snapshot the real implementation so tearDown can restore it.
        self._saved_load_contributors = ValidateCommiterAndReviewer.load_contributors
        ValidateCommiterAndReviewer.load_contributors = mock_load_contributors
        return self.setUpBuildStep()

    def tearDown(self):
        # Undo the class-level patch from setUp (and any per-test re-patching).
        ValidateCommiterAndReviewer.load_contributors = self._saved_load_contributors
        return self.tearDownBuildStep()

    def test_success(self):
        # Valid committer + valid reviewer -> SUCCESS.
        self.setupStep(ValidateCommiterAndReviewer())
        self.setProperty('patch_id', '1234')
        self.setProperty('patch_committer', 'committer@webkit.org')
        self.setProperty('patch_reviewer', 'aakash_jain@apple.com')
        self.expectHidden(False)
        self.assertEqual(ValidateCommiterAndReviewer.haltOnFailure, False)
        self.expectOutcome(result=SUCCESS, state_string='Validated commiter and reviewer')
        return self.runStep()

    def test_success_no_reviewer(self):
        # A missing reviewer is acceptable; only the committer is validated.
        self.setupStep(ValidateCommiterAndReviewer())
        self.setProperty('patch_id', '1234')
        self.setProperty('patch_committer', 'aakash_jain@apple.com')
        self.expectHidden(False)
        self.expectOutcome(result=SUCCESS, state_string='Validated committer')
        return self.runStep()

    def test_failure_load_contributors(self):
        # An empty contributors map means validation cannot proceed at all.
        self.setupStep(ValidateCommiterAndReviewer())
        self.setProperty('patch_id', '1234')
        self.setProperty('patch_committer', 'abc@webkit.org')
        ValidateCommiterAndReviewer.load_contributors = lambda x: {}
        self.expectHidden(False)
        self.expectOutcome(result=FAILURE, state_string='Failed to get contributors information')
        return self.runStep()

    def test_failure_invalid_committer(self):
        self.setupStep(ValidateCommiterAndReviewer())
        self.setProperty('patch_id', '1234')
        self.setProperty('patch_committer', 'abc@webkit.org')
        self.expectHidden(False)
        self.expectOutcome(result=FAILURE, state_string='abc@webkit.org does not have committer permissions')
        return self.runStep()

    def test_failure_invalid_reviewer(self):
        # A committer-status contributor may not act as the reviewer.
        self.setupStep(ValidateCommiterAndReviewer())
        self.setProperty('patch_id', '1234')
        self.setProperty('patch_committer', 'aakash_jain@apple.com')
        self.setProperty('patch_reviewer', 'committer@webkit.org')
        self.expectHidden(False)
        self.expectOutcome(result=FAILURE, state_string='committer@webkit.org does not have reviewer permissions')
        return self.runStep()

    def test_load_contributors_from_disk(self):
        # NOTE(review): this patches _addToLog without restoring it; left as-is
        # since no other test in this file observes _addToLog output.
        ValidateCommiterAndReviewer._addToLog = lambda cls, logtype, log: sys.stdout.write(log)
        contributors = ValidateCommiterAndReviewer().load_contributors_from_disk()
        self.assertEqual(contributors['Aakash Jain']['nicks'], ['aakash_jain'])
class TestCheckPatchStatusOnEWSQueues(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for CheckPatchStatusOnEWSQueues with get_patch_status stubbed.

    Each test replaces the class-level ``get_patch_status``; setUp snapshots
    the real attribute and tearDown restores it so the stub cannot leak into
    later tests in the same process (the original code left the patch behind).
    """

    def setUp(self):
        self.longMessage = True
        # Snapshot so tearDown can undo the per-test monkey-patching below.
        self._saved_get_patch_status = CheckPatchStatusOnEWSQueues.get_patch_status
        return self.setUpBuildStep()

    def tearDown(self):
        CheckPatchStatusOnEWSQueues.get_patch_status = self._saved_get_patch_status
        return self.tearDownBuildStep()

    def test_success(self):
        # Another queue reporting SUCCESS sets the passed_mac_wk2 property.
        CheckPatchStatusOnEWSQueues.get_patch_status = lambda cls, patch_id, queue: SUCCESS
        self.setupStep(CheckPatchStatusOnEWSQueues())
        self.setProperty('patch_id', '1234')
        self.expectOutcome(result=SUCCESS, state_string='Checked patch status on other queues')
        rc = self.runStep()
        self.assertEqual(self.getProperty('passed_mac_wk2'), True)
        return rc

    def test_failure(self):
        # A FAILURE elsewhere leaves the property unset; the step itself
        # still finishes with SUCCESS.
        self.setupStep(CheckPatchStatusOnEWSQueues())
        self.setProperty('patch_id', '1234')
        CheckPatchStatusOnEWSQueues.get_patch_status = lambda cls, patch_id, queue: FAILURE
        self.expectOutcome(result=SUCCESS, state_string='Checked patch status on other queues')
        rc = self.runStep()
        self.assertEqual(self.getProperty('passed_mac_wk2'), None)
        return rc
class TestPushCommitToWebKitRepo(BuildStepMixinAdditions, unittest.TestCase):
    """Tests for PushCommitToWebKitRepo (`git svn dcommit --rmdir`)."""

    def setUp(self):
        self.longMessage = True
        return self.setUpBuildStep()

    def tearDown(self):
        return self.tearDownBuildStep()

    def test_success(self):
        # The committed revision from dcommit stdout becomes the state string.
        self.setupStep(PushCommitToWebKitRepo())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command=['git', 'svn', 'dcommit', '--rmdir']) +
            ExpectShell.log('stdio', stdout='Committed r256729') +
            0,
        )
        self.expectOutcome(result=SUCCESS, state_string='Committed r256729')
        return self.runStep()

    def test_failure(self):
        self.setupStep(PushCommitToWebKitRepo())
        self.expectRemoteCommands(
            ExpectShell(workdir='wkdir',
                        timeout=300,
                        logEnviron=False,
                        command=['git', 'svn', 'dcommit', '--rmdir']) +
            ExpectShell.log('stdio', stdout='Unexpected failure') +
            2,
        )
        # 'Webkit' casing mirrors the production step's string; do not "fix" it here.
        self.expectOutcome(result=FAILURE, state_string='Failed to push commit to Webkit repository')
        return self.runStep()
# Allow running this test module directly, outside the usual test runner.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
from neupy import algorithms, layers
from base import BaseTestCase
from helpers import simple_classification
class L2RegularizationTestCase(BaseTestCase):
    """Covers the l2 regularizer: its cost value, repr, and use in training."""

    def test_l2_regularization(self):
        # With 'bias' excluded, only the weight matrix contributes to the cost.
        net = layers.join(
            layers.Input(10),
            layers.Relu(5, weight=2, bias=2),
        )
        penalty = algorithms.l2(0.01, exclude=['bias'])
        self.assertAlmostEqual(self.eval(penalty(net)), 2.0)

    def test_l2_regularization_with_bias(self):
        # Nothing excluded: the bias vector adds its share on top of the weights.
        net = layers.join(
            layers.Input(10),
            layers.Relu(5, weight=2, bias=2),
        )
        penalty = algorithms.l2(0.01, exclude=[])
        self.assertAlmostEqual(self.eval(penalty(net)), 2.2)

    def test_l2_repr(self):
        # Both the positional and the keyword decay-rate spellings round-trip.
        self.assertEqual(
            repr(algorithms.l2(0.01, exclude=['bias'])),
            "l2(0.01, exclude=['bias'])")
        self.assertEqual(
            repr(algorithms.l2(decay_rate=0.01, exclude=['bias'])),
            "l2(decay_rate=0.01, exclude=['bias'])")

    def test_training_with_l2_regularization(self):
        # End-to-end: training with an l2 regularizer still converges to a
        # small validation error on the toy classification problem.
        x_train, x_test, y_train, y_test = simple_classification()
        optimizer = algorithms.Momentum(
            [
                layers.Input(10),
                layers.Sigmoid(20),
                layers.Sigmoid(1),
            ],
            step=0.35,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
            regularizer=algorithms.l2(0.001),
        )
        optimizer.train(x_train, y_train, x_test, y_test, epochs=40)
        self.assertGreater(0.15, optimizer.errors.valid[-1])
class L1RegularizationTestCase(BaseTestCase):
    """Covers the l1 regularizer on a weight matrix of known magnitude."""

    def test_l1_regularization(self):
        # Random signs, fixed magnitude: every weight entry is +2 or -2.
        random_signs = np.sign(np.random.random((10, 5)) - 0.5)
        net = layers.join(
            layers.Input(10),
            layers.Relu(5, weight=2 * random_signs, bias=2),
        )
        penalty = algorithms.l1(0.01)
        self.assertAlmostEqual(self.eval(penalty(net)), 1.0)
class MaxNormRegularizationTestCase(BaseTestCase):
    """Covers the max-norm regularizer on a weight matrix with a known maximum."""

    def test_max_norm_regularization(self):
        # Weights 0..19 in a 4x5 matrix; the expected penalty is 0.19.
        weights = np.arange(20).reshape(4, 5)
        net = layers.join(
            layers.Input(4),
            layers.Relu(5, weight=weights, bias=100),
        )
        penalty = algorithms.maxnorm(0.01)
        self.assertAlmostEqual(self.eval(penalty(net)), 0.19)
|
#!/usr/bin/python3
import os
from os import path
import sys
# --- Directory layout (all paths are relative to the repository root) ---
temp_dir = 'temp'
build_dir = 'out'
run_dir = 'out/run'
jar_out_dir = 'out/jar'
jmod_out_dir = 'out/jmod'
module_dir='.'
dependency_dirs=['dependencies/jars']
jmod_dirs=['dependencies/native/jmods']
# Cache of file timestamps, presumably for incremental-build decisions — used elsewhere.
timestamp_cachefile=path.join(temp_dir, 'file_timestamp_cache.json')
maven_deps = []
# --- External tool executables; assumed to be resolvable via PATH ---
java_exec='java'
javac_exec='javac'
jlink_exec='jlink'
jar_exec='jar'
jmod_exec='jmod'
python_exec='python'
maven_exec='mvn'
# Resolve this script's own location and chdir there, so the relative paths
# above behave identically no matter where the script is launched from.
# NOTE(review): this_dir uses realpath (resolves symlinks) while root_dir uses
# abspath (does not) — confirm whether that difference is intentional.
this_file = path.realpath(__file__)
this_dir = path.dirname(this_file)
root_dir = path.dirname(path.abspath(__file__))
os.chdir(root_dir)
# Map sys.platform to the os-arch identifier used by the build.
# BUG FIX: the original tested `'win' in sys.platform` first, which also
# matches 'darwin' — macOS was misclassified as windows-x64 and the darwin
# branch was unreachable. Check the exact/prefix forms instead, and fail
# loudly rather than leaving this_os_arch undefined on unknown platforms.
if sys.platform == 'darwin':
    this_os_arch = 'osx-x64'
elif sys.platform.startswith('linux'):
    this_os_arch = 'linux-x64'
elif sys.platform.startswith(('win', 'cygwin')):
    this_os_arch = 'windows-x64'
else:
    raise RuntimeError('unsupported platform: ' + sys.platform)
|
import argparse
import numpy as np
import timeit
import hashlib
import struct
import numba
import os
import sys
# to make sure relative import works in order to import test data
current_script_path = sys.argv[0]
# Package root is the parent of this script's directory; put it on sys.path so
# the `pyteiser.*` imports below resolve regardless of the invocation cwd.
package_home_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))
if package_home_path not in sys.path:
    sys.path.append(package_home_path)
# chdir so the relative test-data paths used as argument defaults resolve.
os.chdir(package_home_path)
import pyteiser.glob_var as glob_var
import pyteiser.structures as structures
import pyteiser.IO as IO
import pyteiser.matchmaker as matchmaker
def handler(argv=None):
    """Parse command-line arguments for the profile-comparison script.

    Args:
        argv: optional list of argument strings to parse; None (the default)
            falls back to sys.argv[1:], preserving the original call style.

    Returns:
        argparse.Namespace holding the input file paths and indices_mode flag.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--seedfile", type=str)
    parser.add_argument("--rna_fastafile", type=str)
    parser.add_argument("--profiles_full_file", type=str)
    parser.add_argument("--profiles_filtered_file", help="", type=str)
    # BUG FIX: argparse's `type=bool` converts any non-empty string (even
    # "False") to True; a store_true flag gives the intended on/off semantics
    # while the default remains False.
    parser.add_argument("--indices_mode", help="compression in the index mode",
                        action='store_true')
    parser.set_defaults(
        rna_fastafile='tests/data/test_seqs.fa',
        rna_bin_file='tests/data/test_seqs.bin',
        profiles_full_file='tests/data/profiles.bin',
        profiles_filtered_file='/Users/student/Documents/hani/programs/pyteiser/data/passed_profiles/passed_profiles_4-7_4-9_4-6_14-20_combined/test_1_2_profiles_unique_fold_filtered.bin',
        compressed_profiles_file='/Users/student/Documents/hani/programs/pyteiser/data/test_profiles/compressed_by_indices_profiles.bin',
        indices_mode=False,
    )
    args = parser.parse_args(argv)
    return args
def test_filtered_profiles(args):
    """Print summary statistics for the full profiles file alongside the
    index-filtered profiles file so the two can be compared by eye."""
    full_profiles = IO.unpack_profiles_file(args.profiles_full_file,
                                            args.indices_mode,
                                            do_print=True)
    with open(args.profiles_filtered_file, 'rb') as source:
        packed_indices = source.read()
    filtered_profiles = IO.decompress_profiles_indices(packed_indices)
    print(full_profiles.shape)
    print(full_profiles)
    print(full_profiles[6:16, ].sum())
    print(filtered_profiles.shape)
    print(filtered_profiles)
    print(filtered_profiles.sum())
def main():
    """Script entry point: parse CLI arguments, then run the profile check."""
    parsed_args = handler()
    # test_bins_fasta(parsed_args)  # disabled in the original; kept for reference
    test_filtered_profiles(parsed_args)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
import pulsar as psr
def load_ref_system():
    """ Returns benzaldehyde as found in the IQMol fragment library.

        Each line of the geometry is an element symbol followed by its
        x, y, z coordinates (units as provided by IQMol, presumably
        Angstroms — the string is passed through verbatim).
        All credit to https://github.com/nutjunkie/IQmol
    """
    return psr.make_system("""
C 0.3179 1.0449 -0.0067
C 1.6965 0.8596 -0.0102
C 2.2283 -0.4253 -0.0050
C 1.3808 -1.5297 0.0037
C 0.0035 -1.3492 0.0073
C -0.5347 -0.0596 0.0021
C -2.0103 0.0989 0.0061
O -2.5724 1.1709 0.0021
H 2.3631 1.7283 -0.0171
H 3.3139 -0.5693 -0.0078
H 1.8000 -2.5413 0.0078
H -0.6626 -2.2203 0.0142
H -2.6021 -0.8324 0.0131
H -0.1030 2.0579 -0.0108
""")
|
# coding: utf-8
"""
ChannelDiagnosticDescriptor.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class ChannelDiagnosticDescriptor(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self):
        """
        ChannelDiagnosticDescriptor - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> Swagger type string; consumed by the generated
        # client's (de)serialization machinery elsewhere.
        self.swagger_types = {
            'controller': 'str',  # (required parameter)
            'drive_channel': 'int',  # (required parameter)
            'devices': 'list[ChannelDiagDevice]',  # (required parameter)
            'max_time_interval': 'int',  # (required parameter)
            'diag_params': 'ChannelDiagTypeParams'
        }

        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'controller': 'controller',  # (required parameter)
            'drive_channel': 'driveChannel',  # (required parameter)
            'devices': 'devices',  # (required parameter)
            'max_time_interval': 'maxTimeInterval',  # (required parameter)
            'diag_params': 'diagParams'
        }

        # Backing fields for the properties defined below; all start unset.
        self._controller = None
        self._drive_channel = None
        self._devices = None
        self._max_time_interval = None
        self._diag_params = None
@property
def controller(self):
"""
Gets the controller of this ChannelDiagnosticDescriptor.
A reference indicating the controller that is to perform the test.
:return: The controller of this ChannelDiagnosticDescriptor.
:rtype: str
:required/optional: required
"""
return self._controller
@controller.setter
def controller(self, controller):
"""
Sets the controller of this ChannelDiagnosticDescriptor.
A reference indicating the controller that is to perform the test.
:param controller: The controller of this ChannelDiagnosticDescriptor.
:type: str
"""
self._controller = controller
@property
def drive_channel(self):
"""
Gets the drive_channel of this ChannelDiagnosticDescriptor.
The number of the drive channel to test.
:return: The drive_channel of this ChannelDiagnosticDescriptor.
:rtype: int
:required/optional: required
"""
return self._drive_channel
@drive_channel.setter
def drive_channel(self, drive_channel):
"""
Sets the drive_channel of this ChannelDiagnosticDescriptor.
The number of the drive channel to test.
:param drive_channel: The drive_channel of this ChannelDiagnosticDescriptor.
:type: int
"""
self._drive_channel = drive_channel
@property
def devices(self):
"""
Gets the devices of this ChannelDiagnosticDescriptor.
A variable-length array of values, each value indicating a device on the referenced channel to be included in the test. To specify \"all devices,\" all that is needed is a single instance of ChannelDiagDevice with deviceType set to CHANNEL_DEVICE_ALL.
:return: The devices of this ChannelDiagnosticDescriptor.
:rtype: list[ChannelDiagDevice]
:required/optional: required
"""
return self._devices
@devices.setter
def devices(self, devices):
"""
Sets the devices of this ChannelDiagnosticDescriptor.
A variable-length array of values, each value indicating a device on the referenced channel to be included in the test. To specify \"all devices,\" all that is needed is a single instance of ChannelDiagDevice with deviceType set to CHANNEL_DEVICE_ALL.
:param devices: The devices of this ChannelDiagnosticDescriptor.
:type: list[ChannelDiagDevice]
"""
self._devices = devices
@property
def max_time_interval(self):
"""
Gets the max_time_interval of this ChannelDiagnosticDescriptor.
The maximum number of seconds the test is allowed to run before it is forcibly terminated.
:return: The max_time_interval of this ChannelDiagnosticDescriptor.
:rtype: int
:required/optional: required
"""
return self._max_time_interval
@max_time_interval.setter
def max_time_interval(self, max_time_interval):
"""
Sets the max_time_interval of this ChannelDiagnosticDescriptor.
The maximum number of seconds the test is allowed to run before it is forcibly terminated.
:param max_time_interval: The max_time_interval of this ChannelDiagnosticDescriptor.
:type: int
"""
self._max_time_interval = max_time_interval
@property
def diag_params(self):
"""
Gets the diag_params of this ChannelDiagnosticDescriptor.
A sub-structure indicating both the type of diagnostic test to perform and the associated test-type-dependent parameters.
:return: The diag_params of this ChannelDiagnosticDescriptor.
:rtype: ChannelDiagTypeParams
:required/optional: required
"""
return self._diag_params
@diag_params.setter
def diag_params(self, diag_params):
"""
Sets the diag_params of this ChannelDiagnosticDescriptor.
A sub-structure indicating both the type of diagnostic test to perform and the associated test-type-dependent parameters.
:param diag_params: The diag_params of this ChannelDiagnosticDescriptor.
:type: ChannelDiagTypeParams
"""
self._diag_params = diag_params
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# Mathematics > Geometry > Points on a Rectangle
# Determine if a set of points coincides with the edges of a non-degenerate rectangle.
#
# https://www.hackerrank.com/challenges/points-on-rectangle/problem
# https://www.hackerrank.com/contests/101hack42/challenges/points-on-rectangle
#
# The rectangle's sides are parallel to the axes, which greatly simplifies things!
# Those sides are defined by the min and max values of x and y.
# For each test case: read n points, then check that every point lies on the
# bounding box of the set (left/right edge -> x equals min/max x, top/bottom
# edge -> y equals min/max y). If so the points coincide with the rectangle.
for _ in range(int(input())):
    n = int(input())
    points = [tuple(map(int, input().split())) for _ in range(n)]
    xs = [px for px, _ in points]
    ys = [py for _, py in points]
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)
    # Every point must sit on at least one of the four bounding edges.
    on_edge = all(px in (x_lo, x_hi) or py in (y_lo, y_hi)
                  for px, py in points)
    print("YES" if on_edge else "NO")
|
import re
from hashlib import md5
from xml.dom import minidom
from . import JarProvider
class FeedTheBeast(JarProvider):
    """Jar provider for Feed The Beast server packs hosted on creeperrepo.net."""
    base = 'http://www.creeperrepo.net/'

    def work(self):
        # Fetch the repo date first; it seeds the download token.
        self.get(self.base + 'getdate', self.handle_date)

    def handle_date(self, data):
        # Token is md5('mcepoch1' + date). Fixed: md5.update() requires bytes
        # on Python 3, and `hash` shadowed the builtin.
        # NOTE(review): assumes *data* arrives as str — confirm against the
        # JarProvider.get() callback contract.
        digest = md5()
        digest.update(('mcepoch1' + data).encode('utf-8'))
        self.token = digest.hexdigest()
        self.get(self.base + 'static/FTB2/modpacks.xml', self.handle_packs)

    def handle_packs(self, data):
        """Parse modpacks.xml and register one artifact per server pack."""
        attr = lambda n, name: n.attributes[name].value
        dom = minidom.parseString(data)
        for node in dom.getElementsByTagName('modpack'):
            filename = attr(node, 'serverPack')
            if filename == "":
                # Pack has no server download; skip it.
                continue
            # Strip redundant naming prefixes/suffixes from the pack name.
            artifact = attr(node, 'name')
            artifact = re.sub(' Pack$', '', artifact)
            artifact = re.sub('^Feed The Beast ', '', artifact)
            artifact = re.sub('^FTB ', '', artifact)
            # The repo encodes the path with '^' as separator after the token.
            url = self.base + 'direct/FTB2/' + self.token + '/'
            url += '^'.join((
                'modpacks',
                attr(node, 'dir'),
                attr(node, 'version').replace('.', '_'),
                filename))
            self.add(('Feed The Beast', artifact), ('ftb', None), url)
        self.commit()
ref = FeedTheBeast
|
import csv
import os
import time
import numpy as np # type: ignore # this has some nice mathematics related functions
# using so called sparse linear algebra make stuff run way faster (ignoring zeros)
from scipy.sparse import csr_matrix, diags # type: ignore
# this guy can solve equation faster by taking advantage of the sparsity
# (it ignores zeros in the matrices)
from scipy.sparse.linalg import spsolve # type: ignore
from experiment_data_handler import ExperimentalData
from interpolations import predefined_interp_class_factory
class Simulation:  # In later objects abbreviated as Sim
    """
    1D transient heat-conduction simulation using 1st-order continuous
    Galerkin finite elements and a theta time-stepping scheme.

    theta = 0.0 - fully explicit 1st order, numerically unstable.
    theta = 0.5 - midpoint (Crank-Nicolson) 2nd order, numerically stable (probably the best choice).
    theta = 1.0 - fully implicit 1st order, numerically stable.
    """
    def __init__(self,
                 N: int,
                 dt: float,
                 theta: float,
                 robin_alpha: float,
                 x0: float,
                 length: float,
                 material,
                 experiment_data_path: str = "DATA.csv") -> None:
        """
        Args:
            N ... number of elements in the model
            dt ... fixed time step
            theta ... defining the explicitness/implicitness of the simulation
            robin_alpha ... coefficient of heat convection
            x0 ... where is the place of our interest in the object
            length ... how long is the object
            material ... object containing material properties
                         (must expose rho, cp and lmbd attributes)
            experiment_data_path ... from where the data should be taken
        """
        self.N = int(N)
        self.dt = dt
        self.theta = theta
        self.Exp_data = ExperimentalData(experiment_data_path)
        # Material properties: density, heat capacity, thermal conductivity.
        self.rho = material.rho
        self.cp = material.cp
        self.lmbd = material.lmbd
        self.length = length
        self.robin_alpha = robin_alpha
        self.x0 = x0
        # Placeholder for the fixed simulation time points
        self.t = np.arange(self.Exp_data.t_data[0], self.Exp_data.t_data[-1] + self.dt, self.dt)
        # Current time for quick lookup in the callback
        self.current_t = 0.0
        # maximum allowed index when simulating
        self.max_step_idx = len(self.t) - 1
        # current time step index
        self.current_step_idx = 0
        # checkpoint time step index
        self.checkpoint_step_idx = 0
        # Placeholder for interpolated body temperature
        self.T_data = np.interp(self.t, self.Exp_data.t_data, self.Exp_data.T_data)
        # Placeholder for interpolated heat_flux
        self.HeatFlux = np.interp(self.t, self.Exp_data.t_data, self.Exp_data.q_data)
        # Placeholder for interpolated ambient temperature
        self.T_amb = np.interp(self.t, self.Exp_data.t_data, self.Exp_data.T_amb_data)
        # size of one element
        self.dx = self.length/N
        # x-positions of the nodes (temperatures)
        # https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html
        self.x = np.linspace(0, self.length, N+1)
        # error_value of the simulation
        self.error_norm = 0.0
        # Whether we have plotted the heat flux, which should be done only once
        self.heat_flux_already_plotted = False
        # temperature fields
        # https://docs.scipy.org/doc/numpy/reference/generated/numpy.empty.html
        # placeholder for actual temperatures in last evaluated step
        self.T = np.empty(N+1)
        # initialize temperature field to the first measured body temperature
        self.T.fill(self.Exp_data.T_data[0])
        # placeholder for temperatures saved in checkpoint
        self.T_checkpoint = np.empty(N+1)
        # Placeholder for temperature probes data
        self.T_x0 = len(self.t)*[0.0]
        # Setup the right interpolation object and save initial T_x0
        self.T_x0_interpolator = predefined_interp_class_factory(self.x0, self.x)
        self.T_x0[0] = self.T_x0_interpolator(self.T)  # type: ignore
        # TODO: make it allow multiple probes at the same time
        # Finite element method: matrix assembly using 1st order continuous Galerkin elements
        # Tridiagonal sparse mass matrix (contains information about heat capacity
        # of the elements and how their temperatures react to incoming heat)
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
        self.M = csr_matrix(self.dx*self.rho*self.cp*diags([1/6, 4/6, 1/6], [-1, 0, 1], shape=(N+1, N+1)))
        # Applying corrections to the edge elements (the value is halved there)
        self.M[0, 0] /= 2
        self.M[-1, -1] /= 2
        # Tridiagonal sparse stiffness matrix (contains information about heat
        # conductivity and how the elements affect each other)
        self.K = csr_matrix((1/self.dx)*self.lmbd*diags([-1, 2, -1], [-1, 0, 1], shape=(N+1, N+1)))
        # We have to make some changes to the matrix K because we affect
        # the body from outside
        # Here we will push Heat to the body - Neumann Boundary Condition
        self.K[0, 0] /= 2
        # Here we know the body is in contact with air so it will cool
        # accordingly - Robin Boundary Condition
        self.K[N, N] /= 2
        # Preparing variables to store the values of some properties, that are
        # constant during the simulation
        # placeholder for matrix A (left-hand side of the linear system)
        self.A = self.M + self.dt*self.theta*self.K
        # implicit portion of T_body contribution (Robin BC Nth node)
        self.A[-1, -1] += self.dt*self.theta*self.robin_alpha
        # Matrix b to calculate boundary vector b
        self.b_base = self.M - self.dt*(1-self.theta)*self.K
        # allocate memory for vector b
        self.b = np.empty(N+1)

    def __repr__(self) -> str:
        """
        Defining what should be displayed when we print the
        object of this class.
        Very useful for debugging purposes.
        """
        # NOTE(review): self.length is listed twice in this debug string —
        # harmless, but probably one of the two was meant to be another field.
        return f"""
        self.N: {self.N},
        self.dt: {self.dt},
        self.theta: {self.theta},
        self.length: {self.length},
        self.rho: {self.rho},
        self.cp: {self.cp},
        self.lmbd: {self.lmbd},
        self.length: {self.length},
        self.robin_alpha: {self.robin_alpha},
        self.x0: {self.x0},
        """

    # Function that calculates new timestep (integration step)
    def evaluate_one_step(self) -> None:
        """
        Simulating one step of the simulation
        Is using already initialised instance variables
        """
        # Evaluate new boundary vector (BC means boundary condition)
        # Assemble vector b (dot() is matrix multiplication)
        self.b = self.b_base.dot(self.T)
        # Apply explicit portion of HeatFlux (Neumann BC 1st node)
        self.b[0] += self.dt*(1-self.theta)*self.HeatFlux[self.current_step_idx]
        # Apply implicit portion of HeatFlux (Neumann BC 1st node)
        self.b[0] += self.dt*self.theta*self.HeatFlux[self.current_step_idx+1]
        # Apply explicit contribution of the body temperature (Robin BC Nth node)
        self.b[-1] -= self.dt*(1-self.theta)*self.robin_alpha*self.T[-1]
        # Apply explicit contribution of the ambient temperature (Robin BC Nth node)
        self.b[-1] += self.dt*(1-self.theta)*self.robin_alpha*self.T_amb[self.current_step_idx]
        # Apply implicit contribution of the ambient temperature (Robin BC Nth node)
        self.b[-1] += self.dt*self.theta*self.robin_alpha*self.T_amb[self.current_step_idx+1]
        # solve the equation self.A*self.T=b
        self.T = spsolve(self.A, self.b)  # solve new self.T for the new step
        self.current_step_idx += 1  # move to new timestep
        # record the probe temperature at x0 for the new step
        self.T_x0[self.current_step_idx] = self.T_x0_interpolator(self.T)  # type: ignore
        # Incrementing time information for the callback
        # We are assuming the self.dt stays the same all the time
        # If self.dt would be changeable, this would need to be reverted
        # to the previous state of "self.current_t += self.dt",
        # which however had other disadvantages (as we do not want
        # the time being incremented when we do inverse problem probes)
        # - it was firstly solved by passing the boolean value whether
        # to increment time, but then there were issues with incompatible
        # signatures of evaluate_one_step() methods in Simulation and
        # InverseSimulation()
        self.current_t = self.dt * self.current_step_idx

    def after_simulation_action(self, SimController=None):
        """
        Defines what should happen after the simulation is over
        Has access to all the variables from SimController, so can
        freely communicate with the GUI
        Args:
            SimController ... whole simulation controller object
                - containing all the references to GUI
        """
        # Assigning the error value
        self.error_norm = self._calculate_final_error()
        print("Error norm after simulation: {}".format(self.error_norm))

    def save_results(self) -> None:
        """
        Outputs the (semi)results of a simulation into a CSV file and names it
        accordingly.
        """
        # TODO: discuss the possible filename structure
        file_name = "Classic-{}.csv".format(int(time.time()))
        base_path = os.path.dirname(os.path.realpath(__file__))
        absolute_path = os.path.join(base_path, file_name)
        time_data = self.t
        temp_data = self.T_x0
        with open(absolute_path, "w") as csv_file:
            csv_writer = csv.writer(csv_file)
            headers = ["Time [s]", "Temperature [C]"]
            csv_writer.writerow(headers)
            for time_value, temp_value in zip(time_data, temp_data):
                csv_writer.writerow([time_value, temp_value])

    def plot(self, temperature_plot, heat_flux_plot):
        """
        Defining the way simulation data should be plotted
        Args:
            temperature_plot ... reference to temperature plot
            heat_flux_plot ... reference to heat flux plot
        """
        # Displaying only the data that is calculated ([:self.current_step_idx])
        temperature_plot.plot(x_values=self.t[:self.current_step_idx],
                              y_values=self.T_x0[:self.current_step_idx],
                              x_experiment_values=self.Exp_data.t_data,
                              y_experiment_values=self.Exp_data.T_data)
        # Plotting the heat flux only once, because it is not changing
        if not self.heat_flux_already_plotted:
            heat_flux_plot.plot(x_values=None,
                                y_values=None,
                                x_experiment_values=self.Exp_data.t_data,
                                y_experiment_values=self.Exp_data.q_data)
            self.heat_flux_already_plotted = True

    def _calculate_final_error(self) -> float:
        """
        Determining error value for this simulation at its end
        (mean absolute deviation between simulated and measured probe
        temperatures, rounded to 3 decimal places).
        """
        error = np.sum(abs(self.T_x0 - self.T_data))/len(self.t[1:])
        return round(error, 3)

    @property
    def simulation_has_finished(self) -> bool:
        """
        Deciding if the simulation has already finished or not according to
        the current step index and the maximum step index
        Is being used to determine when to stop calling the main
        evaluate_one_step method() by some higher function
        Is implemented as a property and not as a function, as it does not
        take any parameters, does not do any calculations and is
        just returning simple comparison
        - https://www.python-course.eu/python3_properties.php
        """
        # Returning True when the current index is equal or higher than
        # the maximal one
        return self.current_step_idx >= self.max_step_idx
|
"""
Serializes numpy and JSON-like objects
"""
import pickle
import base64
import hashlib
import json
import pyarrow as pa
def pa_serialize(obj):
    """Serialize *obj* with pyarrow and return the result as a pyarrow Buffer."""
    return pa.serialize(obj).to_buffer()
def pa_deserialize(binary):
    """Reconstruct a Python object from a pyarrow-serialized binary buffer."""
    return pa.deserialize(binary)
# Module-wide (de)serializer hooks. pyarrow is the default; override via
# set_global_serializer(). The pickle pair is kept below as a reference
# alternative.
_SERIALIZER = pa_serialize
_DESERIALIZER = pa_deserialize
# _SERIALIZER = pickle.dumps
# _DESERIALIZER = pickle.loads
def set_global_serializer(serializer, deserializer):
    """Install a module-wide serializer/deserializer pair.

    Call at the start of a script, before any serialize()/deserialize()
    calls. Both arguments must be callables.
    """
    assert callable(serializer) and callable(deserializer)
    global _SERIALIZER, _DESERIALIZER
    _SERIALIZER, _DESERIALIZER = serializer, deserializer
def serialize(obj):
    """
    Serialize *obj* to binary using the currently installed _SERIALIZER.
    We can improve this function if we *really* need more memory efficiency
    """
    return _SERIALIZER(obj)
def deserialize(binary):
    """
    Deserialize *binary* using the currently installed _DESERIALIZER.
    We can improve this function if we *really* need more memory efficiency
    """
    return _DESERIALIZER(binary)
def string_hash(s):
    """16-char low-collision hash of a text string (see binary_hash).

    *s* must be a str; it is UTF-8 encoded before hashing.
    """
    assert isinstance(s, str)
    encoded = s.encode('utf-8')
    return binary_hash(encoded)
def binary_hash(binary):
    """
    Low collision hash of any binary string
    For designating the 16-char object key in Redis.
    Runs at 200 mu-second per hash on Macbook pro.
    Only contains characters from [a-z][A-Z]+_
    """
    digest = hashlib.md5(binary).digest()
    encoded = base64.b64encode(digest)
    # Keep only the first 16 base64 characters as the key.
    return encoded[:16].decode('utf-8')
def pyobj_hash(obj):
    """16-char hash of an arbitrary Python object (serialize, then binary_hash)."""
    return binary_hash(serialize(obj))
def bytes2str(bytestring):
    """Coerce to str: bytes input is UTF-8 decoded, str passes through unchanged."""
    return bytestring if isinstance(bytestring, str) else bytestring.decode('UTF-8')
def str2bytes(string):
    """Coerce to bytes: str input is UTF-8 encoded, bytes pass through unchanged."""
    return string if isinstance(string, bytes) else string.encode('UTF-8')
|
import queue
import server_wrapper
import binascii
from socket import *
from pytun import *
from threading import Thread
'''
me = '192.168.77.10'
he = '192.168.77.2'
'''
# Create and bring up the TUN device that carries the tunneled IP traffic.
# IFF_NO_PI: no extra packet-information header on reads/writes.
tun = TunTapDevice(flags=IFF_TUN | IFF_NO_PI)
tun.addr = '192.168.77.10'
tun.netmask = '255.255.255.0'
# Small MTU — presumably chosen so payloads fit inside DNS messages; confirm.
tun.mtu = 150
tun.up()
class Packet:
    """A decrypted client query together with the metadata needed to reply."""
    def __init__(self, payload, question, query, addr):
        self.payload = payload    # decrypted payload from the client
        self.question = question  # DNS question name carried by the query
        self.query = query        # original query object, reused for the reply
        self.addr = addr          # client's (host, port) address pair
def read_tunnel(contents):
    """Forever read raw IP packets from the TUN device and queue them.

    :param contents: queue of outbound tunnel payloads consumed by send_packet
    """
    while True:
        data = tun.read(tun.mtu)
        contents.put(data)
def send_packet(s, contents, packets):
    """Forever pair queued tunnel data with pending client queries and reply.

    :param s: bound UDP socket used to send the encrypted responses
    :param contents: queue of payloads produced by read_tunnel
    :param packets: queue of pending client queries produced by recv_packet

    Fixed: the original busy-waited on ``contents.empty()`` + ``continue``,
    burning CPU and racing between the emptiness check and the later get().
    Queue.get() blocks until an item is available, which is both safe and
    idle-friendly; the end state per iteration is unchanged.
    """
    while True:
        data = contents.get()   # blocks until tunnel data is available
        p = packets.get()       # blocks until a client query can carry the reply
        s.sendto(server_wrapper.server_encrypt(p.query, data, p.question), p.addr)
def recv_packet(s: socket, packets):
    """Forever receive encrypted client queries, queue them, and forward payloads.

    :param s: bound UDP socket (port 53)
    :param packets: single-slot queue of pending queries awaiting a reply
    """
    while True:
        try:
            data, addr = s.recvfrom(2048)
            data, question, query = server_wrapper.server_decrypt(data)
            p = Packet(data, question, query, addr)
            if packets.full():
                # The pending-query slot is occupied: answer the old query
                # with a dummy b'hi' payload so the slot frees up.
                p2 = packets.get()
                s.sendto(server_wrapper.server_encrypt(p2.query, b'hi', p2.question), p2.addr)
            packets.put(p)
            # The fixed question name appears to be a keep-alive/"hello"
            # marker — presumably; only real payloads go into the tunnel.
            if question != 'aGVsbG8=.group-7.cs305.fun' and question:
                tun.write(data)
            else:
                print('receive empty packet')
        except binascii.Error:
            # Malformed base64 in the incoming query — drop and keep serving.
            continue
if __name__ == '__main__':
    # Listen for DNS-encapsulated traffic on UDP port 53.
    addr = ('0.0.0.0', 53)
    s = socket(AF_INET, SOCK_DGRAM)
    s.bind(addr)
    # Single-slot queue of pending client queries; unbounded queue of tunnel data.
    packets = queue.Queue(1)
    contents = queue.Queue(-1)
    # Three cooperating daemons: receive queries, send replies, read the TUN device.
    recv_thread = Thread(target=recv_packet, args=(s, packets,))
    send_thread = Thread(target=send_packet, args=(s, contents, packets,))
    read_thread = Thread(target=read_tunnel, args=(contents,))
    recv_thread.start()
    send_thread.start()
    read_thread.start()
|
"""
This package is developed jointly between BuildSim team and Professor Adrian Chong.
The original script and example can be found at GITHUB page: https://github.com/adChong/bc-stan
The paper published can be found at: https://www.sciencedirect.com/science/article/pii/S0378778818307539?via%3Dihub
""" |
# -*- coding: utf-8 -*-
# vi: set ft=python sw=4 :
"""SLRD key data type module.
Todo:
- docs
- implement
"""
from super_type import SuperType
class KeyType(SuperType):
    """SLRD key data type.

    This data type stores references to corresponding value files.
    NOTE(review): still a stub (see module Todo) — no behavior implemented yet.
    """
    def __init__(self, raw_yaml=None, **kwargs):
        """Initialization method.

        NOTE(review): does not call super().__init__(); confirm SuperType's
        constructor contract before implementing.

        :param raw_yaml: raw YAML fragment describing this key — presumably;
            TODO confirm once implemented
        :param kwargs: extra keyword options forwarded by the caller
        :type raw_yaml: dict or None — TODO confirm
        :type kwargs: dict
        :raise: TODO — document once implemented
        """
|
"""
File: devices/views/configurator.py
Purpose:
This code is a class based view used to render and provide
functions for the device manual configuration view.
Functions allow users to send show commands manually to a device
and send configuration commands manually
"""
from django.contrib import messages
from django.shortcuts import render, redirect
from devices.controllers import cisco_controller as controller
from devices .models import Device
from django.views import View
class DeviceConfig(View):
    """Class-based view for the manual device configuration page.

    GET renders the page for one device; POST either runs a show command
    ('show' button) or pushes a configuration ('send' button) via the
    cisco controller.
    """
    template = 'device_config.html'
    success_redirect = 'devices:Device-Config'
    exception_redirect = 'devices:Device-Manager'

    def get(self, request, **kwargs):
        """Render the configuration page for the device in the URL kwargs."""
        device = Device.get_device(self.kwargs['device_id'])
        return render(request, self.template, {'device': device})

    def post(self, request, **kwargs):
        """Dispatch on the submitted button: 'show' command or 'send' config."""
        device_id = self.kwargs['device_id']
        if 'show' in request.POST:
            # Run a single show command and re-render the page with its output.
            device = Device.get_device(device_id)
            output = controller.retrieve(device, request.POST.get('txt_show'))
            return render(request, self.template,
                          {'device': device, 'output': output})
        if 'send' in request.POST:
            # Push the pasted configuration, one command per line.
            device = Device.get_device(device_id)
            commands = request.POST.get('txt_config').split("\n")
            controller.configure(device, commands)
            messages.success(request, 'Configuration Sent')
            return redirect(self.success_redirect, device_id)
        # Neither known button was pressed — bail out to the device manager.
        return redirect(self.exception_redirect)
from app.data.components import Component, Type
tags = ['base', 'target', 'weapon', 'uses', 'exp', 'class_change', 'extra', 'utility', 'special', 'formula', 'aoe', 'aesthetic', 'advanced']
class ItemComponent(Component):
    """Base class for components attached to items."""
    # Back-reference to the owning item; set when attached to an item.
    item = None
def get_items_using(expose: "Type", value, db) -> list:
    """Collect the items in *db* that have a component matching (expose, value).

    Args:
        expose: the exposed component attribute being matched.
        value: the component value being matched.
        db: database object exposing an ``items`` iterable.

    Returns:
        list: matching items. Fixed: each item is listed at most once —
        the previous per-component append produced duplicate entries when
        several components of the same item matched.
    """
    affected_items = []
    for item in db.items:
        # any() stops at the first matching component, so an item with
        # multiple matching components is appended exactly once.
        if any(c.expose == expose and c.value == value for c in item.components):
            affected_items.append(item)
    return affected_items
def swap_values(affected_items: list, expose: Type, old_value, new_value):
    """Replace *old_value* with *new_value* on every matching component.

    Mutates in place each component of each item whose exposed attribute
    equals *expose* and whose value equals *old_value*.
    """
    for entry in affected_items:
        matching = (comp for comp in entry.components
                    if comp.expose == expose and comp.value == old_value)
        for comp in matching:
            comp.value = new_value
|
"""Schema Validation."""
from .validators import Validator
class Scheme(object):
    """Scheme Validator.

    Thin facade: builds the concrete Validator for a scheme specification
    and delegates value validation to it.
    """
    def __init__(self, scheme_spec):
        # Validator.create_validator selects the validator matching the spec.
        self.validator = Validator.create_validator(scheme_spec)

    def validate(self, value):
        """Validate *value* against the scheme; returns the validator's result."""
        return self.validator.validate(value)
|
#!/usr/bin/env python
"""
Simple script to create a new incident.
"""
from __future__ import print_function
import time
import logging
import resilient
logging.basicConfig()
class ExampleArgumentParser(resilient.ArgumentParser):
    """Arguments for this command-line application, extending the standard Resilient arguments"""

    def __init__(self, config_file=None):
        super(ExampleArgumentParser, self).__init__(config_file=config_file)
        # --name/-n: required incident name.
        self.add_argument('--name', '-n',
                          required=True,
                          help="The incident name.")
        # --description/-d: required incident description.
        self.add_argument('--description', '-d',
                          required=True,
                          help="The incident description.")
        # --itype/-t: optional, repeatable incident type(s).
        self.add_argument('--itype', '-t',
                          action='append',
                          help="The incident type(s). Multiple arguments may be supplied.")
def main():
    """
    program main

    Parses command-line options, connects to the Resilient REST API and
    creates one incident from the supplied name/description/types.
    """
    parser = ExampleArgumentParser(config_file=resilient.get_config_file())
    opts = parser.parse_args()
    # NOTE(review): subscript access implies resilient's parser returns a
    # dict-like options object — confirm against the resilient package.
    inc_name = opts["name"]
    inc_desc = opts["description"]
    inc_types = opts["itype"]
    # Create SimpleClient for a REST connection to the Resilient services
    client = resilient.get_client(opts)
    # Discovered Date will be set to the current time (epoch milliseconds)
    time_now = int(time.time() * 1000)
    # Construct the basic incident DTO that will be posted
    new_incident = {"name": inc_name,
                    "description": inc_desc,
                    "incident_type_ids": inc_types,
                    "discovered_date": time_now}
    try:
        uri = '/incidents'
        # Create the incident
        incident = client.post(uri, new_incident)
        inc_id = incident["id"]
        print("Created incident {}".format(inc_id))
    except resilient.SimpleHTTPException as ecode:
        print("create failed : {}".format(ecode))
if __name__ == "__main__":
main()
|
# flask_web/app.py
"""
1. Creates the basic flask application
2. Connects the app to the database
3. Creates database tables, if they don't exist
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pathlib import Path
import os
app = Flask(__name__)

# Default DB location is the absolute path '/db/...' — presumably a container
# mount point; can be overridden via the SQLITE_LOCATION env variable.
sqlite_location = '/db/digitalesWartenDB.sqlite'
if 'SQLITE_LOCATION' in os.environ:
    sqlite_location = os.environ['SQLITE_LOCATION']
app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{sqlite_location}'
db = SQLAlchemy(app)

db_file = Path(sqlite_location)
# First boot only: create the DB directory, tables and the default place.
# NOTE(review): nesting reconstructed from context (source indentation was
# lost) — model imports and seeding assumed to run only when the DB file
# does not yet exist; confirm.
if not db_file.is_file():
    db_file.parent.absolute().mkdir(parents=True, exist_ok=True)
    # Models are imported here so SQLAlchemy registers their tables
    # before create_all().
    from models.entry import Entry
    from models.queue import Queue
    from models.place import Place, generate_default_place
    db.create_all()
    db.session.add(generate_default_place())
    db.session.commit()
|
#!/usr/bin/env python
"""
Takes LDpred.py (or LD_pruning_thres.py) effect estimates, and (validation) genotypes in PLINK bed format as input.
The script then works out overlap and outputs predictions or risk scores as well as some prediction
accuracy statistics.
Note that for maximal accuracy all SNPs with LDpred weights should be included in the validation dataset.
If they are a subset of the validation dataset, then we suggest recalculate LDpred for the overlapping SNPs.
Usage:
validate --vgf=PLINK_VAL_GENOTYPE_FILE --rf=RESULT_FILE_PREFIX --out=OUTPUT_FILE_PREFIX [--res_format=LDPRED
--split_by_chrom --pf=PHEN_FILE --pf_format=STANDARD --cov_file=COVARIATE_FILE --pcs_file=PCS_FILE
--PS=FRACTIONS_CAUSAL --TS=PVAL_THRESHOLDS --adjust_for_sex --adjust_for_covariates --adjust_for_pcs]
- PLINK_VAL_GENOTYPE_FILE: PLINK formatted genotypes for which we want to calculate risk scores.
- RESULT_FILE_PREFIX: SNP weights file, e.g. LDpred SNP weights.
- OUTPUT_FILE_PREFIX: The prefix of output file.
- RESULT_FILE_FORMAT: The format to expect the results to be in. The default format is LDPRED, which refers to the format which
running LDpred output. LDPRED-INF and P+T (LD-pruning + p-value thresholding) are also implemented.
- PHEN_FILE: Is a file with individual IDs and phenotypes
- PVAL_THRESHOLDS: This option is only valid if a P+T result file prefix is supplied. It's a list of p-value thresholds,
separated by a comma (without space), to be used for LDpred. Default values are
--TS=1,0.3,0.1,0.03,0.01,0.003,0.001,0.0003,0.0001,3E-5,1E-5,1E-6,1E-7,1E-8
- FRACTIONS_CAUSAL: This option is only valid if a LDPRED result file prefix is supplied. A list of comma separated
(without space) values between 1 and 0, excluding 0. 1 corresponds to the infinitesimal model
and will yield results similar to LDpred-inf. Default values are
--PS=1,0.3,0.1,0.03,0.01,0.003,0.001,0.0003,0.0001
2015 (c) Bjarni J Vilhjalmsson: bjarni.vilhjalmsson@gmail.com
"""
import getopt
import sys
import os
import traceback
import scipy as sp
from scipy import linalg
from plinkio import plinkfile
import itertools as it
import time
import h5py
# Recognized nucleotides and the complement map used for opposite-strand checks.
ok_nts = ['A', 'T', 'C', 'G']
opp_strand_dict = {'A':'T', 'G':'C', 'T':'A', 'C':'G'}
def parse_parameters():
    """
    Parse the parameters into a dict, etc.

    Python 2 script: long-option parsing via getopt; exits with the module
    docstring on -h/--help or when called with no arguments.
    """
    long_options_list = ['vgf=', 'rf=', 'res_format=', 'out=', 'indiv_filter=', 'split_by_chrom', 'pf=', 'pf_format=', 'cov_file=',
                         'pcs_file=', 'PS=', 'TS=', 'adjust_for_sex', 'adjust_for_covariates', 'adjust_for_pcs', 'h', 'help']
    # Defaults for every supported option (see module docstring for meanings).
    p_dict = {'vgf':None, 'rf':None, 'out':None, 'res_format':'LDPRED', 'indiv_filter':None, 'split_by_chrom':False,
              'pf':None, 'pf_format':'STANDARD', 'cov_file':None, 'pcs_file':None, 'PS':[1, 0.3, 0.1, 0.03, 0.01, 0.003, 0.001],
              'TS':[1, 0.3, 0.1, 0.03, 0.01, 0.003, 0.001, 3 * 1E-4, 1E-4, 3 * 1E-5, 1E-5, 1E-6, 1E-7, 1E-8],
              'adjust_for_sex':False, 'adjust_for_covariates':False, 'adjust_for_pcs':False}
    if len(sys.argv) > 1:
        try:
            opts, args = getopt.getopt(sys.argv[1:], "h", long_options_list)
        except:
            print "Some problems with parameters. Please read the usage documentation carefully."
            print "Use the -h option for usage information."
            # traceback.print_exc()
            # print __doc__
            sys.exit(2)
        for opt, arg in opts:
            if opt == "-h" or opt == "--h" or opt == '--help':
                print __doc__
                sys.exit(0)
            elif opt in ("--vgf"): p_dict['vgf'] = arg
            elif opt in ("--rf"): p_dict['rf'] = arg
            elif opt in ("--res_format"): p_dict['res_format'] = arg
            elif opt in ("--indiv_filter"): p_dict['indiv_filter'] = arg
            elif opt in ("--out"): p_dict['out'] = arg
            elif opt in ("--split_by_chrom"): p_dict['split_by_chrom'] = True
            # Comma-separated numeric lists for fractions/thresholds.
            elif opt in ("--PS"): p_dict['PS'] = map(float, arg.split(','))
            elif opt in ("--TS"): p_dict['TS'] = map(float, arg.split(','))
            elif opt in ("--pf"): p_dict['pf'] = arg
            elif opt in ("--pf_format"): p_dict['pf_format'] = arg
            elif opt in ("--cov_file"): p_dict['cov_file'] = arg
            elif opt in ("--pcs_file"): p_dict['pcs_file'] = arg
            elif opt in ("--adjust_for_sex"): p_dict['adjust_for_sex'] = True
            elif opt in ("--adjust_for_covariates"): p_dict['adjust_for_covariates'] = True
            elif opt in ("--adjust_for_pcs"): p_dict['adjust_for_pcs'] = True
            else:
                print "Unkown option:", opt
                print "Use -h option for usage information."
                sys.exit(2)
    else:
        print __doc__
        sys.exit(0)
    return p_dict
def get_prs(genotype_file, rs_id_map, phen_map=None):
    """Calculate polygenic risk scores from a PLINK bed file and SNP weights.

    :param genotype_file: PLINK genotype file prefix (bed/bim/fam)
    :param rs_id_map: dict keyed by SNP id with 'raw_beta', 'upd_pval_beta'
        and 'nts' entries (as produced by the result-file parsers)
    :param phen_map: optional dict iid -> {'phen', and optionally 'sex',
        'pcs', 'covariates'}; when None, phenotypes are taken from the
        .fam affection column
    :return: dict with raw and p-value-weighted PRS, true phenotypes, iids,
        and (when available) pcs/sex/covariates
    """
    plinkf = plinkfile.PlinkFile(genotype_file)
    samples = plinkf.get_samples()
    # 1. Figure out indiv filter and get true phenotypes
    indiv_filter = sp.zeros(len(samples), dtype='bool8')
    true_phens = []
    iids = []
    if phen_map is not None:
        pcs = []
        sex = []
        covariates = []
        phen_iids = set(phen_map.keys())
        for samp_i, sample in enumerate(samples):
            if sample.iid in phen_iids:
                indiv_filter[samp_i] = True
                true_phens.append(phen_map[sample.iid]['phen'])
                iids.append(sample.iid)
                if 'pcs' in phen_map[sample.iid].keys():
                    pcs.append(phen_map[sample.iid]['pcs'])
                if 'sex' in phen_map[sample.iid].keys():
                    sex.append(phen_map[sample.iid]['sex'])
                if 'covariates' in phen_map[sample.iid].keys():
                    # Temp hack...
                    # if phen_map[sample.iid]['sex']==1:
                    #     covariates.append([phen_map[sample.iid]['covariates'][0],0])
                    # else:
                    #     covariates.append([0,phen_map[sample.iid]['covariates'][0]])
                    covariates.append(phen_map[sample.iid]['covariates'])
        if len(pcs) > 0:
            assert len(pcs) == len(true_phens), 'PC information missing for some individuals with phenotypes'
        if len(sex) > 0:
            assert len(sex) == len(true_phens), 'Sex information missing for some individuals with phenotypes'
        if len(covariates) > 0:
            assert len(covariates) == len(true_phens), 'Covariates missing for some individuals with phenotypes'
    else:
        # No phenotype map: use the .fam affection status (2 = missing here).
        for samp_i, sample in enumerate(samples):
            if sample.affection != 2:
                indiv_filter[samp_i] = True
                true_phens.append(sample.affection)
                iids.append(sample.iid)
    num_individs = sp.sum(indiv_filter)
    assert num_individs > 0, 'Issues in parsing the phenotypes and/or PCs?'
    assert not sp.any(sp.isnan(true_phens)), 'Phenotypes appear to have some NaNs, or parsing failed.'
    print '%d individuals have phenotype and genotype information.' % num_individs
    num_non_matching_nts = 0
    num_flipped_nts = 0
    raw_effects_prs = sp.zeros(num_individs)
    pval_derived_effects_prs = sp.zeros(num_individs)
    # If these indices are not in order then we place them in the right place while parsing SNPs.
    print 'Iterating over BED file to calculate risk scores.'
    locus_list = plinkf.get_loci()
    snp_i = 0
    for locus, row in it.izip(locus_list, plinkf):
        upd_pval_beta = 0
        try:
            # Check rs-ID
            # sid = '%d_%d'%(locus.chromosome,locus.bp_position)
            sid = locus.name
            rs_info = rs_id_map[sid]
        except Exception:  # Move on if rsID not found.
            continue
        if rs_info['upd_pval_beta'] == 0:
            continue
        # Check whether the nucleotides are OK, and potentially flip it.
        ss_nt = rs_info['nts']
        g_nt = [locus.allele1, locus.allele2]
        flip_nts = False
        os_g_nt = sp.array([opp_strand_dict[g_nt[0]], opp_strand_dict[g_nt[1]]])
        if not (sp.all(g_nt == ss_nt) or sp.all(os_g_nt == ss_nt)):
            # Opposite strand nucleotides
            flip_nts = (g_nt[1] == ss_nt[0] and g_nt[0] == ss_nt[1]) or (os_g_nt[1] == ss_nt[0] and os_g_nt[0] == ss_nt[1])
            if flip_nts:
                # Alleles are swapped relative to the summary stats: negate betas.
                raw_beta = -rs_info['raw_beta']
                upd_pval_beta = -rs_info['upd_pval_beta']
                num_flipped_nts += 1
            else:
                # print "Nucleotides don't match after all?: sid=%s, g_nt=%s, ss_nt=%s" % (locus.name, str(g_nt), str(ss_nt))
                num_non_matching_nts += 1
                continue
        else:
            raw_beta = rs_info['raw_beta']
            upd_pval_beta = rs_info['upd_pval_beta']
        # Parse SNP, and fill in the blanks if necessary.
        snp = sp.array(row, dtype='int8')[indiv_filter]
        bin_counts = row.allele_counts()
        if bin_counts[-1] > 0:
            # Missing genotypes (coded 3) are imputed with the mode genotype.
            mode_v = sp.argmax(bin_counts[:2])
            snp[snp == 3] = mode_v
        # Normalize SNP
        # n_snp = (snp - sp.mean(snp))/sp.std(snp)
        # Update scores and move on.
        raw_effects_prs += snp * raw_beta
        assert not sp.any(sp.isnan(raw_effects_prs)), 'Raw effects PRS is corrupted'
        pval_derived_effects_prs += snp * upd_pval_beta
        assert not sp.any(sp.isnan(pval_derived_effects_prs)), 'Weighted effects PRS is corrupted'
        # Progress report every 100k SNPs with interim accuracy.
        if snp_i > 0 and snp_i % 100000 == 0:
            print snp_i
            print 'Number of non-matching NTs: %d' % num_non_matching_nts
            raw_eff_r2 = (sp.corrcoef(raw_effects_prs, true_phens)[0, 1]) ** 2
            pval_eff_r2 = (sp.corrcoef(pval_derived_effects_prs, true_phens)[0, 1]) ** 2
            print 'Raw effects PRS r2: %0.4f' % raw_eff_r2
            print 'Weigted effects PRS r2: %0.4f' % pval_eff_r2
        snp_i += 1
    plinkf.close()
    print "DONE!"
    print 'Number of non-matching NTs: %d' % num_non_matching_nts
    print 'Number of flipped NTs: %d' % num_flipped_nts
    raw_eff_corr = sp.corrcoef(raw_effects_prs, true_phens)[0, 1]
    raw_eff_r2 = raw_eff_corr ** 2
    pval_eff_corr = sp.corrcoef(pval_derived_effects_prs, true_phens)[0, 1]
    pval_eff_r2 = pval_eff_corr ** 2
    print 'Raw effects PRS correlation: %0.4f' % raw_eff_corr
    print 'Raw effects PRS r2: %0.4f' % raw_eff_r2
    print 'Weigted effects PRS correlation: %0.4f' % pval_eff_corr
    print 'Weigted effects PRS r2: %0.4f' % pval_eff_r2
    ret_dict = {'raw_effects_prs':raw_effects_prs.copy(), 'pval_derived_effects_prs':pval_derived_effects_prs.copy(),
                'true_phens':true_phens[:], 'iids':iids}
    # NOTE(review): pcs/sex/covariates are only bound when phen_map was given;
    # with phen_map=None these lookups raise NameError — confirm callers
    # always pass a phen_map.
    if len(pcs) > 0:
        ret_dict['pcs'] = pcs
    if len(sex) > 0:
        ret_dict['sex'] = sex
    if len(covariates) > 0:
        ret_dict['covariates'] = covariates
    return ret_dict
def parse_phen_file(pf, pf_format):
    """Parse a phenotype file into an {iid: {...}} map.

    Supported formats:
      'FAM'      - PLINK .fam file; keeps sex and phenotype, skipping
                   individuals with unknown sex (0) or missing phenotype (-9).
      'STANDARD' - two whitespace-separated columns: IID PHE.
      'S2'       - header line, then columns: IID Age Sex Height_Inches.

    Returns the phenotype map (empty when pf is None).
    Raises Exception in 'S2' mode when the sex column is not Male/Female.
    """
    print(pf)
    phen_map = {}
    if pf is not None:
        if pf_format == 'FAM':
            # PLINK FAM columns:
            #   FID, IID, father IID, mother IID,
            #   sex ('1'=male, '2'=female, '0'=unknown),
            #   phenotype ('-9'/'0'/non-numeric = missing for case/control)
            print('Parsing phenotypes')
            with open(pf, 'r') as f:
                for line in f:
                    l = line.split()
                    iid = l[1]
                    sex = int(l[4])
                    phen = float(l[5])
                    # Skip unknown sex and missing phenotypes.
                    if sex != 0 and phen != -9:
                        phen_map[iid] = {'phen': phen, 'sex': sex}
        if pf_format == 'STANDARD':
            # Two whitespace-separated columns: IID PHE
            print('Parsing phenotypes')
            with open(pf, 'r') as f:
                for line in f:
                    l = line.split()
                    iid = l[0]
                    phen = float(l[1])
                    phen_map[iid] = {'phen': phen}
        elif pf_format == 'S2':
            # Header line, then columns: IID Age Sex Height_Inches
            with open(pf, 'r') as f:
                # next(f) works on both Python 2.6+ and 3 (f.next() is 2-only);
                # the header is echoed for visibility, matching prior behavior.
                print(next(f))
                for line in f:
                    l = line.split()
                    iid = l[0]
                    age = float(l[1])
                    if l[2] == 'Male':
                        sex = 1
                    elif l[2] == 'Female':
                        sex = 2
                    else:
                        raise Exception('Sex missing')
                    phen = float(l[3])
                    phen_map[iid] = {'phen': phen, 'age': age, 'sex': sex}
    return phen_map
def parse_ldpred_res(file_name):
    """Parse an LDpred weights file into an {rs_id: info} map.

    Expected whitespace-separated columns after a header line:
        chrom pos sid nt1 nt2 raw_beta ldpred_beta
    The chromosome field looks like 'chrom_1'; the numeric suffix is kept.

    Returns a dict mapping rs-id -> {'chrom', 'pos', 'nts', 'raw_beta',
    'upd_pval_beta'}.
    """
    rs_id_map = {}
    with open(file_name, 'r') as f:
        # next(f) skips the header and works on Python 2.6+ and 3
        # (the original f.next() is Python-2-only).
        next(f)
        for line in f:
            l = line.split()
            chrom_str = l[0]
            chrom = int(chrom_str[6:])  # strip the 'chrom_' prefix
            pos = int(l[1])
            rs_id = l[2].strip()
            nt1 = l[3].strip()
            nt2 = l[4].strip()
            nts = [nt1, nt2]
            raw_beta = float(l[5])
            upd_pval_beta = float(l[6])
            rs_id_map[rs_id] = {'chrom':chrom, 'pos':pos, 'nts':nts, 'raw_beta':raw_beta,
                                'upd_pval_beta':upd_pval_beta}
    return rs_id_map
def parse_pt_res(file_name):
    """Parse a P+T (pruning + thresholding) weights file.

    Expected whitespace-separated columns after a header line:
        chrom pos sid nt1 nt2 raw_beta raw_pval_beta upd_beta upd_pval_beta
    Only SNPs with a non-zero raw effect are retained.

    Side effect: each retained chromosome is added to the module-level
    'non_zero_chromosomes' set (assumed to be defined at module scope;
    confirm it is initialized before this is called).
    """
    rs_id_map = {}
    with open(file_name, 'r') as f:
        # next(f) skips the header and works on Python 2.6+ and 3
        # (the original f.next() is Python-2-only).
        next(f)
        for line in f:
            l = line.split()
            chrom_str = l[0]
            chrom = int(chrom_str[6:])  # strip the 'chrom_' prefix
            pos = int(l[1])
            rs_id = l[2].strip()
            nt1 = l[3].strip()
            nt2 = l[4].strip()
            nts = [nt1, nt2]
            raw_beta = float(l[5])
            upd_pval_beta = float(l[8])
            if raw_beta != 0:
                rs_id_map[rs_id] = {'chrom':chrom, 'pos':pos, 'nts':nts, 'raw_beta':raw_beta,
                                    'upd_pval_beta':upd_pval_beta}
                non_zero_chromosomes.add(chrom)
    return rs_id_map
def calc_risk_scores(bed_file, rs_id_map, phen_map, out_file=None, split_by_chrom=False, adjust_for_sex=False,
                     adjust_for_covariates=False, adjust_for_pcs=False):
    """Calculate polygenic risk scores (PRSs) and report prediction accuracy.

    Parameters:
        bed_file: PLINK genotype file prefix (no .bed extension).
        rs_id_map: SNP weights as returned by parse_ldpred_res / parse_pt_res.
        phen_map: phenotype map as returned by parse_phen_file.
        out_file: if given, per-individual PRSs are written here, adjusted
            PRSs to '<out_file>.adj', and fitted weights to
            '<out_file>.weights.hdf5'.
        split_by_chrom: accumulate PRSs over per-chromosome genotype files
            named '<bed_file>_<i>_keep'.
        adjust_for_sex / adjust_for_covariates / adjust_for_pcs: fit
            least-squares adjustments using the corresponding data in the
            PRS dict (when present and non-empty).

    Returns:
        dict of prediction-accuracy (R^2) summaries.

    Notes:
        Relies on module-level names: sp, linalg, os, h5py, get_prs and
        non_zero_chromosomes (Python 2 module; uses print statements).
    """
    print 'Parsing PLINK bed file: %s' % bed_file
    num_individs = len(phen_map)
    assert num_individs > 0, 'No individuals found. Problems parsing the phenotype file?'
    if split_by_chrom:
        # Accumulate scores chromosome by chromosome.
        raw_effects_prs = sp.zeros(num_individs)
        pval_derived_effects_prs = sp.zeros(num_individs)
        for i in range(1, 23):
            if i in non_zero_chromosomes:
                genotype_file = bed_file + '_%i_keep' % i
                if os.path.isfile(genotype_file + '.bed'):
                    print 'Working on chromosome %d' % i
                    prs_dict = get_prs(genotype_file, rs_id_map, phen_map)
                    raw_effects_prs += prs_dict['raw_effects_prs']
                    pval_derived_effects_prs += prs_dict['pval_derived_effects_prs']
                    # raw_eff_r2 = (sp.corrcoef(raw_effects_prs, prs_dict['true_phens'])[0,1])**2
                    # pval_eff_r2 = (sp.corrcoef(pval_derived_effects_prs, prs_dict['true_phens'])[0,1])**2
                    # print 'Overall raw effects PRS r2: %0.4f'%raw_eff_r2
                    # print 'Overall weigted effects PRS r2: %0.4f'%pval_eff_r2
                else:
                    print 'Skipping chromosome'
    else:
        prs_dict = get_prs(bed_file, rs_id_map, phen_map)
        raw_effects_prs = prs_dict['raw_effects_prs']
        pval_derived_effects_prs = prs_dict['pval_derived_effects_prs']
    # NOTE(review): in the split_by_chrom branch, prs_dict below is the dict
    # of the last processed chromosome (true_phens/sex/pcs are shared across
    # chromosomes for the same individuals) -- confirm.
    true_phens = prs_dict['true_phens']
    # Report prediction accuracy
    raw_eff_corr = sp.corrcoef(raw_effects_prs, prs_dict['true_phens'])[0, 1]
    raw_eff_r2 = raw_eff_corr ** 2
    pval_eff_corr = sp.corrcoef(pval_derived_effects_prs, prs_dict['true_phens'])[0, 1]
    pval_eff_r2 = pval_eff_corr ** 2
    print 'Final raw effects PRS correlation: %0.4f' % raw_eff_corr
    print 'Final raw effects PRS r2: %0.4f' % raw_eff_r2
    print 'Final weighted effects PRS correlation: %0.4f' % pval_eff_corr
    print 'Final weighted effects PRS r2: %0.4f' % pval_eff_r2
    res_dict = {'pred_r2':pval_eff_r2}
    # Reshape everything into column vectors for the least-squares fits.
    raw_effects_prs.shape = (len(raw_effects_prs), 1)
    pval_derived_effects_prs.shape = (len(pval_derived_effects_prs), 1)
    true_phens = sp.array(true_phens)
    true_phens.shape = (len(true_phens), 1)
    # Store covariate weights, slope, etc.
    weights_dict = {}
    # Store Adjusted predictions
    adj_pred_dict = {}
    # Direct effect: regress phenotype on the weighted PRS plus intercept.
    # rss00 is the residual sum of squares of the intercept-only null model.
    Xs = sp.hstack([pval_derived_effects_prs, sp.ones((len(true_phens), 1))])
    (betas, rss00, r, s) = linalg.lstsq(sp.ones((len(true_phens), 1)), true_phens)
    (betas, rss, r, s) = linalg.lstsq(Xs, true_phens)
    pred_r2 = 1 - rss / rss00
    # print 'Fitted effects (betas) for PRS, and intercept on true phenotype:',betas
    weights_dict['unadjusted'] = {'Intercept':betas[1][0], 'ldpred_prs_effect':betas[0][0]}
    # print pred_r2
    # Adjust for sex
    if adjust_for_sex and 'sex' in prs_dict and len(prs_dict['sex']) > 0:
        sex = sp.array(prs_dict['sex'])
        sex.shape = (len(sex), 1)
        # rss0: sex-only model; rss: raw PRS + sex; rss_pd: weighted PRS + sex.
        (betas, rss0, r, s) = linalg.lstsq(sp.hstack([sex, sp.ones((len(true_phens), 1))]), true_phens)
        (betas, rss, r, s) = linalg.lstsq(sp.hstack([raw_effects_prs, sex, sp.ones((len(true_phens), 1))]), true_phens)
        Xs = sp.hstack([pval_derived_effects_prs, sex, sp.ones((len(true_phens), 1))])
        (betas, rss_pd, r, s) = linalg.lstsq(Xs, true_phens)
        weights_dict['sex_adj'] = {'Intercept':betas[2][0], 'ldpred_prs_effect':betas[0][0], 'sex':betas[1][0]}
        print 'Fitted effects (betas) for PRS, sex, and intercept on true phenotype:', betas
        adj_pred_dict['sex_adj'] = sp.dot(Xs, betas)
        pred_r2 = 1 - rss / rss0
        print 'Sex adjusted prediction accuracy (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss / rss00
        print 'Sex adjusted prediction + Sex (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss_pd / rss0
        print 'Sex adjusted prediction accuracy (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        # NOTE(review): these keys say 'PC' but this is the sex adjustment,
        # and the PC block below overwrites them -- looks like a copy/paste
        # slip; confirm intended key names.
        res_dict['PC_adj_pred_r2'] = pred_r2
        pred_r2 = 1 - rss_pd / rss00
        print 'Sex adjusted prediction + Sex (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        res_dict['PC_adj_pred_r2+PC'] = pred_r2
    # Adjust for PCs
    if adjust_for_pcs and 'pcs' in prs_dict and len(prs_dict['pcs']) > 0:
        pcs = prs_dict['pcs']
        (betas, rss0, r, s) = linalg.lstsq(sp.hstack([pcs, sp.ones((len(true_phens), 1))]), true_phens)
        (betas, rss, r, s) = linalg.lstsq(sp.hstack([raw_effects_prs, pcs, sp.ones((len(true_phens), 1))]), true_phens)
        Xs = sp.hstack([pval_derived_effects_prs, sp.ones((len(true_phens), 1)), pcs])
        (betas, rss_pd, r, s) = linalg.lstsq(Xs, true_phens)
        weights_dict['pc_adj'] = {'Intercept':betas[1][0], 'ldpred_prs_effect':betas[0][0], 'pcs':betas[2][0]}
        adj_pred_dict['pc_adj'] = sp.dot(Xs, betas)
        pred_r2 = 1 - rss / rss0
        print 'PC adjusted prediction accuracy (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss / rss00
        print 'PC adjusted prediction + PCs (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss_pd / rss0
        print 'PC adjusted prediction accuracy (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        res_dict['PC_adj_pred_r2'] = pred_r2
        pred_r2 = 1 - rss_pd / rss00
        print 'PC adjusted prediction + PCs (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        res_dict['PC_adj_pred_r2+PC'] = pred_r2
        # Adjust for both PCs and Sex
        if adjust_for_sex and 'sex' in prs_dict and len(prs_dict['sex']) > 0:
            sex = sp.array(prs_dict['sex'])
            sex.shape = (len(sex), 1)
            (betas, rss0, r, s) = linalg.lstsq(sp.hstack([sex, pcs, sp.ones((len(true_phens), 1))]), true_phens)
            (betas, rss, r, s) = linalg.lstsq(sp.hstack([raw_effects_prs, sex, pcs, sp.ones((len(true_phens), 1))]), true_phens)
            Xs = sp.hstack([pval_derived_effects_prs, sex, sp.ones((len(true_phens), 1)), pcs])
            (betas, rss_pd, r, s) = linalg.lstsq(Xs, true_phens)
            weights_dict['sex_pc_adj'] = {'Intercept':betas[2][0], 'ldpred_prs_effect':betas[0][0], 'sex':betas[1][0], 'pcs':betas[3][0]}
            adj_pred_dict['sex_pc_adj'] = sp.dot(Xs, betas)
            pred_r2 = 1 - rss / rss0
            print 'PCs+Sex adjusted prediction accuracy (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            pred_r2 = 1 - rss / rss00
            print 'PCs+Sex adjusted prediction and PCs+Sex (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            pred_r2 = 1 - rss_pd / rss0
            print 'PCs+Sex adjusted prediction accuracy (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            res_dict['PC_Sex_adj_pred_r2'] = pred_r2
            pred_r2 = 1 - rss_pd / rss00
            print 'PCs+Sex adjusted prediction and PCs+Sex (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            res_dict['PC_Sex_adj_pred_r2+PC_Sex'] = pred_r2
    # Adjust for covariates
    if adjust_for_covariates and 'covariates' in prs_dict and len(prs_dict['covariates']) > 0:
        covariates = prs_dict['covariates']
        (betas, rss0, r, s) = linalg.lstsq(sp.hstack([covariates, sp.ones((len(true_phens), 1))]), true_phens)
        (betas, rss, r, s) = linalg.lstsq(sp.hstack([raw_effects_prs, covariates, sp.ones((len(true_phens), 1))]), true_phens)
        Xs = sp.hstack([pval_derived_effects_prs, covariates, sp.ones((len(true_phens), 1))])
        (betas, rss_pd, r, s) = linalg.lstsq(Xs, true_phens)
        adj_pred_dict['cov_adj'] = sp.dot(Xs, betas)
        pred_r2 = 1 - rss / rss0
        print 'Cov adjusted prediction accuracy (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss / rss00
        print 'Cov adjusted prediction + Cov (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        pred_r2 = 1 - rss_pd / rss0
        print 'Cov adjusted prediction accuracy (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        res_dict['Cov_adj_pred_r2'] = pred_r2
        pred_r2 = 1 - rss_pd / rss00
        print 'Cov adjusted prediction + Cov (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
        res_dict['Cov_adj_pred_r2+Cov'] = pred_r2
        if adjust_for_pcs and 'pcs' in prs_dict and len(prs_dict['pcs']) and 'sex' in prs_dict and len(prs_dict['sex']) > 0:
            pcs = prs_dict['pcs']
            sex = sp.array(prs_dict['sex'])
            sex.shape = (len(sex), 1)
            (betas, rss0, r, s) = linalg.lstsq(sp.hstack([covariates, sex, pcs, sp.ones((len(true_phens), 1))]), true_phens)
            (betas, rss, r, s) = linalg.lstsq(sp.hstack([raw_effects_prs, covariates, sex, pcs, sp.ones((len(true_phens), 1))]), true_phens)
            Xs = sp.hstack([pval_derived_effects_prs, covariates, sex, pcs, sp.ones((len(true_phens), 1))])
            (betas, rss_pd, r, s) = linalg.lstsq(Xs, true_phens)
            adj_pred_dict['cov_sex_pc_adj'] = sp.dot(Xs, betas)
            pred_r2 = 1 - rss / rss0
            print 'Cov+PCs+Sex adjusted prediction accuracy (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            pred_r2 = 1 - rss / rss00
            print 'Cov+PCs+Sex adjusted prediction and PCs+Sex (R^2) for the whole genome PRS with raw effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            pred_r2 = 1 - rss_pd / rss0
            print 'Cov+PCs+Sex adjusted prediction accuracy (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            res_dict['Cov_PC_Sex_adj_pred_r2'] = pred_r2
            pred_r2 = 1 - rss_pd / rss00
            print 'Cov+PCs+Sex adjusted prediction and PCs+Sex (R^2) for the whole genome PRS with weighted effects was: %0.4f (%0.6f)' % (pred_r2, (1 - pred_r2) / sp.sqrt(num_individs))
            res_dict['Cov_PC_Sex_adj_pred_r2+Cov_PC_Sex'] = pred_r2
    # print sp.corrcoef(true_phens.T,adj_pred_dict['cov_sex_pc_adj'].T)**2
    # Now calibration: regression slope of the normalized phenotype on each
    # PRS (a well-calibrated PRS has slope close to 1).
    y_norm = (true_phens - sp.mean(true_phens)) / sp.std(true_phens)
    denominator = sp.dot(raw_effects_prs.T, raw_effects_prs)
    numerator = sp.dot(raw_effects_prs.T, y_norm)
    regression_slope = (numerator / denominator)[0][0]
    print 'The slope for predictions with raw effects is:', regression_slope
    denominator = sp.dot(pval_derived_effects_prs.T, pval_derived_effects_prs)
    numerator = sp.dot(pval_derived_effects_prs.T, y_norm)
    regression_slope = (numerator / denominator)[0][0]
    print 'The slope for predictions with weighted effects is:', regression_slope
    # print sp.corrcoef(prs_dict['raw_effects_prs'], prs_dict['true_phens'])[0,1]
    # print sp.corrcoef(prs_dict['pval_derived_effects_prs'], prs_dict['true_phens'])[0,1]
    num_individs = len(prs_dict['pval_derived_effects_prs'])
    # Write PRS out to file.
    if out_file != None:
        with open(out_file, 'w') as f:
            # CSV-ish header: base columns plus optional sex / PC columns.
            out_str = 'IID, true_phens, raw_effects_prs, pval_derived_effects_prs'
            if 'sex' in prs_dict:
                out_str = out_str + ', sex'
            if 'pcs' in prs_dict:
                pcs_str = ', '.join(['PC%d' % (1 + pc_i) for pc_i in range(len(prs_dict['pcs'][0]))])
                out_str = out_str + ', ' + pcs_str
            out_str += '\n'
            f.write(out_str)
            for i in range(num_individs):
                out_str = '%s, %0.6e, %0.6e, %0.6e, ' % (prs_dict['iids'][i], prs_dict['true_phens'][i], raw_effects_prs[i],
                                                         pval_derived_effects_prs[i])
                if 'sex' in prs_dict:
                    out_str = out_str + '%d, ' % prs_dict['sex'][i]
                if 'pcs' in prs_dict:
                    pcs_str = ', '.join(map(str, prs_dict['pcs'][i]))
                    out_str = out_str + pcs_str
                out_str += '\n'
                f.write(out_str)
        # Adjusted predictions, one column per adjustment performed above.
        if len(adj_pred_dict.keys()) > 0:
            with open(out_file + '.adj', 'w') as f:
                adj_prs_labels = adj_pred_dict.keys()
                out_str = 'IID, true_phens, raw_effects_prs, pval_derived_effects_prs, ' + ', '.join(adj_prs_labels)
                out_str += '\n'
                f.write(out_str)
                for i in range(num_individs):
                    out_str = '%s, %0.6e, %0.6e, %0.6e' % (prs_dict['iids'][i], prs_dict['true_phens'][i], raw_effects_prs[i],
                                                           pval_derived_effects_prs[i])
                    for adj_prs in adj_prs_labels:
                        out_str += ', %0.4f' % adj_pred_dict[adj_prs][i]
                    out_str += '\n'
                    f.write(out_str)
        # NOTE(review): weights_dict is initialized to {} above, so this
        # condition is always true -- possibly meant to test non-emptiness.
        if weights_dict != None:
            oh5f = h5py.File(out_file + '.weights.hdf5', 'w')
            for k1 in weights_dict.keys():
                kg = oh5f.create_group(k1)
                for k2 in weights_dict[k1]:
                    kg.create_dataset(k2, data=sp.array(weights_dict[k1][k2]))
            oh5f.close()
    return res_dict
def main():
    """Parse parameters and phenotypes, then compute risk scores for each
    available weights file (LDPRED or P+T result formats)."""
    p_dict = parse_parameters()
    # NOTE(review): this binds a *local* name that shadows the module-level
    # 'non_zero_chromosomes' used by parse_pt_res/calc_risk_scores; it does
    # not reset the global set -- confirm intended behavior.
    non_zero_chromosomes = set()
    # Parse phenotypes
    if p_dict['pf'] is None:
        if p_dict['vgf'] is not None:
            # Fall back to the validation genotype .fam file for phenotypes.
            phen_map = parse_phen_file(p_dict['vgf'] + '.fam', 'FAM')
        else:
            raise Exception('Validation phenotypes were not found.')
    else:
        phen_map = parse_phen_file(p_dict['pf'], p_dict['pf_format'])
    iids = set(phen_map.keys())
    # Optional per-individual covariates, keyed by IID (first column).
    if p_dict['cov_file'] != None:
        print 'Parsing additional covariates'
        with open(p_dict['cov_file'], 'r') as f:
            num_missing = 0
            for line in f:
                l = line.split()
                iid = l[0]
                if iid in phen_map:
                    covariates = map(float, l[1:])
                    phen_map[iid]['covariates'] = covariates
                else:
                    num_missing += 1
            if num_missing > 0:
                print 'Unable to find %d iids in phen file!' % num_missing
    # Optional principal components, keyed by IID (second column).
    if p_dict['pcs_file']:
        print 'Parsing PCs'
        with open(p_dict['pcs_file'], 'r') as f:
            num_missing = 0
            for line in f:
                l = line.split()
                iid = l[1]
                if iid in phen_map:
                    pcs = map(float, l[2:])
                    phen_map[iid]['pcs'] = pcs
                else:
                    num_missing += 1
            if num_missing > 0:
                print 'Unable to find %d iids in phen file!' % num_missing
    num_individs = len(phen_map)
    assert num_individs > 0, 'No phenotypes were found!'
    res_dict = {}
    if p_dict['res_format'] == 'LDPRED':
        # LDpred-inf weights first, then one weights file per prior fraction p.
        weights_file = '%s_LDpred-inf.txt' % (p_dict['rf'])
        if os.path.isfile(weights_file):
            print ''
            print 'Calculating LDpred-inf risk scores'
            rs_id_map = parse_ldpred_res(weights_file)
            out_file = '%s_LDpred-inf.txt' % (p_dict['out'])
            calc_risk_scores(p_dict['vgf'], rs_id_map, phen_map, out_file=out_file, split_by_chrom=p_dict['split_by_chrom'],
                             adjust_for_sex=p_dict['adjust_for_sex'], adjust_for_covariates=p_dict['adjust_for_covariates'],
                             adjust_for_pcs=p_dict['adjust_for_pcs'])
        for p in p_dict['PS']:
            weights_file = '%s_LDpred_p%0.4e.txt' % (p_dict['rf'], p)
            if os.path.isfile(weights_file):
                print ''
                print 'Calculating LDpred risk scores using p=%0.3e' % p
                rs_id_map = parse_ldpred_res(weights_file)
                out_file = '%s_LDpred_p%0.4e.txt' % (p_dict['out'], p)
                method_str = 'LDpred_p%0.4e' % (p)
                res_dict[method_str] = calc_risk_scores(p_dict['vgf'], rs_id_map, phen_map, out_file=out_file,
                                                        split_by_chrom=p_dict['split_by_chrom'], adjust_for_sex=p_dict['adjust_for_sex'],
                                                        adjust_for_covariates=p_dict['adjust_for_covariates'],
                                                        adjust_for_pcs=p_dict['adjust_for_pcs'])
        # Plot results?
    elif p_dict['res_format'] == 'P+T':
        # Unthresholded ('all SNPs') weights first, then one file per
        # p-value threshold.
        weights_file = '%s_all_snps.txt' % (p_dict['rf'])
        if os.path.isfile(weights_file):
            print ''
            print 'Calculating risk scores using all SNPs'
            rs_id_map = parse_ldpred_res(weights_file)
            out_file = '%s_all_snps.txt' % (p_dict['out'])
            res_dict['all_snps'] = calc_risk_scores(p_dict['vgf'], rs_id_map, phen_map, out_file=out_file,
                                                    split_by_chrom=p_dict['split_by_chrom'], adjust_for_sex=p_dict['adjust_for_sex'],
                                                    adjust_for_covariates=p_dict['adjust_for_covariates'],
                                                    adjust_for_pcs=p_dict['adjust_for_pcs'])
        for p_thres in p_dict['TS']:
            weights_file = '%s_P+T_p%0.4e.txt' % (p_dict['rf'], p_thres)
            print weights_file
            if os.path.isfile(weights_file):
                print ''
                print 'Calculating P+T risk scores using p-value threshold of %0.3e' % p_thres
                rs_id_map = parse_pt_res(weights_file)
                out_file = '%s_P+T_p%0.4e.txt' % (p_dict['out'], p_thres)
                method_str = 'P+T_p%0.4e' % (p_thres)
                res_dict[method_str] = calc_risk_scores(p_dict['vgf'], rs_id_map, phen_map, out_file=out_file,
                                                        split_by_chrom=p_dict['split_by_chrom'], adjust_for_sex=p_dict['adjust_for_sex'],
                                                        adjust_for_covariates=p_dict['adjust_for_covariates'],
                                                        adjust_for_pcs=p_dict['adjust_for_pcs'])
        # Plot results?
    else:
        raise NotImplementedError('Results file format missing or unknown: %s' % p_dict['res_format'])
# Script entry point.
if __name__ == '__main__':
    main()
|
"""TOTTO table processsing.
Processing the data format in TOTTO into the HybridQA one
"""
import copy
import hashlib
import json
import multiprocessing
from multiprocessing import Pool
import re
import pprint
import nltk
def clean_cell_text(string):
    """Normalize a raw table-cell string.

    Removes quote characters and trailing carets, maps unicode spaces,
    dashes and curly quotes to ASCII equivalents, collapses repeated
    spaces, and finally re-tokenizes the text with NLTK.
    """
    string = string.replace('"', '')
    string = string.rstrip('^')
    # This is the literal en dash (U+2013); handling it here means the
    # '\u2013' entry in the dash loop below is effectively a no-op.
    string = string.replace('\u2013', '-')
    string = string.replace('( ', '(')
    string = string.replace(' )', ')')
    string = string.replace('\u00a0', ' ')  # non-breaking space
    string = string.replace('\n', ' ')
    string = string.rstrip('^')
    string = string.replace('\u200e', '')   # left-to-right mark
    string = string.replace('\ufeff', '')   # BOM / zero-width no-break space
    string = string.replace('\u2009', ' ')  # thin space
    # Hyphen/dash family (U+2010..U+2015) -> spaced ASCII hyphen; kept in
    # the original order (U+2013 was already consumed above).
    for dash in ('\u2010', '\u2011', '\u2012', '\u2013', '\u2014', '\u2015'):
        string = string.replace(dash, ' - ')
    # Curly single/double quotes are dropped entirely.
    for quote in ('\u2018', '\u2019', '\u201c', '\u201d'):
        string = string.replace(quote, '')
    string = re.sub(r' +', ' ', string)
    string = string.strip()
    string = nltk_tokenize(string)
    return string
def hash_string(string):
    """Return the first 16 hex characters of the SHA-256 digest of *string*."""
    digest = hashlib.sha256(string.encode()).hexdigest()
    return digest[:16]
def nltk_tokenize(string):
    """Tokenize *string* with NLTK and re-join the tokens with single spaces."""
    return ' '.join(nltk.word_tokenize(string))
def transform(original_table, debug=False):
    """Flatten a TOTTO table into a (headers, rows, mapping) triple.

    Each input row is a list of cell dicts with keys 'value', 'is_header',
    'row_span' and 'column_span'.  Spanning cells are expanded by
    duplicating their value; 'reserved' tracks cells that still span into
    later rows as {column: [value, remaining_row_count]}.

    Returns:
        headers: list of column-header strings (multi-row headers are
            joined with ' - ').
        rows: list of data rows, each a list of cleaned cell strings.
        mapping: dict (original_row_idx, cell_idx) -> (row_idx, col_idx)
            into 'rows', used to locate highlighted cells.

    Raises:
        NotImplementedError for layouts the logic cannot normalize
        (no header row, ragged rows, etc.).
    """
    # Skip leading rows until the first multi-cell row whose first cell is
    # a header -- that row starts the header section.
    start_index = 0
    while start_index < len(original_table):
        if len(original_table[start_index]) <= 1 or not original_table[start_index][0]['is_header']:
            start_index += 1
        else:
            break
    if start_index >= len(original_table):
        raise NotImplementedError()
    if debug:
        print('starting from {}th row'.format(start_index))
    reserved = {}
    headers = []
    # Sentinel upper bound; the first header row shrinks it to its real width.
    total_columns = 1000
    while start_index < len(original_table):
        row = copy.copy(original_table[start_index])
        tmp = []
        # Stop once we have headers and no cell still spans downward.
        if headers and (not reserved):
            break
        for j in range(0, total_columns):
            if j < len(tmp):
                # Column j was already filled by a column_span expansion.
                continue
            if j in reserved:
                # Continue a cell spanning down from a previous row.
                tmp.append(reserved[j][0])
                reserved[j][1] -= 1
                if reserved[j][1] == 0:
                    del reserved[j]
            else:
                if (not row) and (not headers):
                    # The first row needs to determine the maximum column number
                    total_columns = len(tmp)
                    break
                else:
                    if not row:
                        raise NotImplementedError()
                    cell = row.pop(0)
                    value = clean_cell_text(cell['value'])
                    if cell['is_header']:
                        tmp.extend([value] * cell['column_span'])
                        # Remember downward spans (implausibly large spans
                        # are ignored).
                        if cell['row_span'] > 1 and cell['row_span'] < 10:
                            reserved[j] = [value, cell['row_span'] - 1]
                            if cell['column_span'] > 1:
                                for k in range(1, cell['column_span']):
                                    reserved[j + k] = [value, cell['row_span'] - 1]
        if not headers:
            headers.extend(tmp)
        else:
            # Additional header rows are merged into the existing headers.
            if len(headers) == len(tmp):
                for i in range(len(headers)):
                    headers[i] += ' - ' + tmp[i]
        start_index += 1
    if debug:
        print('Finished with headers: {}'.format(headers))
    if start_index >= len(original_table):
        raise NotImplementedError()
    # Second pass: expand the data rows with the same span bookkeeping,
    # recording where each original cell lands in the flattened table.
    total_columns = len(headers)
    rows = []
    reserved = {}
    mapping = {}
    for i in range(start_index, len(original_table)):
        row = copy.copy(original_table[i])
        tmp = []
        counter = 0
        for j in range(total_columns):
            if j < len(tmp):
                continue
            if j in reserved:
                tmp.append(reserved[j][0])
                reserved[j][1] -= 1
                if reserved[j][1] == 0:
                    del reserved[j]
            else:
                if not row:
                    raise NotImplementedError()
                mapping[(i, counter)] = (len(rows), j)
                cell = row.pop(0)
                counter += 1
                value = clean_cell_text(cell['value'])
                tmp.extend([value] * cell['column_span'])
                if cell['row_span'] > 1 and cell['row_span'] < 10:
                    reserved[j] = [value, cell['row_span'] - 1]
                    if cell['column_span'] > 1:
                        for k in range(1, cell['column_span']):
                            reserved[j + k] = [value, cell['row_span'] - 1]
        if row:
            # Leftover cells mean the row was wider than the headers.
            raise NotImplementedError()
        rows.append(tmp)
    # All rows must be rectangular and match the header width.
    least_row = min([len(_) for _ in rows])
    max_row = max([len(_) for _ in rows])
    if least_row != max_row or least_row != total_columns:
        raise NotImplementedError()
    return headers, rows, mapping
def get_table_sent(entry):
    """Convert one TOTTO entry into a (table, questions) pair.

    The table is (table_id, metadata-dict) in the HybridQA layout; the
    questions are the entry's annotated sentences, each linked to the
    highlighted cells that survived the transform() mapping.

    Returns (None, None) when transform() raises NotImplementedError
    (i.e. the table layout could not be normalized).
    """
    try:
        header, data, mapping = transform(entry['table'])
        # Table id: page title (spaces -> underscores) + example id.
        table_idx = entry['table_page_title'].replace(' ', '_') + '_{}'.format(
            entry['example_id'])
        table = (table_idx, {
            'header': header,
            'data': data,
            'url': entry['table_webpage_url'],
            'title': entry['table_page_title'],
            'section_title': entry['table_section_title'],
            'section_text': entry['table_section_text'],
            'intro': ''
        })
        # Resolve highlighted cells through the (row, cell) -> (row, col)
        # mapping; cells lost during normalization are silently dropped.
        positive_cell = []
        for cell in entry['highlighted_cells']:
            if tuple(cell) in mapping:
                index = mapping[tuple(cell)]
                positive_cell.append([data[index[0]][index[1]], index, '', 'table'])
        questions = []
        if positive_cell:
            for example in entry['sentence_annotations']:
                sentence = clean_cell_text(example['final_sentence'])
                hash_code = hash_string(sentence)
                questions.append({
                    'table_id': table_idx,
                    'question': sentence,
                    'answer-text': 'none',
                    'answer-node': positive_cell,
                    'version': 'TOTTO',
                    'question_id': hash_code,
                    'where': 'table'
                })
        return table, questions
    except NotImplementedError:
        # Unsupported table layout; caller counts this as a failed conversion.
        return None, None
if __name__ == '__main__':
    # Load the dev and train TOTTO splits (one JSON object per line).
    filepath = 'totto_data/totto_dev_data.jsonl'
    pair = []
    with open(filepath, 'r') as f:
        for line in f:
            pair.append(json.loads(line))
    filepath = 'totto_data/totto_train_data.jsonl'
    with open(filepath, 'r') as f:
        for line in f:
            pair.append(json.loads(line))
    print('Finish loading local data')
    debug = False
    if not debug:
        output_dict = {}
        sentences = []
        # Convert all entries in parallel, one worker per CPU core.
        cpu_cores = multiprocessing.cpu_count()
        print('using {} cores'.format(cpu_cores))
        pool = Pool(cpu_cores)
        results = pool.map(get_table_sent, pair)
        print('Finish the running')
        processed_ids = set()
        error = 0
        for r in results:
            if r[0] and r[1]:
                # r is (table, questions); dedupe questions by question_id.
                output_dict[r[0][0]] = r[0][1]
                for question in r[1]:
                    if question['question_id'] not in processed_ids:
                        processed_ids.add(question['question_id'])
                        sentences.append(question)
                    else:
                        print("find duplicate")
            else:
                # Conversion failed (unsupported layout) or produced no
                # linked questions.
                error += 1
        print('failing conversion rate = {}'.format(error / len(pair)))
        print('successful tables = {} and questions = {}'.format(len(output_dict), len(sentences)))
        with open('aws-files/totto_tables.json', 'w') as f:
            json.dump(output_dict, f, indent=2)
        with open('aws-files/totto_train.json', 'w') as f:
            json.dump(sentences, f, indent=2)
    else:
        # Debug path: inspect a single known table instead of converting all.
        for entry in pair:
            if 'Steve Barnes' in entry['table_page_title']:
                table, sents = get_table_sent(entry)
                pprint.pprint(entry)
                pprint.pprint(table)
import platform
import schedule
from time import sleep
from queue import Queue
from sys import version
from threading import Thread
from os import access, R_OK, getenv
from distro import linux_distribution
from os.path import isdir, abspath, dirname, join
from argparse import ArgumentParser, RawTextHelpFormatter
from logging import getLogger, StreamHandler, Formatter, DEBUG
from varken import structures # Needed to check version of python
from varken.ombi import OmbiAPI
from varken.unifi import UniFiAPI
from varken import VERSION, BRANCH
from varken.sonarr import SonarrAPI
from varken.radarr import RadarrAPI
from varken.iniparser import INIParser
from varken.dbmanager import DBManager
from varken.helpers import GeoIPHandler
from varken.tautulli import TautulliAPI
from varken.sickchill import SickChillAPI
from varken.varkenlogger import VarkenLogger
PLATFORM_LINUX_DISTRO = ' '.join(x for x in linux_distribution() if x)
def thread():
    """Worker loop: execute queued jobs while any scheduled jobs remain."""
    while schedule.jobs:
        task = QUEUE.get()
        result = task()
        print(result)
        if result is not None:
            # A non-None return value appears to name a schedule tag to
            # cancel -- presumably for failing jobs; confirm against the
            # API classes.
            schedule.clear(result)
        QUEUE.task_done()
if __name__ == "__main__":
parser = ArgumentParser(prog='varken',
description='Command-line utility to aggregate data from the plex ecosystem into InfluxDB',
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d", "--data-folder", help='Define an alternate data folder location')
parser.add_argument("-D", "--debug", action='store_true', help='Use to enable DEBUG logging')
opts = parser.parse_args()
DATA_FOLDER = abspath(join(dirname(__file__), 'data'))
templogger = getLogger('temp')
templogger.setLevel(DEBUG)
tempch = StreamHandler()
tempformatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
tempch.setFormatter(tempformatter)
templogger.addHandler(tempch)
if opts.data_folder:
ARG_FOLDER = opts.data_folder
if isdir(ARG_FOLDER):
DATA_FOLDER = ARG_FOLDER
if not access(DATA_FOLDER, R_OK):
templogger.error("Read permission error for %s", DATA_FOLDER)
exit(1)
else:
templogger.error("%s does not exist", ARG_FOLDER)
exit(1)
# Set Debug to True if DEBUG env is set
enable_opts = ['True', 'true', 'yes']
debug_opts = ['debug', 'Debug', 'DEBUG']
if not opts.debug:
opts.debug = True if any([getenv(string, False) for true in enable_opts
for string in debug_opts if getenv(string, False) == true]) else False
# Initiate the logger
vl = VarkenLogger(data_folder=DATA_FOLDER, debug=opts.debug)
vl.logger.info('Starting Varken...')
vl.logger.info('Data folder is "%s"', DATA_FOLDER)
vl.logger.info(u"%s %s (%s%s)", platform.system(), platform.release(), platform.version(),
' - ' + PLATFORM_LINUX_DISTRO if PLATFORM_LINUX_DISTRO else '')
vl.logger.info(u"Python %s", version)
vl.logger.info("Varken v%s-%s", VERSION, BRANCH)
CONFIG = INIParser(DATA_FOLDER)
DBMANAGER = DBManager(CONFIG.influx_server)
QUEUE = Queue()
if CONFIG.sonarr_enabled:
for server in CONFIG.sonarr_servers:
SONARR = SonarrAPI(server, DBMANAGER)
if server.queue:
at_time = schedule.every(server.queue_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_queue).tag(f"sonarr-{server.id}-get_queue")
if server.missing_days > 0:
at_time = schedule.every(server.missing_days_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_missing).tag(f"sonarr-{server.id}-get_missing")
if server.future_days > 0:
at_time = schedule.every(server.future_days_run_seconds).seconds
at_time.do(QUEUE.put, SONARR.get_future).tag(f"sonarr-{server.id}-get_future")
if CONFIG.tautulli_enabled:
GEOIPHANDLER = GeoIPHandler(DATA_FOLDER)
schedule.every(12).to(24).hours.do(QUEUE.put, GEOIPHANDLER.update)
for server in CONFIG.tautulli_servers:
TAUTULLI = TautulliAPI(server, DBMANAGER, GEOIPHANDLER)
if server.get_activity:
at_time = schedule.every(server.get_activity_run_seconds).seconds
at_time.do(QUEUE.put, TAUTULLI.get_activity).tag(f"tautulli-{server.id}-get_activity")
if server.get_stats:
at_time = schedule.every(server.get_stats_run_seconds).seconds
at_time.do(QUEUE.put, TAUTULLI.get_stats).tag(f"tautulli-{server.id}-get_stats")
if CONFIG.radarr_enabled:
for server in CONFIG.radarr_servers:
RADARR = RadarrAPI(server, DBMANAGER)
if server.get_missing:
at_time = schedule.every(server.get_missing_run_seconds).seconds
at_time.do(QUEUE.put, RADARR.get_missing).tag(f"radarr-{server.id}-get_missing")
if server.queue:
at_time = schedule.every(server.queue_run_seconds).seconds
at_time.do(QUEUE.put, RADARR.get_queue).tag(f"radarr-{server.id}-get_queue")
if CONFIG.ombi_enabled:
for server in CONFIG.ombi_servers:
OMBI = OmbiAPI(server, DBMANAGER)
if server.request_type_counts:
at_time = schedule.every(server.request_type_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_request_counts).tag(f"ombi-{server.id}-get_request_counts")
if server.request_total_counts:
at_time = schedule.every(server.request_total_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_all_requests).tag(f"ombi-{server.id}-get_all_requests")
if server.issue_status_counts:
at_time = schedule.every(server.issue_status_run_seconds).seconds
at_time.do(QUEUE.put, OMBI.get_issue_counts).tag(f"ombi-{server.id}-get_issue_counts")
if CONFIG.sickchill_enabled:
for server in CONFIG.sickchill_servers:
SICKCHILL = SickChillAPI(server, DBMANAGER)
if server.get_missing:
at_time = schedule.every(server.get_missing_run_seconds).seconds
at_time.do(QUEUE.put, SICKCHILL.get_missing).tag(f"sickchill-{server.id}-get_missing")
if CONFIG.unifi_enabled:
for server in CONFIG.unifi_servers:
UNIFI = UniFiAPI(server, DBMANAGER)
at_time = schedule.every(server.get_usg_stats_run_seconds).seconds
at_time.do(QUEUE.put, UNIFI.get_usg_stats).tag(f"unifi-{server.id}-get_usg_stats")
# Run all on startup
SERVICES_ENABLED = [CONFIG.ombi_enabled, CONFIG.radarr_enabled, CONFIG.tautulli_enabled, CONFIG.unifi_enabled,
CONFIG.sonarr_enabled, CONFIG.sickchill_enabled]
if not [enabled for enabled in SERVICES_ENABLED if enabled]:
vl.logger.error("All services disabled. Exiting")
exit(1)
WORKER = Thread(target=thread)
WORKER.start()
schedule.run_all()
while schedule.jobs:
schedule.run_pending()
sleep(1)
|
from TestHelperSuperClass import testHelperSuperClass
from githubAPICalls import githubAPICallsClass
class mockAppParamsClass():
    """Minimal stand-in for the application params object used in tests."""
    # GitHub API URL of the sample EBO repository.
    APIAPP_GITHUBREPOLOCATION = 'https://api.github.com/repos/rmetcalf9/dockPondSampleEBOs'
class mockAppObjClass():
    """Minimal stand-in for the application object; exposes only appParams."""
    appParams = mockAppParamsClass()
class test_appCassandraDatastoreClass(testHelperSuperClass):
    """Tests for githubAPICallsClass (currently disabled to spare the
    GitHub API rate limit)."""
    def testListEBOs(self):
        # Disabled to avoid exhausting the GitHub API call limit; the
        # original assertions are kept below for reference.
        pass #removed due to API call limit
        #api = githubAPICallsClass(mockAppObjClass)
        #EBOList = api.getEBOList()
        #if not 'Animals' in EBOList:
        #  self.assertFalse(True, msg='Could not find EBO Animals in github repo')
|
from __future__ import absolute_import
import math
import chainer
import numpy as np
from ..links import CLink
from ..links import BinaryLinear
from ..links import BatchNormalization
from ..links import BST
from ..utils import binary_util as bu
class LinearBNBST(chainer.Chain, CLink):
    """Binary fully-connected block: BinaryLinear -> BatchNormalization -> BST
    (binary straight-through activation), with C source generation support for
    deployment on embedded targets.
    """
    def __init__(self, out_channels):
        super(LinearBNBST, self).__init__()
        # cname is the prefix used for generated C identifiers
        self.cname = "l_b_linear_bn_bst"
        with self.init_scope():
            self.bl = BinaryLinear(out_channels)
            self.bn = BatchNormalization(out_channels)
            self.bst = BST()
    def __call__(self, h):
        # forward pass: binary linear, then batch-norm, then binary activation
        return self.bst(self.bn(self.bl(h)))
    def generate_c(self, link_idx, inp_shape):
        """Emit C source for this block: packed weight/BN arrays plus a
        wrapper function calling blinear_layer.

        link_idx: index appended to cname to build unique C identifiers.
        inp_shape: unused here; kept for the common CLink interface.
        """
        name = self.cname + str(link_idx)
        text = []
        # BinaryLinear bl
        l = self.bl
        lName = l.name
        lname = name + '_' + lName
        for p in l.params():
            pname = p.name
            if pname == 'W':
                # weights are binarized and emitted column-major, packed 8/byte
                text += [bu.np_to_uint8C(bu.binarize_real(p.data.T),
                                         lname + '_' + pname, 'col_major', pad='1')]
                num_classes = p.data.shape[0]
                fc_size = p.data.shape[1]
            elif pname == 'b':
                text += [bu.np_to_floatC(p.data, lname +
                                         '_' + pname, 'row_major')]
        # BatchNormalization bn
        l = self.bn
        lName = l.name
        lname = name + '_' + lName
        for p in l.params():
            pname = p.name
            if pname == 'gamma':
                text += [bu.np_to_floatC(p.data, lname +
                                         '_' + pname, 'row_major')]
            elif pname == 'beta':
                text += [bu.np_to_floatC(p.data, lname +
                                         '_' + pname, 'row_major')]
        # persistent (non-trainable) BN statistics
        for p in l._persistent:
            pname = p
            persistent = l.__dict__[p]
            if pname == 'avg_mean':
                text += [bu.np_to_floatC(persistent,
                                         lname + '_mean', 'row_major')]
            elif pname == 'avg_var':
                # emit std = sqrt(running variance); the C kernel divides by std
                text += [bu.np_to_floatC(np.sqrt(persistent,
                                                 dtype=persistent.dtype), lname + '_std', 'row_major')]
        text = "\n".join(text) + '\n'
        # NOTE(review): num_classes / fc_size are only bound if a 'W' param was
        # seen above — assumed always present for BinaryLinear.
        m = 1
        n = fc_size
        k = num_classes
        ftext = "void {name}(float* input, uint8_t* output){{\n"
        ftext += " blinear_layer(input, {name}_bl_W, output, {name}_bl_b, {name}_bn_gamma, {name}_bn_beta, {name}_bn_mean, {name}_bn_std, {m}, {n}, {k}); \n}}\n\n"
        ftext = ftext.format(name=name, m=m, n=n, k=k)
        text += ftext
        return text
    def param_mem(self):
        """Return the approximate parameter memory of this block: packed
        binary W plus float bias/BN vectors (5 float vectors of length m)."""
        mem = 0.
        for p in self.bl.params():
            if p.name == 'W':
                m, w = p.data.shape
                mem += m * math.ceil(w / 8.)
        #Bias + BN
        mem += 5 * m * 32
        return mem
    def temp_mem(self, inp_shape):
        """Return temporary (activation) memory for an input of inp_shape:
        one packed binary row per batch element."""
        m = inp_shape[0]
        w = np.prod(inp_shape[1:])
        res_w = math.ceil(w / 8.)
        return m * res_w
|
import torch
import torch.nn as nn
import numpy as np
import random
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix
from dataloader import DatasetLoader
from model_arch import Model
from config import *
def train():
    '''
    Wrapper method for training the model.

    Reads all hyper-parameters (dataset paths, batch_size, learning_rate,
    seed_value, epoch_num, model_last_state, save locations) from the
    ``config`` module (imported via ``from config import *``).  Trains with
    BCE loss + SGD, checkpoints the model every epoch, and appends the
    training loss and evaluation metrics (accuracy, APCER, BPCER, ACER) to
    log files.
    '''
    # Dataset Loaders
    train_loader = DatasetLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    print("### Training Dataset loaded from ", train_dataset)
    test_loader = DatasetLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)
    print("### Testing Dataset loaded from ", test_dataset)
    # Model Initialization (optionally resuming from a saved state)
    model = Model()
    print("### Model Initialized")
    if model_last_state_epoch != 0:
        assert model_last_state != '', "Model last state must be given"
        model.load_state_dict(torch.load(model_last_state))
        print("### Model Loaded from epoch ", model_last_state_epoch)
    model.cuda()
    # Loss Function Initialization
    loss_func = nn.BCELoss()
    print("### Loss Function Initialized")
    # Optimizer Initialization
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    print("### Optimizer initialized")
    # Seed Selection for reproducibility
    torch.manual_seed(seed_value)
    np.random.seed(seed_value)
    random.seed(seed_value)
    torch.cuda.manual_seed_all(seed_value)
    print("### Seed selection done")
    # Training Initialization
    print("### Starting Training ...")
    for epoch in tqdm(range(model_last_state_epoch, epoch_num)):
        model.train()
        for i, (image, target) in enumerate(tqdm(train_loader.load_dataset())):
            image = image.cuda()
            output = model(image)
            optimizer.zero_grad()
            loss = loss_func(output.squeeze(), target.float().cuda())
            loss.backward()
            optimizer.step()
            if i % 300 == 0:
                print("### Training Loss: ", loss.item())
                # 'with' closes the file automatically (the old explicit
                # f.close() inside the with-block was redundant)
                with open(os.path.join(logs_save_loc, 'train_loss.txt'), 'a') as f:
                    f.write('Training Loss at Epoch {} Iteration {}: {} \n'.format(epoch, i, loss.item()))
        torch.save(model.state_dict(), os.path.join(model_save_loc, 'model_{}.pth'.format(epoch)))
        print("############################ Evaluation #############################")
        model.eval()
        actual_target = []
        predicted_target = []
        with open(os.path.join(logs_save_loc, 'eval_accuracy.txt'), 'a') as f:
            # no gradients are needed during evaluation
            with torch.no_grad():
                for i, (image, target) in enumerate(tqdm(test_loader.load_dataset())):
                    image = image.cuda()
                    output = model(image)
                    # binarize predictions at the 0.15 operating threshold
                    output = torch.where(output < 0.15, torch.zeros_like(output), torch.ones_like(output))
                    actual_target.extend(target.float().cpu().tolist())
                    predicted_target.extend(output.squeeze().cpu().tolist())
            acc = accuracy_score(actual_target, predicted_target, normalize=True)
            tn, fp, fn, tp = confusion_matrix(actual_target, predicted_target, labels=[0, 1]).ravel()
            # NOTE(review): these divide by zero if the eval set lacks one of
            # the two classes — confirm the test split always contains both.
            apcer = fp/(tn + fp)
            bpcer = fn/(fn + tp)
            acer = (apcer + bpcer)/2
            print("Accuracy: %.4f, TN: %i, FP: %i, FN: %i, TP: %i, APCER: %.4f, BPCER: %.4f, ACER: %.4f" % (acc, tn, fp, fn, tp, apcer, bpcer, acer))
            f.write("Epoch : %i \n Accuracy: %.4f, TN: %i, FP: %i, FN: %i, TP: %i, APCER: %.4f, BPCER: %.4f, ACER: %.4f \n" % (epoch, acc, tn, fp, fn, tp, apcer, bpcer, acer))
|
""" API endpoint to return all the information necessary to construct the
script chart.
"""
# TODO:
# These changes all need to be made in coordination with frontend changes
# * remove empty lists from response
# * change top-level key from 'mss' to 'examples'
# * remove unnecessary fields from response
# all that's required, I think, is:
# - height, width, top, left
#    - maybe that's it (because ms_id / ms_slug is already available)?
# - consider pre-composing the url(s), or are these best composed
# on the frontend?
# pylint: disable=import-error
# drf
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
# dash
from scripts.models import Coordinates
@api_view(http_method_names=['GET'])
def get_letters(request):
    """
    Return example letter coordinates for the script chart.

    Query Params:
        ms_ids (required): pipe ("|") delimited list of manuscript IDs
        letter_ids (required): pipe ("|") delimited list of letter IDs
        count (optional): number of examples to return (defaults to 3)
    Returns:
        {
            "mss" {
                "<ms_id>": {
                    "<letter_id>": [
                        {
                            "id": coords.id,
                            "binaryurl": coords.binary_url,
                            "height": coords.height,
                            "width": coords.width,
                            "top": coords.top,
                            "left": coords.left,
                            "letter": coords.letter_id,
                            "page": coords.page.number,
                            "pageurl": coords.page.url,
                            "pageheight": coords.page.height,
                            "pagewidth": coords.page.width,
                        },
                        ...
                    ],
                    ...
                },
                ...
            }
        }
    """
    # validate required query params up front
    if 'ms_ids' not in request.query_params:
        return Response(
            {'error': 'No Manuscripts Specified!'},
            status=status.HTTP_400_BAD_REQUEST)
    if 'letter_ids' not in request.query_params:
        return Response(
            {'error': 'No Letters Specified!'},
            status=status.HTTP_400_BAD_REQUEST)
    # NOTE(review): a non-numeric 'count' raises ValueError (HTTP 500) here —
    # consider validating and returning 400 instead.
    count = int(request.query_params.get('count', 3))
    ms_ids = request.query_params['ms_ids'].split('|')
    letter_ids = request.query_params['letter_ids'].split('|')
    # 'priority' ranks examples; keep only the top `count` per letter
    examples = Coordinates.objects.select_related('page').filter(
        manuscript_id__in=ms_ids, letter_id__in=letter_ids,
        priority__lte=count)
    # the frontend depends on empty lists for missing values, so...
    # examples_dict = defaultdict(lambda: defaultdict(list))
    # keys are cast to int so they match the integer *_id attributes used
    # for lookup in the loop below
    examples_dict = dict(
        (int(ms_id), dict((int(letter_id), []) for letter_id in letter_ids))
        for ms_id in ms_ids
    )
    for example in examples:
        examples_dict[example.manuscript_id][example.letter_id].append({
            "id": example.id,
            "binaryurl": example.binary_url,
            "height": example.height,
            "width": example.width,
            "top": example.top,
            "left": example.left,
            "letter": example.letter_id,
            "page": example.page.number,
            "pageurl": example.page.url,
            "pageheight": example.page.height,
            "pagewidth": example.page.width,
        })
    return Response({'mss': examples_dict})
|
# NOTE(review): Python 2 script — it mixes paren-less `print` statements with
# print() calls and uses raw_input(); it will not run under Python 3 as-is.
import webbrowser
# Graphics
HelpBar = '--------------------------------'
# Graphics
# WhiteList/Commands
# the only URLs this toy browser will open
WhiteList = [
    "https://google.com",
    "https://irssi.org/",
]
# built-in commands the prompt understands
Commands = [
    "pyb.help",
    "pyb.whitelist",
    "pyb.info",
]
# WhiteList/Commands
# Main
print('\x1b[1;35;49m')
print" _____ _ _ _ _"
print"| _ |_ _| |_ ___ ___ _ _ _ ___| | | |"
print"| __| | | . | _| . | | | |_ -| | | |"
print"|__| |_ |___|_| |___|_____|___|_____|"
print" |___| Written by Rory W."
print('\x1b[0m')
print('Please enter a full url, example: ' + '\033[0;32;49m' + 'https' + '\x1b[0m' + '://' + '\x1b[0;36;49m' + 'google' + '\x1b[0m' + '.' + '\x1b[0;35;49m' + 'com' + '\x1b[0m')
print('You can also enter a ' + '\x1b[3;37;43m' + 'COMMAND' + '\x1b[0m' + ',' + ' example: ' + '\x1b[0;35;49m' + 'pyb' + '.' + '\x1b[0;32;49m' + 'help' + '\x1b[0m')
# REPL: read a URL or command until interrupted
while True:
    x = raw_input('Enter ' + '\x1b[6;30;42m' + 'URL' + '\x1b[0m' + '/' + '\x1b[3;37;43m' + 'COMMAND' + '\x1b[0m' + ': ')
    #whitelist
    if x == WhiteList[0]:
        print('You are at ' + '\x1b[0;30;42m' + x + '\x1b[0m')
        webbrowser.open(x, new=2)  # new=2: open in a new browser tab
    elif x == WhiteList[1]:
        print('You are at ' + '\x1b[0;30;42m' + x + '\x1b[0m')
        webbrowser.open(x, new=2)
    # Commands
    elif x == Commands[0]:
        print(HelpBar)
        print('*' + 'HELP/COMMANDS MENU' + '*' + '\n\npyb.<COMMAND>\n')
        print('\t.whitelist\n\t.info')
    elif x == Commands[1]:
        print(WhiteList[0:2])
    elif x == Commands[2]:
        print('\x1b[1;35;49m' + 'Pybrows' + '\x1b[0m' + '2018')
    else:
        # anything not whitelisted or a known command is rejected
        print('This url was ether entered incorrectely or was blocked')
        print('Your input: ' + '\x1b[0;37;41m' + x + '\x1b[0m')
# Main
# _____ _
#| _ |_ _| |_ ___ ___ _ _ _ ___
#| __| | | . | _| . | | | |_ -|
#|__| |_ |___|_| |___|_____|___|
# |___|
|
#!/usr/bin/env python3
import sys
import argparse
import os
import platform
import shutil
import hashlib
import json
from datetime import datetime
from logging import Logger, basicConfig, getLogger, INFO
from os import getenv
from pathlib import Path
from subprocess import (DEVNULL, PIPE, STDOUT, CalledProcessError,
TimeoutExpired, call, check_call, check_output, run)
from tempfile import TemporaryDirectory
from typing import Any, Iterator, List, MutableMapping, Union, Optional
from enum import Enum
import toml
from problem import Problem, find_problem_dir
logger: Logger = getLogger(__name__)
def main(args: List[str]):
    """Entry point for the testcase generator.

    Sets up (color) logging, parses CLI options, collects Problem instances
    from toml paths and/or problem names, then generates each problem in the
    selected mode.
    """
    try:
        import colorlog
    except ImportError:
        basicConfig(
            format="%(asctime)s [%(levelname)s] %(message)s",
            datefmt="%H:%M:%S",
            level=getenv('LOG_LEVEL', 'INFO'),
        )
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('Please install colorlog: pip3 install colorlog')
    else:
        handler = colorlog.StreamHandler()
        formatter = colorlog.ColoredFormatter(
            "%(log_color)s%(asctime)s [%(levelname)s] %(message)s",
            datefmt="%H:%M:%S",
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'white',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'red,bg_white',
            })
        handler.setFormatter(formatter)
        basicConfig(
            level=getenv('LOG_LEVEL', 'INFO'),
            handlers=[handler]
        )
    parser = argparse.ArgumentParser(description='Testcase Generator')
    parser.add_argument('toml', nargs='*', help='Toml File')
    parser.add_argument('-p', '--problem', nargs='*',
                        help='Generate problem', default=[])
    parser.add_argument('--dev', action='store_true', help='Developer Mode')
    parser.add_argument('--test', action='store_true', help='CI Mode')
    parser.add_argument('--htmldir', help='Generate HTML', default=None)
    parser.add_argument('--clean', action='store_true', help='Clean in/out')
    parser.add_argument('--compile-checker',
                        action='store_true', help='Deprecated: Compile Checker')
    parser.add_argument('--only-html', action='store_true', help='HTML generator Mode')
    opts = parser.parse_args(args)
    # mode flags are mutually exclusive (bools sum as 0/1)
    if opts.dev + opts.test + opts.clean + opts.only_html >= 2:
        raise ValueError('at most one of --dev, --test, --clean, --only-html can be used')
    if opts.compile_checker:
        logger.warning(
            '--compile-checker is deprecated. Checker is compiled in default')
    rootdir: Path = Path(__file__).parent
    problems: List[Problem] = list()
    for tomlpath in opts.toml:
        # Fix: parse THIS file — the original called toml.load(opts.toml),
        # re-reading the whole list on every iteration. The result is unused;
        # the load only validates the toml is well-formed before we proceed.
        toml.load(tomlpath)
        problems.append(Problem(rootdir, Path(tomlpath).parent))
    for problem_name in opts.problem:
        problem_dir = find_problem_dir(rootdir, problem_name)
        if problem_dir is None:
            raise ValueError('Cannot find problem: {}'.format(problem_name))
        problems.append(Problem(rootdir, problem_dir))
    if len(problems) == 0:
        logger.warning('No problems')
    if opts.htmldir:
        logger.info('Make htmldir')
        Path(opts.htmldir).mkdir(exist_ok=True, parents=True)
    # suppress the annoying dialog appears when an application crashes on Windows
    if platform.uname().system == 'Windows':
        import ctypes
        SEM_NOGPFAULTERRORBOX = 2  # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
        ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
    # map CLI flags to the generation mode
    mode = Problem.Mode.DEFAULT
    if opts.dev:
        mode = Problem.Mode.DEV
    if opts.test:
        mode = Problem.Mode.TEST
    if opts.clean:
        mode = Problem.Mode.CLEAN
    if opts.only_html:
        mode = Problem.Mode.HTML
    for problem in problems:
        problem.generate(mode, Path(opts.htmldir) if opts.htmldir else None)
if __name__ == '__main__':
    main(sys.argv[1:])
|
# Copyright 2019 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to allow conversion between Cirq and t|ket> data types
"""
from typing import List, Generator, Iterator, Dict, Union
import cirq
from cirq.google import XmonDevice
from cirq.devices import UnconstrainedDevice
from cirq import Qid, LineQubit, GridQubit
from cirq.ops import NamedQubit
from pytket._circuit import Circuit, Op, OpType, Qubit
from pytket._routing import SquareGrid, Architecture, PhysicalCircuit
from .qubits import _indexed_qubits_from_circuit
from sympy import pi, Expr
# short aliases for the cirq gate namespaces used throughout this module
cirq_common = cirq.ops.common_gates
cirq_pauli = cirq.ops.pauli_gates
# map cirq common gates to pytket gates
_cirq2ops_mapping = {
    cirq_common.CNOT : OpType.CX,
    cirq_common.H : OpType.H,
    cirq_common.MeasurementGate : OpType.Measure,
    cirq_common.XPowGate: OpType.Rx,
    cirq_common.YPowGate: OpType.Ry,
    cirq_common.ZPowGate: OpType.Rz,
    cirq_common.S: OpType.S,
    cirq_common.SWAP: OpType.SWAP,
    cirq_common.T : OpType.T,
    # cirq_pauli._PauliX : OpType.X,
    # cirq_pauli._PauliY : OpType.Y,
    # cirq_pauli._PauliZ : OpType.Z,
    cirq_pauli.X : OpType.X,
    cirq_pauli.Y : OpType.Y,
    cirq_pauli.Z : OpType.Z,
    cirq_common.CZPowGate : OpType.CRz,
    cirq_common.CZ : OpType.CZ,
    cirq_common.ISwapPowGate : OpType.ISWAP,
    cirq.ops.parity_gates.ZZPowGate : OpType.ZZPhase,
    cirq.ops.parity_gates.XXPowGate : OpType.XXPhase,
    cirq.ops.parity_gates.YYPowGate : OpType.YYPhase,
    cirq.ops.PhasedXPowGate : OpType.PhasedX
}
# reverse mapping for convenience
_ops2cirq_mapping = dict((reversed(item) for item in _cirq2ops_mapping.items()))
# _ops2cirq_mapping[OpType.X] = cirq_pauli.X
# _ops2cirq_mapping[OpType.Y] = cirq_pauli.Y
# _ops2cirq_mapping[OpType.Z] = cirq_pauli.Z
# spot special rotation gates
# gates that take no parameters and convert 1:1
_constant_gates = (cirq_common.CNOT,
                   cirq_common.H,
                   cirq_common.S,
                   cirq_common.SWAP,
                   cirq_common.T,
                   cirq_pauli.X,
                   cirq_pauli.Y,
                   cirq_pauli.Z,
                   cirq_common.CZ)
# power gates whose exponent carries the rotation parameter
_rotation_types = (cirq_common.XPowGate, cirq_common.YPowGate, cirq_common.ZPowGate, cirq_common.CZPowGate, cirq_common.ISwapPowGate, cirq.ops.parity_gates.ZZPowGate,cirq.ops.parity_gates.XXPowGate,cirq.ops.parity_gates.YYPowGate)
def get_grid_qubits(arc: SquareGrid, nodes: Iterator[int]) -> List[cirq.GridQubit]:
    """Build the list of :py:class:`GridQubit` s for the given node indices on
    the given Architecture.

    :param arc: The grid Architecture
    :param nodes: An iterator of node index values
    :return: The list of qubits
    """
    qubits = []
    for node_index in nodes:
        square_index = arc.qind_to_squind(node_index)
        qubits.append(cirq.GridQubit(*square_index))
    return qubits
def cirq_to_tk(circuit: cirq.Circuit) -> Circuit:
    """Converts a Cirq :py:class:`Circuit` to a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` object.

    :param circuit: The input Cirq :py:class:`Circuit`
    :raises NotImplementedError: If the input contains a Cirq :py:class:`Circuit` operation which is not yet supported by pytket
    :return: The :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` corresponding to the input circuit
    """
    qubit_list = _indexed_qubits_from_circuit(circuit)
    n_qubits = len(circuit.all_qubits())
    tkcirc = Circuit(n_qubits)
    qreg = tkcirc.q_regs["q"]
    # map each cirq qubit onto a tket qubit in register "q"
    qmap = {q : qreg[i] for i, q in enumerate(qubit_list)}
    for moment in circuit:
        for op in moment.operations:
            gate = op.gate
            gatetype = type(gate)
            qb_lst = [qmap[q] for q in op.qubits]
            n_qubits = len(op.qubits)
            # normalise exponent-1 power gates to their constant counterparts
            # so they hit the parameterless branch below
            if gatetype == cirq_common.HPowGate and gate.exponent == 1:
                gate = cirq_common.H
            elif gatetype == cirq_common.CNotPowGate and gate.exponent == 1:
                gate = cirq_common.CNOT
            elif gatetype == cirq_pauli._PauliX and gate.exponent == 1:
                gate = cirq_pauli.X
            elif gatetype == cirq_pauli._PauliY and gate.exponent == 1:
                gate = cirq_pauli.Y
            elif gatetype == cirq_pauli._PauliZ and gate.exponent == 1:
                gate = cirq_pauli.Z
            if gate in _constant_gates:
                # parameterless gate: look up the instance itself
                try:
                    optype = _cirq2ops_mapping[gate]
                except KeyError as error:
                    raise NotImplementedError("Operation not supported by tket: " + str(op.gate)) from error
                params = []
            elif isinstance(gate, cirq_common.MeasurementGate) :
                # measurements get their own classical register named by key
                creg = tkcirc.add_c_register(gate.key, 1)
                tkcirc.add_measure(*qb_lst, creg[0])
                continue
            elif isinstance(gate, cirq.PhasedXPowGate) :
                optype = OpType.PhasedX
                pe = gate.phase_exponent
                e = gate.exponent
                params = [e, pe]
            else:
                # parameterised gate: look up by type, exponent is the angle
                try:
                    optype = _cirq2ops_mapping[gatetype]
                except KeyError as error:
                    raise NotImplementedError("Operation not supported by tket: " + str(op.gate)) from error
                params = [gate.exponent]
            tkcirc.add_gate(optype, params, qb_lst, [])
    return tkcirc
def tk_to_cirq(tkcirc: Union[Circuit,PhysicalCircuit], indexed_qubits: List[Qid] = None) -> cirq.Circuit:
    """Converts a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` object to a Cirq :py:class:`Circuit`.

    :param tkcirc: The input :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit`
    :param indexed_qubits: Optional list mapping tket qubit indices to concrete Cirq qubits; defaults to :py:class:`LineQubit` s
    :return: The Cirq :py:class:`Circuit` corresponding to the input circuit
    """
    oplst = []
    for command in tkcirc:
        op = command.op
        optype = op.get_type()
        try:
            gatetype = _ops2cirq_mapping[optype]
        except KeyError as error:
            raise NotImplementedError("Cannot convert tket Op to Cirq gate: " + op.get_name()) from error
        if len(command.controls) != 0 :
            raise NotImplementedError("Cannot convert conditional gate to Cirq")
        qids = [_convert_qubit(qbit, indexed_qubits) for qbit in command.qubits]
        if optype == OpType.Measure:
            # measurement key is the classical register's name
            bit = command.bits[0]
            cirqop = cirq_common.measure(qids[0],key=bit.reg.name)
        else:
            params = op.get_params()
            if len(params)==0 :
                cirqop = gatetype(*qids)
            elif optype == OpType.PhasedX :
                # tket stores [exponent, phase_exponent]
                cirqop = gatetype(phase_exponent=params[1],exponent=params[0])(*qids)
            else:
                cirqop = gatetype(exponent=params[0])(*qids)
        oplst.append(cirqop)
    return cirq.Circuit.from_ops(*oplst)
def _convert_qubit(qb: Qubit, indexed_qubits: List[Qid]) -> cirq.Qid :
    """Map a tket qubit back to a Cirq qubit.

    Qubits from the default "q" register map to the supplied indexed qubits
    (when given) or to :py:class:`LineQubit` s; qubits from any other register
    become :py:class:`NamedQubit` s keyed on their repr.
    """
    if qb.reg.name == "q" :
        if indexed_qubits :
            return indexed_qubits[qb.index]
        return LineQubit(qb.index)
    # idiom fix: call repr() rather than the __repr__ dunder directly
    return NamedQubit(repr(qb))
#!/usr/bin/python3
import logging
import sys
from argparsers import parse_args
from downloaders.downloader_factory import DownloaderFactory
from utils.utils import prepare_download_folder
LOGGER = logging.getLogger(__name__)
def main():
    """Program entry point: parse CLI args, configure logging, and dispatch
    to the downloader matching the arguments."""
    cli_args = parse_args()
    configure_logging(cli_args.is_debug)
    prepare_download_folder(cli_args.folder)
    downloader = DownloaderFactory.get_downloader(cli_args)
    if not downloader:
        raise NotImplementedError("No suitable downloader was found!")
    downloader.download(cli_args)
def configure_logging(is_debug=False):
    """Set up logging to pymage.log plus a console handler on stdout.

    The file level is DEBUG when is_debug is true, otherwise INFO; the
    console handler itself passes everything through at DEBUG.
    """
    log_format = "%(asctime)s [%(name)s] [%(levelname)s] %(message)s"
    file_level = logging.DEBUG if is_debug else logging.INFO
    logging.basicConfig(format=log_format,
                        filename='pymage.log',
                        level=file_level)
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(logging.Formatter(log_format))
    console.setLevel(logging.DEBUG)
    logging.getLogger().addHandler(console)
    LOGGER.info("******* Pymage Downloader *******")
    LOGGER.debug("Ready to DEBUG!")
if __name__ == '__main__':
    main()
|
# Build script: export qt-material dark/light themes and regenerate the Qt
# resource file (style.qrc) listing every exported SVG.
import os
from pathlib import Path
from shutil import rmtree
from jinja2 import Environment, FileSystemLoader, select_autoescape
from qt_material import export_theme
# all outputs live one directory above this script
root_path = os.path.abspath('../')
theme_path = os.path.join(root_path, "theme")
dark_theme_path = os.path.join(theme_path, "dark")
light_theme_path = os.path.join(theme_path, "light")
qss_path_dark = os.path.join(root_path, "style-dark.qss")
qss_path_light = os.path.join(root_path, "style-light.qss")
qrc_path = os.path.join(root_path, "style.qrc")
# start from a clean theme directory
if Path(theme_path).is_dir():
    rmtree(theme_path)
export_theme(theme='dark_blue.xml', qss=qss_path_dark, output=dark_theme_path, prefix=':theme/dark/')
export_theme(theme='light_blue.xml', qss=qss_path_light, output=light_theme_path, prefix=':theme/light/')
p = Path(theme_path)
# resource paths relative to the qrc file (last 4 path components)
files = ['/'.join(a.parts[-4:]) for a in p.glob("**/*.svg")]
env = Environment(loader=FileSystemLoader("."), autoescape=select_autoescape(['html', 'xml']))
template = env.get_template("styles.qrc")
render = template.render(files=files)
with open(qrc_path, 'w') as f:
    f.write(render)
|
#!/usr/bin/env python
# wujian@2020
import numpy as np
import scipy.signal as ss
import scipy.integrate as si
class MCRA(object):
    """
    OM-LSA (Optimally Modified Log-Spectral Amplitude Estimator) with MCRA
    noise estimation.

    Reference:
        1) Cohen I, Berdugo B. Speech enhancement for non-stationary noise
           environments[J]. Signal processing, 2001, 81(11): 2403-2418.
    """
    def __init__(self,
                 alpha=0.92,
                 delta=5,
                 beta=0.7,
                 alpha_s=0.9,
                 alpha_d=0.85,
                 alpha_p=0.2,
                 gmin_db=-10,
                 xi_min_db=-18,
                 w_mcra=1,
                 w_local=1,
                 w_global=15,
                 h_mcra="hann",
                 h_local="hann",
                 h_global="hann",
                 q_max=0.95,
                 zeta_min_db=-10,
                 zeta_max_db=-5,
                 zeta_p_max_db=10,
                 zeta_p_min_db=0,
                 L=125,
                 M=128):
        # alpha*: recursive smoothing factors (paper eqs. 18/31/33/40);
        # delta: speech-presence decision threshold (eq. 39);
        # q_max: upper bound on the a priori speech-absence probability.
        self.delta = delta
        self.alpha = {"s": alpha_s, "d": alpha_d, "p": alpha_p, "t": alpha}
        self.gmin = 10**(gmin_db / 10)
        self.beta = beta
        # frequency smoothing windows (half-width w_*, length 2*w_*+1)
        self.w_m = ss.get_window(h_mcra, w_mcra * 2 + 1)
        self.w_g = ss.get_window(h_global, w_global * 2 + 1)
        self.w_l = ss.get_window(h_local, w_local * 2 + 1)
        # dB parameters converted once to linear scale
        self.xi_min = 10**(xi_min_db / 10)
        self.zeta_min = 10**(zeta_min_db / 10)
        self.zeta_max = 10**(zeta_max_db / 10)
        self.zeta_p_min = 10**(zeta_p_min_db / 10)
        self.zeta_p_max = 10**(zeta_p_max_db / 10)
        self.L = L  # frames per minimum-tracking window
        self.M = M  # assumed STFT size; zeta[:M//2+1] averages one frame
        self.q_max = q_max
    def run(self, stft, eps=1e-7):
        """
        Arguments:
            stft: complex STFT, T x F
        Return:
            gain: real array, T x F
        """
        T, F = stft.shape
        def expint(v):
            # exponential integral E1(v), used by the LSA gain (eq.15)
            return si.quad(lambda t: np.exp(-t) / t, v, np.inf)[0]
        exp_para = np.vectorize(expint)
        obs_power = np.abs(stft)**2
        gh1 = 1
        p_hat = np.ones(F)
        zeta = np.ones(F)
        zeta_peak = 0
        # NOTE(review): the minimum-tracking window restarts when
        # (t + 1) % L == 10 rather than 0 — looks like an off-by-ten vs
        # eq.34/35; confirm this offset is intended.
        beg = 10
        # noise PSD estimate, initialised from the first frame
        lambda_d_hat = obs_power[0]
        G = []
        for t in range(T):
            # >>> eq.10
            # a posteriori SNR
            gamma = obs_power[t] / np.maximum(lambda_d_hat, eps)
            gamma = np.maximum(gamma, eps)
            # <<< eq.10
            # >>> eq.18: a priori SNR (decision-directed estimate)
            xi_hat = self.alpha["t"] * gh1**2 * gamma + (
                1 - self.alpha["t"]) * np.maximum(gamma - 1, 0)
            xi_hat = np.maximum(xi_hat, self.xi_min)
            # <<< eq.18
            # >>> eq.15: conditional LSA gain under speech presence
            v = gamma * xi_hat / (1 + xi_hat)
            gh1 = xi_hat * np.exp(0.5 * exp_para(v)) / (1 + xi_hat)
            # <<< eq.15
            # >>> eq.32: smooth the power spectrum over frequency
            var_sf = np.convolve(obs_power[t], self.w_m, mode="same")
            # <<< eq.32
            if t == 0:
                var_s = obs_power[t]
                var_s_min = var_s
                var_s_tmp = var_s
            else:
                # >>> eq.33: recursive smoothing over time
                var_s = self.alpha["s"] * var_s + (1 -
                                                   self.alpha["s"]) * var_sf
                # <<< eq.33
                if (t + 1) % self.L == beg:
                    # >>> eq.34 & eq.35: restart minimum tracking
                    var_s_min = np.minimum(var_s_tmp, var_s)
                    var_s_tmp = var_s
                    # <<< eq.34 & eq.35
                else:
                    # >>> eq.36 & eq.37: keep tracking the running minimum
                    var_s_min = np.minimum(var_s_min, var_s)
                    var_s_tmp = np.minimum(var_s_tmp, var_s)
                    # <<< eq.36 & eq.37
            # >>> eq.39: speech-presence decision from the minimum ratio
            var_sr = var_s / np.maximum(eps, var_s_min)
            sr_ind = var_sr > self.delta
            # <<< eq.39
            # >>> eq.40: smooth the binary decision into a probability
            p_hat = self.alpha["p"] * p_hat + (1 - self.alpha["p"]) * sr_ind
            # <<< eq.40
            # >>> eq.31: time-varying noise smoothing factor
            alpha_d_hat = self.alpha["d"] + (1 - self.alpha["d"]) * p_hat
            # <<< eq.31
            # >>> eq.30: update the noise PSD estimate
            lambda_d_hat = alpha_d_hat * lambda_d_hat + (
                1 - alpha_d_hat) * obs_power[t]
            # <<< eq.30
            # >>> eq.23: smoothed a priori SNR for the q estimator
            zeta = self.beta * zeta + (1 - self.beta) * xi_hat
            # <<< eq.23
            # >>> eq.24: global and local frequency averages
            zeta_g = np.convolve(zeta, self.w_g, mode="same")
            zeta_l = np.convolve(zeta, self.w_l, mode="same")
            # <<< eq.24
            # >>> eq.25: global likelihood, linear in log(zeta_g)
            var_p_g = np.zeros(F)
            pg_idx = np.logical_and(zeta_g > self.zeta_min,
                                    zeta_g < self.zeta_max)
            var_p_g[pg_idx] = np.log10(
                zeta_g[pg_idx] / self.zeta_min) / np.log10(
                    self.zeta_max / self.zeta_min)
            pg_idx = zeta_g >= self.zeta_max
            var_p_g[pg_idx] = 1
            # <<< eq.25
            # >>> eq.25: same shape for the local likelihood
            var_p_l = np.zeros(F)
            pl_idx = np.logical_and(zeta_l > self.zeta_min,
                                    zeta_l < self.zeta_max)
            var_p_l[pl_idx] = np.log10(
                zeta_l[pl_idx] / self.zeta_min) / np.log10(
                    self.zeta_max / self.zeta_min)
            pl_idx = zeta_l >= self.zeta_max
            var_p_l[pl_idx] = 1
            # <<< eq.25
            # >>> eq.26: frame-wide average over the analysis band
            zeta_frame_cur = np.mean(zeta[:self.M // 2 + 1])
            # <<< eq.26
            # >>> eq.27: frame-level speech likelihood with peak tracking
            if t == 0:
                zeta_frame_pre = zeta_frame_cur
            if zeta_frame_cur > self.zeta_min:
                if zeta_frame_cur > zeta_frame_pre:
                    zeta_peak = min(max(zeta_frame_cur, self.zeta_p_min),
                                    self.zeta_p_max)
                    p_frame = 1
                elif zeta_frame_cur <= self.zeta_min * zeta_peak:
                    p_frame = 0
                elif zeta_frame_cur >= self.zeta_max * zeta_peak:
                    p_frame = 1
                else:
                    p_frame = np.log10(zeta_frame_cur /
                                       (self.zeta_min * zeta_peak))
                    p_frame = p_frame / np.log10(self.zeta_max / self.zeta_min)
            else:
                p_frame = 0
            zeta_frame_pre = zeta_frame_cur
            # <<< eq.27
            # >>> eq.28: a priori speech-absence probability, capped at q_max
            q_hat = np.minimum(self.q_max, 1 - var_p_l * p_frame * var_p_g)
            # <<< eq.28
            # >>> eq.9: a priori speech-presence probability.
            # Fix: the denominator must be (1 - q_hat), not (1 + q_hat), per
            # eq.9 of Cohen & Berdugo 2001 (the iMCRA class below already
            # uses the 1 - q form).  q_hat <= q_max < 1 keeps it positive.
            p_inv = 1 + q_hat * (1 + xi_hat) * np.exp(-v) / (1 - q_hat)
            p = 1 / p_inv
            # <<< eq.9
            # >>> eq.16: OM-LSA gain with spectral floor gmin
            gain = gh1**p * self.gmin**(1 - p)
            G.append(gain)
            # <<< eq.16
        return np.stack(G)
class iMCRA(object):
    """
    OM-LSA (Optimally Modified Log-Spectral Amplitude Estimator) with iMCRA
    Reference:
        1) Cohen I. Noise spectrum estimation in adverse environments: Improved minima controlled
           recursive averaging[J]. IEEE Transactions on speech and audio processing, 2003, 11(5):
           466-475.
    """
    def __init__(self,
                 alpha=0.92,
                 alpha_s=0.9,
                 alpha_d=0.85,
                 b_min=1.66,
                 gamma0=4.6,
                 gamma1=3,
                 zeta0=1.67,
                 xi_min_db=-18,
                 gmin_db=-10,
                 w_mcra=1,
                 h_mcra="hann",
                 beta=1.47,
                 V=15,
                 U=8):
        # alpha*: recursive smoothing factors; beta: noise over-estimation
        # factor; gamma0/gamma1/zeta0: decision thresholds from the paper;
        # V: frames per minimum-tracking sub-window, U: number of sub-windows.
        self.alpha = {"s": alpha_s, "d": alpha_d, "t": alpha}
        self.beta = beta
        self.gamma0, self.gamma1 = gamma0, gamma1
        self.zeta0 = zeta0
        # stored inverted so it multiplies instead of divides below
        self.b_min = 1 / b_min
        # dB parameters converted once to linear scale
        self.xi_min = 10**(xi_min_db / 10)
        self.gain_min = 10**(gmin_db / 10)
        # frequency smoothing window (half-width w_mcra, length 2*w_mcra+1)
        self.w_m = ss.get_window(h_mcra, w_mcra * 2 + 1)
        self.V = V
        self.U = U
    def run(self, stft, eps=1e-7):
        """
        Arguments:
            stft: complex STFT, T x F
        Return:
            gain: real array, T x F
        """
        T, F = stft.shape
        obs_power = np.abs(stft)**2
        # noise PSD estimate, initialised from the first frame
        lambda_d_hat = obs_power[0]
        gh1 = 1
        def expint(v):
            # exponential integral E1(v), used by the LSA gain (eq.33)
            return si.quad(lambda t: np.exp(-t) / t, v, np.inf)[0]
        exp_para = np.vectorize(expint)
        # per-sub-window minima used to bound the tracked minimum
        s_min_sw_hat = []
        s_min_sw = []
        G = []
        for t in range(T):
            # bias-compensated noise estimate (over-estimation factor beta)
            lambda_d = lambda_d_hat * self.beta
            # >>> eq.3: posteriori SNR
            gamma = obs_power[t] / np.maximum(lambda_d, eps)
            # <<< eq.3
            gain = gh1**2 * gamma
            # >>> eq.32 : a priori SNR (decision-directed estimate)
            xi_hat = self.alpha["t"] * gain + (
                1 - self.alpha["t"]) * np.maximum(gamma - 1, 0)
            xi_hat = np.maximum(xi_hat, self.xi_min)
            # <<< eq.32
            # >>> eq.33: conditional LSA gain under speech presence
            v = gamma * xi_hat / (1 + xi_hat)
            gh1 = xi_hat / (1 + xi_hat) * np.exp(0.5 * exp_para(v))
            # <<< eq.33
            # >>> eq.14: smooth the power spectrum over frequency
            var_sf = np.convolve(obs_power[t], self.w_m, mode="same")
            # <<< eq.14
            if t == 0:
                var_s = var_sf
                var_s_hat = var_sf
                var_s_min = var_sf
                var_s_min_sw = var_sf
            else:
                # >>> eq.15: recursive smoothing over time
                var_s = self.alpha["s"] * var_s + (1 -
                                                   self.alpha["s"]) * var_sf
                # <<< eq.15
                var_s_min = np.minimum(var_s_min, var_s)
                var_s_min_sw = np.minimum(var_s_min_sw, var_s)
            # >>> eq.21: rough speech-absence indicator per bin
            gamma_min = obs_power[t] * self.b_min / np.maximum(var_s_min, eps)
            zeta = var_sf * self.b_min / np.maximum(var_s_min, eps)
            indicator = np.logical_and(gamma_min < self.gamma0,
                                       zeta < self.zeta0)
            # <<< eq.21
            # >>> eq.26: smooth power using only speech-absent bins
            ind_conv = np.convolve(indicator, self.w_m, mode="same")
            ind_nz_idx = (ind_conv > 0)
            obs_conv = np.convolve(obs_power[t] * indicator,
                                   self.w_m,
                                   mode="same")
            var_sf_hat = var_s_hat.copy()
            var_sf_hat[
                ind_nz_idx] = obs_conv[ind_nz_idx] / ind_conv[ind_nz_idx]
            # <<< eq.26
            if t == 0:
                var_s_min_hat = var_s
                var_s_min_sw_hat = var_sf
            else:
                # <<< eq.27
                var_s_hat = self.alpha["s"] * var_s_hat + (
                    1 - self.alpha["s"]) * var_sf_hat
                # >>> eq.27
                var_s_min_hat = np.minimum(var_s_min_hat, var_s_hat)
                var_s_min_sw_hat = np.minimum(var_s_min_sw_hat, var_s_hat)
            # >>> eq.28: refined ratios from the speech-absence-only minimum
            gamma_min_hat = obs_power[t] * self.b_min / np.maximum(
                var_s_min_hat, eps)
            zeta_hat = var_s * self.b_min / np.maximum(var_s_min_hat, eps)
            # <<< eq.28
            # >>> eq.29: a priori speech-absence probability
            qhat_idx_c1 = gamma_min_hat < self.gamma1
            qhat_idx_c2 = gamma_min_hat > 1
            # 1 < gamma_min_hat < self.gamma1
            qhat_idx_c3 = np.logical_and(qhat_idx_c2, qhat_idx_c1)
            q_hat = np.zeros(F)
            qhat_idx = np.logical_and(qhat_idx_c3, zeta_hat < self.zeta0)
            # (0, 1)
            q_hat[qhat_idx] = (self.gamma1 -
                               gamma_min_hat[qhat_idx]) / (self.gamma1 - 1)
            # <<< eq.29
            # >>> eq.7: conditional speech-presence probability
            p_hat = np.zeros(F)
            p_hat_den = 1 + q_hat[qhat_idx] * (1 + xi_hat[qhat_idx]) / (
                1 - q_hat[qhat_idx]) * np.exp(-v[qhat_idx])
            # (0, 1)
            p_hat[qhat_idx] = 1 / p_hat_den
            phat_idx = np.logical_and(gamma_min_hat >= self.gamma1,
                                      zeta_hat >= self.zeta0)
            p_hat[phat_idx] = 1
            # <<< eq.7
            # >>> eq.11: time-varying noise smoothing factor
            alpha_d_hat = self.alpha["d"] + (1 - self.alpha["d"]) * p_hat
            # <<< eq.11
            # >>> eq.10: update the noise PSD estimate
            lambda_d_hat = alpha_d_hat * lambda_d_hat + (
                1 - alpha_d_hat) * obs_power[t]
            # <<< eq.10
            s_min_sw.append(var_s_min_sw)
            s_min_sw_hat.append(var_s_min_sw_hat)
            if (t + 1) % self.V == 0:
                # restart minimum tracking from the last U sub-window minima
                # U x F
                u_s_min_sw = np.stack(s_min_sw[-self.U:])
                u_s_min_sw_hat = np.stack(s_min_sw_hat[-self.U:])
                var_s_min = np.min(u_s_min_sw, 0)
                var_s_min_hat = np.min(u_s_min_sw_hat, 0)
                var_s_min_sw = var_s
                var_s_min_sw_hat = var_s_hat
            # >>> gain function: OM-LSA gain with spectral floor gain_min
            gain = gh1**p_hat * self.gain_min**(1 - p_hat)
            G.append(gain)
            # <<< gain function
        return np.stack(G)
from .vis import flow_to_color, flow_err_to_color, flow_max_rad, tensor_to_color, chw_to_hwc, group_color
from .show_result import ShowResultTool, ShowFlow
from .save_result import SaveResultTool
from .vis_hooks import DistFlowVisHook |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.test import unittest
from libcloud.container.base import ContainerCluster, ContainerImage, Container
from libcloud.container.drivers.ecs import ElasticContainerDriver
from libcloud.container.utils.docker import RegistryClient
from libcloud.utils.py3 import httplib
from libcloud.test.secrets import CONTAINER_PARAMS_ECS
from libcloud.test.file_fixtures import ContainerFileFixtures
from libcloud.test import MockHttp
class ElasticContainerDriverTestCase(unittest.TestCase):
    """Unit tests for ElasticContainerDriver, served entirely from canned
    JSON fixtures via ECSMockHttp -- no real AWS calls are made."""

    def setUp(self):
        """Point both the ECS and ECR connection classes at the mock HTTP
        layer, then build the driver under test."""
        ElasticContainerDriver.connectionCls.conn_classes = (
            ECSMockHttp, ECSMockHttp)
        ECSMockHttp.type = None
        ECSMockHttp.use_param = 'a'
        # The ECR (registry) connection is mocked with the same class.
        ElasticContainerDriver.ecrConnectionClass.conn_classes = (
            ECSMockHttp, ECSMockHttp)
        self.driver = ElasticContainerDriver(*CONTAINER_PARAMS_ECS)

    def test_list_clusters(self):
        """list_clusters returns the single cluster from the fixture."""
        clusters = self.driver.list_clusters()
        self.assertEqual(len(clusters), 1)
        self.assertEqual(clusters[0].id, 'arn:aws:ecs:us-east-1:012345678910:cluster/default')
        self.assertEqual(clusters[0].name, 'default')

    def test_create_cluster(self):
        """create_cluster echoes back the requested cluster name."""
        cluster = self.driver.create_cluster('my-cluster')
        self.assertEqual(cluster.name, 'my-cluster')

    def test_destroy_cluster(self):
        """destroy_cluster reports success for an existing cluster."""
        self.assertTrue(
            self.driver.destroy_cluster(
                ContainerCluster(
                    id='arn:aws:ecs:us-east-1:012345678910:cluster/jim',
                    name='jim',
                    driver=self.driver)))

    def test_list_containers(self):
        """list_containers with no filter returns the fixture's container."""
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 1)

    def test_list_containers_for_cluster(self):
        """list_containers scoped to a cluster returns the same container."""
        cluster = self.driver.list_clusters()[0]
        containers = self.driver.list_containers(cluster=cluster)
        self.assertEqual(len(containers), 1)

    def test_deploy_container(self):
        """deploy_container returns the container ARN from the runtask fixture."""
        container = self.driver.deploy_container(
            name='jim',
            image=ContainerImage(
                id=None,
                name='mysql',
                path='mysql',
                version=None,
                driver=self.driver
            )
        )
        self.assertEqual(container.id, 'arn:aws:ecs:ap-southeast-2:647433528374:container/e443d10f-dea3-481e-8a1e-966b9ad4e498')

    def test_get_container(self):
        """get_container resolves a container ARN to the fixture's container."""
        container = self.driver.get_container(
            'arn:aws:ecs:us-east-1:012345678910:container/76c980a8-2454-4a9c-acc4-9eb103117273'
        )
        self.assertEqual(container.id, 'arn:aws:ecs:ap-southeast-2:647433528374:container/d56d4e2c-9804-42a7-9f2a-6029cb50d4a2')
        self.assertEqual(container.name, 'simple-app')
        self.assertEqual(container.image.name, 'simple-app')

    def test_start_container(self):
        """start_container only needs taskDefinitionArn in extra."""
        container = self.driver.start_container(
            Container(
                id=None,
                name=None,
                image=None,
                state=None,
                ip_addresses=None,
                driver=self.driver,
                extra={
                    'taskDefinitionArn': ''
                }
            )
        )
        self.assertFalse(container is None)

    def test_stop_container(self):
        """stop_container needs both taskArn and taskDefinitionArn in extra."""
        container = self.driver.stop_container(
            Container(
                id=None,
                name=None,
                image=None,
                state=None,
                ip_addresses=None,
                driver=self.driver,
                extra={
                    'taskArn': '12345',
                    'taskDefinitionArn': '123556'
                }
            )
        )
        self.assertFalse(container is None)

    def test_restart_container(self):
        """restart_container (stop + start) returns a container object."""
        container = self.driver.restart_container(
            Container(
                id=None,
                name=None,
                image=None,
                state=None,
                ip_addresses=None,
                driver=self.driver,
                extra={
                    'taskArn': '12345',
                    'taskDefinitionArn': '123556'
                }
            )
        )
        self.assertFalse(container is None)

    def test_list_images(self):
        """list_images resolves repository images to fully-qualified names."""
        images = self.driver.list_images('my-images')
        self.assertEqual(len(images), 1)
        self.assertEqual(images[0].name, '647433528374.dkr.ecr.region.amazonaws.com/my-images:latest')

    def test_ex_create_service(self):
        """ex_create_service returns the service dict from the fixture."""
        cluster = self.driver.list_clusters()[0]
        task_definition = self.driver.list_containers()[0].extra['taskDefinitionArn']
        service = self.driver.ex_create_service(cluster=cluster,
                                                name='jim',
                                                task_definition=task_definition)
        self.assertEqual(service['serviceName'], 'test')

    def test_ex_list_service_arns(self):
        """ex_list_service_arns returns both ARNs from the fixture."""
        arns = self.driver.ex_list_service_arns()
        self.assertEqual(len(arns), 2)

    def test_ex_describe_service(self):
        """ex_describe_service resolves an ARN to the service description."""
        arn = self.driver.ex_list_service_arns()[0]
        service = self.driver.ex_describe_service(arn)
        self.assertEqual(service['serviceName'], 'test')

    def test_ex_destroy_service(self):
        """Destroyed services transition to DRAINING per the fixture."""
        arn = self.driver.ex_list_service_arns()[0]
        service = self.driver.ex_destroy_service(arn)
        self.assertEqual(service['status'], 'DRAINING')

    def test_ex_get_registry_client(self):
        """ex_get_registry_client builds a docker RegistryClient."""
        client = self.driver.ex_get_registry_client('my-images')
        self.assertIsInstance(client, RegistryClient)
class ECSMockHttp(MockHttp):
    """Mock HTTP layer serving canned ECS/ECR responses.

    The AWS action is taken from the ``x-amz-target`` request header and
    mapped to a JSON fixture file via ``fixture_map``.
    """
    fixtures = ContainerFileFixtures('ecs')
    fixture_map = {
        'DescribeClusters': 'describeclusters.json',
        'CreateCluster': 'createcluster.json',
        'DeleteCluster': 'deletecluster.json',
        'DescribeTasks': 'describetasks.json',
        'ListTasks': 'listtasks.json',
        'ListClusters': 'listclusters.json',
        'RegisterTaskDefinition': 'registertaskdefinition.json',
        'RunTask': 'runtask.json',
        'StopTask': 'stoptask.json',
        'ListImages': 'listimages.json',
        'DescribeRepositories': 'describerepositories.json',
        'CreateService': 'createservice.json',
        'ListServices': 'listservices.json',
        'DescribeServices': 'describeservices.json',
        'DeleteService': 'deleteservice.json',
        'GetAuthorizationToken': 'getauthorizationtoken.json'
    }

    def root(
            self, method, url, body, headers):
        """Return a mocked (status, body, headers, reason) 4-tuple.

        :raises AssertionError: if the request has no ``x-amz-target``
            header or targets an action with no registered fixture.
        """
        # Bugfix: use .get() -- a missing header previously raised KeyError
        # instead of the intended AssertionError below.
        target = headers.get('x-amz-target')
        if target is None:
            raise AssertionError('Unsupported method')
        # The action name is the last dot-separated component of the target
        # (renamed from `type`, which shadowed the builtin).
        action = target.split('.')[-1]
        fixture = self.fixture_map.get(action)
        if fixture is None:
            raise AssertionError('Unsupported request type %s' % (target))
        body = self.fixtures.load(fixture)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
"""
Django scommon settings for polls project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import pathlib
base_dir = pathlib.Path(__file__).absolute().parents[2]
BASE_DIR = str(base_dir)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'hmxabo(_0=fl$huk$qr#zkkp_m%%3opyqxzm%s$e(2*7vb62=g'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party
'bootstrap3',
# Project apps
'apps.polls.apps.PollsConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.admindocs.middleware.XViewMiddleware'
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(base_dir / 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Custom processors
'project.context_processors.settings_values'
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(base_dir / 'var' / 'db' / 'db.sqlite3')
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
str(base_dir / 'static')
]
STATIC_ROOT = str(base_dir / 'var' / 'static')
# bootstrap3
# ------------
# https://django-bootstrap3.readthedocs.io/en/latest/settings.html
BOOTSTRAP3 = {}
# Project settings
# ----------------
# Your public hostname or public IP address
MY_HOSTNAME_OR_IP = None

def _fix_my_hostname_or_ip():
    """Autodetect this host's outbound IP address if not configured.

    Connecting a UDP socket sends no packets; it only asks the kernel
    which local interface would route towards 8.8.8.8.
    """
    global MY_HOSTNAME_OR_IP
    if MY_HOSTNAME_OR_IP is None:
        import socket
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect(("8.8.8.8", 80))
                MY_HOSTNAME_OR_IP = s.getsockname()[0]
        except OSError:
            # Bugfix: on a host with no default route (offline dev box,
            # sandboxed CI) connect() raises and the whole settings import
            # crashed. Fall back to loopback instead.
            MY_HOSTNAME_OR_IP = '127.0.0.1'

_fix_my_hostname_or_ip()

MY_WAMP_URL = 'ws://{}:8080/ws'.format(MY_HOSTNAME_OR_IP)
MY_WAMP_REALM = 'polls'
MY_WAMP_HTTP_GATEWAY = 'http://{}:8080/publish'.format(MY_HOSTNAME_OR_IP)
import pygame
def draw():
    """Open a 500x500 window and draw a red triangle until the window is closed."""
    pygame.init()
    screen = pygame.display.set_mode((500, 500))
    screen.fill([255, 255, 255])
    clock = pygame.time.Clock()
    # Bugfix: the original read `running` in a `while running:` guard before
    # ever assigning it (NameError), and nested a second identical loop.
    running = True
    while running:
        clock.tick(10)  # cap the loop at 10 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        pygame.draw.polygon(screen, [255, 0, 0], [(0, 0), (250, 500), (250, 250)], width=0)
        pygame.display.flip()
    pygame.quit()
if __name__ == '__main__':
    # Run the demo only when executed as a script.
    draw()
# -*- coding: utf-8 -*-
'''
The core behaviors used by minion and master
'''
# pylint: disable=W0232
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import types
import random
import logging
import itertools
import traceback
import multiprocessing
from collections import deque
# Import salt libs
import salt.daemons.masterapi
import salt.utils.args
import salt.transport
from raet import raeting, nacling
from raet.road.stacking import RoadStack
from raet.road.estating import RemoteEstate
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
from salt import daemons
from salt.daemons import salting
from salt.utils import kinds
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError, CommandNotFoundError, SaltInvocationError)
# Import ioflo libs
from ioflo.base.odicting import odict
import ioflo.base.deeding
from ioflo.base.consoling import getConsole
console = getConsole()

# Import Third Party Libs
# pylint: disable=import-error
# Optional dependency flags: psutil and resource are only needed when
# opts['modules_max_memory'] is set (see SaltLoadModules below).
HAS_PSUTIL = False
try:
    import psutil
    HAS_PSUTIL = True
except ImportError:
    pass
HAS_RESOURCE = False
try:
    import resource
    HAS_RESOURCE = True
except ImportError:
    pass
# pylint: disable=no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=import-error,no-name-in-module,redefined-builtin

log = logging.getLogger(__name__)
class SaltRaetCleanup(ioflo.base.deeding.Deed):
    '''
    Cleanup stray lane keep directories not reaped

    FloScript:

    do salt raet cleanup at enter

    '''
    Ioinits = {
        'opts': '.salt.opts',
    }

    def action(self):
        '''
        Remove stale lane uxd files from the configured sock_dir.
        Runs once at enter; non-protected plain '*.uxd' files whose stem
        has the 'name.suffix' form are unlinked.
        '''
        sock_dir = self.opts.value.get('sock_dir')
        if not sock_dir:
            return
        sockdirpath = os.path.abspath(sock_dir)
        console.concise("Cleaning up uxd files in {0}\n".format(sockdirpath))
        protecteds = self.opts.value.get('raet_cleanup_protecteds', [])
        for name in os.listdir(sockdirpath):
            path = os.path.join(sockdirpath, name)
            if os.path.isdir(path):
                continue  # only files are cleaned
            root, ext = os.path.splitext(name)
            if ext != '.uxd':
                continue
            if not all(root.partition('.')):
                continue  # stem must be of the form 'head.tail'
            if path in protecteds:
                continue
            try:
                os.unlink(path)
                console.concise("Removed {0}\n".format(path))
            except OSError:
                console.concise("Failed removing {0}\n".format(path))
                raise
class SaltRaetRoadClustered(ioflo.base.deeding.Deed):
    '''
    Mirrors opts['cluster_mode'] into the share
    .salt.road.manor.cluster.clustered

    FloScript:

    do salt raet road clustered
    go next if .salt.road.manor.cluster.clustered

    '''
    Ioinits = odict(inode=".salt.road.manor.",
                    clustered=odict(ipath='cluster.clustered', ival=False),
                    opts='.salt.opts',)

    def action(self, **kwa):
        '''
        Refresh the .cluster.clustered share from the current opts.
        '''
        clustered = self.opts.value.get('cluster_mode', False)
        self.clustered.update(value=clustered)
class SaltRaetRoadUsherMinionSetup(ioflo.base.deeding.Deed):
    '''
    Set up .ushers which is initial list of masters to bootstrap
    into road

    FloScript:

    do salt raet road usher minion setup at enter

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        ushers='ushers',
        opts='.salt.opts')

    def action(self):
        '''
        Populate .ushers from opts; in cluster mode the bootstrap list
        comes from 'cluster_masters' instead of 'master'.
        '''
        if self.opts.value.get('cluster_mode', False):
            masters = 'cluster_masters'
        else:
            masters = 'master'
        self.ushers.value = daemons.extract_masters(self.opts.value,
                                                    masters=masters,
                                                    port=None)
class SaltRaetRoadUsherMasterSetup(ioflo.base.deeding.Deed):
    '''
    Set up .ushers which is initial list of masters to bootstrap
    into road

    FloScript:

    do salt raet road usher master setup at enter

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        ushers='ushers',
        opts='.salt.opts')

    def action(self):
        '''
        Populate .ushers from the 'cluster_masters' opts entry on the
        'raet_port' port; an empty result is tolerated.
        '''
        self.ushers.value = daemons.extract_masters(
            self.opts.value,
            masters='cluster_masters',
            port='raet_port',
            raise_if_empty=False)
class SaltRaetRoadClusterLoadSetup(ioflo.base.deeding.Deed):
    '''
    Sets up cluster.masters for load balancing

    FloScript:

    do salt raet road cluster load setup at enter

    '''
    Ioinits = odict(
        inode='.salt.road.manor.',
        masters={'ipath': 'cluster.masters', 'ival': odict()},
        stack='stack',
        opts='.salt.opts',)

    def action(self, **kwa):
        '''
        Seed .cluster.masters with a zero-load entry per master remote.
        '''
        if not self.opts.value.get('cluster_mode'):
            return
        for remote in six.itervalues(self.stack.value.remotes):
            if remote.kind != kinds.applKinds.master:
                continue
            self.masters.value[remote.name] = odict(load=0.0,
                                                    expire=self.store.stamp)
class SaltRaetRoadStackSetup(ioflo.base.deeding.Deed):
    '''
    Initialize and run raet udp stack for Salt

    FloScript:

    do salt raet road stack setup at enter

    '''
    # NOTE(review): 'inode' lacks the leading dot used by the sibling deeds
    # ('salt.road.manor.' vs '.salt.road.manor.') -- confirm intended.
    Ioinits = {
        'inode': 'salt.road.manor.',
        'stack': 'stack',
        'opts': '.salt.opts',
        'txmsgs': {'ipath': 'txmsgs',
                   'ival': deque()},
        'rxmsgs': {'ipath': 'rxmsgs',
                   'ival': deque()},
        'local': {'ipath': 'local',
                  'ival': {'main': False,
                           'mutable': False,
                           'uid': None,
                           'role': 'master',
                           'sighex': None,
                           'prihex': None}},
    }

    def _prepare(self):
        '''
        Assign RoadStack class-level defaults before any stack exists.
        '''
        RoadStack.Bk = raeting.bodyKinds.msgpack
        RoadStack.JoinentTimeout = 0.0

    def action(self):
        '''
        enter action
        should only run once to setup road stack.
        moved from _prepare so can do clean up before stack is initialized

        do salt raet road stack setup at enter

        :raises ValueError: if the application kind is unknown or the
            minion/master id (role) is missing from opts.
        '''
        kind = self.opts.value['__role']  # application kind
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}'.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        role = self.opts.value.get('id', '')
        if not role:
            emsg = ("Missing role required to setup RoadStack.")
            log.error(emsg + "\n")
            raise ValueError(emsg)
        # Stack name combines role and application kind, e.g. 'minion1_minion'.
        name = "{0}_{1}".format(role, kind)
        main = self.opts.value.get('raet_main', self.local.data.main)
        mutable = self.opts.value.get('raet_mutable', self.local.data.mutable)
        always = self.opts.value.get('open_mode', False)
        mutable = mutable or always  # open_mode when True takes precedence
        uid = self.local.data.uid
        # Callers bind the alternate port so they do not clash with a
        # co-resident minion; everything else binds the main raet port.
        if kind == kinds.APPL_KIND_NAMES[kinds.applKinds.caller]:
            ha = (self.opts.value['interface'], self.opts.value['raet_alt_port'])
        else:
            ha = (self.opts.value['interface'], self.opts.value['raet_port'])
        basedirpath = os.path.abspath(os.path.join(self.opts.value['cachedir'], 'raet'))
        txMsgs = self.txmsgs.value
        rxMsgs = self.rxmsgs.value
        keep = salting.SaltKeep(opts=self.opts.value,
                                basedirpath=basedirpath,
                                stackname=name)
        # Prefer persisted role keys; fall back to seeded share values.
        roledata = keep.loadLocalRoleData()
        sighex = roledata['sighex'] or self.local.data.sighex
        prihex = roledata['prihex'] or self.local.data.prihex
        self.stack.value = RoadStack(store=self.store,
                                     keep=keep,
                                     name=name,
                                     uid=uid,
                                     ha=ha,
                                     role=role,
                                     sigkey=sighex,
                                     prikey=prihex,
                                     main=main,
                                     kind=kinds.APPL_KINDS[kind],
                                     mutable=mutable,
                                     txMsgs=txMsgs,
                                     rxMsgs=rxMsgs,
                                     period=3.0,
                                     offset=0.5)
        if self.opts.value.get('raet_clear_remotes'):
            for remote in six.itervalues(self.stack.value.remotes):
                self.stack.value.removeRemote(remote, clear=True)
            # Bugfix: the reset must be applied to the RoadStack itself, not
            # to the Share wrapper (cf. SaltRaetRoadStackJoiner, which does
            # stack.puid = stack.Uid). The old `self.stack.puid = ...` set an
            # attribute on the share and left the stack's puid unchanged.
            self.stack.value.puid = self.stack.value.Uid  # reset puid
class SaltRaetRoadStackCloser(ioflo.base.deeding.Deed):
    '''
    Closes stack server socket connection

    FloScript:

    do salt raet road stack closer at exit

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack', )

    def action(self, **kwa):
        '''
        Close the road stack's UDP server socket, if a stack exists.
        '''
        stack = self.stack.value
        if stack and isinstance(stack, RoadStack):
            stack.server.close()
class SaltRaetRoadStackJoiner(ioflo.base.deeding.Deed):
    '''
    Initiates join transaction with master(s)

    FloScript:

    do salt raet road stack joiner at enter

    assumes that prior the following has been run to setup .masters

    do salt raet road usher minion setup

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack',
        ushers='ushers',
        opts='.salt.opts')

    def action(self, **kwa):
        '''
        Join with all masters
        '''
        stack = self.stack.value
        if stack and isinstance(stack, RoadStack):
            # Both refresh flags default to True; an empty remotes table
            # also forces a refresh.
            refresh_masters = (self.opts.value.get('raet_clear_remote_masters',
                                                   True) or not stack.remotes)
            refresh_all = (self.opts.value.get('raet_clear_remotes', True) or
                           not stack.remotes)
            # NOTE(review): these loops remove entries from stack.remotes
            # while iterating six.itervalues over it -- confirm removeRemote
            # defers/copies before mutating, otherwise this is fragile.
            if refresh_masters:  # clear all remote masters
                for remote in six.itervalues(stack.remotes):
                    if remote.kind == kinds.applKinds.master:
                        stack.removeRemote(remote, clear=True)
            if refresh_all:  # clear all remotes
                for remote in six.itervalues(stack.remotes):
                    stack.removeRemote(remote, clear=True)
            if refresh_all or refresh_masters:
                stack.puid = stack.Uid  # reset puid so reuse same uid each time
            # Seed a vacuous remote estate for each bootstrap master ...
            for master in self.ushers.value:
                mha = master['external']
                stack.addRemote(RemoteEstate(stack=stack,
                                             fuid=0,  # vacuous join
                                             sid=0,  # always 0 for join
                                             ha=mha,
                                             kind=kinds.applKinds.master))
            # ... then initiate a join transaction with every master remote.
            for remote in six.itervalues(stack.remotes):
                if remote.kind == kinds.applKinds.master:
                    stack.join(uid=remote.uid, timeout=0.0)
class SaltRaetRoadStackJoined(ioflo.base.deeding.Deed):
    '''
    Updates status with .joined of zeroth remote estate (master)

    FloScript:

    do salt raet road stack joined
    go next if joined in .salt.road.manor.status

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack',
        status=odict(ipath='status', ival=odict(joined=False,
                                                allowed=False,
                                                alived=False,
                                                rejected=False,
                                                idle=False, )))

    def action(self, **kwa):
        '''
        Set status.joined True if any master remote reports joined.
        '''
        joined = False
        stack = self.stack.value
        if stack and isinstance(stack, RoadStack) and stack.remotes:
            joined = any(remote.joined
                         for remote in six.itervalues(stack.remotes)
                         if remote.kind == kinds.applKinds.master)
        self.status.update(joined=joined)
class SaltRaetRoadStackRejected(ioflo.base.deeding.Deed):
    '''
    Updates status with rejected of .acceptance of zeroth remote estate (master)

    FloScript:

    do salt raet road stack rejected
    go next if rejected in .salt.road.manor.status

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack',
        status=odict(ipath='status', ival=odict(joined=False,
                                                allowed=False,
                                                alived=False,
                                                rejected=False,
                                                idle=False, )))

    def action(self, **kwa):
        '''
        Set status.rejected True when every master remote was rejected,
        or when there are no remotes at all.
        '''
        stack = self.stack.value
        rejected = False
        if stack and isinstance(stack, RoadStack):
            if not stack.remotes:  # no remotes so assume rejected
                rejected = True
            else:
                rejected = all(remote.acceptance == raeting.acceptances.rejected
                               for remote in six.itervalues(stack.remotes)
                               if remote.kind == kinds.applKinds.master)
        self.status.update(rejected=rejected)
class SaltRaetRoadStackAllower(ioflo.base.deeding.Deed):
    '''
    Initiates allow (CurveCP handshake) transaction with master

    FloScript:

    do salt raet road stack allower at enter

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack', )

    def action(self, **kwa):
        '''
        Start the allow (CurveCP handshake) transaction with every
        master remote on the road stack.
        '''
        stack = self.stack.value
        if not (stack and isinstance(stack, RoadStack)):
            return
        for remote in six.itervalues(stack.remotes):
            if remote.kind == kinds.applKinds.master:
                stack.allow(uid=remote.uid, timeout=0.0)
class SaltRaetRoadStackAllowed(ioflo.base.deeding.Deed):
    '''
    Updates status with .allowed of zeroth remote estate (master)

    FloScript:

    do salt raet road stack allowed
    go next if allowed in .salt.road.manor.status

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack',
        status=odict(ipath='status', ival=odict(joined=False,
                                                allowed=False,
                                                alived=False,
                                                rejected=False,
                                                idle=False, )))

    def action(self, **kwa):
        '''
        Set status.allowed True if any master remote reports allowed.
        '''
        allowed = False
        stack = self.stack.value
        if stack and isinstance(stack, RoadStack) and stack.remotes:
            allowed = any(remote.allowed
                          for remote in six.itervalues(stack.remotes)
                          if remote.kind == kinds.applKinds.master)
        self.status.update(allowed=allowed)
class SaltRaetRoadStackManager(ioflo.base.deeding.Deed):
    '''
    Runs the manage method of RoadStack

    FloScript:

    do salt raet road stack manager

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        stack='stack',
        alloweds={'ipath': '.salt.var.presence.alloweds',
                  'ival': odict()},
        aliveds={'ipath': '.salt.var.presence.aliveds',
                 'ival': odict()},
        reapeds={'ipath': '.salt.var.presence.reapeds',
                 'ival': odict()},
        availables={'ipath': '.salt.var.presence.availables',
                    'ival': set()},
        changeds={'ipath': '.salt.var.presence.changeds',
                  'ival': odict(plus=set(), minus=set())},
        event='.salt.event.events',)

    def _fire_events(self):
        '''
        Append presence events to the event share: a 'change' event only
        when membership changed, and a 'present' event every time.
        '''
        stack = self.stack.value
        if self.changeds.data.plus or self.changeds.data.minus:
            # fire presence change event
            data = {'new': list(self.changeds.data.plus),
                    'lost': list(self.changeds.data.minus)}
            tag = tagify('change', 'presence')
            route = {'dst': (None, None, 'event_fire'),
                     'src': (None, stack.local.name, None)}
            msg = {'route': route, 'tag': tag, 'data': data}
            self.event.value.append(msg)
        # fire presence present event
        data = {'present': list(self.aliveds.value)}
        tag = tagify('present', 'presence')
        route = {'dst': (None, None, 'event_fire'),
                 'src': (None, stack.local.name, None)}
        msg = {'route': route, 'tag': tag, 'data': data}
        self.event.value.append(msg)

    def action(self, **kwa):
        '''
        Manage the presence of any remotes

        availables is set of names of alive remotes which are also allowed
        changeds is is share with two fields:
            plus is set of names of newly available remotes
            minus is set of names of newly unavailable remotes
        alloweds is dict of allowed remotes keyed by name
        aliveds is dict of alived remotes keyed by name
        reapeds is dict of reaped remotes keyed by name
        '''
        stack = self.stack.value
        if stack and isinstance(stack, RoadStack):
            stack.manage(cascade=True)
            # make copies -- shares hold snapshots, not live stack state
            self.availables.value = set(self.stack.value.availables)
            self.changeds.update(plus=set(self.stack.value.changeds['plus']))
            self.changeds.update(minus=set(self.stack.value.changeds['minus']))
            self.alloweds.value = odict(self.stack.value.alloweds)
            self.aliveds.value = odict(self.stack.value.aliveds)
            self.reapeds.value = odict(self.stack.value.reapeds)
            console.concise(" Manage {0}.\nAvailables: {1}\nChangeds:\nPlus: {2}\n"
                            "Minus: {3}\nAlloweds: {4}\nAliveds: {5}\nReapeds: {6}\n".format(
                                stack.name,
                                self.availables.value,
                                self.changeds.data.plus,
                                self.changeds.data.minus,
                                self.alloweds.value,
                                self.aliveds.value,
                                self.reapeds.value))
            self._fire_events()
class SaltRaetRoadStackPrinter(ioflo.base.deeding.Deed):
    '''
    Prints out messages on rxMsgs queue for associated stack

    FloScript:

    do raet road stack printer

    '''
    Ioinits = odict(
        inode=".salt.road.manor.",
        rxmsgs=odict(ipath='rxmsgs', ival=deque()),)

    def action(self, **kwa):
        '''
        Drain the rx queue, printing each received message.
        '''
        queue = self.rxmsgs.value
        while queue:
            msg, name = queue.popleft()
            console.terse("\nReceived....\n{0}\n".format(msg))
class SaltLoadModules(ioflo.base.deeding.Deed):
    '''
    Reload the minion modules

    FloScript:

    do salt load modules at enter

    '''
    Ioinits = {'opts': '.salt.opts',
               'grains': '.salt.grains',
               'modules': '.salt.loader.modules',
               'grain_time': '.salt.var.grain_time',
               'module_refresh': '.salt.var.module_refresh',
               'returners': '.salt.loader.returners'}

    def _prepare(self):
        # Load once at enter ...
        self._load_modules()

    def action(self):
        # ... and again on every action tick.
        self._load_modules()

    def _load_modules(self):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        if self.grain_time.value is None:
            self.grain_time.value = 0.0
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.value.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of {0}'.format(
                    self.opts.value['modules_max_memory'])
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            # NOTE(review): psutil's get_memory_info() was removed in psutil
            # 3.x in favour of memory_info() -- confirm the pinned psutil
            # version supports this call.
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            mem_limit = rss + vms + self.opts.value['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.value.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')
        # Refresh grains at most every 300s, or when a refresh was requested.
        if time.time() - self.grain_time.value > 300.0 or self.module_refresh.value:
            self.opts.value['grains'] = salt.loader.grains(self.opts.value)
            self.grain_time.value = time.time()
            self.grains.value = self.opts.value['grains']
        self.modules.value = salt.loader.minion_mods(self.opts.value)
        self.returners.value = salt.loader.returners(self.opts.value, self.modules.value)
        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
        self.module_refresh.value = False
class SaltLoadPillar(ioflo.base.deeding.Deed):
    '''
    Load up the initial pillar for the minion

    do salt load pillar
    '''
    Ioinits = {'opts': '.salt.opts',
               'pillar': '.salt.pillar',
               'grains': '.salt.grains',
               'modules': '.salt.loader.modules',
               'pillar_refresh': '.salt.var.pillar_refresh',
               'road_stack': '.salt.road.manor.stack',
               'master_estate_name': '.salt.track.master_estate_name', }

    def action(self):
        '''
        Initial pillar
        '''
        # default master is the first remote that is allowed
        available_masters = [remote for remote in six.itervalues(self.road_stack.value.remotes)
                             if remote.allowed]
        # Busy-wait (0.1s naps) until at least one master is allowed.
        while not available_masters:
            available_masters = [remote for remote in six.itervalues(self.road_stack.value.remotes)
                                 if remote.allowed]
            time.sleep(0.1)
        random_master = self.opts.value.get('random_master')
        if random_master:
            master = available_masters[random.randint(0, len(available_masters) - 1)]
        else:
            master = available_masters[0]
        self.master_estate_name.value = master.name
        # NOTE(review): the route's dst names the *first* remote, while the
        # transmit below targets master.uid -- confirm these always refer to
        # the same estate.
        route = {'src': (self.road_stack.value.local.name, None, None),
                 'dst': (next(six.itervalues(self.road_stack.value.remotes)).name, None, 'remote_cmd')}
        load = {'id': self.opts.value['id'],
                'grains': self.grains.value,
                'saltenv': self.opts.value['environment'],
                'ver': '2',
                'cmd': '_pillar'}
        self.road_stack.value.transmit({'route': route, 'load': load},
                                       uid=master.uid)
        self.road_stack.value.serviceAll()
        # Block until the pillar reply arrives; a None return is skipped and
        # the wait continues.
        while True:
            time.sleep(0.1)
            while self.road_stack.value.rxMsgs:
                msg, sender = self.road_stack.value.rxMsgs.popleft()
                self.pillar.value = msg.get('return', {})
                if self.pillar.value is None:
                    continue
                self.opts.value['pillar'] = self.pillar.value
                self.pillar_refresh.value = False
                return
            self.road_stack.value.serviceAll()
class SaltSchedule(ioflo.base.deeding.Deed):
    '''
    Evaluates the schedule

    FloScript:

    do salt schedule

    '''
    Ioinits = {'opts': '.salt.opts',
               'grains': '.salt.grains',
               'modules': '.salt.loader.modules',
               'returners': '.salt.loader.returners'}

    def _prepare(self):
        '''
        Load execution modules and returners, then build the Schedule.
        '''
        opts = self.opts.value
        self.modules.value = salt.loader.minion_mods(opts)
        self.returners.value = salt.loader.returners(opts, self.modules.value)
        self.schedule = salt.utils.schedule.Schedule(
            opts,
            self.modules.value,
            self.returners.value)

    def action(self):
        '''
        Evaluate the schedule once per action tick.
        '''
        self.schedule.eval()
class SaltRaetManorLaneSetup(ioflo.base.deeding.Deed):
    '''
    Only intended to be called once at the top of the manor house
    Sets up the LaneStack for the main yard

    FloScript:

    do salt raet manor lane setup at enter

    '''
    Ioinits = {'opts': '.salt.opts',
               'event_yards': '.salt.event.yards',
               'local_cmd': '.salt.var.local_cmd',
               'remote_cmd': '.salt.var.remote_cmd',
               'publish': '.salt.var.publish',
               'fun': '.salt.var.fun',
               'worker_verify': '.salt.var.worker_verify',
               'event': '.salt.event.events',
               'event_req': '.salt.event.event_req',
               'presence_req': '.salt.presence.event_req',
               'workers': '.salt.track.workers',
               'inode': '.salt.lane.manor.',
               'stack': 'stack',
               'local': {'ipath': 'local',
                         'ival': {'lanename': 'master'}},
               }

    def _prepare(self):
        '''
        Set up required objects and queues
        '''
        pass

    def action(self):
        '''
        Run once at enter

        :raises ValueError: for an unknown application kind, or a
            minion/caller without an id in opts.
        '''
        kind = self.opts.value['__role']
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}' for manor lane.".format(kind))
            log.error(emsg + "\n")
            raise ValueError(emsg)
        # Masters and syndics share the fixed 'master' lane name; minions
        # and callers derive theirs from the configured id.
        if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.master],
                    kinds.APPL_KIND_NAMES[kinds.applKinds.syndic]]:
            lanename = 'master'
        elif kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
                      kinds.APPL_KIND_NAMES[kinds.applKinds.caller], ]:
            role = self.opts.value.get('id', '')
            if not role:
                emsg = ("Missing role required to setup manor Lane.")
                log.error(emsg + "\n")
                raise ValueError(emsg)
            lanename = "{0}_{1}".format(role, kind)
        else:
            emsg = ("Unsupported application kind = '{0}' for manor Lane.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        name = 'manor'
        self.stack.value = LaneStack(
            name=name,
            lanename=lanename,
            sockdirpath=self.opts.value['sock_dir'])
        self.stack.value.Pk = raeting.packKinds.pack
        # Initialize all shared queues used by the routers/workers.
        self.event_yards.value = set()
        self.local_cmd.value = deque()
        self.remote_cmd.value = deque()
        self.fun.value = deque()
        self.event.value = deque()
        self.event_req.value = deque()
        self.presence_req.value = deque()
        self.publish.value = deque()
        # Shared secret the workers echo back to prove their identity.
        self.worker_verify.value = salt.utils.rand_string()
        if self.opts.value.get('worker_threads'):
            worker_seed = []
            for index in range(self.opts.value['worker_threads']):
                worker_seed.append('worker{0}'.format(index + 1))
            # Round-robin dispatch over worker yard names.
            self.workers.value = itertools.cycle(worker_seed)
class SaltRaetLaneStackCloser(ioflo.base.deeding.Deed):  # pylint: disable=W0232
    '''
    Closes lane stack server socket connection

    FloScript:

    do raet lane stack closer at exit

    '''
    # NOTE(review): inode lacks the trailing dot used by sibling deeds
    # (".salt.lane.manor" vs ".salt.lane.manor.") -- confirm intended.
    Ioinits = odict(
        inode=".salt.lane.manor",
        stack='stack',)

    def action(self, **kwa):
        '''
        Close the lane stack's UXD server socket, if a stack exists.
        '''
        stack = self.stack.value
        if stack and isinstance(stack, LaneStack):
            stack.server.close()
class SaltRaetRoadStackService(ioflo.base.deeding.Deed):
    '''
    Process the udp traffic

    FloScript:

    do rx

    '''
    Ioinits = {
        'road_stack': '.salt.road.manor.stack',
    }

    def action(self):
        '''
        Service all receive and transmit queues of the road stack.
        '''
        stack = self.road_stack.value
        stack.serviceAll()
class SaltRaetRoadStackServiceRx(ioflo.base.deeding.Deed):
    '''
    Process the inbound Road traffic

    FloScript:

    do salt raet road stack service rx

    '''
    Ioinits = {
        'road_stack': '.salt.road.manor.stack',
    }

    def action(self):
        '''
        Service the road stack's receive queues.
        '''
        stack = self.road_stack.value
        stack.serviceAllRx()
class SaltRaetRoadStackServiceTx(ioflo.base.deeding.Deed):
    '''
    Process the outbound Road traffic

    FloScript:

    do salt raet road stack service tx

    '''
    # Yes, this class is identical to RX, this is because we still need to
    # separate out rx and tx in raet itself
    Ioinits = {
        'road_stack': '.salt.road.manor.stack',
    }

    def action(self):
        '''
        Service the road stack's transmit queues.
        '''
        stack = self.road_stack.value
        stack.serviceAllTx()
class SaltRaetLaneStackServiceRx(ioflo.base.deeding.Deed):
    '''
    Process the inbound Lane traffic

    FloScript:

    do salt raet lane stack service rx

    '''
    Ioinits = {
        'lane_stack': '.salt.lane.manor.stack',
    }

    def action(self):
        '''
        Service the lane stack's receive queues.
        '''
        stack = self.lane_stack.value
        stack.serviceAllRx()
class SaltRaetLaneStackServiceTx(ioflo.base.deeding.Deed):
    '''
    Process the outbound Lane traffic

    FloScript:

    do salt raet lane stack service tx

    '''
    # Yes, this class is identical to RX, this is because we still need to
    # separate out rx and tx in raet itself
    Ioinits = {
        'lane_stack': '.salt.lane.manor.stack',
    }

    def action(self):
        '''
        Service the lane stack's transmit queues.
        '''
        stack = self.lane_stack.value
        stack.serviceAllTx()
class SaltRaetRouter(ioflo.base.deeding.Deed):
    '''
    Routes the communication in and out of Road and Lane connections

    This is a base class: subclasses implement _process_road_rxmsg and
    _process_lane_rxmsg to do the actual routing.
    '''
    Ioinits = {'opts': '.salt.opts',
               'local_cmd': '.salt.var.local_cmd',
               'remote_cmd': '.salt.var.remote_cmd',
               'publish': '.salt.var.publish',
               'fun': '.salt.var.fun',
               'event': '.salt.event.events',
               'event_req': '.salt.event.event_req',  # deque
               'presence_req': '.salt.presence.event_req',  # deque
               'availables': '.salt.var.presence.availables',  # set()
               'workers': '.salt.track.workers',
               'worker_verify': '.salt.var.worker_verify',
               'lane_stack': '.salt.lane.manor.stack',
               'road_stack': '.salt.road.manor.stack',
               'master_estate_name': '.salt.track.master_estate_name',
               'laters': {'ipath': '.salt.lane.manor.laters',  # requeuing when not yet routable
                          'ival': deque()}}

    def _process_road_rxmsg(self, msg, sender):
        '''
        Send to the right queue
        msg is the message body dict
        sender is the unique name of the remote estate that sent the message
        '''
        pass

    def _process_lane_rxmsg(self, msg, sender):
        '''
        Send uxd messages tot he right queue or forward them to the correct
        yard etc.

        msg is message body dict
        sender is unique name of remote that sent the message
        '''
        pass

    def _get_master_estate_name(self, clustered=False):
        '''
        Assign and return the name of the estate for the default master or empty if none
        If the default master is no longer available then selects one of the available
        masters

        If clustered is True then use load balancing algorithm to select master
        '''
        opts = self.opts.value
        master = self.road_stack.value.nameRemotes.get(self.master_estate_name.value)
        if not master or not master.alived:  # select a different master
            # Bugfix: six.Iterator is a compatibility *base class*, not an
            # iteration helper -- calling it here raised TypeError whenever a
            # new master had to be selected. Use six.itervalues as elsewhere.
            available_masters = [remote for remote in
                                 six.itervalues(self.road_stack.value.remotes)
                                 if remote.alived]
            if available_masters:
                random_master = opts.get('random_master')
                if random_master:
                    master = available_masters[random.randint(0, len(available_masters) - 1)]
                else:
                    master = available_masters[0]
            else:
                master = None
        self.master_estate_name.value = master.name if master else ''
        return self.master_estate_name.value

    def _availablize(self, minions):
        '''
        Return set that is intersection of associated minion estates for
        roles in minions and the set of available minion estates.
        '''
        suffix = '_{0}'.format(kinds.APPL_KIND_NAMES[kinds.applKinds.minion])
        # Bugfix: str.rstrip(suffix) strips any trailing run of the suffix's
        # *characters* (mangling roles that end in them, e.g. 'minion');
        # remove the exact suffix instead.
        available_roles = set(
            name[:-len(suffix)] if name.endswith(suffix) else name
            for name in self.availables.value)
        return list(set(minions) & available_roles)

    def action(self):
        '''
        Process the messages!
        '''
        # Drain road (UDP) messages first ...
        while self.road_stack.value.rxMsgs:
            msg, sender = self.road_stack.value.rxMsgs.popleft()
            self._process_road_rxmsg(msg=msg, sender=sender)
        # ... then requeue deferred lane messages for reprocessing ...
        while self.laters.value:  # process requeued LaneMsgs
            msg, sender = self.laters.value.popleft()
            self.lane_stack.value.rxMsgs.append((msg, sender))
        # ... then drain lane (UXD) messages.
        while self.lane_stack.value.rxMsgs:
            msg, sender = self.lane_stack.value.rxMsgs.popleft()
            self._process_lane_rxmsg(msg=msg, sender=sender)
class SaltRaetRouterMaster(SaltRaetRouter):
    '''
    Routes the communication in and out of Road and Lane connections
    Specific to Master

    do salt raet router master
    '''
    def _process_road_rxmsg(self, msg, sender):
        '''
        Send to the right queue
        msg is the message body dict
        sender is the unique name of the remote estate that sent the message
        '''
        try:
            s_estate, s_yard, s_share = msg['route']['src']
            d_estate, d_yard, d_share = msg['route']['dst']
        except (ValueError, IndexError):
            log.error('Received invalid message: {0}'.format(msg))
            return
        if s_estate is None:  # drop
            return
        log.debug("**** Road Router rxMsg **** id={0} estate={1} yard={2}\n"
                  " msg= {3}\n".format(
                      self.opts.value['id'],
                      self.road_stack.value.local.name,
                      self.lane_stack.value.local.name,
                      msg))
        if d_estate is not None and d_estate != self.road_stack.value.local.name:
            log.error(
                'Road Router Received message for wrong estate: {0}'.format(d_estate))
            return
        if d_yard is not None:
            # Meant for another yard, send it off!
            if d_yard in self.lane_stack.value.nameRemotes:
                self.lane_stack.value.transmit(msg,
                                               self.lane_stack.value.nameRemotes[d_yard].uid)
                return
        if d_share is None:
            # No queue destination!
            log.error('Received message without share: {0}'.format(msg))
            return
        elif d_share == 'event_fire':  # rebroadcast events from other masters
            self.event.value.append(msg)
            #log.debug("\n**** Event Fire \n {0}\n".format(msg))
            return
        elif d_share == 'local_cmd':
            # Refuse local commands over the wire
            log.error('Received local command remotely! Ignoring: {0}'.format(msg))
            return
        elif d_share == 'remote_cmd':
            # Send it to a remote worker
            if 'load' in msg:
                role = self.road_stack.value.nameRemotes[sender].role
                msg['load']['id'] = role  # sender # should this be role XXXX
            # round-robin to the next worker yard
            self.lane_stack.value.transmit(msg,
                                           self.lane_stack.value.fetchUidByName(next(self.workers.value)))

    def _process_lane_rxmsg(self, msg, sender):
        '''
        Send uxd messages to the right queue or forward them to the correct
        yard etc.
        msg is message body dict
        sender is unique name of remote that sent the message
        '''
        try:
            s_estate, s_yard, s_share = msg['route']['src']
            d_estate, d_yard, d_share = msg['route']['dst']
        except (ValueError, IndexError):
            log.error('Lane Router Received invalid message: {0}'.format(msg))
            return
        if s_yard is None:
            return  # drop message
        if s_estate is None:  # substitute local estate
            s_estate = self.road_stack.value.local.name
            msg['route']['src'] = (s_estate, s_yard, s_share)
        log.debug("**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n"
                  " msg={3}\n".format(
                      self.opts.value['id'],
                      self.road_stack.value.local.name,
                      self.lane_stack.value.local.name,
                      msg))
        if d_estate is None:
            pass
        elif d_estate != self.road_stack.value.local.name:
            # Forward to the correct estate
            if d_estate in self.road_stack.value.nameRemotes:
                self.road_stack.value.message(msg,
                                              self.road_stack.value.nameRemotes[d_estate].uid)
            return
        if d_share == 'pub_ret':
            # only publish to available minions
            msg['return']['ret']['minions'] = self._availablize(msg['return']['ret']['minions'])
            # only accept publishes from a verified worker
            if msg.get('__worker_verify') == self.worker_verify.value:
                self.publish.value.append(msg)
        if d_yard is None:
            pass
        elif d_yard != self.lane_stack.value.local.name:
            # Meant for another yard, send it off!
            if d_yard in self.lane_stack.value.nameRemotes:
                self.lane_stack.value.transmit(msg,
                                               self.lane_stack.value.nameRemotes[d_yard].uid)
            return
        if d_share is None:
            # No queue destination!
            log.error('Lane Router Received message without share: {0}'.format(msg))
            return
        elif d_share == 'local_cmd':
            # hand to the next worker yard (round-robin iterator)
            self.lane_stack.value.transmit(msg,
                                           self.lane_stack.value.fetchUidByName(next(self.workers.value)))
        elif d_share == 'event_req':
            self.event_req.value.append(msg)
            #log.debug("\n**** Event Subscribe \n {0}\n".format(msg))
        elif d_share == 'event_fire':
            self.event.value.append(msg)
            #log.debug("\n**** Event Fire \n {0}\n".format(msg))
        elif d_share == 'presence_req':
            self.presence_req.value.append(msg)
            #log.debug("\n**** Presence Request \n {0}\n".format(msg))
class SaltRaetRouterMinion(SaltRaetRouter):
    '''
    Routes the communication in and out of Road and Lane connections
    Specific to Minions

    do salt raet router minion
    '''
    def _process_road_rxmsg(self, msg, sender):
        '''
        Send to the right queue
        msg is the message body dict
        sender is the unique name of the remote estate that sent the message
        '''
        try:
            s_estate, s_yard, s_share = msg['route']['src']
            d_estate, d_yard, d_share = msg['route']['dst']
        except (ValueError, IndexError):
            log.error('Received invalid message: {0}'.format(msg))
            return
        if s_estate is None:  # drop
            return
        log.debug("**** Road Router rxMsg **** id={0} estate={1} yard={2}\n"
                  " msg= {3}\n".format(
                      self.opts.value['id'],
                      self.road_stack.value.local.name,
                      self.lane_stack.value.local.name,
                      msg))
        if d_estate is not None and d_estate != self.road_stack.value.local.name:
            log.error(
                'Road Router Received message for wrong estate: {0}'.format(d_estate))
            return
        if d_yard is not None:
            # Meant for another yard, send it off!
            if d_yard in self.lane_stack.value.nameRemotes:
                self.lane_stack.value.transmit(msg,
                                               self.lane_stack.value.nameRemotes[d_yard].uid)
                return
            return  # unknown yard: drop
        if d_share is None:
            # No queue destination!
            log.error('Received message without share: {0}'.format(msg))
            return
        elif d_share == 'fun':
            # queue function execution requests for the jobber
            if self.road_stack.value.kind == kinds.applKinds.minion:
                self.fun.value.append(msg)

    def _process_lane_rxmsg(self, msg, sender):
        '''
        Send uxd messages to the right queue or forward them to the correct
        yard etc.
        msg is message body dict
        sender is unique name of remote that sent the message
        '''
        try:
            s_estate, s_yard, s_share = msg['route']['src']
            d_estate, d_yard, d_share = msg['route']['dst']
        except (ValueError, IndexError):
            log.error('Lane Router Received invalid message: {0}'.format(msg))
            return
        if s_yard is None:
            return  # drop message
        if s_estate is None:  # substitute local estate
            s_estate = self.road_stack.value.local.name
            msg['route']['src'] = (s_estate, s_yard, s_share)
        log.debug("**** Lane Router rxMsg **** id={0} estate={1} yard={2}\n"
                  " msg={3}\n".format(
                      self.opts.value['id'],
                      self.road_stack.value.local.name,
                      self.lane_stack.value.local.name,
                      msg))
        if d_estate is None:
            pass
        elif d_estate != self.road_stack.value.local.name:
            # Forward to the correct estate
            if d_estate in self.road_stack.value.nameRemotes:
                self.road_stack.value.message(msg,
                                              self.road_stack.value.nameRemotes[d_estate].uid)
            return
        if d_yard is None:
            pass
        elif d_yard != self.lane_stack.value.local.name:
            # Meant for another yard, send it off!
            if d_yard in self.lane_stack.value.nameRemotes:
                self.lane_stack.value.transmit(msg,
                                               self.lane_stack.value.nameRemotes[d_yard].uid)
                return
            return  # unknown yard: drop
        if d_share is None:
            # No queue destination!
            log.error('Lane Router Received message without share: {0}'.format(msg))
            return
        elif d_share == 'event_req':
            self.event_req.value.append(msg)
            #log.debug("\n**** Event Subscribe \n {0}\n".format(msg))
        elif d_share == 'event_fire':
            self.event.value.append(msg)
            #log.debug("\n**** Event Fire \n {0}\n".format(msg))
        elif d_share == 'remote_cmd':  # assume minion to master or salt-call
            if not self.road_stack.value.remotes:
                log.error("**** Lane Router: Missing joined master. Unable to route "
                          "remote_cmd. Requeuing".format())
                self.laters.value.append((msg, sender))
                return
            # Fix: opts is an ioflo share; its payload lives on .value
            # (self.opts.get does not exist on the share object).
            d_estate = self._get_master_estate_name(clustered=self.opts.value.get('cluster_mode', False))
            if not d_estate:
                log.error("**** Lane Router: No available destination estate for 'remote_cmd'."
                          "Unable to route. Requeuing".format())
                self.laters.value.append((msg, sender))
                return
            msg['route']['dst'] = (d_estate, d_yard, d_share)
            log.debug("**** Lane Router: Missing destination estate for 'remote_cmd'. "
                      "Using default route={0}.".format(msg['route']['dst']))
            self.road_stack.value.message(msg,
                                          self.road_stack.value.nameRemotes[d_estate].uid)

    def _get_master_estate_name(self, clustered=False):
        '''
        Assign and return the name of the estate for the default master or empty if none
        If the default master is no longer available then selects one of the available
        masters
        '''
        opts = self.opts.value
        master = self.road_stack.value.nameRemotes.get(self.master_estate_name.value)
        if not master or not master.alived:  # select a different master
            available_masters = [remote for remote in
                                 six.itervalues(self.road_stack.value.remotes)
                                 if remote.alived]
            if available_masters:
                random_master = opts.get('random_master')
                if random_master:
                    master = available_masters[random.randint(0, len(available_masters) - 1)]
                else:
                    master = available_masters[0]
            else:
                master = None
        self.master_estate_name.value = master.name if master else ''
        return self.master_estate_name.value

    def _availablize(self, minions):
        '''
        Return list that is intersection of associated minion estates for
        roles in minions and the set of available minion estates.
        '''
        suffix = '_{0}'.format(kinds.APPL_KIND_NAMES[kinds.applKinds.minion])
        # Fix: str.rstrip strips a trailing character set, not a suffix;
        # strip the literal suffix so bare names are not corrupted.
        return list(set(minions) &
                    set((name[:-len(suffix)] if name.endswith(suffix) else name
                         for name in self.availables.value)))

    def action(self):
        '''
        Process the messages!
        '''
        while self.road_stack.value.rxMsgs:
            msg, sender = self.road_stack.value.rxMsgs.popleft()
            # Fix: the handlers defined above are _process_road_rxmsg and
            # _process_lane_rxmsg; the old _process_udp_rxmsg /
            # _process_uxd_rxmsg names do not exist and raised AttributeError.
            self._process_road_rxmsg(msg=msg, sender=sender)
        while self.laters.value:  # process requeued LaneMsgs
            msg, sender = self.laters.value.popleft()
            self.lane_stack.value.rxMsgs.append((msg, sender))
        while self.lane_stack.value.rxMsgs:
            msg, sender = self.lane_stack.value.rxMsgs.popleft()
            self._process_lane_rxmsg(msg=msg, sender=sender)
class SaltRaetEventer(ioflo.base.deeding.Deed):
    '''
    Fire events!
    FloScript:

    do salt raet eventer
    '''
    Ioinits = {'opts': '.salt.opts',
               'event_yards': '.salt.event.yards',
               'event': '.salt.event.events',
               'event_req': '.salt.event.event_req',
               'module_refresh': '.salt.var.module_refresh',
               'pillar_refresh': '.salt.var.pillar_refresh',
               'lane_stack': '.salt.lane.manor.stack',
               'road_stack': '.salt.road.manor.stack',
               'availables': '.salt.var.presence.availables', }

    def _register_event_yard(self, msg):
        '''
        register an incoming event request with the requesting yard id
        '''
        self.event_yards.value.add(msg['route']['src'][1])

    def _forward_event(self, msg):
        '''
        Forward an event message to all subscribed yards
        Event message has a route
        '''
        rm_ = []  # vanished subscribers, collected so the set is not mutated mid-iteration
        # special tags flip refresh flags for other deeds to act on
        if msg.get('tag') == 'pillar_refresh':
            self.pillar_refresh.value = True
        if msg.get('tag') == 'module_refresh':
            self.module_refresh.value = True
        for y_name in self.event_yards.value:
            if y_name not in self.lane_stack.value.nameRemotes:  # subscriber not a remote
                rm_.append(y_name)
                continue  # drop msg don't publish
            self.lane_stack.value.transmit(msg,
                                           self.lane_stack.value.fetchUidByName(y_name))
            self.lane_stack.value.serviceAll()
        for y_name in rm_:  # remove missing subscribers
            self.event_yards.value.remove(y_name)

    def action(self):
        '''
        Register event requests
        Iterate over the registered event yards and fire!
        '''
        while self.event_req.value:  # event subscription requests are msg with routes
            self._register_event_yard(
                self.event_req.value.popleft()
            )
        while self.event.value:  # events are msgs with routes
            self._forward_event(
                self.event.value.popleft()
            )
class SaltRaetEventerMaster(SaltRaetEventer):
    '''
    Fire events!
    FloScript:

    do salt raet eventer master
    '''
    def _forward_event(self, msg):
        '''
        Forward an event message to all subscribed yards
        Event message has a route
        Also rebroadcast to all masters in cluster
        '''
        super(SaltRaetEventerMaster, self)._forward_event(msg)
        if self.opts.value.get('cluster_mode'):
            # only rebroadcast events that originated locally ('origin' unset)
            if msg.get('origin') is None:
                # peers that are both available and of master kind
                masters = (self.availables.value &
                           set((remote.name for remote in six.itervalues(self.road_stack.value.remotes)
                                if remote.kind == kinds.applKinds.master)))
                for name in masters:
                    remote = self.road_stack.value.nameRemotes[name]
                    # stamp origin so the receiving master does not rebroadcast again
                    msg['origin'] = self.road_stack.value.name
                    s_estate, s_yard, s_share = msg['route']['src']
                    msg['route']['src'] = (self.road_stack.value.name, s_yard, s_share)
                    msg['route']['dst'] = (remote.name, None, 'event_fire')
                    self.road_stack.value.message(msg, remote.uid)
class SaltRaetPresenter(ioflo.base.deeding.Deed):
    '''
    Fire presence events!
    FloScript:

    do salt raet presenter
    '''
    Ioinits = {'opts': '.salt.opts',
               'presence_req': '.salt.presence.event_req',
               'lane_stack': '.salt.lane.manor.stack',
               'alloweds': '.salt.var.presence.alloweds',  # odict
               'aliveds': '.salt.var.presence.aliveds',  # odict
               'reapeds': '.salt.var.presence.reapeds',  # odict
               'availables': '.salt.var.presence.availables',  # set
               }

    def _send_presence(self, msg):
        '''
        Forward a presence message to all subscribed yards
        Presence message has a route
        '''
        y_name = msg['route']['src'][1]
        if y_name not in self.lane_stack.value.nameRemotes:  # subscriber not a remote
            pass  # drop msg don't answer
        else:
            # requested presence state, if any, travels in msg['data']['state']
            if 'data' in msg and 'state' in msg['data']:
                state = msg['data']['state']
            else:
                state = None
            # create answer message
            if state in [None, 'available', 'present']:
                # default view: every available minion with its address (or None)
                present = odict()
                for name in self.availables.value:
                    minion = self.aliveds.value.get(name, None)
                    present[name] = minion.ha[0] if minion else None
                data = {'present': present}
            else:
                # TODO: update to really return joineds
                states = {'joined': self.alloweds,
                          'allowed': self.alloweds,
                          'alived': self.aliveds,
                          'reaped': self.reapeds}
                try:
                    minions = states[state].value
                except KeyError:
                    # error: wrong/unknown state requested
                    log.error('Lane Router Received invalid message: {0}'.format(msg))
                    return
                result = odict()
                for name in minions:
                    result[name] = minions[name].ha[0]
                data = {state: result}
            tag = tagify('present', 'presence')
            route = {'dst': (None, None, 'event_fire'),
                     'src': (None, self.lane_stack.value.local.name, None)}
            msg = {'route': route, 'tag': tag, 'data': data}
            self.lane_stack.value.transmit(msg,
                                           self.lane_stack.value.fetchUidByName(y_name))
            self.lane_stack.value.serviceAll()

    def action(self):
        '''
        Register presence requests
        Iterate over the registered presence yards and fire!
        '''
        while self.presence_req.value:  # presence are msgs with routes
            self._send_presence(
                self.presence_req.value.popleft()
            )
class SaltRaetPublisher(ioflo.base.deeding.Deed):
    '''
    Publish to the minions
    FloScript:

    do salt raet publisher
    '''
    Ioinits = {'opts': '.salt.opts',
               'publish': '.salt.var.publish',
               'stack': '.salt.road.manor.stack',
               'availables': '.salt.var.presence.availables',
               }

    def _publish(self, pub_msg):
        '''
        Publish the message out to the targeted minions
        '''
        stack = self.stack.value
        pub_data = pub_msg['return']
        # only publish to available minions by intersecting sets
        minions = (self.availables.value &
                   set((remote.name for remote in six.itervalues(stack.remotes)
                        if remote.kind in [kinds.applKinds.minion,
                                           kinds.applKinds.syndic])))
        for minion in minions:
            uid = self.stack.value.fetchUidByName(minion)
            if uid:  # skip minions whose uid cannot be resolved
                route = {
                    'dst': (minion, None, 'fun'),
                    'src': (self.stack.value.local.name, None, None)}
                msg = {'route': route, 'pub': pub_data['pub']}
                self.stack.value.message(msg, uid)

    def action(self):
        '''
        Pop the publish queue and publish the requests!
        '''
        while self.publish.value:
            self._publish(
                self.publish.value.popleft()
            )
class SaltRaetSetupBeacon(ioflo.base.deeding.Deed):
    '''
    Create the Beacon subsystem
    '''
    Ioinits = {'opts': '.salt.opts',
               'beacon': '.salt.beacon'}

    def action(self):
        '''
        Instantiate the Beacon manager and publish it on the beacon share
        '''
        opts = self.opts.value
        self.beacon.value = salt.beacons.Beacon(opts)
class SaltRaetBeacon(ioflo.base.deeding.Deed):
    '''
    Run the beacons
    '''
    Ioinits = {'opts': '.salt.opts',
               'modules': '.salt.loader.modules',
               'master_events': '.salt.var.master_events',
               'beacon': '.salt.beacon'}

    def action(self):
        '''
        Run the beacons
        '''
        # config.merge may not be loaded yet; only run once it is available
        if 'config.merge' in self.modules.value:
            b_conf = self.modules.value['config.merge']('beacons')
            if b_conf:
                try:
                    # queue any events the beacons produced for delivery to the master
                    self.master_events.value.extend(self.beacon.value.process(b_conf))
                except Exception:
                    # best-effort: a broken beacon must not kill the behavior loop
                    log.error('Error in the beacon system: ', exc_info=True)
        return []
class SaltRaetMasterEvents(ioflo.base.deeding.Deed):
    '''
    Take the events off the master event que and send them to the master to
    be fired
    '''
    Ioinits = {'opts': '.salt.opts',
               'road_stack': '.salt.road.manor.stack',
               'master_events': '.salt.var.master_events'}

    def postinitio(self):
        # the event queue is created here rather than via an Ioinits ival
        self.master_events.value = deque()

    def action(self):
        if not self.master_events.value:
            return
        events = []
        # NOTE(review): this loop only leaves master_uid bound to the last
        # key iterated from remotes -- an arbitrary master; confirm whether
        # events should instead go to a specific or to every master
        for master in self.road_stack.value.remotes:
            master_uid = master
        while self.master_events.value:
            events.append(self.master_events.value.popleft())
        route = {'src': (self.road_stack.value.local.name, None, None),
                 'dst': (next(six.itervalues(self.road_stack.value.remotes)).name, None, 'remote_cmd')}
        load = {'id': self.opts.value['id'],
                'events': events,
                'cmd': '_minion_event'}
        self.road_stack.value.transmit({'route': route, 'load': load},
                                       uid=master_uid)
class SaltRaetNixJobber(ioflo.base.deeding.Deed):
    '''
    Execute a function call job on a minion on a *nix based system
    FloScript:

    do salt raet nix jobber
    '''
    Ioinits = {'opts_store': '.salt.opts',
               'grains': '.salt.grains',
               'modules': '.salt.loader.modules',
               'returners': '.salt.loader.returners',
               'fun': '.salt.var.fun',
               'executors': '.salt.track.executors',
               'road_stack': '.salt.road.manor.stack', }

    def _prepare(self):
        '''
        Map opts for convenience
        '''
        self.opts = self.opts_store.value
        self.matcher = salt.minion.Matcher(
            self.opts,
            self.modules.value)
        self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'])
        self.serial = salt.payload.Serial(self.opts)
        self.executors.value = {}

    def _setup_jobber_stack(self):
        '''
        Setup and return the LaneStack and Yard used by the jobber yard
        to communicate with the minion manor yard
        '''
        role = self.opts.get('id', '')
        if not role:
            emsg = ("Missing role required to setup Jobber Lane.")
            log.error(emsg + "\n")
            raise ValueError(emsg)
        kind = self.opts['__role']
        if kind not in kinds.APPL_KINDS:
            emsg = ("Invalid application kind = '{0}' for Jobber lane.".format(kind))
            log.error(emsg + "\n")
            raise ValueError(emsg)
        if kind == 'minion':
            lanename = "{0}_{1}".format(role, kind)
        else:
            emsg = ("Unsupported application kind = '{0}' for Jobber Lane.".format(kind))
            log.error(emsg + '\n')
            raise ValueError(emsg)
        sockdirpath = self.opts['sock_dir']
        name = 'jobber' + nacling.uuid(size=18)
        stack = LaneStack(
            name=name,
            lanename=lanename,
            sockdirpath=sockdirpath)
        stack.Pk = raeting.packKinds.pack
        # add remote for the manor yard
        stack.addRemote(RemoteYard(stack=stack,
                                   name='manor',
                                   lanename=lanename,
                                   dirpath=sockdirpath))
        console.concise("Created Jobber Stack {0}\n".format(stack.name))
        return stack

    def _return_pub(self, msg, ret, stack):
        '''
        Send the return data back via the uxd socket
        '''
        route = {'src': (self.road_stack.value.local.name, stack.local.name, 'jid_ret'),
                 'dst': (msg['route']['src'][0], None, 'remote_cmd')}
        mid = self.opts['id']
        ret['cmd'] = '_return'
        ret['id'] = mid
        try:
            oput = self.modules.value[ret['fun']].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, str):
                ret['out'] = oput
        msg = {'route': route, 'load': ret}
        stack.transmit(msg, stack.fetchUidByName('manor'))
        stack.serviceAll()

    def action(self):
        '''
        Pull the queue for functions to execute
        '''
        while self.fun.value:
            msg = self.fun.value.popleft()
            data = msg.get('pub')
            # resolve the matcher method named after the target type
            match = getattr(
                self.matcher,
                '{0}_match'.format(
                    data.get('tgt_type', 'glob')
                )
            )(data['tgt'])
            if not match:
                continue
            if 'user' in data:
                log.info(
                    'User {0[user]} Executing command {0[fun]} with jid '
                    '{0[jid]}'.format(data))
            else:
                log.info(
                    'Executing command {0[fun]} with jid {0[jid]}'.format(data)
                )
            log.debug('Command details {0}'.format(data))
            # isolate each job in its own process
            process = multiprocessing.Process(
                target=self.proc_run,
                kwargs={'msg': msg}
            )
            process.start()
            process.join()

    def proc_run(self, msg):
        '''
        Execute the run in a dedicated process
        '''
        data = msg['pub']
        fn_ = os.path.join(self.proc_dir, data['jid'])
        self.opts['__ex_id'] = data['jid']
        salt.utils.daemonize_if(self.opts)
        salt.transport.jobber_stack = stack = self._setup_jobber_stack()
        # set up return destination from source
        src_estate, src_yard, src_share = msg['route']['src']
        salt.transport.jobber_estate_name = src_estate
        salt.transport.jobber_yard_name = src_yard
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        with salt.utils.fopen(fn_, 'w+') as fp_:
            fp_.write(self.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in self.modules.value:
            try:
                func = self.modules.value[data['fun']]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func,
                    salt.utils.args.parse_input(data['arg']),
                    data)
                sys.modules[func.__module__].__context__['retcode'] = 0
                return_data = func(*args, **kwargs)
                if isinstance(return_data, types.GeneratorType):
                    ind = 0
                    iret = {}
                    for single in return_data:
                        # Fix: test isinstance(iret, dict) (was list) --
                        # iret starts as {}, so the old test was never true
                        # and a second dict yield after a non-dict would
                        # have called .update() on a list (AttributeError).
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify(
                            [data['jid'], 'prog', self.opts['id'], str(ind)],
                            'job')
                        event_data = {'return': single}
                        self._fire_master(event_data, tag)  # Need to look into this
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = sys.modules[func.__module__].__context__.get(
                    'retcode',
                    0
                )
                ret['success'] = True
            # Fix: the handlers below used to rebind the parameter ``msg``
            # to an error string, which then corrupted the _return_pub call
            # at the end of this method; use a separate local ``emsg``.
            except CommandNotFoundError as exc:
                emsg = 'Command required for {0!r} not found'.format(
                    function_name
                )
                log.debug(emsg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(emsg, exc)
            except CommandExecutionError as exc:
                log.error(
                    'A command in {0!r} had a problem: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing {0!r}: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing {0!r}: {1}'.format(
                    function_name, exc
                )
            except TypeError as exc:
                emsg = ('TypeError encountered executing {0}: {1}. See '
                        'debug log for more info.').format(function_name, exc)
                log.warning(emsg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = emsg
            except Exception:
                emsg = 'The minion function caused an exception'
                log.warning(emsg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = '{0}: {1}'.format(emsg, traceback.format_exc())
        else:
            ret['return'] = '{0!r} is not available.'.format(function_name)
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        self._return_pub(msg, ret, stack)
        if data['ret']:
            ret['id'] = self.opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    self.returners.value['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
        console.concise("Closing Jobber Stack {0}\n".format(stack.name))
        stack.server.close()
        salt.transport.jobber_stack = None
|
from application import create_app
from os import environ, path
from dotenv import load_dotenv
# Load variables from the .env file next to this module into os.environ
# before Config reads them below.
basedir = path.abspath(path.dirname(__file__))
load_dotenv(path.join(basedir, '.env'))
class Config:
    """Base config.

    Values are sourced from the process environment (populated via the
    .env file loaded above); environ.get returns None when a variable
    is unset.
    """
    STATIC_FOLDER = 'static'
    TEMPLATES_FOLDER = 'templates'
    SWAGGER_URL = '/api/docs'  # URL for exposing Swagger UI (without trailing '/')
    API_URL = '/static/swagger.yaml'
    SECRET_KEY = environ.get('SECRET_KEY')
    FLASK_ENV = environ.get('FLASK_ENV')
    # NOTE(review): environ.get returns strings -- DEBUG/TESTING hold the
    # literal env text (e.g. "False" is truthy); confirm downstream handling.
    DEBUG = environ.get('DEBUG')
    TESTING = environ.get('TESTING')
    MONGODB_DB = environ.get('MONGODB_DB')
    MONGODB_HOST = environ.get('MONGODB_HOST')
    MONGODB_USERNAME = environ.get('MONGODB_USERNAME')
    MONGODB_PASSWORD = environ.get('MONGODB_PASSWORD')
# Build the application at import time so WSGI servers can import `app`.
app = create_app(Config)
if __name__ == "__main__":
    # Development convenience: bind on all interfaces when run directly.
    app.run(host='0.0.0.0')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
def main():
    """Drive the demo: read sizes, build the grid, sample particles, plot."""
    particles_per_cell = get_number_particle_cell()
    cells = get_number_cell()
    # one more grid point than cells
    grids = cells + 1
    size = get_cell_size(cells)
    coordinates = get_grid_coordinate(grids, size)
    histogram = get_distribution(particles_per_cell,
                                 cells,
                                 grids,
                                 size,
                                 coordinates)
    plot_distribution(coordinates, histogram)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Get cell size
#------------------------------------------------------------------------------
def get_cell_size(number_cell):
    """Return the width of one cell on the unit interval [0, 1]."""
    return 1.0 / float(number_cell)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Get distribution
#------------------------------------------------------------------------------
def get_distribution(number_particle_cell,
                     number_cell,
                     number_grid,
                     cell_size,
                     grid_coordinate):
    """Scatter random particles onto the grid using linear shape factors
    and return the accumulated per-grid-point weights."""
    # Import
    from random import random
    # Format
    index = [0] * 2          # the two grid points bracketing a particle
    length = [0] * 2         # distances from the particle to those points
    shape_factor = [0] * 2   # linear weights; the pair sums to 1
    distribution = [0] * number_grid
    for counter in range(number_particle_cell * number_cell):
        # Get position of particle (uniform on [0, 1))
        position = random()
        # Get index of the containing cell and its right-hand grid point
        index[0] = int(position / cell_size)
        index[1] = index[0] + 1
        # Get length to the left and right grid points
        length[0] = position - grid_coordinate[index[0]]
        length[1] = - position + grid_coordinate[index[1]]
        # Get shape factor: each point is weighted by the distance to the
        # opposite point (first-order / linear interpolation)
        shape_factor[0] = length[1] / cell_size
        shape_factor[1] = length[0] / cell_size
        # Add shape factor to distribution
        distribution[index[0]] += shape_factor[0]
        distribution[index[1]] += shape_factor[1]
    # Fix the boundaries: the end grid points are doubled -- presumably
    # because they own only half a cell; TODO confirm intended normalization
    distribution[0] *= 2.0
    distribution[-1] *= 2.0
    # Return
    return distribution
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Get grid coordinates
#------------------------------------------------------------------------------
def get_grid_coordinate(number_grid, cell_size):
    """Return the grid-point coordinates, spaced cell_size apart from 0."""
    return [float(point) * cell_size for point in range(number_grid)]
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Get the number of cells
#------------------------------------------------------------------------------
def get_number_cell():
    """Prompt repeatedly until the user supplies a positive integer cell
    count and return it.

    Non-numeric input is rejected with a message instead of raising
    ValueError (the previous int() call crashed on e.g. 'abc').
    """
    while True:
        # Input
        raw = input('The number of cells: ')
        # Convert, rejecting non-numeric text
        try:
            number_cell = int(raw)
        except ValueError:
            print('Invalid.')
            continue
        # Check: must be strictly positive
        if number_cell > 0:
            return number_cell
        print('Invalid.')
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Get the number of particles per cell
#------------------------------------------------------------------------------
def get_number_particle_cell():
    """Prompt repeatedly until the user supplies a positive integer number
    of particles per cell and return it.

    Non-numeric input is rejected with a message instead of raising
    ValueError (the previous int() call crashed on e.g. 'abc').
    """
    while True:
        # Input
        raw = input('The number of particles per cell: ')
        # Convert, rejecting non-numeric text
        try:
            number_particle_cell = int(raw)
        except ValueError:
            print('Invalid.')
            continue
        # Check: must be strictly positive
        if number_particle_cell > 0:
            return number_particle_cell
        print('Invalid.')
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Plot distribution
#------------------------------------------------------------------------------
def plot_distribution(grid_coordinate, distribution):
    """Plot the particle distribution with its mean and standard-deviation
    band and save the figure to shape-factor-particle.pdf."""
    # Import
    import matplotlib.pyplot as plt
    from statistics import mean, stdev
    # Font
    plt.rcParams['font.family'] = 'FreeSans'
    # Get average
    average = mean(distribution)
    # Get standard deviation
    standard_deviation = stdev(distribution)
    # Make new window
    fig, ax = plt.subplots()
    # Plot: shaded one-sigma band around the average
    plt.axhspan(average - standard_deviation,
                average + standard_deviation,
                alpha=0.2,
                label='Standard deviation',
                color='gray')
    plt.axhline(average, color='orange', label='Average')
    plt.plot(grid_coordinate, distribution, '.-', label='Particle')
    # Set axes label
    ax.set_xlabel('Coordinate')
    ax.set_ylabel('The number of particles')
    # Set legend
    plt.legend()
    # Save figure
    plt.savefig('shape-factor-particle.pdf', bbox_inches='tight')
    # Close figure
    plt.close()
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Call main
#------------------------------------------------------------------------------
if __name__ == '__main__':
main()
#------------------------------------------------------------------------------
|
import json
import urllib
import urllib.request
import remoteconsole.client_input
def send_request(server_address, request_data):
    """POST request_data as JSON to the console server and return the
    decoded JSON reply.

    For 'run' requests, any handler output in the reply is echoed to
    stdout as-is (no extra newline).
    """
    host, port = server_address
    console_url = 'http://{}:{}'.format(host, port)
    payload = json.dumps(request_data).encode('utf-8')
    request = urllib.request.Request(
        console_url,
        data=payload,
        headers={'Content-Type': 'application/json'},
    )
    response = urllib.request.urlopen(request)
    reply = json.loads(response.read().decode('utf-8'))
    if request_data['type'] == 'run':
        handler_output = reply.get('handler_output')
        if handler_output:
            print(handler_output, end='')
    return reply
def run_once(server_address, input, more):
    """Read one line via the supplied input function and run it remotely.

    Returns the server's 'more' flag: True when the statement is
    incomplete and the next prompt should be the continuation prompt.
    Raises EOFError on the Ctrl-Z sentinel.
    """
    prompt = '... ' if more else '>>> '
    line = input(prompt)
    if line == '\x1A':  # Ctrl-Z on win32
        raise EOFError
    reply = send_request(server_address, {
        'type': 'run',
        'line': line
    })
    return reply.get('more', False)
def run_repl(server_address):
    """Interactive loop: prompt, send, repeat until end-of-input."""
    read_line = remoteconsole.client_input.setup(send_request, server_address)
    more = False
    while True:
        try:
            more = run_once(server_address, read_line, more)
        except KeyboardInterrupt:
            # mirror the interactive interpreter: report and keep going
            print('KeyboardInterrupt')
        except EOFError:
            break
def parse_address():
    """Return the (ip, port) pair from argv, defaulting to 127.0.0.1:9000."""
    import sys
    args = sys.argv
    ip = args[1] if len(args) >= 2 else '127.0.0.1'
    port = int(args[2]) if len(args) >= 3 else 9000
    return (ip, port)
def main():
    """Entry point: resolve the server address and start the REPL."""
    run_repl(parse_address())
if __name__ == '__main__':
main()
|
# Load the product catalogue and group its rows by product name.
import pandas as pb  # NOTE(review): conventional alias is `pd`
data = pb.read_csv("products.csv")
# Rebinds `data` to a DataFrameGroupBy keyed on product_name.
data = data.groupby("product_name")
|
'''Task 8. A 5x4 matrix is filled from keyboard input except for the last
element of each row. The program computes the sum of the entered elements
of every row, stores it in the row's last cell, and finally prints the
resulting matrix.'''
# NOTE(review): the task says "5x4" but only 4 rows of 4 inputs are read
# (plus 1 sum column = 5 printed columns) — confirm whether 5 rows of
# 4 values were intended instead.
matrix = []
for i in range(4):
    matrix.append([])
    sum = 0  # NOTE: shadows the built-in sum() for the rest of the script
    for n in range(4):
        # Prompt (Russian): "Enter the element of row i+1 and column n+1"
        user_number = int(input(f'Введите элемент {i+1} и {n+1} столбца: '))
        sum += user_number
        matrix[i].append(user_number)
    # The row's final cell holds the row total.
    matrix[i].append(sum)
# Print each 5-cell row right-aligned in 4-character fields.
for a in matrix:
    print(('{:>4d}' * 5).format(*a))
|
from __future__ import absolute_import, division, print_function
from dask.utils import Dispatch
is_device_object = Dispatch(name="is_device_object")
@is_device_object.register(object)
def is_device_object_default(o):
    # Fallback: any object exposing __cuda_array_interface__ is treated as
    # living in device (GPU) memory.
    return hasattr(o, "__cuda_array_interface__")
@is_device_object.register(list)
@is_device_object.register(tuple)
@is_device_object.register(set)
@is_device_object.register(frozenset)
def is_device_object_python_collection(seq):
    """Return True if any element of the collection is a device object.

    Uses a generator expression so ``any`` short-circuits on the first
    device object instead of materialising a throwaway list.
    """
    return any(is_device_object(s) for s in seq)
@is_device_object.register(dict)
def is_device_object_python_dict(seq):
    """Return True if any key or value of the dict is a device object.

    Each item is a ``(key, value)`` tuple, which the registered tuple
    handler inspects recursively; the generator short-circuits on the
    first hit instead of building an intermediate list.
    """
    return any(is_device_object(s) for s in seq.items())
@is_device_object.register_lazy("cudf")
def register_cudf():
    # Registered lazily so cudf is only imported once something actually
    # dispatches on a cudf type.
    import cudf
    @is_device_object.register(cudf.DataFrame)
    def is_device_object_cudf_dataframe(df):
        # cudf containers always hold device memory.
        return True
    @is_device_object.register(cudf.Series)
    def is_device_object_cudf_series(s):
        return True
    @is_device_object.register(cudf.BaseIndex)
    def is_device_object_cudf_index(s):
        return True
|
import flask_jwtlogin as jwtl
from flask import Flask, request, jsonify, abort
from threading import Thread
from requests import get, post
from json import loads
import time
app = Flask(__name__)
# JWT settings consumed by flask_jwtlogin.
app.config.update({
    'JWT_HEADER_NAME': 'access-token',
    'JWT_SECRET_KEY': 'you will never guess me',
    'JWT_ENCODING_ALGORITHM': 'HS256',
    'JWT_LIFETIME': 3600 * 24 * 7  # one week, in seconds
})
login_manager = jwtl.JWTLogin()
login_manager.init_app(app)
assert all([i in login_manager.config.values() for i in app.config.values()])  # check singleton and configs
@app.route('/')
def hello():
    """Open route used as the unauthenticated smoke test."""
    return 'Hello world'
@app.route('/test_anonymous/')
def test_anonymous():
    """Return 403 for anonymous visitors, otherwise confirm authentication.

    The original fell through and returned None for authenticated users,
    which makes Flask raise a 500; return a response body instead.
    """
    if jwtl.current_user.is_anonymous:
        abort(403)
    return 'Authenticated'
@app.route('/jwt/')
@login_manager.jwt_required
def hello_jwt():
    """Sample View with needed jwt in request."""
    # The decorator already rejected requests lacking the token header.
    assert app.config.get('JWT_HEADER_NAME') in request.headers
    return 'Succeeded'
def shutdown_server():
    """Ask the werkzeug development server to stop serving.

    :raises RuntimeError: when not running under the werkzeug dev server.
    """
    stop = request.environ.get('werkzeug.server.shutdown')
    if stop is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    stop()
@app.route('/shutdown/', methods=['POST'])
def shutdown():
    """POST endpoint used by the self-test below to stop the dev server."""
    shutdown_server()
    return 'Server shutting down...'
@app.route('/get-token/<name>')
def get_token(name):
    """Issue a JWT for the named user, or abort with 401 when unknown."""
    match = next((u for u in user_storage if u.name == name), None)
    if match is None:
        abort(401)
    return jsonify(login_manager.generate_jwt_token(match.identifier))
@app.route('/current_user_test/')
@login_manager.jwt_required
def test_current_user():
    """Echo the identifier of the user resolved from the request's JWT."""
    return jwtl.current_user.identifier
@app.route('/login/')
@login_manager.jwt_required
def login():
    """View that loads user from jwt present in request."""
    user = login_manager.load_user()
    return user.identifier
def start_app_in_another_thread(app_instance):
    # Thread target: blocks in app.run() until the server is shut down.
    app_instance.run()
class User(jwtl.KnownUser):
    """Minimal in-memory user record used by this self-test."""
    def __init__(self, name, age, identifier):
        self.identifier = identifier
        self.name = name
        self.age = age
# Fixture users the login/token routes authenticate against.
user_storage = [
    User("Tom", 22, "AF5F123"),
    User("Jim", 25, "FFF1832"),
    User("Peter", 18, "CB0CA931")]
@login_manager.user_loader
def load_user(identifier):
    """Resolve a stored user by identifier; None when nothing matches."""
    return next((u for u in user_storage if u.identifier == identifier), None)
if __name__ == '__main__':
    # Run the app in a worker thread, then exercise the routes over real HTTP.
    thread = Thread(target=start_app_in_another_thread, args=(app, ))
    thread.start()
    time.sleep(2)  # crude wait for the dev server to come up
    # Token round-trip: issue a JWT per user and authenticate with it.
    for i in user_storage:
        r_get = get('http://127.0.0.1:5000/get-token/{}'.format(i.name))
        token = loads(r_get.text)
        r_login = get('http://127.0.0.1:5000/login/', headers=token)
        assert r_login.text == i.identifier
        current_user_req = get('http://127.0.0.1:5000/current_user_test/', headers=token)
        assert current_user_req.text == i.identifier
    # Anonymous requests: rejected on guarded routes, fine on open ones.
    assert get('http://127.0.0.1:5000/test_anonymous/').status_code == 403
    assert get('http://127.0.0.1:5000/').status_code == 200
    assert get('http://127.0.0.1:5000/').text == 'Hello world'
    assert get('http://127.0.0.1:5000/jwt/').status_code == 401
    assert get('http://127.0.0.1:5000/jwt/', headers={app.config.get('JWT_HEADER_NAME'): 'random'}).status_code == 401
    for i in user_storage:
        assert get('http://127.0.0.1:5000/get-token/{}'.format(i.name)).status_code == 200
    assert get('http://127.0.0.1:5000/get-token/Unknown_user').status_code == 401
    for i in user_storage:
        token_json = loads(get('http://127.0.0.1:5000/get-token/{}'.format(i.name)).text)
        user_identifier = get('http://127.0.0.1:5000/login/', headers=token_json).text
        assert i.identifier == user_identifier
    # Ask the dev server to stop so the worker thread can exit.
    post('http://127.0.0.1:5000/shutdown/')
|
#!/usr/bin/python
# Log in to the phone system over telnet and toggle handsfree mode.
import getpass  # NOTE(review): imported but unused
import sys  # NOTE(review): imported but unused
import telnetlib
HOST = "telephone"
user = "administrator"
# NOTE(review): the admin password is hard-coded in plain text and telnet
# sends it unencrypted — confirm this is acceptable for this deployment.
password = "789234"
# NOTE(review): passing str to read_until/write implies Python 2; Python 3's
# telnetlib requires bytes (and telnetlib was removed in Python 3.13).
tn = telnetlib.Telnet(HOST)
tn.read_until("Login: ")
tn.write(user)
tn.write("\r")
if password:
    tn.read_until("Password: ")
    tn.write(password)
    tn.write("\r")
tn.read_until("[administrator]# ")
tn.write("uiusim handsfree\n")
tn.read_until("[administrator]# ")
tn.write("exit\n")
|
# Generated by Django 3.2.3 on 2021-06-05 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Autogenerated: fix Opportunity's plural name and make the
    description field a plain TextField."""
    dependencies = [
        ('opportunities', '0002_alter_opportunity_options'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='opportunity',
            options={'verbose_name_plural': 'Opportunity'},
        ),
        migrations.AlterField(
            model_name='opportunity',
            name='Opportunities_description',
            field=models.TextField(verbose_name='Description'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os
import unittest
from glob import glob
from flowgen import language
from pypeg2 import parse, some
class PEGMixin(unittest.TestCase):
    """Shared assertion helpers for the parser test cases below."""
    def assertPEG(self, obj, cls, value=None):
        # Node must be the right type AND compare equal to the raw value.
        self.assertIsInstance(obj, cls)
        self.assertEqual(obj, value)
    def assertConditionEqual(self, obj, name, condition):
        # Condition nodes carry the keyword (if/while) and the expression.
        self.assertIsInstance(obj, language.Condition)
        self.assertEqual(obj.name, name)
        self.assertEqual(obj.condition, condition)
class InstructionTestCase(PEGMixin, unittest.TestCase):
    """Parsing of single instructions terminated by ';'."""
    def _test_instructions(self, case, result):
        # Parse `case` and expect an Instruction equal to `result`
        # (the trailing ';' is consumed by the grammar).
        tree = parse(case, language.Instruction)
        self.assertPEG(tree, language.Instruction, result)
    def test_instructions_parse(self):
        self._test_instructions('Welcome to code2flow;', 'Welcome to code2flow')
        self._test_instructions('Some text!;', 'Some text!')
class ConditionTestCase(PEGMixin, unittest.TestCase):
    """Parsing of if/while blocks into language.Condition trees.

    The multi-line string fixtures are byte-sensitive; do not reformat them.
    """
    def test_basic_while(self):
        tree = parse("""while (my_condition) {
instruction;
}""", language.Condition)
        self.assertConditionEqual(tree, 'while', "my_condition")
        self.assertPEG(tree[0], language.Instruction, "instruction")
    def test_basic_if(self):
        tree = parse("""if (example && aa) {
instruction;
}""", language.Condition)
        self.assertConditionEqual(tree, 'if', "example && aa")
        self.assertPEG(tree[0], language.Instruction, "instruction")
    def test_single_line_condition(self):
        # Braces are optional for a single-instruction body.
        tree = parse("if (cond) instruction;", language.Condition)
        self.assertConditionEqual(tree, 'if', "cond")
        self.assertPEG(tree[0], language.Instruction, "instruction")
    def test_condition_with_multiline_comment(self):
        tree = parse("""if (my_condition) {
code;
/* XXX */
}""", language.Condition)
        self.assertConditionEqual(tree, 'if', "my_condition")
        self.assertPEG(tree[0], language.Instruction, "code")
        self.assertPEG(tree[1], language.Comment, "/* XXX */")
    def test_condition_with_multiline_comment_in_multi_lines(self):
        tree = parse("""if (my_condition) {
code;
/* XXX
xxx
*/
}""", language.Condition)
        self.assertConditionEqual(tree, 'if', "my_condition")
        self.assertPEG(tree[0], language.Instruction, "code")
        self.assertPEG(tree[1], language.Comment, """/* XXX
xxx
*/""")
    def test_nested_condition(self):
        tree = parse("""if(my_condition) {
while(nested) {
code;
}
}""", language.Condition)
        self.assertConditionEqual(tree, 'if', "my_condition")
        self.assertConditionEqual(tree[0], 'while', "nested")
        self.assertEqual(tree[0][0], "code")
class CommentUnitTestCase(PEGMixin, unittest.TestCase):
    """Parsing of /* */ and // comments into language.Comment nodes."""
    def test_plain_multiline_comment(self):
        tree = parse("""/* foo
bar */
""", language.Comment)
        self.assertPEG(tree, language.Comment, """/* foo
bar */""")
    def test_plain_end_line_comment(self):
        # For end-of-line comments the leading "//" is stripped.
        tree = parse("""// foo""", language.Comment)
        self.assertPEG(tree, language.Comment, "foo")
class CodeUnitTestCase(PEGMixin, unittest.TestCase):
    """End-to-end parses of whole documents (language.Code)."""
    heading = """Welcome to code2flow;
"""
    condition = """if(In doubt?) {
Press Help;
while(!Ready?)
Read help;
}
"""
    comment = """//the preview updates
//as you write"""
    footer = "Improve your workflow!;"""  # NOTE(review): trailing "" is a stray empty-string concat
    def test_heading(self):
        parse(self.heading, some(language.Instruction))
        parse(self.heading, language.Code)
    def test_condition(self):
        parse(self.condition, some(language.Condition))
        parse(self.condition, language.Code)
    def test_comment(self):
        parse(self.comment, some(language.Comment))
        parse(self.comment, language.Code)
    def test_footer(self):
        parse(self.footer, some(language.Instruction))
        parse(self.footer, language.Code)
    def test_concat(self):
        # All fragment kinds concatenated must still form a valid document.
        parse(self.heading + self.condition + self.comment + self.footer, language.Code)
    def test_ignore_condition_in_comment(self):
        tree = parse("""// foo if(cond) instruction;
// bar""", language.Code)
        self.assertPEG(tree[0], language.Comment, "foo if(cond) instruction;")
        self.assertPEG(tree[1], language.Comment, "bar")
    def test_condition_with_end_line_comment(self):
        tree = parse("""if (my_condition) {
code;
};// simple comment""", language.Code)
        self.assertConditionEqual(tree[0], 'if', "my_condition")
        self.assertPEG(tree[0][0], language.Instruction, 'code')
        self.assertPEG(tree[1], language.Comment, 'simple comment')
    def test_condition_with_multiple_end_line_comments(self):
        tree = parse("""if (my_condition) {
code;
}; // simple comment
// second comment
""", language.Code)
        self.assertConditionEqual(tree[0], 'if', 'my_condition')
        self.assertPEG(tree[1], language.Comment, 'simple comment')
        self.assertPEG(tree[2], language.Comment, 'second comment')
    def test_empty_string(self):
        parse("", language.Code)
    def _get_root_dir(self):
        # Repository root, relative to this test module.
        return os.path.join(os.path.dirname(__file__), '..')
    def test_parse_examples(self):
        # Every bundled example document must parse without error.
        path = os.path.join(self._get_root_dir(), 'examples', '*.txt')
        files = glob(path)
        for file in files:
            with open(file, 'r') as fp:
                parse(fp.read(), language.Code)
|
import json
import pandas as pd
class Usuario:
    """A user with an e-mail address and a dict of numeric preferences."""

    def __init__(self, json_usuario=''):
        """Build a user from an already-decoded JSON dict.

        :param json_usuario: dict with ``"email"`` (str) and ``"preference"``
            (mapping of name -> number); falsy means an empty user.

        The original round-tripped every field through ``json.dumps`` and
        stripped quotes, which corrupts strings containing escapes or
        non-ASCII characters; read the decoded values directly instead.
        """
        if not json_usuario:
            self.email = ''
            self.preferencias = {}
        else:
            self.email = json_usuario["email"]
            self.preferencias = {}
            json_preferencias = json_usuario["preference"]
            for nome in json_preferencias:
                self.preferencias[nome] = float(json_preferencias[nome])

    def __str__(self):
        """Human-readable dump of the e-mail and every preference.

        Built by joining pre-formatted lines: the original appended the
        template *then* re-formatted the accumulated text, which breaks
        when a value or key contains brace characters.
        """
        ret = '\nEmail: {0}\n Preference:\n'.format(self.email)
        str_prefs = ''.join(
            '{}: {}\n'.format(nome, valor)
            for nome, valor in self.preferencias.items()
        )
        return ret + str_prefs

    def __eq__(self, other):
        # Users are identified by e-mail only; preferences are ignored.
        return self.email == other.email

    def to_json(self):
        """Serialise the instance attributes to a JSON string."""
        return json.dumps(self.__dict__)

    def get_lista_preferencias(self):
        """Return the preference values in insertion order."""
        return [self.preferencias[nome] for nome in self.preferencias]

    def to_series(self):
        """Return the preferences as a pandas Series with ``_usr`` suffixes."""
        dict_preferencias = {
            nome + '_usr': valor for nome, valor in self.preferencias.items()
        }
        return pd.Series(data=dict_preferencias)
|
import datetime
import pandas as pd
from iexfinance.base import _IEXBase
class ReferenceData(_IEXBase):
    """Base class for IEX Cloud reference-data endpoints."""
    @property
    def url(self):
        # Every reference endpoint lives under /ref-data/.
        return "ref-data/%s" % self.endpoint
    @property
    def endpoint(self):
        # Subclasses must supply their endpoint path.
        raise NotImplementedError
class TradingDatesReader(ReferenceData):
    """Retrieve trading date / trading holiday information."""
    def __init__(self, type_, direction=None, last=1, startDate=None, **kwargs):
        """Validate query parameters and normalise the start date.

        :raises ValueError: if direction is not "next" or "last".
        """
        # date/datetime objects become the compact YYYYMMDD string the
        # endpoint expects; anything else is passed through untouched.
        if isinstance(startDate, (datetime.date, datetime.datetime)):
            self.startDate = startDate.strftime("%Y%m%d")
        else:
            self.startDate = startDate
        self.type = type_
        if direction not in ("next", "last"):
            raise ValueError("direction must be either next or last")
        self.direction = direction
        self.last = last
        super(TradingDatesReader, self).__init__(**kwargs)
    @property
    def endpoint(self):
        """Path under /ref-data, with the start date appended when given."""
        path = "us/dates/%s/%s/%s" % (self.type, self.direction, self.last)
        if self.startDate:
            path += "/%s" % self.startDate
        return path
    def _format_output(self, out, format=None):
        """Convert each returned date string into a pandas Timestamp."""
        converted = [
            {key: pd.to_datetime(val) for key, val in day.items()}
            for day in out
        ]
        return super(TradingDatesReader, self)._format_output(converted)
class Symbols(ReferenceData):
    """All supported symbols (/ref-data/symbols)."""
    @property
    def endpoint(self):
        return "symbols"
class IEXSymbols(ReferenceData):
    """Symbols tradable on the IEX exchange (/ref-data/iex/symbols)."""
    @property
    def endpoint(self):
        return "iex/symbols"
class Exchanges(ReferenceData):
    """Supported exchanges (/ref-data/exchanges)."""
    @property
    def endpoint(self):
        return "exchanges"
class InternationalSymbols(ReferenceData):
    """International symbols for a given exchange or region."""
    def __init__(self, exchange, region, **kwargs):
        # Exactly one of exchange/region is expected; exchange wins in `url`.
        self.exchange = exchange
        self.region = region
        super(InternationalSymbols, self).__init__(**kwargs)
    @property
    def url(self):
        # NOTE(review): returns None when both exchange and region are
        # None — confirm callers always supply one of them.
        if self.exchange is not None:
            return "/ref-data/exchange/%s/symbols" % self.exchange
        elif self.region is not None:
            return "/ref-data/region/%s/symbols" % self.region
    def fetch(self):
        # Thin pass-through kept for a stable public API.
        return super(InternationalSymbols, self).fetch()
    def _convert_output(self, out):
        # NOTE(review): `out` is presumably a list of dicts from the API;
        # indexing out["symbol"] would then raise TypeError — confirm the
        # intended response shape.
        return pd.DataFrame(out, index=[out["symbol"]])
|
def producerColor():
    """Map today's stored maximum temperature to a display colour.

    Reads the first line of ``todaysTemMax.txt`` and returns ``'blue'``
    for temperatures below 15 and ``'yellow'`` otherwise.  The original
    returned None for exactly 15 (neither ``< 15`` nor ``> 15`` matched);
    15 is now treated as 'yellow'.  Returns None when the file is empty.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open("todaysTemMax.txt", "r+") as file:
        content = file.readline().splitlines()
    for line in content:
        temperature = int(line)
        return 'blue' if temperature < 15 else 'yellow'
import requests
import scrapbox
def make_request(method, url, params):
    """Perform an HTTP request via the ``requests`` library.

    :param method: name of a requests function ("get", "post", ...).
    :param url: target URL.
    :param params: keyword options forwarded to requests (merged with the
        scrapbox User-Agent header).
    :raises TypeError: for an unknown method name.

    Bug fix: the merged option dict was passed *positionally*, so for GET
    it became the query-string ``params`` argument (sending the headers as
    query parameters) and for POST the ``data`` argument.  Expand it as
    keyword arguments so ``headers`` and caller options reach requests.
    """
    request_func = getattr(requests, method, None)
    if request_func is None:
        raise TypeError('Unknown method: %s' % (method,))
    kwargs = {
        'headers': {
            'User-Agent': scrapbox.USER_AGENT
        }
    }
    merged = dict(params, **kwargs)
    return request_func(url, **merged)
from grainy.core import PermissionSet, Namespace
class GrainyHandler:
    """
    The base class to use for the Grainy Meta class
    """
    parent = None
    namespace_base = None
    namespace_instance_template = "{namespace}.{instance}"
    @classmethod
    def namespace_instance(cls, instance, **kwargs):
        """
        Returns the permissioning namespace for the passed instance
        Arguments:
        - instance <object|str|Namespace>: the value of this will be appended
        to the base namespace and returned
        Keyword Arguments:
        - any keyword arguments will be used for formatting of the
        namespace
        Returns:
        - unicode: namespace
        """
        if not isinstance(cls.namespace_base, Namespace):
            raise ValueError("`namespace_base` needs to be a Namespace instance")
        template = cls.namespace_instance_template
        if instance == "*":
            # Wildcard instance: there is no object to read attributes from,
            # so rewrite "{instance.xyz}" placeholders to plain "{xyz}" and
            # fill them from kwargs (id defaults to "*", pk falls back to id).
            if "id" not in kwargs:
                kwargs.update(id="*")
            template = template.replace("{instance.","{")
            if kwargs.get("pk") is None:
                kwargs.update(pk=kwargs.get("id"))
        # Namespaces are case-insensitive by convention, hence .lower().
        return template.format(
            namespace=str(cls.namespace_base).format(**kwargs),
            instance=instance,
            **kwargs,
        ).lower()
    @classmethod
    def namespace(cls, instance=None, **kwargs):
        """
        Wrapper function to return either the result of namespace_base or
        namespace instance depending on whether or not a value was passed in
        `instance`
        All keyword arguments will be available while formatting the
        namespace string.
        Keyword Arguments:
        - instance <object|str|Namespace>: the value of this will be appended
        Returns:
        - unicode
        """
        if instance:
            return cls.namespace_instance(instance, **kwargs)
        namespace = f"{cls.namespace_base}"
        if kwargs:
            namespace = namespace.format(**kwargs)
        return namespace.lower()
    @classmethod
    def set_namespace_base(cls, value):
        # Guard: the base must already be a parsed Namespace, not a string.
        if not isinstance(value, Namespace):
            raise TypeError("`value` needs to be a Namespace instance")
        cls.namespace_base = value
    @classmethod
    def set_parent(cls, parent):
        cls.parent = parent
class GrainyModelHandler(GrainyHandler):
    """
    grainy model handler meta class
    """
    model = None
    # Model instances are namespaced by their primary key.
    namespace_instance_template = "{namespace}.{instance.pk}"
    @classmethod
    def set_parent(cls, model):
        cls.parent = model
        cls.model = model
        # Namespace root is "<app_label>.<ModelName>" from Django metadata.
        cls.set_namespace_base(
            Namespace([model._meta.app_label, model._meta.object_name])
        )
class GrainyMixin:
    @property
    def grainy_namespace(self):
        """Permissioning namespace of this object via its Grainy meta class."""
        return self.Grainy.namespace(self)
|
# --------------
# Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): `path` is injected by the hosting platform before this
# cell runs — it is undefined in a plain script.
path
# Load the medal table and normalise the total-medals column name.
data=pd.read_csv(path)
data.rename(columns={'Total':'Total_Medals'},inplace=True)
data.head(10)
# --------------
# Flag, per country, which event (Summer/Winter/Both) yielded more medals.
condition_1=data['Total_Summer']>data['Total_Winter']
condition_2=data['Total_Summer']==data['Total_Winter']
data['Better_Event']=np.where(condition_1, 'Summer',(np.where(condition_2, 'Both', 'Winter')))
# Most frequent label across all countries.
better_event=data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
# Keep only the country name and medal-total columns for ranking.
top_countries= data.filter(['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'], axis=1)
# Drop the final row, which holds the worldwide totals.
top_countries.drop(index=len(top_countries)-1,axis=0,inplace=True)
def top_ten(top_countries,column):
    """Return the names of the ten countries with the largest `column`."""
    l=top_countries.nlargest(10,column)
    country_list=list(l['Country_Name'])
    return country_list
top_10_summer=top_ten(top_countries,"Total_Summer")
top_10_winter=top_ten(top_countries,"Total_Winter")
top_10=top_ten(top_countries,"Total_Medals")
print(top_10_summer)
print(top_10_winter)
print(top_10)
# Countries appearing in all three top-ten lists.
common=[x for x in top_10_summer if x in top_10_winter and x in top_10]
print(common)
# --------------
# Bar plots of the three top-ten groups, one axis each.
summer_df=data[data["Country_Name"].isin(top_10_summer)]
winter_df=data[data["Country_Name"].isin(top_10_winter)]
top_df=data[data["Country_Name"].isin(top_10)]
fig, (ax_1,ax_2,ax_3)=plt.subplots(3,1,figsize=(20,10))
summer_df.plot(kind='bar',ax=ax_1)
winter_df.plot(kind='bar',ax=ax_2)
top_df.plot(kind='bar',ax=ax_3)
# --------------
# Gold-medal ratios per event.
# NOTE(review): summer_df/winter_df/top_df are slices of `data`; assigning
# new columns below triggers pandas' SettingWithCopyWarning — confirm
# whether .copy() was intended when the slices were made.
#for summer
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=summer_df['Golden_Ratio'].max()
c1=summer_df.loc[summer_df['Golden_Ratio'] == summer_max_ratio]
summer_country_gold=c1.iloc[0]["Country_Name"]
print(summer_country_gold)
#for winter
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=winter_df['Golden_Ratio'].max()
c2=winter_df.loc[winter_df['Golden_Ratio'] == winter_max_ratio]
winter_country_gold=c2.iloc[0]["Country_Name"]
print(winter_country_gold)
#for total
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=top_df['Golden_Ratio'].max()
c3=top_df.loc[top_df['Golden_Ratio'] == top_max_ratio]
top_country_gold=c3.iloc[0]["Country_Name"]
print(top_country_gold)
# --------------
# Weighted points: gold=3, silver=2, bronze=1 (totals row dropped first).
data_1=data.drop(index=len(data)-1,axis=0)
#update
data_1['Total_Points']=data_1['Gold_Total']*3+data_1['Silver_Total']*2+data_1['Bronze_Total']
most_points=data_1['Total_Points'].max()
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print("THE BEST COUNTRY IS:",best_country,"WITH",most_points,"POINTS")
# --------------
# Stacked medal breakdown for the best country.
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
print(best)
best.plot(kind='bar', stacked=True, figsize=(15,10))
plt.xlabel("United States")
plt.ylabel("Medals")
plt.xticks(rotation=45)
|
'''
Using tuple to save numbers and show it later
'''
# Collect four integers into an immutable tuple.
num = (int(input('Type a number: ')),
       int(input('Type other number: ')),
       int(input('Type another number: ')),
       int(input('Type the last number: ')))
# "pair numbers" = even numbers ("par" in Portuguese).
print('The pair numbers were: ',end='')
for n in num:
    if n % 2 == 0:
        print(n, end=' ')
# tuple.count() tallies occurrences; tuple.index() finds the first position.
print(f'\nAltogether {num.count(9)} number(s) 9 were typed. ')
if 3 in num:
    print(f'The first number 3 is on the {num.index(3)+1}ª position.')
else:
    print("The number 3 didn't appeared in any position.")
|
import quokka
from load_image import load_image
# Draw the NCSS logo, then blink the display forever.
fb = load_image('NCSS-logo-68x50.qimz')
quokka.display.fill(1)
quokka.display.blit(fb, 30, 7)
quokka.display.show()
c = 1
while True:
    quokka.sleep(1500)  # milliseconds between toggles
    quokka.display.invert(c)
    quokka.display.show()
    c = (c + 1) % 2  # alternate 1, 0, 1, ...
|
from captcha.fields import CaptchaField
from django import forms
from django.db.models import fields
from django.db.models.base import Model
from django.db.models.fields import CharField, DateTimeField
from django.forms import ModelForm, widgets
from django.forms.fields import ChoiceField, ImageField
from django.forms.models import ModelChoiceField
from django.forms.widgets import ChoiceWidget, EmailInput, Select, Textarea, TextInput
from index.models import Course, CourseNotice, Users
class userForm(forms.Form):
    """Registration form: username, password (entered twice), e-mail and
    captcha.  Labels are in Chinese (username/password/repeat password/
    e-mail/captcha)."""
    username = forms.CharField(label="用户名", max_length=128, widget=forms.TextInput)
    password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput)
    re_password = forms.CharField(
        label="重复密码", max_length=256, widget=forms.PasswordInput
    )
    email = forms.EmailField(label="电子邮箱")
    captcha = CaptchaField(label="验证码")
class settingForm(ModelForm):
    """Profile settings form bound to Users, with bootstrap widget classes."""
    class Meta:
        model = Users
        # Fields managed elsewhere are excluded from user editing.
        exclude = ["access", "selected_course", "create_time", "is_deleted"]
        widgets = {
            "name": TextInput(attrs={"class": "form-control"}),
            "real_name": TextInput(attrs={"class": "form-control"}),
            "email": EmailInput(attrs={"class": "form-control"}),
            "phone_number": TextInput(attrs={"class": "form-control"}),
            "sex": Select(attrs={"class": "form-control"}),
        }
class CourseSettingForm(ModelForm):
    """Course editing form; applies bootstrap/file-input widget attributes."""
    Course_Img = forms.ImageField(
        label=("课程图片"), required=False, widget=forms.FileInput
    )
    def __init__(self, *args, **kwargs):
        super(CourseSettingForm, self).__init__(*args, **kwargs)
        # self.fields is a dict — iterate it directly (the original wrapped
        # it in a redundant iter() call).
        for field in self.fields:
            if field == "Course_Img":
                # The image field gets the file-upload widget styling.
                self.fields[field].widget.attrs.update(
                    {
                        "id": "updateImg",
                        "class": "file-loading",
                        "data-browse-on-zone-click": "true",
                    }
                )
            else:
                self.fields[field].widget.attrs.update(
                    {
                        "class": "form-control col-sm-10",
                    }
                )
    class Meta:
        model = Course
        # Server-managed fields are excluded from the form.
        exclude = [
            "Status",
            "Course_Teacher",
            "Stu_Count",
            "Course_Chapter",
            "View_Count",
        ]
class NoticeForm(ModelForm):
    """Course notice form; applies bootstrap classes to every widget."""
    def __init__(self, *args, **kwargs):
        super(NoticeForm, self).__init__(*args, **kwargs)
        # self.fields is a dict — iterate it directly (the original wrapped
        # it in a redundant iter() call).
        for field in self.fields:
            self.fields[field].widget.attrs.update(
                {
                    "class": "form-control col-sm-10",
                }
            )
    class Meta:
        model = CourseNotice
        # The owning course is set by the view, not the user.
        exclude = [
            "sourceCourse",
        ]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from utils import assert_example
def test_build_class_empty():
    # NOTE(review): the class body's exact shape is the fixture —
    # assert_example presumably checks generated/disassembled output,
    # so do not reformat the inner function.
    def func():
        class A:
            pass
    assert_example(func)
def test_build_class_with_methods():
    def func():
        class A:
            # Methods deliberately lack `self`; the class is only compiled,
            # never instantiated.
            def f():
                pass
            def s():
                pass
    assert_example(func)
def test_build_class_with_metaclass():
    # Exercises the metaclass keyword in the class-construction call.
    def func():
        class A(metaclass=type):
            pass
    assert_example(func)
def test_build_class_with_bases():
    def func():
        # C, D and E are intentionally unresolved names: func is only
        # compiled/inspected by assert_example, never executed.
        class A(C, D, E):
            pass
    assert_example(func)
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.contrib.staticfiles.urls import static, staticfiles_urlpatterns
from . import views
# Project URL map: admin, auth app, blog app, and the landing page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('account/', include('App_Login.urls')),
    path('blog/', include('App_Blog.urls')),
    path('', views.index, name='index')
]
# Serve static files and user-uploaded media through the dev server.
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
def intersect(*args):
    """Ordered intersection of the argument sequences.

    Preserves the order of the first sequence, drops duplicates, and keeps
    only items present in every other argument.
    """
    common = []
    for item in args[0]:
        # Skip duplicates; keep the item only if every other sequence has it.
        if item not in common and all(item in seq for seq in args[1:]):
            common.append(item)
    return common
def union(*args):
    """Ordered union of the argument sequences (first occurrence wins)."""
    merged = []
    for seq in args:
        for item in seq:
            if item not in merged:
                merged.append(item)
    return merged
|
"""
*Select*
"""
import jax.numpy as jnp
from ._operator import Manipulation
__all__ = ["Select"]
class Select(
    jnp.select,
    Manipulation,
):
    # NOTE(review): jax.numpy.select is a function, not a class — using it
    # as a base class should raise TypeError when this module is imported.
    # Confirm the intended base (perhaps a wrapper type from ._operator).
    def __init__(
        self,
        *args,
        **kwargs,
    ):
        # Forward all arguments to the cooperative parent initialisers.
        super(Select, self).__init__(
            *args,
            **kwargs,
        )
|
# -*- coding: utf-8 -*-
import re
import logging
from distutils.cmd import Command
from cmds.helper import color
from hangulize import hangulize, get_lang, HangulizeError, \
DONE, SPECIAL, BLANK, ZWSP
class REPLHandler(logging.StreamHandler):
    """Stream handler that colourises hangulize trace output for the REPL."""
    # Rule keyword -> colour used when highlighting trace lines.
    color_map = {'hangulize': 'cyan', 'rewrite': 'green', 'remove': 'red'}
    @staticmethod
    def readably(string):
        # Replace hangulize's internal marker characters with printable ones.
        string = string.replace(DONE, '.')
        string = string.replace(SPECIAL, '#')
        string = re.sub('^' + BLANK + '|' + BLANK + '$', '', string)
        # Swap ZWSP and BLANK via a \r round-trip so the two substitutions
        # do not clobber each other.
        string = re.sub(ZWSP, '\r', string)
        string = re.sub(BLANK, ' ', string)
        string = re.sub('\r', ZWSP, string)
        return string
    def handle(self, record):
        """Colourise the record's message in place, then emit it normally."""
        msg = self.readably(record.msg)
        # keywords
        maxlen = max([len(x) for x in self.color_map.keys()])
        def deco(color_name):
            def replace(m):
                # Pad so coloured keywords stay column-aligned.
                pad = ' ' * (maxlen - len(m.group(1)))
                return color(m.group(1), color_name) + pad
            return replace
        for keyword, color_name in self.color_map.items():
            msg = re.sub(r'(?<=\t)(%s)' % keyword, deco(color_name), msg)
        # result
        msg = re.sub(r'(?<=^\=\>)(.*)$', color(r'\1', 'yellow'), msg)
        # step
        msg = re.sub(r'^(>>|\.\.)', color(r'\1', 'blue'), msg)
        msg = re.sub(r'^(=>)', color(r'\1', 'magenta'), msg)
        # arrow
        msg = re.sub(r'(->)(?= [^ ]+$)', color(r'\1', 'black'), msg)
        record.msg = msg
        return logging.StreamHandler.handle(self, record)
class repl(Command):
    """Read-eval-print loop for Hangulize
    $ python setup.py repl
    Select Locale: it
    ==> gloria
    -> 'gloria'
    -> ' loria'
    -> ' oria'
    -> ' o ia'
    -> ' o i '
    -> ' o '
    -> ' '
    글로리아
    """
    user_options = [('lang=', 'l', 'the language code(ISO 639-3)')]
    def initialize_options(self):
        self.lang = None
    def finalize_options(self):
        pass
    def run(self):
        import sys
        logger = make_logger()
        encoding = sys.stdout.encoding
        # NOTE(review): raw_input, str.decode and "except HangulizeError, e"
        # are Python 2 constructs; this command cannot run under Python 3.
        def _repl():
            # First loop: keep prompting until a valid language is chosen.
            while True:
                lang = self.lang or raw_input(color('Lang: ', 'magenta'))
                try:
                    lang = get_lang(lang)
                    logger.info('** ' + color(type(lang).__name__, 'green') + \
                        ' is selected')
                    break
                except HangulizeError, e:
                    logger.error(color(e, 'red'))
                    self.lang = None
            # Second loop: transcribe lines until an empty input ends the REPL.
            while True:
                string = raw_input(color('==> ', 'cyan'))
                if not string:
                    logger.info('** ' + color('end', 'green'))
                    break
                yield lang.hangulize(string.decode(encoding), logger=logger)
        # Drain the generator; output happens through the logger.
        for hangul in _repl():
            pass
def make_logger(name='Hangulize REPL'):
    """Build an INFO-level logger wired to the colourising REPL handler."""
    repl_logger = logging.getLogger(name)
    repl_logger.setLevel(logging.INFO)
    repl_logger.addHandler(REPLHandler())
    return repl_logger
|
import sys
from warnings import warn
__all__ = [
'warning',
'unique',
]
def warning(message):
    """Emit *message* via the warnings module, attributed to the caller's
    caller (stacklevel=3 skips this helper and its immediate caller)."""
    warn(message, stacklevel=3)
if sys.version_info > (3, 5):
    from collections import OrderedDict
    def unique(seq):
        """Drop duplicate items, keeping first-seen order (dict-backed)."""
        # OrderedDict gained a C implementation in 3.5, making this the
        # fastest order-preserving dedup.
        return OrderedDict.fromkeys(seq).keys()
else:
    def unique(seq):
        """Drop duplicate items lazily, keeping first-seen order."""
        observed = set()
        remember = observed.add
        # `remember` returns None (falsy), so it only records the item.
        return (item for item in seq if not (item in observed or remember(item)))
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/State.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class State(genpy.Message):
_md5sum = "65cd0a9fff993b062b91e354554ec7e9"
_type = "mavros_msgs/State"
_has_header = True # flag to mark the presence of a Header object
_full_text = """# Current autopilot state
#
# Known modes listed here:
# http://wiki.ros.org/mavros/CustomModes
#
# For system_status values
# see https://mavlink.io/en/messages/common.html#MAV_STATE
#
std_msgs/Header header
bool connected
bool armed
bool guided
bool manual_input
string mode
uint8 system_status
string MODE_APM_PLANE_MANUAL = MANUAL
string MODE_APM_PLANE_CIRCLE = CIRCLE
string MODE_APM_PLANE_STABILIZE = STABILIZE
string MODE_APM_PLANE_TRAINING = TRAINING
string MODE_APM_PLANE_ACRO = ACRO
string MODE_APM_PLANE_FBWA = FBWA
string MODE_APM_PLANE_FBWB = FBWB
string MODE_APM_PLANE_CRUISE = CRUISE
string MODE_APM_PLANE_AUTOTUNE = AUTOTUNE
string MODE_APM_PLANE_AUTO = AUTO
string MODE_APM_PLANE_RTL = RTL
string MODE_APM_PLANE_LOITER = LOITER
string MODE_APM_PLANE_LAND = LAND
string MODE_APM_PLANE_GUIDED = GUIDED
string MODE_APM_PLANE_INITIALISING = INITIALISING
string MODE_APM_PLANE_QSTABILIZE = QSTABILIZE
string MODE_APM_PLANE_QHOVER = QHOVER
string MODE_APM_PLANE_QLOITER = QLOITER
string MODE_APM_PLANE_QLAND = QLAND
string MODE_APM_PLANE_QRTL = QRTL
string MODE_APM_COPTER_STABILIZE = STABILIZE
string MODE_APM_COPTER_ACRO = ACRO
string MODE_APM_COPTER_ALT_HOLD = ALT_HOLD
string MODE_APM_COPTER_AUTO = AUTO
string MODE_APM_COPTER_GUIDED = GUIDED
string MODE_APM_COPTER_LOITER = LOITER
string MODE_APM_COPTER_RTL = RTL
string MODE_APM_COPTER_CIRCLE = CIRCLE
string MODE_APM_COPTER_POSITION = POSITION
string MODE_APM_COPTER_LAND = LAND
string MODE_APM_COPTER_OF_LOITER = OF_LOITER
string MODE_APM_COPTER_DRIFT = DRIFT
string MODE_APM_COPTER_SPORT = SPORT
string MODE_APM_COPTER_FLIP = FLIP
string MODE_APM_COPTER_AUTOTUNE = AUTOTUNE
string MODE_APM_COPTER_POSHOLD = POSHOLD
string MODE_APM_COPTER_BRAKE = BRAKE
string MODE_APM_COPTER_THROW = THROW
string MODE_APM_COPTER_AVOID_ADSB = AVOID_ADSB
string MODE_APM_COPTER_GUIDED_NOGPS = GUIDED_NOGPS
string MODE_APM_ROVER_MANUAL = MANUAL
string MODE_APM_ROVER_LEARNING = LEARNING
string MODE_APM_ROVER_STEERING = STEERING
string MODE_APM_ROVER_HOLD = HOLD
string MODE_APM_ROVER_AUTO = AUTO
string MODE_APM_ROVER_RTL = RTL
string MODE_APM_ROVER_GUIDED = GUIDED
string MODE_APM_ROVER_INITIALISING = INITIALISING
string MODE_PX4_MANUAL = MANUAL
string MODE_PX4_ACRO = ACRO
string MODE_PX4_ALTITUDE = ALTCTL
string MODE_PX4_POSITION = POSCTL
string MODE_PX4_OFFBOARD = OFFBOARD
string MODE_PX4_STABILIZED = STABILIZED
string MODE_PX4_RATTITUDE = RATTITUDE
string MODE_PX4_MISSION = AUTO.MISSION
string MODE_PX4_LOITER = AUTO.LOITER
string MODE_PX4_RTL = AUTO.RTL
string MODE_PX4_LAND = AUTO.LAND
string MODE_PX4_RTGS = AUTO.RTGS
string MODE_PX4_READY = AUTO.READY
string MODE_PX4_TAKEOFF = AUTO.TAKEOFF
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
# Pseudo-constants
MODE_APM_PLANE_MANUAL = 'MANUAL'
MODE_APM_PLANE_CIRCLE = 'CIRCLE'
MODE_APM_PLANE_STABILIZE = 'STABILIZE'
MODE_APM_PLANE_TRAINING = 'TRAINING'
MODE_APM_PLANE_ACRO = 'ACRO'
MODE_APM_PLANE_FBWA = 'FBWA'
MODE_APM_PLANE_FBWB = 'FBWB'
MODE_APM_PLANE_CRUISE = 'CRUISE'
MODE_APM_PLANE_AUTOTUNE = 'AUTOTUNE'
MODE_APM_PLANE_AUTO = 'AUTO'
MODE_APM_PLANE_RTL = 'RTL'
MODE_APM_PLANE_LOITER = 'LOITER'
MODE_APM_PLANE_LAND = 'LAND'
MODE_APM_PLANE_GUIDED = 'GUIDED'
MODE_APM_PLANE_INITIALISING = 'INITIALISING'
MODE_APM_PLANE_QSTABILIZE = 'QSTABILIZE'
MODE_APM_PLANE_QHOVER = 'QHOVER'
MODE_APM_PLANE_QLOITER = 'QLOITER'
MODE_APM_PLANE_QLAND = 'QLAND'
MODE_APM_PLANE_QRTL = 'QRTL'
MODE_APM_COPTER_STABILIZE = 'STABILIZE'
MODE_APM_COPTER_ACRO = 'ACRO'
MODE_APM_COPTER_ALT_HOLD = 'ALT_HOLD'
MODE_APM_COPTER_AUTO = 'AUTO'
MODE_APM_COPTER_GUIDED = 'GUIDED'
MODE_APM_COPTER_LOITER = 'LOITER'
MODE_APM_COPTER_RTL = 'RTL'
MODE_APM_COPTER_CIRCLE = 'CIRCLE'
MODE_APM_COPTER_POSITION = 'POSITION'
MODE_APM_COPTER_LAND = 'LAND'
MODE_APM_COPTER_OF_LOITER = 'OF_LOITER'
MODE_APM_COPTER_DRIFT = 'DRIFT'
MODE_APM_COPTER_SPORT = 'SPORT'
MODE_APM_COPTER_FLIP = 'FLIP'
MODE_APM_COPTER_AUTOTUNE = 'AUTOTUNE'
MODE_APM_COPTER_POSHOLD = 'POSHOLD'
MODE_APM_COPTER_BRAKE = 'BRAKE'
MODE_APM_COPTER_THROW = 'THROW'
MODE_APM_COPTER_AVOID_ADSB = 'AVOID_ADSB'
MODE_APM_COPTER_GUIDED_NOGPS = 'GUIDED_NOGPS'
MODE_APM_ROVER_MANUAL = 'MANUAL'
MODE_APM_ROVER_LEARNING = 'LEARNING'
MODE_APM_ROVER_STEERING = 'STEERING'
MODE_APM_ROVER_HOLD = 'HOLD'
MODE_APM_ROVER_AUTO = 'AUTO'
MODE_APM_ROVER_RTL = 'RTL'
MODE_APM_ROVER_GUIDED = 'GUIDED'
MODE_APM_ROVER_INITIALISING = 'INITIALISING'
MODE_PX4_MANUAL = 'MANUAL'
MODE_PX4_ACRO = 'ACRO'
MODE_PX4_ALTITUDE = 'ALTCTL'
MODE_PX4_POSITION = 'POSCTL'
MODE_PX4_OFFBOARD = 'OFFBOARD'
MODE_PX4_STABILIZED = 'STABILIZED'
MODE_PX4_RATTITUDE = 'RATTITUDE'
MODE_PX4_MISSION = 'AUTO.MISSION'
MODE_PX4_LOITER = 'AUTO.LOITER'
MODE_PX4_RTL = 'AUTO.RTL'
MODE_PX4_LAND = 'AUTO.LAND'
MODE_PX4_RTGS = 'AUTO.RTGS'
MODE_PX4_READY = 'AUTO.READY'
MODE_PX4_TAKEOFF = 'AUTO.TAKEOFF'
__slots__ = ['header','connected','armed','guided','manual_input','mode','system_status']
_slot_types = ['std_msgs/Header','bool','bool','bool','bool','string','uint8']
def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,connected,armed,guided,manual_input,mode,system_status

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
        # Let the genpy base class assign whatever the caller provided ...
        super(State, self).__init__(*args, **kwds)
        # ... then backfill defaults: message fields cannot be None,
        # assign default values for those that are.
        if self.header is None:
            self.header = std_msgs.msg.Header()
        if self.connected is None:
            self.connected = False
        if self.armed is None:
            self.armed = False
        if self.guided is None:
            self.guided = False
        if self.manual_input is None:
            self.manual_input = False
        if self.mode is None:
            self.mode = ''
        if self.system_status is None:
            self.system_status = 0
    else:
        # No arguments at all: every field gets its default value.
        self.header = std_msgs.msg.Header()
        self.connected = False
        self.armed = False
        self.guided = False
        self.manual_input = False
        self.mode = ''
        self.system_status = 0
def _get_types(self):
    """
    internal API method

    Returns the list of ROS type strings for this message's slots
    (used by the genpy serialization machinery).
    """
    return self._slot_types
def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
        _x = self
        # Header: seq, stamp.secs, stamp.nsecs as three little-endian uint32s.
        buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
        # frame_id: uint32 length prefix followed by the UTF-8 encoded bytes.
        _x = self.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            # re-measure after encoding: byte length may differ from char count
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        # The four bool fields are packed as four single bytes.
        _x = self
        buff.write(_get_struct_4B().pack(_x.connected, _x.armed, _x.guided, _x.manual_input))
        # mode: length-prefixed UTF-8 string, same layout as frame_id.
        _x = self.mode
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        # system_status: a single uint8.
        _x = self.system_status
        buff.write(_get_struct_B().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    Note: the parameter name ``str`` shadows the builtin (genpy-generated
    signature; kept for compatibility).
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
        if self.header is None:
            self.header = std_msgs.msg.Header()
        end = 0
        _x = self
        # Header: three little-endian uint32s (seq, stamp.secs, stamp.nsecs).
        start = end
        end += 12
        (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        # frame_id: uint32 length prefix, then that many bytes of text.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
            self.header.frame_id = str[start:end]
        # Four bool fields stored as four single bytes.
        _x = self
        start = end
        end += 4
        (_x.connected, _x.armed, _x.guided, _x.manual_input,) = _get_struct_4B().unpack(str[start:end])
        # Normalize the unpacked 0/1 integers to real booleans.
        self.connected = bool(self.connected)
        self.armed = bool(self.armed)
        self.guided = bool(self.guided)
        self.manual_input = bool(self.manual_input)
        # mode: length-prefixed string, same layout as frame_id.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.mode = str[start:end].decode('utf-8', 'rosmsg')
        else:
            self.mode = str[start:end]
        # system_status: single uint8.
        start = end
        end += 1
        (self.system_status,) = _get_struct_B().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    This message has no array fields, so the body is identical to
    ``serialize`` and the ``numpy`` argument is unused.
    """
    try:
        _x = self
        # Header: three little-endian uint32s.
        buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
        # frame_id: length-prefixed UTF-8 string.
        _x = self.header.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        # Four bool fields as four single bytes.
        _x = self
        buff.write(_get_struct_4B().pack(_x.connected, _x.armed, _x.guided, _x.manual_input))
        # mode: length-prefixed UTF-8 string.
        _x = self.mode
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        # system_status: single uint8.
        _x = self.system_status
        buff.write(_get_struct_B().pack(_x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    This message has no array fields, so the body is identical to
    ``deserialize`` and the ``numpy`` argument is unused.
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
        if self.header is None:
            self.header = std_msgs.msg.Header()
        end = 0
        _x = self
        # Header: three little-endian uint32s.
        start = end
        end += 12
        (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
        # frame_id: length-prefixed string.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
            self.header.frame_id = str[start:end]
        # Four bool fields as four single bytes; normalized to bool below.
        _x = self
        start = end
        end += 4
        (_x.connected, _x.armed, _x.guided, _x.manual_input,) = _get_struct_4B().unpack(str[start:end])
        self.connected = bool(self.connected)
        self.armed = bool(self.armed)
        self.guided = bool(self.guided)
        self.manual_input = bool(self.manual_input)
        # mode: length-prefixed string.
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.mode = str[start:end].decode('utf-8', 'rosmsg')
        else:
            self.mode = str[start:end]
        # system_status: single uint8.
        start = end
        end += 1
        (self.system_status,) = _get_struct_B().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared Struct for a single little-endian uint32 (string-length prefixes),
# reused from genpy rather than constructed locally.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the shared Struct for one little-endian uint32."""
    global _struct_I
    return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_4B = None
def _get_struct_4B():
global _struct_4B
if _struct_4B is None:
_struct_4B = struct.Struct("<4B")
return _struct_4B
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
|
"""
For more details, see the class documentation.
"""
class WikiInfoDTO:
    """
    Data transfer object for a Wikipedia article.

    Attributes:
        location: object describing the article's location; expected to
            expose a ``json_dict()`` method. Remains ``None`` until assigned.
        title:  article title string.
        link:   URL of the article.
        info:   summary/extract text.
        pageid: Wikipedia page identifier.
    """

    def __init__(self):
        """
        Constructor to initialize this object with empty defaults.
        """
        self.location = None  # no location attached yet
        self.title = ""
        self.link = ""
        self.info = ""
        self.pageid = ""

    def json_dict(self):
        """
        Returns a python dictionary to be used for building JSON response.

        Bug fix: ``location`` defaults to ``None`` in the constructor, so
        calling ``self.location.json_dict()`` unconditionally raised
        AttributeError for articles without a location; ``None`` is now
        emitted in that case.
        """
        return_dict = dict()
        return_dict['title'] = self.title
        return_dict['link'] = self.link
        return_dict['info'] = self.info
        return_dict['pageid'] = self.pageid
        return_dict['location'] = (
            self.location.json_dict() if self.location is not None else None
        )
        return return_dict
|
import crazyimports
import tests.test_data.config_json as config
def test_json_file_type():
    """The config module should have been loaded from a .json file."""
    extension = config.__file__.rsplit(".", 1)[-1]
    assert extension == "json"

def test_json_integer_type():
    """Top-level JSON numbers are exposed as Python ints."""
    assert isinstance(config.number, int)

def test_json_integer_value():
    """Top-level JSON numbers keep their literal value."""
    assert config.number == 42

def test_json_string_type():
    """Top-level JSON strings are exposed as Python strs."""
    assert isinstance(config.string, str)

def test_json_string_value():
    """Top-level JSON strings keep their literal value."""
    assert config.string == "apple"

def test_json_number_in_object():
    """Nested JSON objects are exposed as dicts (number member)."""
    assert config.object["number"] == 43

def test_json_string_in_object():
    """Nested JSON objects are exposed as dicts (string member)."""
    assert config.object["string"] == "orange"
|
from recollection.tests.classes import SetterTestClass
import os
import recollection
import tempfile
import unittest
# ------------------------------------------------------------------------------
class TestClass(object):
    # NOTE(review): ``foo`` and ``bar`` are deliberately *local* variables,
    # not instance attributes -- the noinspection marker suggests the class
    # only needs to be constructible. Confirm before "fixing" them to
    # self.foo / self.bar.
    # noinspection PyUnusedLocal
    def __init__(self):
        foo = 1
        bar = ''
# ------------------------------------------------------------------------------
class TestSerialiserAppData(unittest.TestCase):
    """Tests covering Memento stack serialisation and deserialisation."""

    # --------------------------------------------------------------------------
    def test_serialise(self):
        """
        Checks to see that our serialisation kicks in and we have
        a persistent data artifact
        :return:
        """
        # -- Create our test stack
        test_class, stack = self._memento_test_data()

        # -- Define the data location; we only need a unique path, so the
        # -- handle is closed (removing the file) straight away
        temp_file = tempfile.NamedTemporaryFile(delete=True)
        temp_file.close()

        # -- Ensure the data does not exist yet
        self.assertFalse(
            os.path.exists(temp_file.name)
        )

        # -- Register our serialiser
        stack.register_serialiser(
            serialiser=recollection.PickleSerialiser,
            identifier=temp_file.name,
        )

        # -- Set some properties on the class
        test_class.setTarget(10)
        test_class.setDistance(20)

        # -- Store the data and serialise
        stack.store()
        stack.serialise()

        # -- Ensure the data now exists
        self.assertTrue(
            os.path.exists(temp_file.name)
        )

    # --------------------------------------------------------------------------
    def test_serialise_on_store(self):
        """
        Checks to see that our serialisation kicks in and we have
        a persistent data artifact when calling the shortcut of
        serialise=True on the store method
        :return:
        """
        # -- Create our test stack
        test_class, stack = self._memento_test_data()

        # -- Define the data location; we only need a unique path, so the
        # -- handle is closed (removing the file) straight away
        temp_file = tempfile.NamedTemporaryFile(delete=True)
        temp_file.close()

        # -- Ensure the data does not exist yet
        self.assertFalse(
            os.path.exists(temp_file.name)
        )

        # -- Register our serialiser
        stack.register_serialiser(
            serialiser=recollection.PickleSerialiser,
            identifier=temp_file.name,
        )

        # -- Set some properties on the class
        test_class.setTarget(10)
        test_class.setDistance(20)

        # -- Store the data and serialise in a single call
        stack.store(serialise=True)

        # -- Ensure the data now exists
        self.assertTrue(
            os.path.exists(temp_file.name)
        )

    # --------------------------------------------------------------------------
    def test_deserialise(self):
        """
        Checks that data serialised from one stack can be deserialised
        into a fresh, independent stack, restoring the stored property
        values onto the new target class.
        :return:
        """
        # -- Create our test stack
        test_class, stack = self._memento_test_data()

        # -- Define the data location; we only need a unique path, so the
        # -- handle is closed (removing the file) straight away
        temp_file = tempfile.NamedTemporaryFile(delete=True)
        temp_file.close()

        # -- Ensure the data does not exist yet
        self.assertFalse(
            os.path.exists(temp_file.name)
        )

        # -- Register our serialiser
        stack.register_serialiser(
            serialiser=recollection.PickleSerialiser,
            identifier=temp_file.name,
        )

        # -- Set some properties on the class
        test_class.setTarget(10)
        test_class.setDistance(20)

        # -- Store the data and serialise
        stack.store(serialise=True)

        # -- Create a second, independent test stack
        new_test_class, new_stack = self._memento_test_data()

        # -- Register our serialiser against the same data location
        new_stack.register_serialiser(
            serialiser=recollection.PickleSerialiser,
            identifier=temp_file.name,
        )
        new_stack.deserialise()

        # -- Ensure the restored values landed on the *new* class
        self.assertEqual(
            10,
            new_test_class.getTarget(),
        )
        self.assertEqual(
            20,
            # Bug fix: previously asserted against test_class, which holds
            # the value it was given directly -- that never exercised the
            # deserialisation path.
            new_test_class.getDistance(),
        )

    # --------------------------------------------------------------------------
    @classmethod
    def _memento_test_data(cls):
        """Build a SetterTestClass and a Memento stack with the 'target'
        and 'distance' properties registered against it."""
        test_class = SetterTestClass()
        stack = recollection.Memento(test_class)
        stack.register(
            label='target',
            getter=test_class.getTarget,
            setter=test_class.setTarget,
        )
        stack.register(
            label='distance',
            getter=test_class.getDistance,
            setter=test_class.setDistance,
        )
        return test_class, stack
|
"""
This file contains the filtering functions that are used to process the
'--include' and '--exclude' command line options. The code in this module is
not specific to the netcfgbu inventory column names, and could be re-used for
other CSV related tools and use-cases.
"""
import ipaddress
import operator
import re
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, AnyStr, Optional, Callable, Dict
from .filetypes import CommentedCsvReader
__all__ = ["create_filter"]
# Matches the value half of a "<field-name>=<value>" constraint expression.
value_pattern = r"(?P<value>\S+)$"
# Matches the "@<filename>" constraint form used to filter from a CSV file.
file_reg = re.compile(r"@(?P<filename>.+)$")
# Splits text on runs of whitespace or commas.
wordsep_re = re.compile(r"\s+|,")
class Filter(ABC):
    """Filter is a type that supports op comparisons against inventory fields

    An implementation of Filter should capture:
       - The record fieldname to compare
       - The filter expression

    A Filter instance will be passed an inventory record when called, returning
    the bool result of whether the record matches the filter
    """

    @abstractmethod
    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        """Return True when `record` matches this filter."""
        pass


class RegexFilter(Filter):
    """Match one inventory field against an anchored, case-insensitive regex."""

    def __init__(self, fieldname: str, expr: str) -> None:
        self.fieldname = fieldname
        # Anchor the expression so it must match the whole field value.
        try:
            self.re = re.compile(f"^{expr}$", re.IGNORECASE)
        except re.error as exc:
            raise ValueError(
                f"Invalid filter regular-expression: {expr!r}: {exc}"
            ) from None

        # Give the instance function-like metadata for debugging/reporting.
        label = f"limit_{fieldname}({self.re.pattern})"
        self.__doc__ = label
        self.__name__ = label
        self.__qualname__ = label

    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        field_value = record[self.fieldname]
        return self.re.match(field_value) is not None

    def __repr__(self) -> str:
        return f"RegexFilter(fieldname={self.fieldname!r}, expr={self.re})"


class IPFilter(Filter):
    """Match an inventory field against an IP address or prefix.

    A prefix such as 192.168.0.0/28 matches any record address inside that
    network; a single address (e.g. 2620:abcd:10::10) must match exactly.
    """

    def __init__(self, fieldname: str, ip: str) -> None:
        self.fieldname = fieldname
        # ip_network accepts both a bare address and a CIDR prefix.
        self.ip = ipaddress.ip_network(ip)

        # Give the instance function-like metadata for debugging/reporting.
        label = f"limit_{fieldname}({self.ip})"
        self.__doc__ = label
        self.__name__ = label
        self.__qualname__ = label

    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        address = ipaddress.ip_address(record[self.fieldname])
        return address in self.ip

    def __repr__(self) -> str:
        return f"IpFilter(fieldname={self.fieldname!r}, ip='{self.ip}')"
def create_filter_function(op_filters, optest_fn):
    """Combine per-field filters into a single record predicate.

    The returned callable applies every filter in `op_filters` to the
    record and rejects it as soon as `optest_fn` flags one result.
    """
    def filter_fn(rec):
        return not any(optest_fn(op_fn(rec)) for op_fn in op_filters)
    return filter_fn
def mk_file_filter(filepath, key):
    """Build a filter matching records whose `key` field is listed in a CSV file.

    Parameters
    ----------
    filepath:
        Path to a CSV file that contains a `key` column (e.g. "host").
    key:
        The record field to compare against the values read from the file.

    Returns
    -------
    A callable taking an inventory record and returning True when the
    record's `key` value appears in the file.  The callable carries the
    loaded values on its ``hostnames`` attribute.

    Raises
    ------
    ValueError:
        When the file is not a CSV file.
    """
    if not filepath.endswith(".csv"):
        raise ValueError(
            f"File '{filepath}' not a CSV file. Only CSV files are supported at this time"
        )

    # Bug fix: close the file handle deterministically (the original left
    # the file object open for the garbage collector to clean up).
    with open(filepath) as infile:
        filter_hostnames = [rec[key] for rec in CommentedCsvReader(infile)]

    # Membership is tested per record; use a set for O(1) lookups.
    hostname_set = set(filter_hostnames)

    def op_filter(rec):
        return rec[key] in hostname_set

    op_filter.hostnames = filter_hostnames
    op_filter.__doc__ = f"file: {filepath})"
    op_filter.__name__ = op_filter.__doc__
    op_filter.__qualname__ = op_filter.__doc__

    return op_filter
def create_filter(
    constraints: List[AnyStr], field_names: List[AnyStr], include: Optional[bool] = True
) -> Callable[[Dict], bool]:
    """
    This function returns a function that is used to filter inventory records.

    Parameters
    ----------
    constraints:
        A list of constraint expressions that are in the form "<field-name>=<value>".

    field_names:
        A list of known field names

    include:
        When True, the filter function will match when the constraint is true,
        for example if the constraint is "os_name=eos", then it would match
        records that have os_name field equal to "eos".

        When False, the filter function will match when the constraint is not
        true. For example if the constraint is "os_name=eos", then the filter
        function would match records that have os_name fields not equal to
        "eos".

    Returns
    -------
    The returning filter function expects an inventory record as the single
    input parameter, and the function returns True/False on match.
    """
    # Build a "<known-field>=<value>" matcher limited to the given field names.
    fieldn_pattern = "^(?P<keyword>" + "|".join(fieldn for fieldn in field_names) + ")"
    field_value_reg = re.compile(fieldn_pattern + "=" + value_pattern)

    op_filters: List[Filter] = []
    for filter_expr in constraints:

        # check for the '@<filename>' filtering use-case first.
        if mo := file_reg.match(filter_expr):
            filepath = mo.group(1)
            if not Path(filepath).exists():
                raise FileNotFoundError(filepath)
            try:
                op_filters.append(mk_file_filter(filepath, key="host"))
                continue
            except KeyError:
                # the CSV file lacked the expected "host" column
                raise ValueError(
                    f"File '{filepath}' does not contain host content as expected"
                )

        # next check for keyword=value filtering use-case
        if (mo := field_value_reg.match(filter_expr)) is None:
            raise ValueError(f"Invalid filter expression: {filter_expr}")

        fieldn, value = mo.groupdict().values()

        # ipaddr fields get IP-aware matching; fall back to a regex filter
        # when the value is not a parseable address or prefix.
        if fieldn.casefold() == "ipaddr":
            try:
                value_filter = IPFilter(fieldn, value)
            except ValueError:
                value_filter = RegexFilter(fieldn, value)
        else:
            value_filter = RegexFilter(fieldn, value)

        op_filters.append(value_filter)

    # include -> a record fails when any filter is False (not_);
    # exclude -> a record fails when any filter is True (truth).
    optest_fn = operator.not_ if include else operator.truth
    filter_fn = create_filter_function(op_filters, optest_fn)
    filter_fn.op_filters = op_filters
    filter_fn.constraints = constraints

    return filter_fn
|
import docker
from console.app import app
from console.widgets.dialogs import MessageBox
def popup_failure(e, self):
    """Errback: show a docker API error in a message-box dialog.

    :param e: failure object wrapping a docker.errors.APIError (Twisted-style
        ``trap``/``value`` API -- confirm against the Deferred usage below)
    :param self: the widget/controller that owns the dialogs (errback-style
        argument order puts the failure first)
    """
    self.close_dialog()
    # Re-raise anything that is not an APIError, then unwrap the failure.
    e.trap(docker.errors.APIError)
    e = e.value
    self.show_dialog(
        MessageBox(
            e.explanation,
            title="HTTP Error: " + str(e.response.status_code),
        )
    )
    app.draw_screen()
def catch_docker_errors(fn):
    """Decorator routing docker API errors to a popup dialog.

    Handles both a synchronous ``APIError`` raised by ``fn`` and a failed
    Deferred returned by it (via ``addErrback``).

    NOTE(review): the wrapped call's return value is not propagated --
    callers of the decorated method receive None; confirm that is intended.
    """
    def decorator(self, *args, **kwargs):
        try:
            d = fn(self, *args, **kwargs)
            d.addErrback(popup_failure, self)
        # Bug fix: "except X, e" is Python 2-only syntax; "as" works on
        # Python 2.6+ and Python 3.
        except docker.errors.APIError as e:
            popup_failure(e, self)
    return decorator
def split_repo_name(name):
    """Split a docker image reference into (repository, tag).

    Scans the name from the end:
      * the last ':' (when it appears after any '/') separates repo and tag;
      * reaching a '/' first means the name carries no tag -> (name, '');
      * neither character present -> (name, None).

    Bug fix: the original loop started at index 0, so ``name[-0]`` inspected
    the *first* character instead of the last, and the tag slice
    ``name[-idx + 1:]`` was wrong when the separator was the final character.
    """
    for pos in range(len(name) - 1, -1, -1):
        ch = name[pos]
        if ch == ':':
            return name[:pos], name[pos + 1:]
        if ch == '/':
            return name, ''
    return name, None
class Bag(object):
    """A trivial attribute container: ``Bag(a=1).a == 1``."""

    def __init__(self, **kwargs):
        # Equivalent to calling setattr for each keyword argument.
        self.__dict__.update(kwargs)
|
#!/usr/bin/python3
''' In this example, we have two INA233's, at addresses 0x40 and 0x41
for a hypothetical solar battery charging system.
'''
from ina233 import INA233

# -- Measurement configuration -------------------------------------------
R_shunt_ohms = 0.020  # shunt resistor value in Ohms
I_max_amps = 10  # max expected current in Amps
bus = 1  # I2C bus
battery_address = 0x40  # address of INA233 connected to battery circuit
solar_address = 0x41  # address of INA233 connected to solar charging circuit

# One sensor instance per monitored circuit.
battery_ina233 = INA233(bus, battery_address)
solar_ina233 = INA233(bus, solar_address)

# Calibrate with the shunt value before reading currents/powers.
battery_ina233.calibrate(R_shunt_ohms, I_max_amps)
solar_ina233.calibrate(R_shunt_ohms, I_max_amps)

# Report one snapshot of each measurement.
print("Battery Bus Voltage : %.3f V" % battery_ina233.getBusVoltageIn_V())
print("Battery Bus Current : %.3f mA" % battery_ina233.getCurrentIn_mA())
print("Solar Bus Voltage : %.3f V" % solar_ina233.getBusVoltageIn_V())
print("Solar Bus Power : %.3f mW" % solar_ina233.getPower_mW())
print("Solar Avg Bus Power : %.3f mW" % solar_ina233.getAv_Power_mW())
|
import numpy as np
from scipy.interpolate import interp1d
from pims import pipeline
from rixs.process2d import apply_curvature, image_to_photon_events
# Eventually we will create this information from the configuration
# attributes in ophyd.
# Default detector regions and processing settings for the two spectrometer
# arms. 'bins' of None means a 1-pixel step (see get_rixs docstring).
process_dict_low_2theta = {'light_ROI': [slice(175, 1609), slice(1, 1751)],
                           'curvature': np.array([0., 0., 0.]),
                           'bins': None}
process_dict_high_2theta = {'light_ROI': [slice(175, 1609), slice(1753, 3503)],
                            'curvature': np.array([0., 0., 0.]),
                            'bins': None}
process_dicts = {'low_2theta': process_dict_low_2theta,
                 'high_2theta': process_dict_high_2theta}
@pipeline
def image_to_spectrum(image, light_ROI=[slice(None, None, None),
                                        slice(None, None, None)],
                      curvature=np.array([0., 0., 0.]), bins=None,
                      background=None):
    """
    Convert a 2D array of RIXS data into a spectrum

    Parameters
    ----------
    image : array
        2D array of intensity
    light_ROI : [slice, slice]
        Region of image containing the data
        (mutable default is safe here: it is only read, never mutated)
    curvature : array
        The polynominal coeffcients describing the image curvature.
        These are in decreasing order e.g.

        .. code-block:: python

           curvature[0]*x**2 + curvature[1]*x**1 + curvature[2]*x**0

        The order of polynominal used is set by len(curvature) - 1
    bins : int or array_like or [int, int] or [array, array]
        The bin specification in y then x order:

        * If int, the number of bins for the two dimensions (nx=ny=bins).
        * If array_like, the bin edges for the two dimensions
          (y_edges=x_edges=bins).
        * If [int, int], the number of bins in each dimension
          (ny, nx = bins).
        * If [array, array], the bin edges in each dimension
          (y_edges, x_edges = bins).
        * A combination [int, array] or [array, int], where int
          is the number of bins and array is the bin edges.
    background : array
        2D array for background subtraction

    Yields
    ------
    spectrum : array
        two column array of pixel, intensity
    """
    # Explicit None test replaces the previous try/except TypeError, which
    # could also have masked unrelated type errors from bad inputs.
    if background is None:
        photon_events = image_to_photon_events(image[light_ROI])
    else:
        photon_events = image_to_photon_events(image[light_ROI]
                                               - background[light_ROI])
    spectrum = apply_curvature(photon_events, curvature, bins)
    return spectrum
def get_rixs(header, light_ROI=[slice(None, None, None),
                                slice(None, None, None)],
             curvature=np.array([0., 0., 0.]), bins=None,
             background=None,
             detector='rixscam_image'):
    """
    Create rixs spectra according to procces_dict
    and return data as generator with similar behavior to
    header.data()

    Parameters
    ----------
    header : databroker header object
        A dictionary-like object summarizing metadata for a run.
    light_ROI : [slice, slice]
        Region of image containing the data
    curvature : array
        The polynominal coeffcients describing the image curvature.
        These are in decreasing order e.g.

        .. code-block:: python

           curvature[0]*x**2 + curvature[1]*x**1 + curvature[2]*x**0

        The order of polynominal used is set by len(curvature) - 1
    bins : int or array_like or [int, int] or [array, array]
        The bin specification in y then x order:

        * If bins is None a step of 1 is assumed over the relevant range
        * If int, the number of bins for the two dimensions (nx=ny=bins).
        * If array_like, the bin edges for the two dimensions
          (y_edges=x_edges=bins).
        * If [int, int], the number of bins in each dimension
          (ny, nx = bins).
        * If [array, array], the bin edges in each dimension
          (y_edges, x_edges = bins).
        * A combination [int, array] or [array, int], where int
          is the number of bins and array is the bin edges.
    background : array
        2D array for background subtraction
    detector : string
        name of the detector passed on header.data

    Yields
    -------
    ImageStack : pims ImageStack or list of ImageStack
        Array-like object contains scans associated with an event.
        If the input is a list of headers the output is a list of
    """
    # Lazily transform each event's image stack into a spectrum.
    for image_stack in header.data(detector):
        yield image_to_spectrum(image_stack,
                                light_ROI=light_ROI,
                                curvature=curvature,
                                bins=bins,
                                background=background)
def make_scan(headers, light_ROI=[slice(None, None, None),
                                  slice(None, None, None)],
              curvature=np.array([0., 0., 0.]), bins=None,
              background=None):
    """
    Make 4D array of RIXS spectra with structure
    event, image_index, y, I

    Parameters
    ----------
    headers : databroker header object or iterable of same
        iterable that returns databroker objects
    light_ROI : [slice, slice]
        Region of image containing the data
    curvature : array
        The polynominal coeffcients describing the image curvature.
        These are in decreasing order e.g.

        .. code-block:: python

           curvature[0]*x**2 + curvature[1]*x**1 + curvature[2]*x**0

        The order of polynominal used is set by len(curvature) - 1
    bins : int or array_like or [int, int] or [array, array]
        The bin specification in y then x order:

        * If int, the number of bins for the two dimensions (nx=ny=bins).
        * If array_like, the bin edges for the two dimensions
          (y_edges=x_edges=bins).
        * If [int, int], the number of bins in each dimension
          (ny, nx = bins).
        * If [array, array], the bin edges in each dimension
          (y_edges, x_edges = bins).
        * A combination [int, array] or [array, int], where int
          is the number of bins and array is the bin edges.
    background : array
        2D array for background subtraction

    Returns
    -------
    scan : array
        4D array of RIXS spectra with structure
        event, image_index, y, I
    """
    # A single header exposes .data(); wrap it so both cases are handled
    # uniformly.  (Idiom fix: hasattr already returns a bool, the previous
    # "is True" comparison was redundant.)
    if hasattr(headers, 'data'):
        headers = [headers]

    per_header = [
        np.array(list(get_rixs(h, light_ROI=light_ROI, curvature=curvature,
                               bins=bins, background=background)))
        for h in headers
    ]
    return np.concatenate(per_header)
def calibrate(scan, elastics=None, energy_per_pixel=1, I0s=None):
    """Apply energy per pixel, I0 and energy zero calibration.

    Parameters
    ---------
    scan : array
        4D array of RIXS spectra with structure
        event, image_index, y, I
    elastics : array
        Elastic pixels to subtract to set energy zero
        2D array with shape (event, images per event)
    energy_per_pixel : float
        Multiply all pixel (y) values by this number
        to convert pixel index to energy loss
    I0s : array
        Intensity motor to divide all intensities by
        2D array with shape (event, images per event)

    Returns
    -------
    scan_out : array
        calibrated scans
        4D array of RIXS spectra with structure
        event, image_index, y, I
    """
    # Defaults are no-op calibrations: zero offset, unit normalization.
    if elastics is None:
        elastics = np.zeros(scan.shape[0:2])
    if I0s is None:
        I0s = np.ones(scan.shape[0:2])
    # NOTE(review): this subtraction broadcasts over the LAST axis, so the
    # elastic offset is subtracted from both columns (pixel y AND intensity
    # I), not only the energy axis -- confirm that is intended.
    scan_out = scan - elastics[:, :, np.newaxis, np.newaxis]
    # Convert pixel index to energy loss (column 0 only).
    scan_out[:, :, :, 0:1] *= energy_per_pixel
    # Normalize intensities by I0 (column 1 only).
    scan_out[:, :, :, 1:2] /= I0s[:, :, np.newaxis, np.newaxis]
    return scan_out
def interp_robust(x, xp, fp):
    """
    Wrapper around scipy to interpolate data with either
    increasing or decreasing x.

    Points outside the range of ``xp`` evaluate to NaN instead of raising.

    Parameters
    ----------
    x : array
        values to interpolate onto
    xp : array
        original x values (may be increasing or decreasing; scipy sorts
        them internally since assume_sorted defaults to False)
    fp : array
        original values of function

    Returns
    -------
    f : array
        values interpolated at x
    """
    # np.nan replaces np.NaN, an alias that was removed in NumPy 2.0.
    func = interp1d(xp, fp, bounds_error=False, fill_value=np.nan)
    f = func(x)
    return f
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module containing abstract base classes for Composer environments."""
from dm_control.composer.arena import Arena
from dm_control.composer.constants import * # pylint: disable=wildcard-import
from dm_control.composer.define import cached_property
from dm_control.composer.define import observable
from dm_control.composer.entity import Entity
from dm_control.composer.entity import FreePropObservableMixin
from dm_control.composer.entity import ModelWrapperEntity
from dm_control.composer.entity import Observables
from dm_control.composer.environment import Environment
from dm_control.composer.environment import HOOK_NAMES
from dm_control.composer.initializer import Initializer
from dm_control.composer.robot import Robot
from dm_control.composer.task import NullTask
from dm_control.composer.task import Task
|
from ..broker import Broker
class DeviceSupportWorksheetBroker(Broker):
controller = "device_support_worksheets"
def index(self, **kwargs):
    """Lists the available device support worksheets. Any of the inputs listed may be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.

    **Inputs**

    |  ``api version min:`` 2.9
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :param id: The internal system identifier of the associated device support request worksheet.
    :type id: Array of Integer

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` 0

    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` 1000

    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` id

    :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value.
    :type sort: Array of String

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` asc

    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :param select: The list of attributes to return for each DeviceSupportWorksheet. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value. If empty or omitted, all attributes will be returned.
    :type select: Array

    |  ``api version min:`` 2.8
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String

    |  ``api version min:`` 2.8
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return device_support_worksheets: An array of the DeviceSupportWorksheet objects that match the specified input criteria.
    :rtype device_support_worksheets: Array of DeviceSupportWorksheet
    """
    # Delegate to the generic list endpoint for this broker's controller.
    return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
    """Lists the available device support worksheets matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.

    **Inputs**

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param access_to_device: Information about how Infoblox will access the device for site testing.
    :type access_to_device: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param access_to_device_other: Other information about how Infoblox will access the device.
    :type access_to_device_other: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param admin_guide_collected_ind: Flag indicating that the admin guide was collected.
    :type admin_guide_collected_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param cli_password: The CLI Password of the device.
    :type cli_password: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param cli_session_collected_ind: Flag indicating that CLI data was collected.
    :type cli_session_collected_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param cli_username: The CLI Username of the device.
    :type cli_username: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param contact_method: How the customer should be contacted (valid values are 'Email' and 'Phone').
    :type contact_method: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param contact_name: A name of a person to contact.
    :type contact_name: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param contact_value: E-mail address or phone that can be used to contact the customer.
    :type contact_value: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param created_at: The date and time the record was initially created in the system.
    :type created_at: Array of DateTime

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param customer_name: Customer name.
    :type customer_name: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param delivery_addl_email: Additional email addresses to which the device support data bundle is sent.
    :type delivery_addl_email: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param delivery_method: The method of delivery (sftp, email, or download) for the device support data bundle.
    :type delivery_method: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_discovered_ind: A flag indicating that the device has been discovered by the system.
    :type device_discovered_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_id: The internal system identifier of the associated device.
    :type device_id: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_ip_dotted: The IP address of the associated device.
    :type device_ip_dotted: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_model: The device model of the associated device as determined by the system.
    :type device_model: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_type: The device type of the associated device as determined by the system.
    :type device_type: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param device_vendor: The vendor of the associated device as determined by the system.
    :type device_vendor: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param discovery_diagnostic_collected_ind: Flag indicating that discovery diagnostics were collected.
    :type discovery_diagnostic_collected_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param id: The internal system identifier of the associated device support request worksheet.
    :type id: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param license_expiration: The expiration of the NetMRI license
    :type license_expiration: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param license_id: The NetMRI license identifier.
    :type license_id: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param license_type: The NetMRI license type.
    :type license_type: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param manual_data_entry_ind: Flag indicating that device data is being collected in manual mode.
    :type manual_data_entry_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param netmri_version: The NetMRI version.
    :type netmri_version: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param os_version: The OS version of the associated device as determined by the system.
    :type os_version: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param package_name: The name of the compressed, encrypted device support data package.
    :type package_name: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param preferred_cli: The preferred CLI method (SSH, Telnet, or Other).
    :type preferred_cli: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param priv_mode_info: Privileged mode information.
    :type priv_mode_info: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param secure_version: The encryption secure version of the credentials.
    :type secure_version: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_auth_password: The SNMP authorized password of the device (SNMPv3 only).
    :type snmp_auth_password: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_auth_protocol: The SNMP authorized protocol of the device (SNMPv3 only).
    :type snmp_auth_protocol: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_auth_username: The SNMP authorized username of the device (SNMPv3 only).
    :type snmp_auth_username: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_community_string: The SNMP community string of the device.
    :type snmp_community_string: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_data_collected_ind: Flag indicating that SNMP data was collected.
    :type snmp_data_collected_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_port: The SNMP port of the device.
    :type snmp_port: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_privacy_password: The SNMP privacy password of the device (SNMPv3 only).
    :type snmp_privacy_password: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_privacy_protocol: The SNMP privacy protocol of the device (SNMPv3 only).
    :type snmp_privacy_protocol: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param snmp_version: The SNMP version of the device (SNMPv1, SNMPv2, or SNMPv3).
    :type snmp_version: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param status: The overall status of the device support request worksheet.
    :type status: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param status_msg: Error message associated with worksheet status. Currently, it only contains error messages for "Transfer Failed" status.
    :type status_msg: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param step_number: The last step which the worksheet was saved at.
    :type step_number: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param syslogs_collected_ind: Flag indicating that the syslogs were collected.
    :type syslogs_collected_ind: Array of Boolean

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param unit_id: The internal identifier for the collector that is used to collect the device support data. Used in an OC only.
    :type unit_id: Array of Integer

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param updated_at: The date and time the record was last modified in the system.
    :type updated_at: Array of DateTime

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param user_in_device_capabilities: The device capabilities as determined by the user.
    :type user_in_device_capabilities: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param user_in_device_model: The device model as determined by the user.
    :type user_in_device_model: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param user_in_device_type: The device type as determined by the user.
    :type user_in_device_type: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param user_in_device_vendor: The device vendor as determined by the user.
    :type user_in_device_vendor: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param user_in_os_version: The os version as determined by the user.
    :type user_in_os_version: Array of String

    | ``api version min:`` 2.9
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param virtual_network_id: The internal identifier for the network which the device is associated to.
    :type virtual_network_id: Array of Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0

    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000

    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` id

    :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value.
    :type sort: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc

    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param select: The list of attributes to return for each DeviceSupportWorksheet. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value. If empty or omitted, all attributes will be returned.
    :type select: Array

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String

    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param query: This value will be matched against device support worksheets, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: access_to_device, access_to_device_other, admin_guide_collected_ind, cli_password, cli_session_collected_ind, cli_username, contact_method, contact_name, contact_value, created_at, customer_name, delivery_addl_email, delivery_method, device_discovered_ind, device_id, device_ip_dotted, device_model, device_type, device_vendor, discovery_diagnostic_collected_ind, id, license_expiration, license_id, license_type, manual_data_entry_ind, netmri_version, os_version, package_name, preferred_cli, priv_mode_info, secure_version, snmp_auth_password, snmp_auth_protocol, snmp_auth_username, snmp_community_string, snmp_data_collected_ind, snmp_port, snmp_privacy_password, snmp_privacy_protocol, snmp_version, status, status_msg, step_number, syslogs_collected_ind, unit_id, updated_at, user_in_device_capabilities, user_in_device_model, user_in_device_type, user_in_device_vendor, user_in_os_version, virtual_network_id.
    :type query: String

    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String

    **Outputs**

    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None

    :return device_support_worksheets: An array of the DeviceSupportWorksheet objects that match the specified input criteria.
    :rtype device_support_worksheets: Array of DeviceSupportWorksheet
    """
    # Resolve the broker-qualified remote method name, then delegate the
    # actual HTTP round-trip (and response unwrapping into a list of
    # DeviceSupportWorksheet objects) to the shared list-request helper.
    method_name = self._get_method_fullname("search")
    return self.api_list_request(method_name, kwargs)
def find(self, **kwargs):
"""Lists the available device support worksheets matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: access_to_device, access_to_device_other, admin_guide_collected_ind, cli_password, cli_session_collected_ind, cli_username, contact_method, contact_name, contact_value, created_at, customer_name, delivery_addl_email, delivery_method, device_discovered_ind, device_id, device_ip_dotted, device_model, device_type, device_vendor, discovery_diagnostic_collected_ind, id, license_expiration, license_id, license_type, manual_data_entry_ind, netmri_version, os_version, package_name, preferred_cli, priv_mode_info, secure_version, snmp_auth_password, snmp_auth_protocol, snmp_auth_username, snmp_community_string, snmp_data_collected_ind, snmp_port, snmp_privacy_password, snmp_privacy_protocol, snmp_version, status, status_msg, step_number, syslogs_collected_ind, unit_id, updated_at, user_in_device_capabilities, user_in_device_model, user_in_device_type, user_in_device_vendor, user_in_os_version, virtual_network_id.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_access_to_device: The operator to apply to the field access_to_device. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. access_to_device: Information about how Infoblox will access the device for site testing. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_access_to_device: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_access_to_device: If op_access_to_device is specified, the field named in this input will be compared to the value in access_to_device using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_access_to_device must be specified if op_access_to_device is specified.
:type val_f_access_to_device: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_access_to_device: If op_access_to_device is specified, this value will be compared to the value in access_to_device using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_access_to_device must be specified if op_access_to_device is specified.
:type val_c_access_to_device: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_access_to_device_other: The operator to apply to the field access_to_device_other. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. access_to_device_other: Other information about how Infoblox will access the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_access_to_device_other: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_access_to_device_other: If op_access_to_device_other is specified, the field named in this input will be compared to the value in access_to_device_other using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_access_to_device_other must be specified if op_access_to_device_other is specified.
:type val_f_access_to_device_other: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_access_to_device_other: If op_access_to_device_other is specified, this value will be compared to the value in access_to_device_other using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_access_to_device_other must be specified if op_access_to_device_other is specified.
:type val_c_access_to_device_other: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_admin_guide_collected_ind: The operator to apply to the field admin_guide_collected_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. admin_guide_collected_ind: Flag indicating that the admin guide was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_admin_guide_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_admin_guide_collected_ind: If op_admin_guide_collected_ind is specified, the field named in this input will be compared to the value in admin_guide_collected_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_admin_guide_collected_ind must be specified if op_admin_guide_collected_ind is specified.
:type val_f_admin_guide_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_admin_guide_collected_ind: If op_admin_guide_collected_ind is specified, this value will be compared to the value in admin_guide_collected_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_admin_guide_collected_ind must be specified if op_admin_guide_collected_ind is specified.
:type val_c_admin_guide_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_password: The operator to apply to the field cli_password. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_password: The CLI Password of the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_password: If op_cli_password is specified, the field named in this input will be compared to the value in cli_password using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_password must be specified if op_cli_password is specified.
:type val_f_cli_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_password: If op_cli_password is specified, this value will be compared to the value in cli_password using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_password must be specified if op_cli_password is specified.
:type val_c_cli_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_session_collected_ind: The operator to apply to the field cli_session_collected_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_session_collected_ind: Flag indicating that CLI data was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_session_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_session_collected_ind: If op_cli_session_collected_ind is specified, the field named in this input will be compared to the value in cli_session_collected_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_session_collected_ind must be specified if op_cli_session_collected_ind is specified.
:type val_f_cli_session_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_session_collected_ind: If op_cli_session_collected_ind is specified, this value will be compared to the value in cli_session_collected_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_session_collected_ind must be specified if op_cli_session_collected_ind is specified.
:type val_c_cli_session_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cli_username: The operator to apply to the field cli_username. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cli_username: The CLI Username of the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cli_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cli_username: If op_cli_username is specified, the field named in this input will be compared to the value in cli_username using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cli_username must be specified if op_cli_username is specified.
:type val_f_cli_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cli_username: If op_cli_username is specified, this value will be compared to the value in cli_username using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cli_username must be specified if op_cli_username is specified.
:type val_c_cli_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_contact_method: The operator to apply to the field contact_method. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. contact_method: How the customer should be contacted (valid values are 'Email' and 'Phone'). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_contact_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_contact_method: If op_contact_method is specified, the field named in this input will be compared to the value in contact_method using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_contact_method must be specified if op_contact_method is specified.
:type val_f_contact_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_contact_method: If op_contact_method is specified, this value will be compared to the value in contact_method using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_contact_method must be specified if op_contact_method is specified.
:type val_c_contact_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_contact_name: The operator to apply to the field contact_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. contact_name: A name of a person to contact. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_contact_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_contact_name: If op_contact_name is specified, the field named in this input will be compared to the value in contact_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_contact_name must be specified if op_contact_name is specified.
:type val_f_contact_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_contact_name: If op_contact_name is specified, this value will be compared to the value in contact_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_contact_name must be specified if op_contact_name is specified.
:type val_c_contact_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_contact_value: The operator to apply to the field contact_value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. contact_value: E-mail address or phone that can be used to contact the customer. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_contact_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_contact_value: If op_contact_value is specified, the field named in this input will be compared to the value in contact_value using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_contact_value must be specified if op_contact_value is specified.
:type val_f_contact_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_contact_value: If op_contact_value is specified, this value will be compared to the value in contact_value using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_contact_value must be specified if op_contact_value is specified.
:type val_c_contact_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_customer_name: The operator to apply to the field customer_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. customer_name: Customer name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_customer_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_customer_name: If op_customer_name is specified, the field named in this input will be compared to the value in customer_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_customer_name must be specified if op_customer_name is specified.
:type val_f_customer_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_customer_name: If op_customer_name is specified, this value will be compared to the value in customer_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_customer_name must be specified if op_customer_name is specified.
:type val_c_customer_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_delivery_addl_email: The operator to apply to the field delivery_addl_email. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. delivery_addl_email: Additional email addresses to which the device support data bundle is sent. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_delivery_addl_email: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_delivery_addl_email: If op_delivery_addl_email is specified, the field named in this input will be compared to the value in delivery_addl_email using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_delivery_addl_email must be specified if op_delivery_addl_email is specified.
:type val_f_delivery_addl_email: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_delivery_addl_email: If op_delivery_addl_email is specified, this value will be compared to the value in delivery_addl_email using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_delivery_addl_email must be specified if op_delivery_addl_email is specified.
:type val_c_delivery_addl_email: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_delivery_method: The operator to apply to the field delivery_method. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. delivery_method: The method of delivery (sftp, email, or download) for the device support data bundle. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_delivery_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_delivery_method: If op_delivery_method is specified, the field named in this input will be compared to the value in delivery_method using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_delivery_method must be specified if op_delivery_method is specified.
:type val_f_delivery_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_delivery_method: If op_delivery_method is specified, this value will be compared to the value in delivery_method using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_delivery_method must be specified if op_delivery_method is specified.
:type val_c_delivery_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_discovered_ind: The operator to apply to the field device_discovered_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_discovered_ind: A flag indicating that the device has been discovered by the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_discovered_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_discovered_ind: If op_device_discovered_ind is specified, the field named in this input will be compared to the value in device_discovered_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_discovered_ind must be specified if op_device_discovered_ind is specified.
:type val_f_device_discovered_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_discovered_ind: If op_device_discovered_ind is specified, this value will be compared to the value in device_discovered_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_discovered_ind must be specified if op_device_discovered_ind is specified.
:type val_c_device_discovered_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_id: The operator to apply to the field device_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_id: The internal system identifier of the associated device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_id: If op_device_id is specified, the field named in this input will be compared to the value in device_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_id must be specified if op_device_id is specified.
:type val_f_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_id: If op_device_id is specified, this value will be compared to the value in device_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_id must be specified if op_device_id is specified.
:type val_c_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_ip_dotted: The operator to apply to the field device_ip_dotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_ip_dotted: The IP address of the associated device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_ip_dotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_ip_dotted: If op_device_ip_dotted is specified, the field named in this input will be compared to the value in device_ip_dotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_ip_dotted must be specified if op_device_ip_dotted is specified.
:type val_f_device_ip_dotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_ip_dotted: If op_device_ip_dotted is specified, this value will be compared to the value in device_ip_dotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_ip_dotted must be specified if op_device_ip_dotted is specified.
:type val_c_device_ip_dotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_model: The operator to apply to the field device_model. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_model: The device model of the associated device as determined by the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_model: If op_device_model is specified, the field named in this input will be compared to the value in device_model using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_model must be specified if op_device_model is specified.
:type val_f_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_model: If op_device_model is specified, this value will be compared to the value in device_model using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_model must be specified if op_device_model is specified.
:type val_c_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_type: The operator to apply to the field device_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_type: The device type of the associated device as determined by system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_type: If op_device_type is specified, the field named in this input will be compared to the value in device_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_type must be specified if op_device_type is specified.
:type val_f_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_type: If op_device_type is specified, this value will be compared to the value in device_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_type must be specified if op_device_type is specified.
:type val_c_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_vendor: The operator to apply to the field device_vendor. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_vendor: The vendor of the associated device as determined by the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_vendor: If op_device_vendor is specified, the field named in this input will be compared to the value in device_vendor using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_vendor must be specified if op_device_vendor is specified.
:type val_f_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_vendor: If op_device_vendor is specified, this value will be compared to the value in device_vendor using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_vendor must be specified if op_device_vendor is specified.
:type val_c_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_discovery_diagnostic_collected_ind: The operator to apply to the field discovery_diagnostic_collected_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. discovery_diagnostic_collected_ind: Flag indicating that discovery diagnostics were collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_discovery_diagnostic_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_discovery_diagnostic_collected_ind: If op_discovery_diagnostic_collected_ind is specified, the field named in this input will be compared to the value in discovery_diagnostic_collected_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_discovery_diagnostic_collected_ind must be specified if op_discovery_diagnostic_collected_ind is specified.
:type val_f_discovery_diagnostic_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_discovery_diagnostic_collected_ind: If op_discovery_diagnostic_collected_ind is specified, this value will be compared to the value in discovery_diagnostic_collected_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_discovery_diagnostic_collected_ind must be specified if op_discovery_diagnostic_collected_ind is specified.
:type val_c_discovery_diagnostic_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal system identifier of the associated device support request worksheet. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_license_expiration: The operator to apply to the field license_expiration. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. license_expiration: The expiration of the NetMRI license. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_license_expiration: If op_license_expiration is specified, the field named in this input will be compared to the value in license_expiration using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_license_expiration must be specified if op_license_expiration is specified.
:type val_f_license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_license_expiration: If op_license_expiration is specified, this value will be compared to the value in license_expiration using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_license_expiration must be specified if op_license_expiration is specified.
:type val_c_license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_license_id: The operator to apply to the field license_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. license_id: The NetMRI license identifier. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_license_id: If op_license_id is specified, the field named in this input will be compared to the value in license_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_license_id must be specified if op_license_id is specified.
:type val_f_license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_license_id: If op_license_id is specified, this value will be compared to the value in license_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_license_id must be specified if op_license_id is specified.
:type val_c_license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_license_type: The operator to apply to the field license_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. license_type: The NetMRI license type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_license_type: If op_license_type is specified, the field named in this input will be compared to the value in license_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_license_type must be specified if op_license_type is specified.
:type val_f_license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_license_type: If op_license_type is specified, this value will be compared to the value in license_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_license_type must be specified if op_license_type is specified.
:type val_c_license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_manual_data_entry_ind: The operator to apply to the field manual_data_entry_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. manual_data_entry_ind: Flag indicating that device data is being collected in manual mode. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_manual_data_entry_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_manual_data_entry_ind: If op_manual_data_entry_ind is specified, the field named in this input will be compared to the value in manual_data_entry_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_manual_data_entry_ind must be specified if op_manual_data_entry_ind is specified.
:type val_f_manual_data_entry_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_manual_data_entry_ind: If op_manual_data_entry_ind is specified, this value will be compared to the value in manual_data_entry_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_manual_data_entry_ind must be specified if op_manual_data_entry_ind is specified.
:type val_c_manual_data_entry_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_netmri_version: The operator to apply to the field netmri_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. netmri_version: The NetMRI version. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_netmri_version: If op_netmri_version is specified, the field named in this input will be compared to the value in netmri_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_netmri_version must be specified if op_netmri_version is specified.
:type val_f_netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_netmri_version: If op_netmri_version is specified, this value will be compared to the value in netmri_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_netmri_version must be specified if op_netmri_version is specified.
:type val_c_netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_os_version: The operator to apply to the field os_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. os_version: The OS version of the associated device as determined by the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_os_version: If op_os_version is specified, the field named in this input will be compared to the value in os_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_os_version must be specified if op_os_version is specified.
:type val_f_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_os_version: If op_os_version is specified, this value will be compared to the value in os_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_os_version must be specified if op_os_version is specified.
:type val_c_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_package_name: The operator to apply to the field package_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. package_name: The name of the compressed, encrypted device support data package. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_package_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_package_name: If op_package_name is specified, the field named in this input will be compared to the value in package_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_package_name must be specified if op_package_name is specified.
:type val_f_package_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_package_name: If op_package_name is specified, this value will be compared to the value in package_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_package_name must be specified if op_package_name is specified.
:type val_c_package_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_preferred_cli: The operator to apply to the field preferred_cli. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. preferred_cli: The preferred CLI method (SSH, Telnet, or Other). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_preferred_cli: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_preferred_cli: If op_preferred_cli is specified, the field named in this input will be compared to the value in preferred_cli using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_preferred_cli must be specified if op_preferred_cli is specified.
:type val_f_preferred_cli: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_preferred_cli: If op_preferred_cli is specified, this value will be compared to the value in preferred_cli using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_preferred_cli must be specified if op_preferred_cli is specified.
:type val_c_preferred_cli: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_priv_mode_info: The operator to apply to the field priv_mode_info. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. priv_mode_info: Privileged mode information. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_priv_mode_info: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_priv_mode_info: If op_priv_mode_info is specified, the field named in this input will be compared to the value in priv_mode_info using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_priv_mode_info must be specified if op_priv_mode_info is specified.
:type val_f_priv_mode_info: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_priv_mode_info: If op_priv_mode_info is specified, this value will be compared to the value in priv_mode_info using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_priv_mode_info must be specified if op_priv_mode_info is specified.
:type val_c_priv_mode_info: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_secure_version: The operator to apply to the field secure_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. secure_version: The encryption secure version of the credentials. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_secure_version: If op_secure_version is specified, the field named in this input will be compared to the value in secure_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_secure_version must be specified if op_secure_version is specified.
:type val_f_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_secure_version: If op_secure_version is specified, this value will be compared to the value in secure_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_secure_version must be specified if op_secure_version is specified.
:type val_c_secure_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_auth_password: The operator to apply to the field snmp_auth_password. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_auth_password: The SNMP authorized password of the device (SNMPv3 only). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_auth_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_auth_password: If op_snmp_auth_password is specified, the field named in this input will be compared to the value in snmp_auth_password using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_auth_password must be specified if op_snmp_auth_password is specified.
:type val_f_snmp_auth_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_auth_password: If op_snmp_auth_password is specified, this value will be compared to the value in snmp_auth_password using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_auth_password must be specified if op_snmp_auth_password is specified.
:type val_c_snmp_auth_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_auth_protocol: The operator to apply to the field snmp_auth_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_auth_protocol: The SNMP authorized protocol of the device (SNMPv3 only). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_auth_protocol: If op_snmp_auth_protocol is specified, the field named in this input will be compared to the value in snmp_auth_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_auth_protocol must be specified if op_snmp_auth_protocol is specified.
:type val_f_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_auth_protocol: If op_snmp_auth_protocol is specified, this value will be compared to the value in snmp_auth_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_auth_protocol must be specified if op_snmp_auth_protocol is specified.
:type val_c_snmp_auth_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_auth_username: The operator to apply to the field snmp_auth_username. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_auth_username: The SNMP authorized username of the device (SNMPv3 only). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_auth_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_auth_username: If op_snmp_auth_username is specified, the field named in this input will be compared to the value in snmp_auth_username using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_auth_username must be specified if op_snmp_auth_username is specified.
:type val_f_snmp_auth_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_auth_username: If op_snmp_auth_username is specified, this value will be compared to the value in snmp_auth_username using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_auth_username must be specified if op_snmp_auth_username is specified.
:type val_c_snmp_auth_username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_community_string: The operator to apply to the field snmp_community_string. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_community_string: The SNMP community string of the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_community_string: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_community_string: If op_snmp_community_string is specified, the field named in this input will be compared to the value in snmp_community_string using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_community_string must be specified if op_snmp_community_string is specified.
:type val_f_snmp_community_string: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_community_string: If op_snmp_community_string is specified, this value will be compared to the value in snmp_community_string using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_community_string must be specified if op_snmp_community_string is specified.
:type val_c_snmp_community_string: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_data_collected_ind: The operator to apply to the field snmp_data_collected_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_data_collected_ind: Flag indicating that SNMP data was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_data_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_data_collected_ind: If op_snmp_data_collected_ind is specified, the field named in this input will be compared to the value in snmp_data_collected_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_data_collected_ind must be specified if op_snmp_data_collected_ind is specified.
:type val_f_snmp_data_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_data_collected_ind: If op_snmp_data_collected_ind is specified, this value will be compared to the value in snmp_data_collected_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_data_collected_ind must be specified if op_snmp_data_collected_ind is specified.
:type val_c_snmp_data_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_port: The operator to apply to the field snmp_port. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_port: The SNMP port of the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_port: If op_snmp_port is specified, the field named in this input will be compared to the value in snmp_port using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_port must be specified if op_snmp_port is specified.
:type val_f_snmp_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_port: If op_snmp_port is specified, this value will be compared to the value in snmp_port using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_port must be specified if op_snmp_port is specified.
:type val_c_snmp_port: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_privacy_password: The operator to apply to the field snmp_privacy_password. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_privacy_password: The SNMP privacy password of the device (SNMPv3 only). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_privacy_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_privacy_password: If op_snmp_privacy_password is specified, the field named in this input will be compared to the value in snmp_privacy_password using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_privacy_password must be specified if op_snmp_privacy_password is specified.
:type val_f_snmp_privacy_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_privacy_password: If op_snmp_privacy_password is specified, this value will be compared to the value in snmp_privacy_password using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_privacy_password must be specified if op_snmp_privacy_password is specified.
:type val_c_snmp_privacy_password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_privacy_protocol: The operator to apply to the field snmp_privacy_protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_privacy_protocol: The SNMP privacy protocol of the device (SNMPv3 only). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_privacy_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_privacy_protocol: If op_snmp_privacy_protocol is specified, the field named in this input will be compared to the value in snmp_privacy_protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_privacy_protocol must be specified if op_snmp_privacy_protocol is specified.
:type val_f_snmp_privacy_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_privacy_protocol: If op_snmp_privacy_protocol is specified, this value will be compared to the value in snmp_privacy_protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_privacy_protocol must be specified if op_snmp_privacy_protocol is specified.
:type val_c_snmp_privacy_protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_snmp_version: The operator to apply to the field snmp_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. snmp_version: The SNMP version of the device (SNMPv1, SNMPv2, or SNMPv3). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_snmp_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_snmp_version: If op_snmp_version is specified, the field named in this input will be compared to the value in snmp_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_snmp_version must be specified if op_snmp_version is specified.
:type val_f_snmp_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_snmp_version: If op_snmp_version is specified, this value will be compared to the value in snmp_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_snmp_version must be specified if op_snmp_version is specified.
:type val_c_snmp_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_status: The operator to apply to the field status. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. status: The overall status of the device support request worksheet. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_status: If op_status is specified, the field named in this input will be compared to the value in status using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_status must be specified if op_status is specified.
:type val_f_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_status: If op_status is specified, this value will be compared to the value in status using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_status must be specified if op_status is specified.
:type val_c_status: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_status_msg: The operator to apply to the field status_msg. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. status_msg: Error message associated with worksheet status. Currently, it only contains error messages for "Transfer Failed" status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_status_msg: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_status_msg: If op_status_msg is specified, the field named in this input will be compared to the value in status_msg using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_status_msg must be specified if op_status_msg is specified.
:type val_f_status_msg: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_status_msg: If op_status_msg is specified, this value will be compared to the value in status_msg using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_status_msg must be specified if op_status_msg is specified.
:type val_c_status_msg: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_step_number: The operator to apply to the field step_number. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. step_number: The last step which the worksheet was saved at. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_step_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_step_number: If op_step_number is specified, the field named in this input will be compared to the value in step_number using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_step_number must be specified if op_step_number is specified.
:type val_f_step_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_step_number: If op_step_number is specified, this value will be compared to the value in step_number using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_step_number must be specified if op_step_number is specified.
:type val_c_step_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_syslogs_collected_ind: The operator to apply to the field syslogs_collected_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. syslogs_collected_ind: Flag indicating that the syslogs were collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_syslogs_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_syslogs_collected_ind: If op_syslogs_collected_ind is specified, the field named in this input will be compared to the value in syslogs_collected_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_syslogs_collected_ind must be specified if op_syslogs_collected_ind is specified.
:type val_f_syslogs_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_syslogs_collected_ind: If op_syslogs_collected_ind is specified, this value will be compared to the value in syslogs_collected_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_syslogs_collected_ind must be specified if op_syslogs_collected_ind is specified.
:type val_c_syslogs_collected_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_unit_id: The operator to apply to the field unit_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. unit_id: The internal identifier for the collector that is used to collect the device support data. Used in an OC only. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_unit_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_unit_id: If op_unit_id is specified, the field named in this input will be compared to the value in unit_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_unit_id must be specified if op_unit_id is specified.
:type val_f_unit_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_unit_id: If op_unit_id is specified, this value will be compared to the value in unit_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_unit_id must be specified if op_unit_id is specified.
:type val_c_unit_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in the system. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_user_in_device_capabilities: The operator to apply to the field user_in_device_capabilities. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. user_in_device_capabilities: The device capabilities as determined by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_user_in_device_capabilities: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_user_in_device_capabilities: If op_user_in_device_capabilities is specified, the field named in this input will be compared to the value in user_in_device_capabilities using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_user_in_device_capabilities must be specified if op_user_in_device_capabilities is specified.
:type val_f_user_in_device_capabilities: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_user_in_device_capabilities: If op_user_in_device_capabilities is specified, this value will be compared to the value in user_in_device_capabilities using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_user_in_device_capabilities must be specified if op_user_in_device_capabilities is specified.
:type val_c_user_in_device_capabilities: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_user_in_device_model: The operator to apply to the field user_in_device_model. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. user_in_device_model: The device model as determined by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_user_in_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_user_in_device_model: If op_user_in_device_model is specified, the field named in this input will be compared to the value in user_in_device_model using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_user_in_device_model must be specified if op_user_in_device_model is specified.
:type val_f_user_in_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_user_in_device_model: If op_user_in_device_model is specified, this value will be compared to the value in user_in_device_model using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_user_in_device_model must be specified if op_user_in_device_model is specified.
:type val_c_user_in_device_model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_user_in_device_type: The operator to apply to the field user_in_device_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. user_in_device_type: The device type as determined by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_user_in_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_user_in_device_type: If op_user_in_device_type is specified, the field named in this input will be compared to the value in user_in_device_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_user_in_device_type must be specified if op_user_in_device_type is specified.
:type val_f_user_in_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_user_in_device_type: If op_user_in_device_type is specified, this value will be compared to the value in user_in_device_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_user_in_device_type must be specified if op_user_in_device_type is specified.
:type val_c_user_in_device_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_user_in_device_vendor: The operator to apply to the field user_in_device_vendor. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. user_in_device_vendor: The device vendor as determined by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_user_in_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_user_in_device_vendor: If op_user_in_device_vendor is specified, the field named in this input will be compared to the value in user_in_device_vendor using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_user_in_device_vendor must be specified if op_user_in_device_vendor is specified.
:type val_f_user_in_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_user_in_device_vendor: If op_user_in_device_vendor is specified, this value will be compared to the value in user_in_device_vendor using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_user_in_device_vendor must be specified if op_user_in_device_vendor is specified.
:type val_c_user_in_device_vendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_user_in_os_version: The operator to apply to the field user_in_os_version. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. user_in_os_version: The os version as determined by the user. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_user_in_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_user_in_os_version: If op_user_in_os_version is specified, the field named in this input will be compared to the value in user_in_os_version using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_user_in_os_version must be specified if op_user_in_os_version is specified.
:type val_f_user_in_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_user_in_os_version: If op_user_in_os_version is specified, this value will be compared to the value in user_in_os_version using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_user_in_os_version must be specified if op_user_in_os_version is specified.
:type val_c_user_in_os_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_virtual_network_id: The operator to apply to the field virtual_network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. virtual_network_id: The internal identifier for the network which the device is associated to. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_virtual_network_id: If op_virtual_network_id is specified, the field named in this input will be compared to the value in virtual_network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_f_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_virtual_network_id: If op_virtual_network_id is specified, this value will be compared to the value in virtual_network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_c_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each DeviceSupportWorksheet. Valid values are id, device_id, device_ip_dotted, netmri_version, license_id, license_type, license_expiration, device_vendor, device_model, os_version, device_type, discovery_diagnostic_collected_ind, snmp_data_collected_ind, cli_session_collected_ind, priv_mode_info, syslogs_collected_ind, admin_guide_collected_ind, access_to_device, access_to_device_other, created_at, updated_at, user_in_device_type, user_in_device_vendor, user_in_device_model, user_in_os_version, user_in_device_capabilities, snmp_version, snmp_community_string, snmp_port, snmp_auth_username, snmp_auth_password, snmp_auth_protocol, snmp_privacy_password, snmp_privacy_protocol, preferred_cli, cli_username, cli_password, secure_version, package_name, device_discovered_ind, delivery_method, unit_id, status, step_number, delivery_addl_email, manual_data_entry_ind, status_msg, virtual_network_id, contact_method, customer_name, contact_name, contact_value. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_support_worksheets: An array of the DeviceSupportWorksheet objects that match the specified input criteria.
:rtype device_support_worksheets: Array of DeviceSupportWorksheet
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified device support worksheet.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal system identifier of the associated device support request worksheet.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_support_worksheet: The device support worksheet identified by the specified id.
:rtype device_support_worksheet: DeviceSupportWorksheet
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
|
import pytest
import xrp.parser
import xrp.views
class TestResources:
    """Behavioral tests for ResourcesView filtering and #define resolution."""

    def get_file_view(self, file_path):
        # Read the resource file and build a view over its contents.
        with open(file_path) as resource_file:
            return self.get_string_view(resource_file.read())

    @staticmethod
    def get_string_view(string):
        # Parse the raw resource text and wrap the result in a view that can
        # resolve #define references.
        xparser = xrp.parser.XParser()
        xparser.parse(string)
        definitions = xrp.views.DefinitionsView(xparser.defines)
        return xrp.views.ResourcesView(resource_statements=xparser.resources,
                                       def_ctx=definitions)

    @pytest.fixture(scope='class')
    def strawberry_resources(self):
        # Shared view over the sample resource file.
        return self.get_file_view('tests/data/res/strawberry')

    @pytest.mark.parametrize('file,filter_string,result_ids', [
        ('tests/data/res/strawberry', '*color0', ['URxvt*color0']),
        ('tests/data/res/strawberry', '*foreground', ['*foreground'])
    ])
    def test_filter(self, file, filter_string, result_ids):
        # Filtering must return exactly the expected resource identifiers.
        view = self.get_file_view(file)
        matched_ids = view.filter(filter_string).keys()
        assert sorted(matched_ids) == sorted(result_ids)

    @pytest.mark.parametrize('input_,expected', [
        (
            """#define white #FFFFFF
#define hotpink #FF69b4
*color0: white
*color1: hotpink
""",
            {'*color0': '#FFFFFF', '*color1': '#FF69b4'}
        )
    ])
    def test_defined(self, input_, expected):
        # #define names used as values must resolve to their definitions.
        view = self.get_string_view(input_)
        for identifier, value in expected.items():
            assert view[identifier] == value
class TestEmptyLines:
    """Check that EmptyLinesView reports exactly the blank lines of a file."""

    @pytest.mark.parametrize('file', [
        'tests/data/res/strawberry'
    ])
    def test_text_at_line(self, file):
        with open(file) as handle:
            contents = handle.read()
        parser = xrp.XParser(contents)
        view = xrp.views.EmptyLinesView(whitespace_list=parser.whitespace,
                                        empty_lines=parser.empty_lines)
        # Independently collect every line number whose text is only whitespace.
        expected = []
        for line_num, text in enumerate(contents.splitlines()):
            if not text.strip():
                expected.append(line_num)
        assert sorted(view) == sorted(expected)
class TestXFileView:
    """Tests for XFileView's line-oriented text reconstruction."""

    @pytest.fixture(scope='class')
    def contents(self):
        # Raw text of the sample resource file, shared across the class.
        with open('tests/data/res/strawberry') as f:
            return f.read()

    @pytest.fixture(scope='class')
    def xfileview(self, contents):
        # View over a freshly parsed copy of the sample file.
        return xrp.views.XFileView(xrp.XParser(contents))

    def test_text_at_line(self, xfileview, contents):
        # Compare the view's per-line text against the raw file, line by line,
        # after applying the view's expected whitespace normalization.
        for line_num, line_string in enumerate(contents.splitlines()):
            if 'define' in line_string:
                # NOTE(review): this replaces a single space with a single
                # space, which is a no-op — presumably the intent was to
                # collapse runs of spaces on '#define' lines; confirm against
                # XFileView's normalization rules before changing.
                line_string = line_string.replace(' ', ' ')
            elif ':' in line_string:
                # Resource lines are compared with all spaces removed.
                line_string = line_string.replace(' ', '')
            assert xfileview.text_at_line(line_num) == line_string + '\n'

    def test_full_text(self):
        # TODO: not yet implemented.
        pass
|
# Animation state shared between setup() and draw().
counter = 0  # phase angle driving the rotating chords; wraps at 2*PI
cx = 0       # declared global in draw() but never assigned there
cy = 0       # declared global in draw() but never assigned there
nx = 0       # centre x of the grid cell currently being drawn
ny = 0       # centre y of the grid cell currently being drawn
def setup():
    """Processing setup hook: configure the canvas once at program start."""
    size(500,500)    # 500x500 pixel window
    smooth()         # enable anti-aliased drawing
    background(0)    # black background
    strokeWeight(1)  # thin, 1-pixel lines
def draw():
    """Processing draw hook: overlay one more set of rotating chords per frame."""
    global counter, cx, cy, nx, ny
    # Faint, highly translucent strokes so successive frames accumulate.
    stroke(200, 5)
    # The offsets depend only on counter, so hoist them out of the grid loop.
    dx = sin(counter) * 50
    dy = cos(counter) * 50
    for row in range(6):
        for col in range(6):
            nx = col * 80 + 50
            ny = row * 80 + 50
            # NOTE(review): the second endpoint swaps nx/ny (x taken from ny,
            # y from nx) — possibly intentional for the visual effect; confirm.
            line(nx - dx, ny - dy, ny + dx, nx + dy)
    counter += 0.1
    if counter > 2 * PI:
        counter = 0
|
from harvester_manager import HarvesterManger
from harvester import Harvester
# Simple example of using captcha harvester with one Harvester and printing captcha responses to console.
def main():
    """Run a single captcha harvester and print each solved response."""
    url = 'https://www.google.com/recaptcha/api2/demo'
    # Scrape the reCAPTCHA sitekey straight from the target page.
    sitekey = Harvester.get_sitekey(url)

    # Called every time the manager pulls a solved captcha from a harvester
    # (i.e. after a user completes the challenge).
    def print_response(captcha):
        print(captcha['response'])

    manager = HarvesterManger(response_callback=print_response)
    # Register one harvester for the demo page.
    manager.add_harvester(Harvester(url, sitekey))
    # Launch the harvester windows, then hand control to the manager's loop.
    manager.start_harvesters()
    manager.main_loop()


if __name__ == '__main__':
    main()
|
def practice_assert(arg):
    """Print arg's truthiness, then assert it — raising AssertionError if falsy."""
    truthy = bool(arg)
    print(f"is {arg!r} True?", truthy)
    assert arg, f"arg == {arg!r}"
# --- Demo 1: an assert that passes ----------------------------------------
print("this will pass ".ljust(40, '='))
practice_assert("abc")
print('''I said "This will pass"''')

# --- Demo 2: a failing assert caught by try/except -------------------------
try:
    print("This will raise an exception ".ljust(40, '='))
    practice_assert('')
except AssertionError as e:
    print('''I told you "This will raise an exception" :''', e)
finally:
    # finally runs whether or not the assert fired
    print("end of try-except block")

# --- Demo 3: a failing assert with no handler — the script dies here -------
print("this will raise an exception ".ljust(40, '='))
practice_assert([])
print('''This line will not show up''')  # unreachable: the line above raises
|
import xml.etree.ElementTree as ET
from datetime import timezone
from ..v3_0 import dto, namespaces as ns
from .serialize_element import serialize_text_element
from ois_api_client.xml.get_full_tag import get_full_tag
def serialize_header(data: dto.BasicHeader) -> ET.Element:
    """Build the common <header> XML element from a BasicHeader DTO.

    The timestamp is normalised to UTC and truncated to millisecond
    precision with a trailing 'Z'.
    """
    header = ET.Element(get_full_tag(ns.COMMON, 'header'))
    utc_stamp = data.timestamp.astimezone(tz=timezone.utc)
    # strftime gives microseconds; drop the last three digits for milliseconds.
    stamp_text = f'{utc_stamp.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]}Z'
    serialize_text_element(header, 'requestId', data.request_id, ns.COMMON)
    serialize_text_element(header, 'timestamp', stamp_text, ns.COMMON)
    serialize_text_element(header, 'requestVersion', data.request_version, ns.COMMON)
    serialize_text_element(header, 'headerVersion', data.header_version, ns.COMMON)
    return header
|
# First line: m = number of word/value entries, n is read but unused below.
m, n = map(int, input().split())

# Value table: word -> integer value.
haydict = {}
for _ in range(m):
    word, value = input().split()
    haydict[word] = int(value)

count = 0
while True:
    try:
        tokens = input().split()
        if tokens:
            if tokens[0] == ".":
                # End-of-block marker: report the running total and reset it.
                print(count)
                count = 0
            else:
                for token in tokens:
                    # Unknown words (and zero-valued ones) contribute nothing.
                    count += haydict.get(token, 0)
    except EOFError:
        break
"""
INSTALLED_APPS = [
    "paper_admin",
    "paper_admin.patches.logentry_admin",
    ...
    "logentry_admin",
]

PAPER_MENU = [
    ...
    dict(
        label=_("Logs"),
        icon="fa fa-fw fa-lg fa-history",
        perms="admin.view_logentry",
        models=[
            "admin.LogEntry"
        ]
    ),
]
"""
# Points Django at this patch's AppConfig subclass.
# NOTE(review): ``default_app_config`` is deprecated since Django 3.2 —
# presumably still needed for the Django versions this package supports;
# confirm before removing.
default_app_config = "paper_admin.patches.logentry_admin.apps.Config"
|
"""Virtual Sudoku Game Board
=== Module description
This module contains a Sudoku class that keep tracks of various game variables
and keep track of any errors made my the users
@date: 7/17/2018
@author: Larry Shi
"""
from typing import Union, Tuple, List
# Public API of this module.
__all__ = ['Sudoku', 'CELL_IDX']

# Constants
# Row/column index groups for the nine 3x3 cells of a standard 9x9 board:
# each inner list is one band of three row (or column) indices.
CELL_IDX = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# Main class
class Sudoku:
    """A virtual Sudoku game board.

    Tracks the board contents and a history of the player's moves.

    === Attributes ===
    moves: a list of (position, number, pencil) tuples recording every move
        made by the player, in order

    === Private Attributes ===
    _dim: the dimension (side length) of the board
    _board: a row-major 2D-list representing the board; 0 means empty
    """
    _dim: int
    moves: List[Tuple[Tuple[int, int], int, bool]]
    _board: List[List[int]]

    def __init__(self, dim: int, board: Union[List[List[int]], None] = None) -> None:
        """Initialize a board of side length <dim>.

        If <board> is given its contents are copied (so later moves do not
        mutate the caller's list); otherwise the board starts empty.
        """
        self._dim = dim
        self.moves = []
        if board is None:
            # Create empty board
            self._board = [[0 for _ in range(dim)]
                           for _ in range(dim)]
        else:
            # Copy board grid
            self._board = [[board[row][col] for col in range(dim)]
                           for row in range(dim)]

    def __str__(self) -> str:
        """Human readable representation of the board."""
        rep = ""
        for row in range(self._dim):
            for col in range(self._dim):
                rep += str(self._board[row][col])
                # Column separator between squares, newline at end of row.
                rep += "\n" if col == self._dim - 1 else " | "
            if row != self._dim - 1:
                # Horizontal divider between rows.
                rep += "-" * (4 * self._dim - 3)
                rep += "\n"
        return rep

    def get_dim(self) -> int:
        """Return the dimension of the board

        >>> game = Sudoku(9)
        >>> game.get_dim()
        9
        >>> game = Sudoku(3)
        >>> game.get_dim()
        3
        """
        return self._dim

    def get_board(self) -> List[List[int]]:
        """Return the board as a 2-dimension list.

        Note: this returns the internal grid itself (not a copy), matching
        the original behavior — callers may rely on mutating it.

        >>> game = Sudoku(3)
        >>> game.get_board()
        [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        """
        return self._board

    def get_square(self, row: int, col: int) -> int:
        """Get the contents of the board at position (row, col).

        === Attributes ===
        row: the row on the board
        col: the column on the board

        >>> game = Sudoku(9)
        >>> game.get_square(1, 1)
        0
        """
        return self._board[row][col]

    def get_last_move(self) -> Union[Tuple, None]:
        """Remove and return the last move made by the user.

        === Returns ===
        the last (position, number, pencil) entry in the moves list, or
        None if no moves have been made
        """
        # pop() both returns and removes the entry (used for "undo").
        return self.moves.pop() if self.moves else None

    def set_square(self, row: int, col: int, num: int) -> None:
        """Place number on the board at position (row, col).

        === Attributes ===
        row: the row of the board
        col: the column of the board
        num: the number that is filled into the square

        >>> game = Sudoku(3)
        >>> game.set_square(1, 1, 3)
        >>> game.get_square(1, 1)
        3
        """
        self._board[row][col] = num

    def add_move(self, pos: Tuple[int, int], num: int, pencil: bool) -> None:
        """Record a move in the moves list.

        === Attributes ===
        pos: the position of the square
        num: the number placed on the move
        pencil: bool that indicates whether pencil is toggled

        >>> game = Sudoku(3)
        >>> game.add_move((1, 1), 3, True)
        >>> game.moves
        [((1, 1), 3, True)]
        """
        self.moves.append((pos, num, pencil))

    def get_pos_from_num(self, num: int) -> List[Tuple[int, int]]:
        """Get every (row, col) position on the board holding <num>.

        === Attributes ===
        num: the number that will be searched

        === Returns ===
        a list of (row, col) positions, in row-major order
        """
        return [(row, col)
                for row in range(self._dim)
                for col in range(self._dim)
                if self._board[row][col] == num]

    def get_row_col_cell(self) -> Tuple[List[list], List[list], List[list]]:
        """Get all the rows, columns, and cells of the board.

        Cells are the dim sub-squares of side sqrt(dim), listed left-to-right
        then top-to-bottom. Previously this used the module-level CELL_IDX
        constant, which is hard-coded for 9x9 boards and raised IndexError
        for any other dimension; the block size is now derived from _dim
        (identical output for dim == 9).

        Note: the returned row lists alias the internal board rows, matching
        the original shallow-copy behavior.

        === Returns ===
        rows, columns and cells of the board
        """
        # row (shallow copy of the list of rows, as before)
        rows = self._board.copy()
        # column
        cols = [[self._board[row][col] for row in range(self._dim)]
                for col in range(self._dim)]
        # cell: sub-squares of side round(sqrt(dim)); exact for perfect
        # squares (3 when dim == 9), clamped at 1 for tiny boards.
        block = max(1, round(self._dim ** 0.5))
        cells = []
        for band in range(0, self._dim, block):
            for stack in range(0, self._dim, block):
                cells.append([self._board[row][col]
                              for row in range(band, min(band + block, self._dim))
                              for col in range(stack, min(stack + block, self._dim))])
        return rows, cols, cells

    def verify_board(self) -> Union[bool, set]:
        """Verify whether the board is complete and free of duplicates.

        === Returns ===
        True if the board is complete and valid, False if valid so far but
        incomplete, or the set of duplicated numbers if any row, column or
        cell repeats a non-zero number
        """
        rows, cols, cells = self.get_row_col_cell()
        configs = rows + cols + cells
        # The board is complete only when no square is still 0.
        state = all(0 not in row for row in self._board)
        # Collect every non-zero number that repeats within a configuration.
        duplicate_num = set()
        for config in configs:
            for num in set(config):
                if num != 0 and config.count(num) > 1:
                    duplicate_num.add(num)
        return duplicate_num if duplicate_num else state
if __name__ == '__main__':
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
|
from discord.ext import commands
import discord
#from discord_slash import cog_ext, SlashContext, manage_commands, error
from tabulate import tabulate
import mariadb
import pydest
class Public(commands.Cog):
    """Cog with publicly available guild commands: clan leaderboards,
    prefix listing, the support-server link and the online clan roster."""

    def __init__(self, bot):
        self.bot = bot
        g_guilds = []  # NOTE(review): unused; leftover from the disabled slash-command registration below
        # for lang in self.bot.langs:
        #     g_guilds.append([])
        #     for guild in self.bot.guilds:
        #         guild_lang = self.bot.guild_lang(guild.id)
        #         if self.bot.guild_lang(guild.id) == lang:
        #             g_guilds[-1].append(guild.id)
        #     translations = self.bot.translations[lang]
        #     if len(g_guilds[-1]) > 0:
        #         try:
        #             self.bot.slash.add_slash_command(self.sl_online, name=translations['help']['commands']['online']['name'], description=translations['help']['online'], guild_ids=g_guilds[-1])
        #         except error.DuplicateCommand:
        #             try:
        #                 self.bot.slash.add_slash_command(self.sl_online, name="online_{}".format(lang),
        #                                                  description=translations['help']['online'], guild_ids=g_guilds[-1])
        #             except error.DuplicateCommand:
        #                 pass

    @commands.command(
        aliases=['gtop', 'globaltop']
    )
    @commands.guild_only()
    async def top(self, ctx, metric, number=10):
        """Post a metric leaderboard for the guild's clan.

        `metric` is either a numeric metric hash or a metric name that is
        resolved through the internal metrics database; `number` caps the
        list length. Invoking via the gtop/globaltop aliases ranks across
        every registered clan instead of just this guild's clan.
        """
        ctx.bot.guild_cursor.execute('''SELECT clan_id FROM clans WHERE server_id=?''', (ctx.guild.id,))
        clan_id = ctx.bot.guild_cursor.fetchone()
        lang = ctx.bot.guild_lang(ctx.message.guild.id)
        translations = ctx.bot.translations[lang]['top']
        # The alias used to invoke the command selects clan-local vs global scope.
        if ctx.invoked_with in ['gtop', 'globaltop']:
            is_global = True
        else:
            is_global = False
        if clan_id is None:
            clan_id = [0]
        if clan_id[0] == 0:
            # No clan registered for this guild.
            await ctx.channel.send(translations['no_clan'], delete_after=60)
            return
        if len(clan_id) > 0:
            clan_ids = [clan_id[0]]
            try:
                # A purely numeric argument is already a raw metric hash.
                int(metric)
                is_time = False
                is_kda = False
            except ValueError:
                # Otherwise look the name up across every metrics table.
                try:
                    internal_db = mariadb.connect(host=ctx.bot.api_data['db_host'], user=ctx.bot.api_data['cache_login'],
                                                  password=ctx.bot.api_data['pass'], port=ctx.bot.api_data['db_port'],
                                                  database='metrics')
                    internal_cursor = internal_db.cursor()
                    internal_cursor.execute('''SELECT hash FROM seasonsmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM accountmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM cruciblemetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM destinationmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM gambitmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM raidsmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM strikesmetrics WHERE name=? and is_working=1
                    UNION ALL
                    SELECT hash FROM trialsofosirismetrics WHERE name=? and is_working=1''',
                                            (metric.lower(), metric.lower(),
                                             metric.lower(), metric.lower(),
                                             metric.lower(), metric.lower(),
                                             metric.lower(), metric.lower()))
                    metric_id = internal_cursor.fetchone()
                    # The metric name hints at how its values should be rendered.
                    if 'kda' in metric.lower():
                        is_kda = True
                    else:
                        is_kda = False
                    if 'speed' in metric.lower():
                        is_time = True
                    else:
                        is_time = False
                    if metric_id is not None:
                        if len(metric_id) > 0:
                            metric = metric_id[0]
                        else:
                            internal_db.close()
                            raise mariadb.Error
                    else:
                        internal_db.close()
                        raise mariadb.Error
                    internal_db.close()
                except mariadb.Error:
                    # Unknown metric name: notify the user and tidy the invoking message.
                    await ctx.channel.send(translations['unknown_metric'].format(metric), delete_after=10)
                    if ctx.guild.me.permissions_in(ctx.message.channel).manage_messages:
                        try:
                            await ctx.message.delete()
                        except discord.NotFound:
                            pass
                    return
            try:
                # Resolve the hash to the metric's display name/description.
                top_name = await ctx.bot.data.destiny.decode_hash(metric, 'DestinyMetricDefinition', language=lang)
            except pydest.pydest.PydestException:
                await ctx.channel.send(translations['unknown_metric'].format(metric), delete_after=10)
                if ctx.guild.me.permissions_in(ctx.message.channel).manage_messages:
                    await ctx.message.delete()
                return
            await ctx.channel.send(translations['in_progress'], delete_after=30)
            if is_global:
                # Global scope: collect a deduplicated list of every registered clan.
                clan_ids_c = ctx.bot.guild_cursor.execute('''SELECT clan_id FROM clans''')
                clan_ids_c = clan_ids_c.fetchall()
                clan_ids = []
                for clan_id in clan_ids_c:
                    if clan_id[0] not in clan_ids:
                        clan_ids.append(clan_id[0])
            top_list = await ctx.bot.data.get_clan_leaderboard(clan_ids, metric, number, is_time, is_kda, is_global)
            max_len = min(number, len(top_list))  # NOTE(review): unused
            if len(top_list) > 0:
                msg = '{}```{}```'.format(top_name['displayProperties']['description'], tabulate(top_list, tablefmt='plain', colalign=('left', 'left')))
                if len(msg) > 2000:
                    # Discord caps messages at 2000 chars: split on line boundaries,
                    # closing and re-opening the code fence around each chunk.
                    msg_strs = msg.splitlines()
                    msg = ''
                    for line in msg_strs:
                        if len(msg) + len(line) <= 1990:
                            msg = '{}{}\n'.format(msg, line)
                        else:
                            msg = '{}```'.format(msg)
                            await ctx.channel.send(msg)
                            msg = '```{}\n'.format(line)
                    if len(msg) > 0:
                        msg = '{}'.format(msg)
                        await ctx.channel.send(msg)
                else:
                    await ctx.channel.send(msg)
            else:
                await ctx.channel.send(translations['no_data'])
        else:
            await ctx.channel.send(translations['no_clan'], delete_after=10)
        if ctx.guild.me.permissions_in(ctx.message.channel).manage_messages:
            try:
                await ctx.message.delete()
            except discord.NotFound:
                pass

    @commands.command()
    @commands.guild_only()
    async def prefix(self, ctx):
        """Report the bot's command prefixes configured for this guild."""
        lang = ctx.bot.guild_lang(ctx.message.guild.id)
        prefixes = ctx.bot.guild_prefix(ctx.guild.id)
        if len(prefixes) > 0:
            msg = '{}\n'.format(ctx.bot.translations[lang]['msg']['prefixes'].format(ctx.message.guild.me.display_name, prefixes[0]))
            prefix_list = ''
            for prefix in prefixes:
                prefix_list = '{} {},'.format(prefix_list, prefix)
            # Drop the leading space and the trailing comma.
            prefix_list = prefix_list[1:-1]
            msg = '{}```{}```'.format(msg, prefix_list)
        else:
            msg = '{}\n'.format(ctx.bot.translations[lang]['msg']['no_prefixes'].format(ctx.message.guild.me.display_name))
        await ctx.message.channel.send(msg)
        if ctx.guild.me.permissions_in(ctx.message.channel).manage_messages:
            await ctx.message.delete()

    @commands.command()
    async def support(self, ctx):
        """Post the invite link to the bot's support server."""
        await ctx.channel.send('https://discord.gg/JEbzECp')

    @commands.command()
    @commands.guild_only()
    async def online(self, ctx):
        """List the currently online members of the guild's clan."""
        ctx.bot.guild_cursor.execute('''SELECT clan_id FROM clans WHERE server_id=?''', (ctx.guild.id,))
        clan_id = ctx.bot.guild_cursor.fetchone()
        lang = ctx.bot.guild_lang(ctx.guild.id)
        translations = ctx.bot.translations[lang]['top']
        if clan_id is None:
            clan_id = [0]
        if clan_id[0] == 0:
            # No clan registered for this guild.
            # if type(ctx) == SlashContext:
            #     return translations['no_clan']
            await ctx.channel.send(translations['no_clan'], delete_after=60)
            return
        if len(clan_id) > 0:
            clan_ids = clan_id[0]
            data = await ctx.bot.data.get_online_clan_members(clan_ids, lang)
            # More than one row means a header row is present in the data.
            if len(data) > 1:
                msg = '```{}```'.format(
                    tabulate(data, tablefmt='simple', colalign=('left', 'left'), headers='firstrow'))
            else:
                msg = '```{}```'.format(
                    tabulate(data, tablefmt='simple', colalign=('left', 'left')))
            # if type(ctx) == SlashContext:
            #     return msg
            if len(msg) > 2000:
                # Split oversized tables across several <=2000-char messages.
                msg_strs = msg.splitlines()
                msg = ''
                for line in msg_strs:
                    if len(msg) + len(line) <= 1990:
                        msg = '{}{}\n'.format(msg, line)
                    else:
                        msg = '{}```'.format(msg)
                        await ctx.channel.send(msg)
                        msg = '```{}\n'.format(line)
                if len(msg) > 0:
                    msg = '{}'.format(msg)
                    await ctx.channel.send(msg)
            else:
                await ctx.channel.send(msg)
        else:
            # if type(ctx) == SlashContext:
            #     return translations['no_clan']
            await ctx.channel.send(translations['no_clan'], delete_after=10)

    # async def sl_online(self, ctx):
    #     await ctx.defer()
    #     msg = await self.online(ctx)
    #     if len(msg) > 2000:
    #         msg_strs = msg.splitlines()
    #         msg = ''
    #         for line in msg_strs:
    #             if len(msg) + len(line) <= 1990:
    #                 msg = '{}{}\n'.format(msg, line)
    #             else:
    #                 msg = '{}```'.format(msg)
    #                 await ctx.send(msg)
    #     else:
    #         await ctx.send(msg)
def setup(bot):
    """Extension entry point: register the Public cog with the bot."""
    bot.add_cog(Public(bot))
|
from typing import Optional, List, Dict
from .easy_access import EasyAccessDict
class AvatarDetail(EasyAccessDict):
    """Typed attribute view of the verification badge attached to an avatar."""
    user_type: int
    identity_level: int
    identity_icon_url: str
class Creator(EasyAccessDict):
    """Typed attribute view of a user/creator record as returned by the
    playlist API (presumably the NetEase Cloud Music API — confirm)."""
    # Account and profile basics.
    default_avatar: bool
    province: int
    auth_status: int
    followed: bool
    avatar_url: str
    account_status: int
    gender: int
    city: int
    birthday: int
    user_id: int
    user_type: int
    nickname: str
    signature: str
    description: str
    detail_description: str
    # Avatar / background imagery identifiers and URLs.
    avatar_img_id: float
    background_img_id: float
    background_url: str
    authority: int
    mutual: bool
    expert_tags: Optional[List[str]]
    experts: Optional[Dict[str, str]]
    dj_status: int
    vip_type: int
    remark_name: None
    authentication_types: int
    avatar_detail: Optional[AvatarDetail]
    anchor: bool
    # String forms of the numeric image ids above.
    background_img_id_str: str
    avatar_img_id_str: str
    creator_avatar_img_id_str: Optional[str]
class Playlist(EasyAccessDict):
    """Typed attribute view of a playlist record in the top-playlists response."""
    name: str
    id: int
    track_number_update_time: int
    status: int
    user_id: int
    create_time: int
    update_time: int
    subscribed_count: int
    track_count: int
    cloud_track_count: int
    cover_img_url: str
    cover_img_id: float
    description: str
    tags: List[str]
    play_count: int
    track_update_time: int
    special_type: int
    total_duration: int
    # Owner and audience of the playlist.
    creator: Creator
    tracks: None
    subscribers: List[Creator]
    subscribed: bool
    comment_thread_id: str
    new_imported: bool
    ad_type: int
    high_quality: bool
    privacy: int
    ordered: bool
    anonimous: bool
    cover_status: int
    recommend_info: None
    share_count: int
    cover_img_id_str: str
    comment_count: int
    alg: str
class GetTopPlaylistsResp(EasyAccessDict):
    """Response envelope for the get-top-playlists endpoint: one page of
    playlists plus paging metadata."""
    playlists: List[Playlist]
    total: int
    code: int
    more: bool
    cat: str
|
from .base import *

# Local-development settings: print outgoing e-mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEBUG = True
# Serve static assets from the 'static/local' directory during development.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static', 'local')
]
# WEBPACK_MANIFEST_FILE = os.path.join(BASE_DIR, '../webpack-stats.local.json')
from __future__ import annotations
from typing import Any, Optional
from datastax.linkedlists.doubly_linked_list import DoublyLinkedList
from datastax.linkedlists.private_lists import doubly_circular_llist
class DoublyCircularList(doubly_circular_llist.DoublyCircularList,
                         DoublyLinkedList):
    """Doubly linked list whose head and tail are linked to each other,
    keeping the list circular in both directions."""

    def _construct(self, array: Optional[list[Any]]) -> DoublyCircularList:
        # Bulk-build from `array`; a None/empty input or a leading None
        # leaves the list empty. Returns self so construction can chain.
        if array and array[0] is not None:
            for item in array:
                self.append(item)
        return self

    def append(self, data: Any) -> None:
        # Delegate the linear append, then re-link the ends to close the circle.
        super().append(data)
        self.head.prev, self.tail.next = self.tail, self.head

    def insert(self, data: Any) -> None:
        # Delegate the linear insert, then re-link the ends to close the circle.
        super().insert(data)
        self.head.prev, self.tail.next = self.tail, self.head
|
"""Test module for the plane endpoint"""
import pytest
from django.urls import resolve, reverse
from src.apps.core.utilities.messages import ERRORS
from tests.fixtures.plane import NEW_PLANE, SEATS
from tests.fixtures.user import USER
# Resolved URL of the plane list endpoint, shared by all tests below.
PLANE_URL = reverse('flight:plane-list')
# Route name for a single plane; reversed per-test with the target pk.
GET_A_PLANE_URL = 'flight:plane-detail'
@pytest.mark.django_db
class TestPlaneView:
    """Class to test plane views.

    Fixtures used throughout: `client` (Django test client), `auth_header`
    (regular-user Authorization header), `add_planes` (persists fixture
    planes), `create_superuser`/`generate_token` (admin credentials).
    """

    def test_plane_url_succeeds(self):
        """Test the paths"""
        assert resolve(PLANE_URL).view_name == 'flight:plane-list'
        assert resolve(reverse('flight:plane-detail',
                               args=['pk'])).view_name == 'flight:plane-detail'

    def test_getting_all_plane_succeeds(self, auth_header, add_planes, client):
        """Test getting available plane."""
        response = client.get(PLANE_URL, **auth_header)
        resp_data = response.data
        data = resp_data['data']
        assert response.status_code == 200
        assert resp_data['status'] == 'success'
        assert len(data) == 2
        assert data[0]['model'] == str(NEW_PLANE[0]['model'])

    def test_getting_all_plane_without_auth_fails(self, client):
        """Test getting available planes fails"""
        response = client.get(PLANE_URL)
        resp_data = response.data
        assert response.status_code == 401
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['AUTH_01']

    def test_getting_a_plane_succeeds(self, client, auth_header, add_planes):
        """Test getting a plane."""
        plane = add_planes[0]
        plane_url = reverse(GET_A_PLANE_URL, args=[plane.id])
        response = client.get(plane_url, **auth_header)
        resp_data = response.data
        data = resp_data['data']
        assert response.status_code == 200
        assert resp_data['status'] == 'success'
        assert isinstance(data['seats'], list)
        assert data['model'] == plane.model

    def test_getting_a_plane_without_auth_fails(self, client, add_planes):
        """Test getting a plane fails"""
        plane = add_planes[0]
        plane_url = reverse(GET_A_PLANE_URL, args=[plane.id])
        response = client.get(plane_url)
        resp_data = response.data
        assert response.status_code == 401
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['AUTH_01']

    def test_getting_a_plane_with_invalid_pk_fails(self, client, auth_header):
        """Test getting a plane fails"""
        plane_url = reverse(GET_A_PLANE_URL, args=['invalid'])
        response = client.get(plane_url, **auth_header)
        resp_data = response.data
        assert response.status_code == 404
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['USR_09']

    def test_updating_a_plane_with_invalid_data_fails(
            self,
            client,
            add_planes,
            create_superuser,
            generate_token,
    ):
        """Test update flight with invalid data"""
        user = create_superuser(USER)
        token = generate_token(user)
        auth_header = {'HTTP_AUTHORIZATION': f'Bearer {token}'}
        plane = add_planes[0]
        plane_url = reverse(GET_A_PLANE_URL, args=[plane.id])
        # An empty model string must be rejected by the serializer.
        NEW_PLANE[0]['model'] = ''
        response = client.patch(plane_url,
                                content_type='application/json',
                                data=NEW_PLANE[0],
                                **auth_header)
        resp_data = response.data
        data = resp_data['errors']
        assert response.status_code == 400
        assert resp_data['status'] == 'error'
        assert data['model'][0] == ERRORS['USR_07']

    def test_only_admin_can_update_a_plane_succeeds(self, client,
                                                    create_superuser,
                                                    generate_token,
                                                    add_planes):
        """Test that admin should be able to update plane"""
        user = create_superuser(USER)
        token = generate_token(user)
        plane = add_planes[1]
        auth_header = {'HTTP_AUTHORIZATION': f'Bearer {token}'}
        plane_url = reverse(GET_A_PLANE_URL, args=[plane.id])
        NEW_PLANE[1]['model'] = 'PT=JET'
        # NEW_PLANE[1]['seats'] = SEATS
        response = client.patch(plane_url,
                                content_type='application/json',
                                data=NEW_PLANE[1],
                                **auth_header)
        resp_data = response.data
        data = resp_data['data']
        assert response.status_code == 200
        assert resp_data['status'] == 'success'
        assert data['model'] == NEW_PLANE[1]['model']

    def test_users_cannot_update_a_plane_fails(self, client, auth_header,
                                               add_planes):
        """Test that admin should be able to update flight"""
        plane = add_planes[1]
        plane_url = reverse(GET_A_PLANE_URL, args=[plane.id])
        NEW_PLANE[0]['model'] = 'EUT-123IR'
        # auth_header here is a regular user's token, so the update is forbidden.
        response = client.patch(plane_url,
                                content_type='application/json',
                                data=NEW_PLANE[0],
                                **auth_header)
        resp_data = response.data
        assert response.status_code == 403
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['AUTH_02']

    def test_users_creating_a_plane_fails(self, client, auth_header):
        """Test that users should not be able to create plane"""
        NEW_PLANE[0]['seats'] = SEATS
        response = client.post(PLANE_URL,
                               content_type='application/json',
                               data=NEW_PLANE,
                               **auth_header)
        resp_data = response.data
        assert response.status_code == 403
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['AUTH_02']

    def test_only_admin_can_create_plane_succeed(self, client,
                                                 create_superuser,
                                                 generate_token):
        """Test that admin should be able to create plane"""
        user = create_superuser(USER)
        token = generate_token(user)
        auth_header = {'HTTP_AUTHORIZATION': f'Bearer {token}'}
        NEW_PLANE[0]['seats'] = SEATS
        response = client.post(PLANE_URL,
                               content_type='application/json',
                               data=NEW_PLANE[0],
                               **auth_header)
        resp_data = response.data
        data = resp_data['data']
        assert response.status_code == 201
        assert resp_data['status'] == 'success'
        assert data['model'] == NEW_PLANE[0]['model']

    def test_creating_a_flight_with_invalid_data_fails(self, client,
                                                       create_superuser,
                                                       generate_token):
        """Test create flight with invalid data"""
        user = create_superuser(USER)
        token = generate_token(user)
        auth_header = {'HTTP_AUTHORIZATION': f'Bearer {token}'}
        # An empty model string must be rejected by the serializer.
        NEW_PLANE[0]['model'] = ''
        response = client.post(PLANE_URL,
                               content_type='application/json',
                               data=NEW_PLANE[0],
                               **auth_header)
        resp_data = response.data
        data = resp_data['errors']
        assert response.status_code == 400
        assert resp_data['status'] == 'error'
        assert data['model'][0] == ERRORS['USR_07']

    def test_admin_can_update_plane_with_invalid_id_fails(
            self, client, create_superuser, generate_token):
        """Test that admin should not be able to update a plane with
        invalid param id"""
        user = create_superuser(USER)
        token = generate_token(user)
        auth_header = {'HTTP_AUTHORIZATION': f'Bearer {token}'}
        plane_url = reverse(GET_A_PLANE_URL, args=['invalid'])
        response = client.patch(plane_url,
                                content_type='application/json',
                                data=NEW_PLANE[0],
                                **auth_header)
        resp_data = response.data
        assert response.status_code == 404
        assert resp_data['status'] == 'error'
        assert resp_data['user_message'] == ERRORS['USR_09']
|
import insightconnect_plugin_runtime
from .schema import SearchInput, SearchOutput, Output, Input
# Custom imports below
from typing import Optional
class Search(insightconnect_plugin_runtime.Action):
    """Search Zendesk for organizations, tickets or users and return the
    matching records flattened into plain dictionaries."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="search", description="Search Zendesk", input=SearchInput(), output=SearchOutput()
        )

    def run(self, params={}):
        """Run the search.

        :param params: action parameters; ``Input.ITEM`` is the query string
            and ``Input.TYPE`` selects the object kind ("Organization",
            "Ticket" or "User").
        :return: ``{Output.<KIND>: [records...]}`` on success, or
            ``{"error": ...}`` when nothing matched.
        """
        s_type = params.get(Input.TYPE)
        results = self.connection.client.search(params.get(Input.ITEM), type=params.get(Input.TYPE).lower())
        if len(results) == 0:
            return {"error": "Could not find item"}
        # Flatten each API object into a plain dict matching the output schema.
        objs = []
        for item in results:
            if s_type == "Organization":
                objs.append(self._organization_to_dict(item))
            elif s_type == "Ticket":
                objs.append(self._ticket_to_dict(item))
            elif s_type == "User":
                objs.append(self._user_to_dict(item))
        if s_type == "Organization":
            return {Output.ORGANIZATIONS: insightconnect_plugin_runtime.helper.clean(objs)}
        elif s_type == "Ticket":
            return {Output.TICKETS: insightconnect_plugin_runtime.helper.clean(objs)}
        else:
            return {Output.USERS: insightconnect_plugin_runtime.helper.clean(objs)}

    @staticmethod
    def _organization_to_dict(item) -> dict:
        """Flatten a Zendesk organization object into a plain dict."""
        return {
            "created_at": item.created_at,
            "details": item.details,
            "external_id": item.external_id,
            "group_id": item.group_id,
            "id": item.id,
            "name": item.name,
            "notes": item.notes,
            "shared_comments": item.shared_comments,
            "shared_tickets": item.shared_tickets,
            "tags": item.tags,
            "updated_at": item.updated_at,
            "url": item.url,
        }

    @staticmethod
    def _ticket_to_dict(item) -> dict:
        """Flatten a Zendesk ticket object into a plain dict."""
        return {
            "assignee_id": item.assignee_id,
            "brand_id": item.brand_id,
            "collaborator_ids": item.collaborator_ids,
            "created_at": item.created_at,
            "description": item.description,
            "due_at": item.due_at,
            "external_id": item.external_id,
            "forum_topic_id": item.forum_topic_id,
            "group_id": item.group_id,
            "has_incidents": item.has_incidents,
            "id": item.id,
            "organization_id": item.organization_id,
            "priority": item.priority,
            "problem_id": item.problem_id,
            "raw_subject": item.raw_subject,
            "recipient": item.recipient,
            "requester_id": item.requester_id,
            "sharing_agreement_ids": item.sharing_agreement_ids,
            "status": item.status,
            "subject": item.subject,
            "submitter_id": item.submitter_id,
            "tags": item.tags,
            "type": item.type,
            "updated_at": item.updated_at,
            "url": item.url,
        }

    @staticmethod
    def _user_to_dict(item) -> dict:
        """Flatten a Zendesk user object into a plain dict."""
        return {
            "active": item.active,
            "alias": item.alias,
            "chat_only": item.chat_only,
            "created_at": item.created_at,
            "custom_role_id": item.custom_role_id,
            "details": item.details,
            "email": item.email,
            "external_id": item.external_id,
            "id": item.id,
            "last_login_at": item.last_login_at,
            "locale": item.locale,
            "locale_id": item.locale_id,
            "moderator": item.moderator,
            "name": item.name,
            "notes": item.notes,
            "only_private_comments": item.only_private_comments,
            "organization_id": item.organization_id,
            "phone": item.phone,
            "photo": item.photo,
            "restricted_agent": item.restricted_agent,
            "role": item.role,
            "shared": item.shared,
            "shared_agent": item.shared_agent,
            "signature": item.signature,
            "suspended": item.suspended,
            "tags": item.tags,
            "ticket_restriction": item.ticket_restriction,
            "time_zone": item.time_zone,
            "two_factor_auth_enabled": item.two_factor_auth_enabled,
            "updated_at": item.updated_at,
            "url": item.url,
            "verified": item.verified,
        }

    @staticmethod
    def convert_to_string(values: Optional[int]) -> Optional[str]:
        """Return *values* as a string, or None for a falsy input.

        NOTE(review): 0 maps to None as well — confirm that treating 0 as
        "missing" is intended before tightening this to `values is None`.
        """
        if not values:
            return None
        return str(values)

    @staticmethod
    def convert_array(values: Optional[list]) -> Optional[list]:
        """Convert every element of *values* to a string; pass None through.

        Previously a None input raised TypeError despite the Optional
        annotation; it now returns None.
        """
        if values is None:
            return None
        return [str(item) for item in values]
|
from eth_utils import (
ValidationError,
decode_hex,
to_bytes,
)
import pytest
import rlp
from eth.exceptions import UnrecognizedTransactionType
from eth.rlp.receipts import Receipt
from eth.vm.forks import (
BerlinVM,
LondonVM,
)
from eth.vm.forks.berlin.receipts import (
TypedReceipt,
)
# The type of receipt is based on the type of the transaction. So we are
# checking the type of the receipt against known transaction types.
# Add recognized types here if any fork knows about it. Then,
# add manual unrecognized types for older forks. For example,
# (BerlinVM, to_bytes(2), UnrecognizedTransactionType) should be added explicitly.
# Typed-transaction ids that at least one supported fork recognizes.
RECOGNIZED_TRANSACTION_TYPES = {1, 2}
# Single-byte type prefixes in the valid 0x00-0x7f range that no fork knows
# about: decoding any of them must raise UnrecognizedTransactionType.
UNRECOGNIZED_TRANSACTION_TYPES = tuple(
    (to_bytes(val), UnrecognizedTransactionType)
    for val in range(0, 0x80)
    if val not in RECOGNIZED_TRANSACTION_TYPES
)
# These are valid RLP byte-strings, but invalid for EIP-2718
INVALID_TRANSACTION_TYPES = tuple(
    (rlp.encode(to_bytes(val)), ValidationError)
    for val in range(0x80, 0x100)
)
@pytest.mark.parametrize('vm_class', [BerlinVM, LondonVM])
@pytest.mark.parametrize(
    'encoded, expected',
    (
        # Legacy (untyped) receipt.
        (
            decode_hex('f90185a01e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e01b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002f85ef85c942d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2df842a00000000000000000000000000000000000000000000000000000000000000003a00000000000000000000000000000000000000000000000000000000000000004829999'),  # noqa: E501
            Receipt(
                state_root=b'\x1e' * 32,
                gas_used=1,
                bloom=2,
                logs=[
                    [b'\x2d' * 20, [3, 4], b'\x99' * 2],
                ],
            ),
        ),
        # EIP-2718 typed receipt: same payload prefixed with type byte 0x01.
        (
            decode_hex('01f90185a01e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e1e01b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002f85ef85c942d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2df842a00000000000000000000000000000000000000000000000000000000000000003a00000000000000000000000000000000000000000000000000000000000000004829999'),  # noqa: E501
            TypedReceipt(
                type_id=1,
                proxy_target=Receipt(
                    state_root=b'\x1e' * 32,
                    gas_used=1,
                    bloom=2,
                    logs=[
                        [b'\x2d' * 20, [3, 4], b'\x99' * 2],
                    ],
                ),
            ),
        ),
    )
)
def test_receipt_decode(vm_class, encoded, expected):
    """Known encodings must round-trip: encode(expected) == encoded and
    the VM's receipt builder must decode them back to the receipt object."""
    expected_encoding = expected.encode()
    assert encoded == expected_encoding
    sedes = vm_class.get_receipt_builder()
    decoded = sedes.decode(encoded)
    assert decoded == expected
@pytest.mark.parametrize('vm_class', [BerlinVM, LondonVM])
@pytest.mark.parametrize(
    'encoded, expected_failure',
    (
        # A bare empty RLP list is not a valid receipt payload.
        (
            decode_hex('0xc0'),
            rlp.exceptions.DeserializationError,
        ),
    )
    + UNRECOGNIZED_TRANSACTION_TYPES
    + INVALID_TRANSACTION_TYPES
)
def test_receipt_decode_failure(vm_class, encoded, expected_failure):
    """Malformed or unknown-typed encodings must raise the expected error
    on every fork that supports typed receipts."""
    sedes = vm_class.get_receipt_builder()
    with pytest.raises(expected_failure):
        rlp.decode(encoded, sedes=sedes)
@pytest.mark.parametrize(
    'vm_class, encoded, expected_failure',
    (
        # Berlin predates EIP-1559, so type-2 receipts are unrecognized there.
        (
            BerlinVM,
            to_bytes(2),
            UnrecognizedTransactionType,
        ),
    )
)
def test_receipt_decode_failure_by_vm(vm_class, encoded, expected_failure):
    """Per-fork failures: a type valid on a later fork must still be
    rejected by an earlier one."""
    sedes = vm_class.get_receipt_builder()
    with pytest.raises(expected_failure):
        rlp.decode(encoded, sedes=sedes)
@pytest.mark.parametrize('is_legacy', (True, False))
@pytest.mark.parametrize('is_rlp_encoded', (True, False))
def test_EIP2930_receipt_decode(is_legacy, is_rlp_encoded):
    """Decode the same receipt payload as legacy vs EIP-2930 typed (0x01
    prefix), optionally wrapped in an outer RLP encoding, and check every
    decoded field against the expected values."""
    expected_vals = dict(state_root=b'\xee', gas_used=1, bloom=2, logs=[
        dict(address=b'\x0f' * 20, topics=(3, 4), data=b'\xaa'),
    ])
    legacy_encoded = b'\xf9\x01e\x81\xee\x01\xb9\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\xf8]\xf8[\x94\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\xf8B\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\xa0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x81\xaa'  # noqa: E501
    if is_legacy:
        encoded = legacy_encoded
        if is_rlp_encoded:
            pytest.skip('No examples where legacy receipts are double-encoded')
    else:
        # Typed variant: the legacy payload with the 0x01 type prefix.
        encoded = b'\x01' + legacy_encoded
    receipt_builder = BerlinVM.get_receipt_builder()
    if is_rlp_encoded:
        # Round-trip through an outer RLP wrapping, with and without cache.
        double_encoded = rlp.encode(encoded)
        receipt = rlp.decode(double_encoded, sedes=receipt_builder)
        assert rlp.encode(receipt) == double_encoded
        assert rlp.encode(receipt, cache=False) == double_encoded
    else:
        receipt = receipt_builder.decode(encoded)
        re_encoded = receipt.encode()
        assert encoded == re_encoded
    # Field-by-field comparison against the expected values.
    assert receipt.state_root == expected_vals['state_root']
    assert receipt.gas_used == expected_vals['gas_used']
    assert receipt.bloom == expected_vals['bloom']
    expected_logs = expected_vals['logs']
    assert len(receipt.logs) == len(expected_logs)
    for log, expected_log in zip(receipt.logs, expected_logs):
        assert log.address == expected_log['address']
        assert log.topics == expected_log['topics']
        assert log.data == expected_log['data']
|
import random
# This is a statistical sampling approximation algorithm that simply simulates the game for a fixed number of dice rolls.
# An exact algorithm would involve calculating the eigenvector of the largest eigenvalue of the transition matrix (which is practical),
# but averaging over all possible permutations of both the Chance and Community Chest decks (which is computationally infeasible).
def problem084():
    """
    In the game, Monopoly, the standard board is set up in the following way:
    GO A1 CC1 A2 T1 R1 B1 CH1 B2 B3 JAIL
    H2                               C1
    T2                               U1
    H1                               C2
    CH3                              C3
    R4                               R2
    G3                               D1
    CC3                              CC2
    G2                               D2
    G1                               D3
    G2J F3 U2 F2 F1 R3 E3 E2 CH2 E1 FP
    A player starts on the GO square and adds the scores on two 6-sided dice
    to determine the number of squares they advance in a clockwise direction.
    Without any further rules we would expect to visit each square with equal
    probability: 2.5%. However, landing on G2J (Go To Jail), CC (community
    chest), and CH (chance) changes this distribution.
    In addition to G2J, and one card from each of CC and CH, that orders the
    player to go directly to jail, if a player rolls three consecutive
    doubles, they do not advance the result of their 3rd roll. Instead they
    proceed directly to jail.
    At the beginning of the game, the CC and CH cards are shuffled. When a
    player lands on CC or CH they take a card from the top of the respective
    pile and, after following the instructions, it is returned to the bottom
    of the pile. There are sixteen cards in each pile, but for the purpose of
    this
    Problem we are only concerned with cards that order a movement; any
    instruction not concerned with movement will be ignored and the player
    will remain on the CC/CH square.
    • Community Chest (2/16 cards):
    1. Advance to GO
    2. Go to JAIL
    • Chance (10/16 cards):
    1. Advance to GO
    2. Go to JAIL
    3. Go to C1
    4. Go to E3
    5. Go to H2
    6. Go to R1
    7. Go to next R (railway company)
    8. Go to next R
    9. Go to next U (utility company)
    10. Go back 3 squares.
    The heart of this
    Problem concerns the likelihood of visiting a particular
    square. That is, the probability of finishing at that square after a roll.
    For this reason it should be clear that, with the exception of G2J for
    which the probability of finishing on it is zero, the CH squares will have
    the lowest probabilities, as 5/8 request a movement to another square, and
    it is the final square that the player finishes at on each roll that we
    are interested in. We shall make no distinction between "Just Visiting"
    and being sent to JAIL, and we shall also ignore the rule about requiring
    a double to "get out of jail", assuming that they pay to get out on their
    next turn.
    By starting at GO and numbering the squares sequentially from 00 to 39 we
    can concatenate these two-digit numbers to produce strings that correspond
    with sets of squares.
    Statistically it can be shown that the three most popular squares, in
    order, are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24, and GO
    (3.09%) = Square 00. So these three most popular squares can be listed
    with the six-digit modal string: 102400.
    If, instead of using two 6-sided dice, two 4-sided dice are used, find the
    six-digit modal string.
    """
    TRIALS = 10 ** 7
    # One landing counter per board square (0..39).
    visitcounts = [0] * 40
    chance = CardDeck(16)
    communitychest = CardDeck(16)
    consecutivedoubles = 0
    location = 0
    for i in range(TRIALS):
        # Roll tetrahedral dice
        die0 = random.randint(1, 4)
        die1 = random.randint(1, 4)
        consecutivedoubles = (consecutivedoubles + 1) if (die0 == die1) else 0
        if consecutivedoubles < 3:
            location = (location + die0 + die1) % 40
        else:
            # Third consecutive double: go directly to jail, ignoring the roll.
            location = 30
            consecutivedoubles = 0
        # Process actions for some locations
        if location in (7, 22, 36):  # Chance
            card = chance.next_card()
            if card == 0:
                location = 0
            elif card == 1:
                location = 10
            elif card == 2:
                location = 11
            elif card == 3:
                location = 24
            elif card == 4:
                location = 39
            elif card == 5:
                location = 5
            elif card in (6, 7):  # Next railway
                # Railways sit on squares 5, 15, 25, 35; map to the next one.
                location = (location + 5) // 10 % 4 * 10 + 5
            elif card == 8:  # Next utility
                # Utilities are squares 12 and 28.
                location = 28 if (12 < location < 28) else 12
            elif card == 9:
                # "Go back 3 squares" — from CH3 (36) this lands on CC3 (33),
                # which the community-chest branch below then processes.
                location -= 3
            else:
                pass
        elif location == 30:  # Go to jail
            location = 10
        else:
            pass
        if location in (2, 17, 33):  # Community chest
            card = communitychest.next_card()
            if card == 0:
                location = 0
            elif card == 1:
                location = 10
        visitcounts[location] += 1
    # Concatenate the three most-visited squares as two-digit numbers.
    temp = sorted(enumerate(visitcounts), key=(lambda ic: -ic[1]))
    ans = "".join("{:02d}".format(i) for (i, c) in temp[:3])
    return int(ans)
class CardDeck(object):
    """A deck of `size` cards numbered 0..size-1, dealt one at a time
    without replacement; the deck is reshuffled whenever it runs out."""

    def __init__(self, size):
        self.cards = list(range(size))
        # Start in the "exhausted" state so the first draw triggers a shuffle.
        self.index = size

    def next_card(self):
        """Deal the next card, reshuffling first if the deck is exhausted."""
        if self.index >= len(self.cards):
            random.shuffle(self.cards)
            self.index = 0
        card = self.cards[self.index]
        self.index += 1
        return card
if __name__ == "__main__":
    # Print the six-digit modal string for the two 4-sided-dice variant.
    print(problem084())
|
#!/usr/bin/env python
# fork multiagent-particle-envs from openai and uncomment the following:
# line 7 in multiagent.multi_discrete.py: # from gym.spaces import prng
# line 14 in multiagent.rendering.py: # from gym.utils import reraise
import argparse
from robot.robot_policy import HierPolicy
from robot.robot_environment import HierEnv
import robot.robot_scenarios as scenarios
if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-s', '--scenario', default='hierarchical_collaboration.py', help='Path of the scenario Python script.')
    args = parser.parse_args()
    # load scenario from script
    scenario = scenarios.load(args.scenario).Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment
    env = HierEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None,
                  shared_viewer=True, discrete_action=True)
    # render call to create viewer window (necessary only for interactive policies)
    env.render()
    # create interactive policies for each agent
    policies = [HierPolicy(env)]
    # execution loop
    obs_n = env.reset()
    while True:
        # query for action from each agent's policy
        act_n = []
        for i, policy in enumerate(policies):
            act_n.append(policy.action(obs_n[i]))
        # step environment
        obs_n, reward_n, done_n, _ = env.step(act_n)
        # show current state of environment
        env.render()
        # debugging
        # print(f'done = {done_n}')
        # print(f'observation_n = {obs_n}')
        # print(f" who is grabbing: {world.objects[0].state.who_grabbed}")
        print(f'reward = {reward_n}')
        # print(f"grasping = {world.agents[0].state.grasp}")
        # Stop once the first agent's environment reports done.
        if done_n[0]: break
import os

# Absolute path of the directory the program was launched from.
PROJECT_PATH = os.path.abspath(os.getcwd())
# Default location for Gaia data, a 'gaia_data' folder inside the project directory.
DEFAULT_DATA_PATH = os.path.abspath(os.path.join(PROJECT_PATH, 'gaia_data'))
|
import tensorflow as tf
from model_fn.model_fn_2d.util_2d.graphs_2d import Graph2D
import model_fn.util_model_fn.keras_compatible_layers as layers
from util.flags import update_params
class GraphConv2MultiFF(Graph2D):
    """2D graph network (v0.1): two strided convolutions over the 'fc'
    input followed by a stack of dense layers, with separate prediction
    heads for radius, rotation, translation and edge count.
    """

    def __init__(self, params):
        super(GraphConv2MultiFF, self).__init__(params)
        # v0.1
        self.graph_params["mid_layer_activation"] = "leaky_relu"
        self.graph_params["conv_layer_activation"] = "leaky_relu"
        self.graph_params["input_dropout"] = 0.0  # NOTE(review): declared but never applied in infer() — confirm intent
        self.graph_params["batch_norm"] = False
        self.graph_params["dense_layers"] = [256, 128]
        # Flag-supplied overrides take precedence over the defaults above.
        self.graph_params = update_params(self.graph_params, self._flags.graph_params, "graph")

    def infer(self, inputs, is_training):
        """Build the forward graph for a batch of 'fc' inputs.

        Returns a dict of tensors: radius_pred, rotation_pred,
        translation_pred and edges_pred.
        """
        # The literal string "None" disables the corresponding activation.
        if self.graph_params["conv_layer_activation"] == "None":
            conv_layer_activation_fn = None
        else:
            conv_layer_activation_fn = getattr(layers, self.graph_params["conv_layer_activation"])
        if self.graph_params["mid_layer_activation"] == "None":
            mid_layer_activation_fn = None
        else:
            mid_layer_activation_fn = getattr(layers, self.graph_params["mid_layer_activation"])
        fc = tf.cast(inputs['fc'], dtype=tf.float32)
        # NHWC layout: 3 "rows" of data_len values, one channel.
        fc = tf.reshape(fc, [-1, 3, self._flags.data_len, 1])
        # Conv1
        with tf.compat.v1.variable_scope("conv1"):
            kernel_dims = [3, 6, 1, 8]
            conv_strides = [1, 1, 3, 1]
            conv1 = layers.conv2d_bn_lrn_drop(inputs=fc, kernel_shape=kernel_dims, is_training=is_training,
                                              strides=conv_strides, activation=conv_layer_activation_fn,
                                              use_bn=False, use_lrn=False, padding='SAME')
            # SAME padding: output width is ceil(data_len / stride).
            conv1_len = int((self._flags.data_len + conv_strides[2] - 1) / conv_strides[2])
        # Conv2
        with tf.compat.v1.variable_scope("conv2"):
            kernel_dims = [1, 8, 8, 16]
            conv_strides = [1, 1, 6, 1]
            conv2 = layers.conv2d_bn_lrn_drop(inputs=conv1, kernel_shape=kernel_dims, is_training=is_training,
                                              strides=conv_strides, activation=conv_layer_activation_fn,
                                              use_bn=False, use_lrn=False, padding='SAME')
            conv2_len = int((conv1_len + conv_strides[2] - 1) / conv_strides[2])
        # Flatten the conv features for the dense stack.
        # BUG FIX: the original reassigned ff_in = fc right after this reshape,
        # silently discarding both conv layers and feeding a rank-4 tensor to
        # ff_layer; the flattened conv output is used instead.
        ff_in = tf.reshape(conv2, [-1, conv2_len * 16 * 3])
        # BUG FIX: the key below was misspelled "dense_ keraslayers", which
        # raised KeyError at graph-construction time (__init__ sets "dense_layers").
        for index, nhidden in enumerate(self.graph_params["dense_layers"]):
            ff_in = layers.ff_layer(inputs=ff_in, outD=nhidden,
                                    is_training=is_training, activation=mid_layer_activation_fn,
                                    use_bn=self.graph_params["batch_norm"], name="ff_{}".format(index + 1))
        # Shared linear projection feeding the geometry heads.
        ff_final = layers.ff_layer(inputs=ff_in, outD=self._flags.max_edges * 2,
                                   is_training=is_training, activation=None, name="ff_final")
        radius_final = layers.ff_layer(inputs=ff_final,
                                       outD=1,
                                       is_training=is_training,
                                       activation=None,
                                       name="radius_final")
        rotation_final = layers.ff_layer(inputs=ff_final,
                                         outD=1,
                                         is_training=is_training,
                                         activation=None,
                                         name="rotation_final")
        translation_final = layers.ff_layer(inputs=ff_final,
                                            outD=2,  # 2 dimension problem
                                            is_training=is_training,
                                            activation=None,
                                            name="translation_final")
        # The edge-count head branches off the dense stack, not ff_final.
        edge_final = layers.ff_layer(inputs=ff_in,
                                     outD=self._flags.max_edges - 3,  # at least a triangle!
                                     is_training=is_training,
                                     activation=layers.softmax,
                                     name="edge_final")
        return {"radius_pred": radius_final,
                "rotation_pred": rotation_final,
                "translation_pred": translation_final,
                "edges_pred": edge_final}
class GraphMultiFF(Graph2D):
    """Pure feed-forward 2D graph network (v0.2): a configurable stack of
    dense layers over the flattened 'fc' input, with separate prediction
    heads for radius, rotation, translation and edge count.
    """

    def __init__(self, params):
        super(GraphMultiFF, self).__init__(params)
        # v0.2
        self.graph_params["mid_layer_activation"] = "leaky_relu"
        self.graph_params["batch_norm"] = False
        self.graph_params["dense_layers"] = [512, 256, 128, 64]
        self.graph_params["dense_dropout"] = []  # [0.0, 0.0] dropout after each dense layer
        self.graph_params["input_dropout"] = 0.01
        self.graph_params["abs_as_input"] = False
        # Flag-supplied overrides take precedence over the defaults above.
        self.graph_params = update_params(self.graph_params, self._flags.graph_params, "graph")

    def infer(self, inputs, is_training):
        """Build the forward graph for a batch of 'fc' inputs.

        Returns a dict of tensors: radius_pred, rotation_pred,
        translation_pred and edges_pred.
        """
        # The literal string "None" disables the mid-layer activation.
        if self.graph_params["mid_layer_activation"] == "None":
            mid_layer_activation_fn = None
        else:
            mid_layer_activation_fn = getattr(layers, self.graph_params["mid_layer_activation"])
        if self.graph_params["abs_as_input"]:
            # Feed |z| (magnitude of rows 1..2) together with row 0 instead
            # of the raw 3-row input.
            z_values = tf.slice(inputs['fc'], [0, 1, 0], [-1, 2, -1])
            z_squared = tf.square(z_values)
            # BUG FIX: 'keep_dims' is the removed pre-TF1.5 alias; 'keepdims'
            # works on TF >= 1.5 and TF 2.x alike.
            abs_in = tf.sqrt(tf.reduce_sum(z_squared, axis=1, keepdims=True))
            # BUG FIX: the original used tf.stack, which adds a new leading
            # axis, so the reshape below interleaved features of *different*
            # samples; concatenating on axis 1 keeps each sample's features
            # together as a 2*data_len row.
            ff_in = tf.concat([abs_in, tf.slice(inputs['fc'], [0, 0, 0], [-1, 1, -1])], axis=1)
            ff_in = tf.reshape(ff_in, (-1, 2 * self._flags.data_len))
        else:
            ff_in = tf.reshape(inputs['fc'], (-1, 3 * self._flags.data_len))
        # Input dropout, training only.
        # NOTE(review): keep_prob was removed from tf.nn.dropout in TF 2.x;
        # kept here for the TF1-compat style this file uses — confirm TF version.
        if is_training and self.graph_params["input_dropout"] > 0:
            ff_in = tf.nn.dropout(ff_in, keep_prob=1.0 - self.graph_params["input_dropout"])
        for index, nhidden in enumerate(self.graph_params["dense_layers"]):
            ff_in = layers.ff_layer(inputs=ff_in, outD=nhidden,
                                    is_training=is_training, activation=mid_layer_activation_fn,
                                    use_bn=self.graph_params["batch_norm"], name="ff_{}".format(index + 1))
            # Per-layer dropout; dense_dropout must be as long as dense_layers
            # when non-empty (original also indexed it per layer).
            if is_training and self.graph_params["dense_dropout"] and float(
                    self.graph_params["dense_dropout"][index]) > 0.0:
                # BUG FIX: the original dropped with the *input* dropout rate
                # here, ignoring the configured per-layer dense_dropout value.
                ff_in = tf.nn.dropout(ff_in, keep_prob=1.0 - float(self.graph_params["dense_dropout"][index]))
        # All heads read the final dense representation.
        ff_final = ff_in
        radius_final = layers.ff_layer(inputs=ff_final,
                                       outD=1,
                                       is_training=is_training,
                                       activation=None,
                                       name="radius_final")
        rotation_final = layers.ff_layer(inputs=ff_final,
                                         outD=1,
                                         is_training=is_training,
                                         activation=None,
                                         name="rotation_final")
        translation_final = layers.ff_layer(inputs=ff_final,
                                            outD=2,  # 2 dimension problem
                                            is_training=is_training,
                                            activation=None,
                                            name="translation_final")
        edge_final = layers.ff_layer(inputs=ff_in,
                                     outD=self._flags.max_edges - 3,  # at least a triangle!
                                     is_training=is_training,
                                     activation=layers.softmax,
                                     name="edge_final")
        return {"radius_pred": radius_final,
                "rotation_pred": rotation_final,
                "translation_pred": translation_final,
                "edges_pred": edge_final}
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
# NOTE: this string is not a module docstring (it follows the __revision__
# assignment), just descriptive text kept as an expression statement.
"""
Test how the setup.py script installs SCons.
Note that this is an installation test, not a functional test, so the
name of this script doesn't end in *Tests.py.
"""
import os
import os.path
import shutil
import sys
# WindowsError only exists on Windows; alias it to OSError elsewhere so the
# except clauses below work on every platform.
try: WindowsError
except NameError: WindowsError = OSError
import TestSCons
# The SCons version under test, used to derive package, script and
# directory names throughout this script.
version = TestSCons.TestSCons.scons_version
scons_version = 'scons-%s' % version
python = TestSCons.python
class MyTestSCons(TestSCons.TestSCons):
    """TestSCons subclass that knows the expected install layout of setup.py."""

    # A representative smattering of build engine modules.
    _lib_modules = [
        '__init__.py',
        'Action.py',
        'Builder.py',
        'Environment.py',
        'Util.py',
    ]
    _base_scripts = ['scons', 'sconsign']
    _version_scripts = ['scons-%s' % version, 'sconsign-%s' % version]
    _bat_scripts = ['scons.bat']
    _bat_version_scripts = ['scons-%s.bat' % version]
    _man_pages = ['scons.1', 'sconsign.1']

    def __init__(self):
        TestSCons.TestSCons.__init__(self)
        # All installs go under a scratch root inside the workspace.
        self.root = self.workpath('root')
        self.prefix = self.root + os.path.splitdrive(sys.prefix)[1]
        if sys.platform == 'win32':
            self.bin_dir = os.path.join(self.prefix, 'Scripts')
            self.bat_dir = self.prefix
            self.standalone_lib = os.path.join(self.prefix, 'scons')
            self.standard_lib = os.path.join(
                self.prefix, 'Lib', 'site-packages', '')
            self.version_lib = os.path.join(self.prefix, scons_version)
            self.man_dir = os.path.join(self.prefix, 'Doc')
        else:
            self.bin_dir = os.path.join(self.prefix, 'bin')
            self.bat_dir = self.bin_dir
            self.lib_dir = os.path.join(self.prefix, 'lib')
            self.standalone_lib = os.path.join(self.lib_dir, 'scons')
            self.standard_lib = os.path.join(
                self.lib_dir, 'python%s' % sys.version[:3], 'site-packages', '')
            self.version_lib = os.path.join(self.lib_dir, scons_version)
            self.man_dir = os.path.join(self.prefix, 'man', 'man1')
        # Helpers that anchor a bare file name in the matching install dir.
        self.prepend_bin_dir = lambda fname: os.path.join(self.bin_dir, fname)
        self.prepend_bat_dir = lambda fname: os.path.join(self.bat_dir, fname)
        self.prepend_man_dir = lambda fname: os.path.join(self.man_dir, fname)

    def run(self, *args, **kw):
        # Always run setup.py from inside the unpacked tree with the same
        # python executing this test; stderr is intentionally not checked.
        kw['chdir'] = scons_version
        kw['program'] = python
        kw['stderr'] = None
        return TestSCons.TestSCons.run(self, *args, **kw)

    def remove(self, dir):
        # Best-effort removal; missing trees are fine.
        try:
            shutil.rmtree(dir)
        except (OSError, WindowsError):
            pass

    def stdout_lines(self):
        return self.stdout().split('\n')

    def lib_line(self, lib):
        return 'Installed SCons library modules into %s' % lib

    def lib_paths(self, lib_dir):
        return [os.path.join(lib_dir, 'SCons', module) for module in self._lib_modules]

    def scripts_line(self):
        return 'Installed SCons scripts into %s' % self.bin_dir

    def base_script_paths(self):
        return [self.prepend_bin_dir(script) for script in self._base_scripts]

    def version_script_paths(self):
        return [self.prepend_bin_dir(script) for script in self._version_scripts]

    def bat_script_paths(self):
        return [self.prepend_bat_dir(script)
                for script in self._bat_scripts + self._bat_version_scripts]

    def man_page_line(self):
        return 'Installed SCons man pages into %s' % self.man_dir

    def man_page_paths(self):
        return [self.prepend_man_dir(page) for page in self._man_pages]

    def must_have_installed(self, paths):
        for path in paths:
            self.must_exist(path)

    def must_not_have_installed(self, paths):
        for path in paths:
            self.must_not_exist(path)
# The build harness may point us at the directory holding build/dist via
# SCONS_CWD; otherwise assume we already run from there.
try:
    cwd = os.environ['SCONS_CWD']
except KeyError:
    cwd = os.getcwd()
test = MyTestSCons()
test.subdir(test.root)
# Candidate distribution archives to unpack and install from.
tar_gz = os.path.join(cwd, 'build', 'dist', '%s.tar.gz' % scons_version)
# NOTE(review): 'zip' shadows the builtin of the same name for the rest of
# this script; renaming would touch later lines, so it is only flagged here.
zip = os.path.join(cwd, 'build', 'dist', '%s.zip' % scons_version)
# Unpack the .zip distribution (if present) into the current directory,
# recreating the archive's directory structure by hand.
if os.path.isfile(zip):
    try:
        import zipfile
    except ImportError:
        pass
    else:
        with zipfile.ZipFile(zip, 'r') as zf:
            for name in zf.namelist():
                dname = os.path.dirname(name)
                # BUG FIX: archive-root entries have dname == '', for which
                # os.makedirs() raises FileNotFoundError instead of being a
                # no-op; only create directories when there is one to create.
                if dname:
                    try:
                        os.makedirs(dname)
                    except FileExistsError:
                        pass
                # if the file exists, then delete it before writing
                # to it so that we don't end up trying to write to a symlink:
                if os.path.isfile(name) or os.path.islink(name):
                    os.unlink(name)
                if not os.path.isdir(name):
                    # BUG FIX: ZipFile.read() returns bytes, so the target
                    # file must be opened in binary mode ('wb'); text mode
                    # raised TypeError on Python 3.
                    with open(name, 'wb') as ofp:
                        ofp.write(zf.read(name))
if not os.path.isdir(scons_version) and os.path.isfile(tar_gz):
    # Unpack the .tar.gz file. This should create the scons_version/
    # subdirectory from which we execute the setup.py script therein.
    os.system("gunzip -c %s | tar xf -" % tar_gz)
if not os.path.isdir(scons_version):
    # Neither archive produced an unpacked tree: report "no result" rather
    # than a failure, since there is nothing to install-test.
    print("Cannot test package installation, found none of the following packages:")
    print("\t" + tar_gz)
    print("\t" + zip)
    test.no_result(1)
# NOTE: the steps below are strictly order-dependent — each install/remove
# mutates the scratch root that the following assertions inspect.
# Verify that a virgin installation installs the version library,
# the scripts and (on UNIX/Linux systems) the man pages.
test.run(arguments = 'setup.py install --root=%s' % test.root)
test.fail_test(not test.lib_line(test.version_lib) in test.stdout_lines())
test.must_have_installed(test.lib_paths(test.version_lib))
# Verify that --standard-lib installs into the Python standard library.
test.run(arguments = 'setup.py install --root=%s --standard-lib' % test.root)
test.fail_test(not test.lib_line(test.standard_lib) in test.stdout_lines())
test.must_have_installed(test.lib_paths(test.standard_lib))
# Verify that --standalone-lib installs the standalone library.
test.run(arguments = 'setup.py install --root=%s --standalone-lib' % test.root)
test.fail_test(not test.lib_line(test.standalone_lib) in test.stdout_lines())
test.must_have_installed(test.lib_paths(test.standalone_lib))
# Verify that --version-lib installs into a version-specific library directory.
test.run(arguments = 'setup.py install --root=%s --version-lib' % test.root)
test.fail_test(not test.lib_line(test.version_lib) in test.stdout_lines())
# Now that all of the libraries are in place,
# verify that a default installation still installs the version library.
test.run(arguments = 'setup.py install --root=%s' % test.root)
test.fail_test(not test.lib_line(test.version_lib) in test.stdout_lines())
test.remove(test.version_lib)
# Now with only the standard and standalone libraries in place,
# verify that a default installation still installs the version library.
test.run(arguments = 'setup.py install --root=%s' % test.root)
test.fail_test(not test.lib_line(test.version_lib) in test.stdout_lines())
test.remove(test.version_lib)
test.remove(test.standalone_lib)
# Now with only the standard libraries in place,
# verify that a default installation still installs the version library.
test.run(arguments = 'setup.py install --root=%s' % test.root)
test.fail_test(not test.lib_line(test.version_lib) in test.stdout_lines())
# Script installation: default behavior installs base and versioned
# scripts everywhere, plus the .bat wrappers on Windows only.
test.run(arguments = 'setup.py install --root=%s' % test.root)
test.fail_test(not test.scripts_line() in test.stdout_lines())
if sys.platform == 'win32':
    test.must_have_installed(test.base_script_paths())
    test.must_have_installed(test.version_script_paths())
    test.must_have_installed(test.bat_script_paths())
else:
    test.must_have_installed(test.base_script_paths())
    test.must_have_installed(test.version_script_paths())
    test.must_not_have_installed(test.bat_script_paths())
test.remove(test.prefix)
test.run(arguments = 'setup.py install --root=%s --no-install-bat' % test.root)
test.fail_test(not test.scripts_line() in test.stdout_lines())
test.must_have_installed(test.base_script_paths())
test.must_have_installed(test.version_script_paths())
test.must_not_have_installed(test.bat_script_paths())
test.remove(test.prefix)
test.run(arguments = 'setup.py install --root=%s --install-bat' % test.root)
test.fail_test(not test.scripts_line() in test.stdout_lines())
test.must_have_installed(test.base_script_paths())
test.must_have_installed(test.version_script_paths())
test.must_have_installed(test.bat_script_paths())
test.remove(test.prefix)
test.run(arguments = 'setup.py install --root=%s --no-scons-script' % test.root)
test.fail_test(not test.scripts_line() in test.stdout_lines())
test.must_not_have_installed(test.base_script_paths())
test.must_have_installed(test.version_script_paths())
# Doesn't matter whether we installed the .bat scripts or not.
test.remove(test.prefix)
test.run(arguments = 'setup.py install --root=%s --no-version-script' % test.root)
test.fail_test(not test.scripts_line() in test.stdout_lines())
test.must_have_installed(test.base_script_paths())
test.must_not_have_installed(test.version_script_paths())
# Doesn't matter whether we installed the .bat scripts or not.
# Man pages: installed by default on UNIX, never on Windows, and
# controllable via --install-man / --no-install-man.
test.remove(test.man_dir)
test.run(arguments = 'setup.py install --root=%s' % test.root)
if sys.platform == 'win32':
    test.fail_test(test.man_page_line() in test.stdout_lines())
    test.must_not_have_installed(test.man_page_paths())
else:
    test.fail_test(not test.man_page_line() in test.stdout_lines())
    test.must_have_installed(test.man_page_paths())
test.remove(test.man_dir)
test.run(arguments = 'setup.py install --root=%s --no-install-man' % test.root)
test.fail_test(test.man_page_line() in test.stdout_lines())
test.must_not_have_installed(test.man_page_paths())
test.remove(test.man_dir)
test.run(arguments = 'setup.py install --root=%s --install-man' % test.root)
test.fail_test(not test.man_page_line() in test.stdout_lines())
test.must_have_installed(test.man_page_paths())
# Verify that we don't warn about the directory in which we've
# installed the modules when using a non-standard prefix.
other_prefix = test.workpath('other-prefix')
test.subdir(other_prefix)
test.run(arguments = 'setup.py install --prefix=%s' % other_prefix)
test.fail_test("you'll have to change the search path yourself" in test.stderr())
# All done.
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
import unittest
import sys
import yaml
from beluga import Client
# Load the expected-state fixtures; abort the whole run with a non-zero
# exit code if the file is missing or malformed, since every test below
# depends on it.
try:
    with open('test/expect_state.yaml') as file:
        expect_state = yaml.safe_load(file)
except Exception as e:
    print('Exception occurred while loading YAML...', file=sys.stderr)
    print(e, file=sys.stderr)
    sys.exit(1)
class TestBeruga(unittest.TestCase):
    """Integration tests for the beluga Client against expect_state fixtures."""

    def setUp(self):
        # BUG FIX: the original overrode __init__(self), which breaks
        # unittest's TestCase(methodName) construction protocol and never
        # called the base initializer; per-test setup belongs in setUp().
        self.client = Client(
            expect_state['access'][0]['username'],
            expect_state['access'][0]['password'],
            scopes=['access'][0]  # NOTE(review): this literal just evaluates to 'access' — confirm it shouldn't come from expect_state
        )

    def _check_in(self):
        # Shared check-in request; returns the API's check-in result.
        params = {
            'storeId': '1',
            'tables': [{
                'id': '1'
            }],
            'number': 1,
        }
        return self.client.check_in(params)

    def test_list_menu(self):
        # The live menu listing must match the fixture snapshot.
        self.assertEqual(expect_state['menus'], self.client.list_menu())

    def test_check_in(self):
        result = self._check_in()
        self.assertEqual(expect_state['menus'], result)

    def test_order(self):
        # BUG FIX: the original read self.store set by test_check_in, but
        # unittest runs every test method on a fresh instance, so that
        # attribute never exists; perform the check-in here instead.
        store = self._check_in()['id']
        params = {
            'items':
                [{
                    'menuId': '1',
                    'quantity': 5
                }, {
                    'menuId': '2',
                    'quantity': 5
                }]
        }
        self.assertEqual(expect_state['menus'], self.client.order(store, params))
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
#! /usr/bin/env python
"""Emit the restore flag "-a pgq" if the given SQL dump mentions a PgQ consumer."""
import sys

# BUG FIX: converted the Python 2 print statement to the print() function
# (the original was a SyntaxError on Python 3) and close the input file
# deterministically with a context manager instead of leaking the handle.
with open(sys.argv[1], "r") as fp:
    buf = fp.read().lower()
if "pgq consumer" in buf:
    print("-a pgq")
|
import sys
import glob
import argparse
import os
import shutil
import datetime
def main(argv):
    """Instantiate a new project from a template directory.

    Copies every file under the template into <dest>/<name>, expanding
    '__name__'/'__shortname__' placeholders in paths and '@NAME@',
    '@SHORTNAME_UPPER@' and '@SHORTNAME@' inside '*.in' template files,
    then registers the new project in the destination's CMakeLists.txt
    when one exists.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', required=True, type=str, help='Name of new project')
    parser.add_argument('--template', type=str, help='Source template directory')
    parser.add_argument('--dest', type=str, help='Destination directory')
    parser.add_argument('--shortname', type=str, help='Short name, default auto-detect')
    args = parser.parse_args(argv[1:])
    name = args.name
    year = datetime.datetime.now().year  # available to future template edits; currently unused
    # Template defaults to the 'files' directory next to this script.
    source_dir = args.template if args.template is not None else os.path.join(os.path.dirname(argv[0]), 'files')
    # Destination defaults to ../../../source relative to the template.
    dest_dir = args.dest if args.dest is not None else os.path.relpath(os.path.join(source_dir, '..', '..', '..', 'source'))
    target_dir = os.path.realpath(os.path.join(dest_dir, name))
    # Auto-detected short name strips a leading 'lib' prefix.
    shortname = args.shortname if args.shortname else (name[3:] if name.startswith('lib') else name)
    print('Creating `{1}` from `{2}`'.format(name, target_dir, source_dir))

    def expand(path):
        # Substitute the placeholder tokens used in template paths.
        return path.replace('__name__', name).replace('__shortname__', shortname)

    for src_path in glob.iglob(pathname=os.path.join(source_dir, '**', '*'), recursive=True):
        if os.path.isdir(src_path):
            continue
        rel_path = os.path.relpath(src_path, source_dir)
        out_dir = expand(os.path.join(target_dir, os.path.dirname(rel_path)))
        stem, ext = os.path.splitext(rel_path)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        if ext == '.in':
            # '.in' files are text templates: strip the suffix and expand tokens.
            out_path = expand(os.path.join(target_dir, stem))
            with open(src_path, 'rt') as template_file:
                text = template_file.read()
            # @SHORTNAME_UPPER@ must be replaced before @SHORTNAME@, which
            # is a prefix of it.
            text = text.replace('@NAME@', name).replace('@SHORTNAME_UPPER@', shortname.upper()).replace('@SHORTNAME@', shortname)
            print('write template', out_path)
            with open(out_path, 'wt') as output_file:
                output_file.write(text)
        else:
            # Everything else is copied verbatim.
            out_path = expand(os.path.join(target_dir, rel_path))
            print('copy to', out_path)
            shutil.copyfile(src=src_path, dst=out_path)
    # Register the new project with the destination build, if it has one.
    cmakelist_path = os.path.join(dest_dir, 'CMakeLists.txt')
    if os.path.isfile(cmakelist_path):
        with open(cmakelist_path, 'at') as cmake_file:
            cmake_file.write('add_subdirectory({0})\n'.format(name))
# Script entry point.
if __name__ == '__main__':
    main(sys.argv)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.