# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/modules/face_geometry/protos/mesh_3d.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='mediapipe/modules/face_geometry/protos/mesh_3d.proto',
  package='mediapipe.face_geometry',
  syntax='proto2',
  serialized_pb=_b('\n4mediapipe/modules/face_geometry/protos/mesh_3d.proto\x12\x17mediapipe.face_geometry\"\xf9\x01\n\x06Mesh3d\x12?\n\x0bvertex_type\x18\x01 \x01(\x0e\x32*.mediapipe.face_geometry.Mesh3d.VertexType\x12\x45\n\x0eprimitive_type\x18\x02 \x01(\x0e\x32-.mediapipe.face_geometry.Mesh3d.PrimitiveType\x12\x15\n\rvertex_buffer\x18\x03 \x03(\x02\x12\x14\n\x0cindex_buffer\x18\x04 \x03(\r\"\x1b\n\nVertexType\x12\r\n\tVERTEX_PT\x10\x00\"\x1d\n\rPrimitiveType\x12\x0c\n\x08TRIANGLE\x10\x00\x42\x38\n)com.google.mediapipe.modules.facegeometryB\x0bMesh3dProto')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


_MESH3D_VERTEXTYPE = _descriptor.EnumDescriptor(
  name='VertexType',
  full_name='mediapipe.face_geometry.Mesh3d.VertexType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='VERTEX_PT', index=0, number=0,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=273,
  serialized_end=300,
)
_sym_db.RegisterEnumDescriptor(_MESH3D_VERTEXTYPE)

_MESH3D_PRIMITIVETYPE = _descriptor.EnumDescriptor(
  name='PrimitiveType',
  full_name='mediapipe.face_geometry.Mesh3d.PrimitiveType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TRIANGLE', index=0, number=0,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=302,
  serialized_end=331,
)
_sym_db.RegisterEnumDescriptor(_MESH3D_PRIMITIVETYPE)


_MESH3D = _descriptor.Descriptor(
  name='Mesh3d',
  full_name='mediapipe.face_geometry.Mesh3d',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='vertex_type', full_name='mediapipe.face_geometry.Mesh3d.vertex_type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='primitive_type', full_name='mediapipe.face_geometry.Mesh3d.primitive_type', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='vertex_buffer', full_name='mediapipe.face_geometry.Mesh3d.vertex_buffer', index=2,
      number=3, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='index_buffer', full_name='mediapipe.face_geometry.Mesh3d.index_buffer', index=3,
      number=4, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _MESH3D_VERTEXTYPE,
    _MESH3D_PRIMITIVETYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=82,
  serialized_end=331,
)

_MESH3D.fields_by_name['vertex_type'].enum_type = _MESH3D_VERTEXTYPE
_MESH3D.fields_by_name['primitive_type'].enum_type = _MESH3D_PRIMITIVETYPE
_MESH3D_VERTEXTYPE.containing_type = _MESH3D
_MESH3D_PRIMITIVETYPE.containing_type = _MESH3D
DESCRIPTOR.message_types_by_name['Mesh3d'] = _MESH3D

Mesh3d = _reflection.GeneratedProtocolMessageType('Mesh3d', (_message.Message,), dict(
  DESCRIPTOR = _MESH3D,
  __module__ = 'mediapipe.modules.face_geometry.protos.mesh_3d_pb2'
  # @@protoc_insertion_point(class_scope:mediapipe.face_geometry.Mesh3d)
  ))
_sym_db.RegisterMessage(Mesh3d)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n)com.google.mediapipe.modules.facegeometryB\013Mesh3dProto'))
# @@protoc_insertion_point(module_scope)
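# Usage sketch (my addition, not part of the generated module): the descriptor
# above defines a single proto2 message, Mesh3d, with two enum fields and two
# repeated buffers.
#
#   from mediapipe.modules.face_geometry.protos import mesh_3d_pb2
#
#   mesh = mesh_3d_pb2.Mesh3d()
#   mesh.vertex_type = mesh_3d_pb2.Mesh3d.VERTEX_PT
#   mesh.primitive_type = mesh_3d_pb2.Mesh3d.TRIANGLE
#   mesh.vertex_buffer.extend([0.0, 0.0, 0.0])  # repeated float (x, y, z, ...)
#   mesh.index_buffer.extend([0, 1, 2])         # repeated uint32
#   data = mesh.SerializeToString()
#   assert mesh_3d_pb2.Mesh3d.FromString(data) == mesh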
# '669': {'mean': 0.028571428571428574,
#         'raw': [0.0, 0.04, 0.0, 0.02, 0.04, 0.1, 0.0],
#         'stdev': 0.033563828927059232},
# 'sum': 764.7600000000009},
# 'total frames': 299},

from MCFlow.calc_tools import get_vector
from plotting.util import set_x_ticks, set_y_ticks, label_subplot_axes, minimum_ticks
import matplotlib.pyplot as plt
import numpy as np

uc_vectors = [20.022, 19.899, 13.383]


def rho_v_r(data, feed):
    key = [i for i in data.keys() if 'prod-' in i][0]
    figure = plt.figure()
    values = data[key]['mol8']
    LMN = list(map(int, str(max(int(k) for k in values.keys() if k != 'sum'))))
    supercell = [i + i*j for (i, j) in zip(uc_vectors, LMN)]
    H = np.zeros((7, 7, 10))
    S = np.zeros((7, 7, 10))
    for lmn, vals in values.items():
        if lmn == 'sum':
            continue
        l, m, n = map(int, list(lmn))
        # shift each unit-cell index by 2 and wrap it back into the 7x7x10 grid
        my_center = [l+2, m+2, n+2]
        if my_center[0] > 6:
            my_center[0] = my_center[0] - 7
        if my_center[1] > 6:
            my_center[1] = my_center[1] - 7
        if my_center[2] > 9:
            my_center[2] = my_center[2] - 10
        i, j, k = my_center
        H[i, j, k] = vals['mean']
        S[i, j, k] = vals['stdev']
    print('max S:', np.max(S))
    print('min S:', np.min(S))
    print('max H:', np.max(H))
    print('min H:', np.min(H))
    nx, ny, nz = np.shape(H)
    Z, Y = np.meshgrid(list(range(nz+1)), list(range(ny+1)))
    # xy histogram
    nrows, ncolumns = nx, 2
    hmin, hmax = 0., np.max(H)
    smin, smax = 0., np.max(S)
    xlabel, ylabel = r'$\mathbf{c}$', r'$\mathbf{b}$'
    for x in range(nx):
        ax_mean = figure.add_subplot(nrows, ncolumns, x*ncolumns+1)
        ax_stdev = figure.add_subplot(nrows, ncolumns, x*ncolumns+2)
        h = H[x, :, :]
        s = S[x, :, :]
        image_H = ax_mean.pcolormesh(Z, Y, h, vmin=hmin, vmax=hmax, cmap='plasma')
        image_S = ax_stdev.pcolormesh(Z, Y, s, vmin=smin, vmax=smax, cmap='plasma')
        for ax in [ax_mean, ax_stdev]:
            ax.tick_params(axis='both', which='both', direction='out')
            set_x_ticks(ax, [0, 2, 4, 6, 8, 10])
            set_y_ticks(ax, [0, 1, 2, 3, 4, 5, 6, 7])
            ax.tick_params(axis='y', which='minor', left='off', right='off')
        label_subplot_axes(ax_mean, x*ncolumns+1, ncolumns, nrows, xlabel, ylabel, tight=True)
        label_subplot_axes(ax_stdev, x*ncolumns+2, ncolumns, nrows, xlabel, ylabel, tight=True)
        ax_stdev.tick_params(axis='both', which='both', left='off', right='off')
        minimum_ticks(ax_stdev, x*ncolumns+2, ncolumns, nrows)
        minimum_ticks(ax_mean, x*ncolumns+1, ncolumns, nrows)
    my_bottom = 0.055
    my_top = 0.99
    plt.subplots_adjust(left=0.11, right=0.7, hspace=0., wspace=0.,
                        bottom=my_bottom, top=my_top)
    figure.set_size_inches(3.25, 7.0)
    if '450' in feed:
        ticks = [0, 0.4, 0.8, 1.2, 1.6, 2.0]
        s_ticks = np.arange(0.0, 2.0, 0.1)
        cbar_ax = figure.add_axes([0.82, my_bottom, 0.06, my_top-my_bottom])
    else:
        cbar_ax = figure.add_axes([0.8, my_bottom, 0.08, my_top-my_bottom])
        ticks = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        s_ticks = np.arange(0.0, 2.0, 0.2)
    cbar = figure.colorbar(image_H, cax=cbar_ax, orientation='vertical')
    cax2 = cbar_ax.twinx()
    cbar.set_ticks(ticks)
    cbar.set_label(r'$Q\mathrm{\;[\;molec\;/\;uc\;]}$')
    cbar.ax.yaxis.set_label_position("left")
    # cbar_ax.tick_params(which='both', direction='out')
    # cbar_ax.tick_params(which='minor', left='off')
    # cax2.set_yticks(s_ticks)
    set_y_ticks(cax2, s_ticks)
    # cax2.set_yticks([0.1+i for i in s_ticks], minor=True)
    cax2.set_ylim([smin, smax])
    cax2.set_ylabel(r'$\sigma_Q\mathrm{\;[\;molec\;/\;uc\;]}$')
    figure.savefig('FIG-test.pdf')


if __name__ == '__main__':
    import argparse, shelve
    parser = argparse.ArgumentParser()
    parser.add_argument('-feed', '--feed', help='feed', type=str, default='7x7x10')
    parser.add_argument('-f', '--file', help='file', type=str, default='uc-dens-data.db')
    args = vars(parser.parse_args())
    with shelve.open(args['file']) as db:
        data = db[args['feed']]
    rho_v_r(data, args['feed'])
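# Sketch (my addition, not part of the original script): the three if-blocks
# in rho_v_r that fold my_center back into the 7x7x10 grid are equivalent to
# a periodic wrap with the modulo operator, which may read more clearly.

def wrap_center(l, m, n, shape=(7, 7, 10)):
    """Shift each unit-cell index by 2 and wrap it into the supercell grid."""
    return [(v + 2) % s for v, s in zip((l, m, n), shape)]

# matches the if-based version: [7, 8, 11] wraps to [0, 1, 1]
assert wrap_center(5, 6, 9) == [0, 1, 1]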
import math
import random

inside = 1
outside = 1
count = 0


def inCircle(x, y):
    # returns True if the coordinates are contained within the circle, else False
    return math.sqrt((x**2) + (y**2)) <= 1


while True:
    count = count + 1
    # random.uniform generates a 'random' number between the given values;
    # for example, random.uniform(-1, 1) might generate 0.21946219
    x = random.uniform(-1, 1)
    y = random.uniform(-1, 1)
    # if it's in the circle, add one to the count of points inside the circle
    if inCircle(x, y):
        inside = inside + 1
    # and add one if the coordinate is not in the circle
    else:
        outside = outside + 1
    # print the ratio of coordinates inside the circle to points outside it;
    # print only every 100th sample to reduce clutter
    # (the original `if count % 100:` printed on all counts NOT divisible by 100)
    if count % 100 == 0:
        print(inside / outside)
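# Note (my addition): the printed inside/outside ratio converges to
# (pi/4) / (1 - pi/4) ~= 3.66, not to pi itself. The conventional Monte Carlo
# estimator scales the inside fraction by the bounding square's area factor:

import math

def pi_estimate(inside, total):
    # P(point inside unit circle | uniform on [-1, 1]^2) = pi/4
    return 4 * inside / total

assert abs(pi_estimate(785398, 1000000) - math.pi) < 1e-2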
from unittest import TestCase

from mock import MagicMock

import probe.helpers.celerytasks as module


class TestCelerytasks(TestCase):
    def test_async_call_raise(self):
        app = MagicMock()
        app.send_task.side_effect = TimeoutError("whatever")
        path, name = "p_test", "n_test"
        expected = "Celery error - %s" % name
        with self.assertRaises(module.IrmaTaskError) as context:
            module.async_call(app, path, name)
        self.assertEqual(str(context.exception), expected)

    def test_async_call_ok(self):
        app = MagicMock()
        path, name, karg = "p_test", "n_test", "k_test"
        expected = ("s_test", "r_test")
        args = (("%s.%s" % (path, name),), {"karg": karg})
        app.send_task.return_value = expected
        result = module.async_call(app, path, name, karg=karg)
        self.assertEqual(result, expected)
        self.assertEqual(app.send_task.call_args, args)
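# Sketch (my assumption, inferred from the tests above rather than the real
# probe.helpers.celerytasks module): async_call presumably joins path and name
# into the Celery task name, forwards kwargs to send_task, and converts
# timeouts into the IrmaTaskError message the tests expect.

class IrmaTaskError(Exception):
    pass


def async_call(app, path, name, **kwargs):
    try:
        # e.g. send_task("p_test.n_test", karg="k_test")
        return app.send_task("%s.%s" % (path, name), **kwargs)
    except TimeoutError:
        raise IrmaTaskError("Celery error - %s" % name)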
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 12:31:44 2017

@author: Wayne
"""
from importlib import reload
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
import AuxFun
reload(AuxFun)
import logging
import time
import pickle

#%% Kick off outliers
mydf1 = mydf[outliers.outliers == False]
z = np.log(data[outliers.outliers == False].trip_duration + 1)
X = mydf1
data_test = xgb.DMatrix(testdf)

#%% Without using outliers
z = np.log(data.trip_duration + 1)
X = mydf
data_test = xgb.DMatrix(testdf)

#%%
# note: the original dict had trailing spaces in the 'lambda ' and
# 'colsample_bytree ' keys, which xgboost would silently ignore
test_parms = {'max_depth': [14, 14, 14, 14],  # maximum depth of a tree
              'eta': [0.025],
              'subsample': [0.8],  # SGD will use this percentage of data
              'lambda': [3],  # L2 regularization term, >1 more conservative
              'colsample_bytree': [.8],
              'colsample_bylevel': [1],
              'min_child_weight': [10],
              'objective': ['reg:linear'],
              'nthread': [-1]}

#%%
if np.all(X.keys() == testdf.keys()):
    print('Good! The keys of the training features are identical to those of the test features.')
    print('They both have %d features, as follows:' % len(X.keys()))
    print(list(X.keys()))
else:
    print('Oops, something is wrong: keys in training and testing do not match')

#%%
XX = X.iloc[:500]
zz = z.iloc[:500]

#%%
List_parm = AuxFun.genGrid(test_parms)
N = len(List_parm)
logging.basicConfig(filename='xgb_cv3.log', level=logging.DEBUG,
                    format='%(asctime)s:%(message)s\n')

#%%
ztest_mat = np.zeros((len(testdf), N))

#%%
logging.info('=' * 40)
# parms_df = pd.DataFrame(columns=test_parms.keys())
count = 1
for i, parms in enumerate(List_parm):
    print('%d of %d searching:' % (count, N))
    print('-' * 20 + '\n')
    print(parms)
    logging.info('=====%d of %d searching:=====' % (count, N))
    count += 1
    tic = time.time()
    Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2,
                                                  random_state=i + 15)
    data_tr = xgb.DMatrix(Xtrain, label=Ztrain)
    data_val = xgb.DMatrix(Xval, label=Zval)
    evallist = [(data_tr, 'train'), (data_val, 'valid')]
    model = xgb.train(parms, data_tr, num_boost_round=2000, evals=evallist,
                      early_stopping_rounds=30, maximize=False,
                      verbose_eval=100)
    err = model.best_score
    ztest_mat[:, i] = model.predict(data_test)
    print('score = %1.5f, n_boost_round =%d, took %d second'
          % (err, model.best_iteration, round(time.time() - tic)))
    logging.info('score = %1.5f, n_boost_round =%d.'
                 % (model.best_score, model.best_iteration))
    logging.info(parms)
logging.shutdown()

#%%
with open('ztest_mat_kmeans10.pickle', "wb") as output_file:
    pickle.dump(ztest_mat, output_file)

#%%
#%% 1. avg before exp 0.379
ztest = np.mean(ztest_mat, axis=1)
ytest = np.exp(ztest) - 1
submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})
submission.to_csv('submission13.csv', index=False)

#%% 2. avg after exp
ytest_mat = np.exp(ztest_mat) - 1
ytest = np.mean(ytest_mat, axis=1)
submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})
submission.to_csv('submission11.csv', index=False)

#%%
with open('ztest_mat2.pickle', "rb") as input_file:
    ttt = pickle.load(input_file)

#%%
xgb.plot_importance(model)

#%%
ztest_mat = np.hstack((ztest_mat, ttt))
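# Note (my addition): averaging in log space then exponentiating (option 1
# above) differs from exponentiating then averaging (option 2); by Jensen's
# inequality exp(mean(z)) <= mean(exp(z)), so option 1 gives the smaller,
# geometric-mean-like ensemble prediction.

import numpy as np

z = np.array([[1.0, 3.0]])           # two models' log-duration predictions
opt1 = np.exp(z.mean(axis=1)) - 1    # exp(2) - 1  ~= 6.39
opt2 = (np.exp(z) - 1).mean(axis=1)  # ((e - 1) + (e^3 - 1)) / 2 ~= 10.40
assert opt1 < opt2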
import cv2

url = 'http://192.XXX.XX.X:8080/video'
cap = cv2.VideoCapture(url)
face = cv2.CascadeClassifier('cascade.xml')

while True:
    ret, frame = cap.read()
    if frame is not None:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=50,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            # bottom-right corner is (x+w, y+h); the original used (x+w, x+h)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            cv2.putText(frame, 'Face', (x, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
            print(x)
        cv2.imshow('frame', frame)
    q = cv2.waitKey(1)
    if q == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import annotations

from pants.backend.explorer.browser import Browser, BrowserRequest
from pants.backend.explorer.graphql.setup import graphql_uvicorn_setup
from pants.backend.explorer.graphql.subsystem import GraphQLSubsystem
from pants.backend.explorer.server.uvicorn import UvicornServerSetup, UvicornServerSetupRequest
from pants.backend.project_info.peek import TargetDatas
from pants.base.specs import Specs
from pants.engine.rules import Get, QueryRule, collect_rules, rule
from pants.engine.target import AllUnexpandedTargets, UnexpandedTargets
from pants.engine.unions import UnionRule


class GraphQLUvicornServerSetupRequest(UvicornServerSetupRequest):
    pass


@rule
async def get_graphql_uvicorn_setup(
    request: GraphQLUvicornServerSetupRequest, graphql: GraphQLSubsystem
) -> UvicornServerSetup:
    browser = await Get(Browser, BrowserRequest, request.browser_request())
    return UvicornServerSetup(graphql_uvicorn_setup(browser, graphql=graphql))


def rules():
    return (
        *collect_rules(),
        UnionRule(UvicornServerSetupRequest, GraphQLUvicornServerSetupRequest),
        # Root query data rules for graphql.
        QueryRule(AllUnexpandedTargets, ()),
        QueryRule(TargetDatas, (UnexpandedTargets,)),
        QueryRule(UnexpandedTargets, (Specs,)),
    )
import tests.perf.test_cycles_full_long_long as gen

gen.test_nbrows_cycle(31000, 380)
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    # the seven included launch files differ only by name, so build them in a loop
    launch_dir = os.path.join(get_package_share_directory('line'), 'launch')
    launch_files = [
        '/controller1.launch.py',
        '/controller2.launch.py',
        '/controller3.launch.py',
        '/controller4.launch.py',
        '/controller5.launch.py',
        '/gazebo1.launch.py',
        '/gazebo2.launch.py',
    ]
    return LaunchDescription(
        [
            IncludeLaunchDescription(
                PythonLaunchDescriptionSource([launch_dir, launch_file]),
            )
            for launch_file in launch_files
        ]
    )
import pytest

from chalicelib.checks.wrangler_checks import (
    get_tokens_to_string,
    string_label_similarity
)


@pytest.fixture
def in_out_cmp_score():
    return [
        ("one", "one", "one", 1),
        ("one two", "onetwo", "onetwo", 1),
        ("One Two", "onetwo", "one two", 1),
        ("Two one Four", "twoonefour", "Too One Four", 0.9),
        ("One-One-Ones", "oneoneones", "FixFixFix", 0),
        ("22-33 44", "223344", "TwoTwoThreeThreeFourFour", 0),
        ("A one&a two and a-3", "aone&atwoanda3", "atwo&aone anda3", 0.64),
    ]


def test_get_tokens_to_string(in_out_cmp_score):
    for tup in in_out_cmp_score:
        assert get_tokens_to_string(tup[0]) == tup[1]


def test_string_label_similarity(in_out_cmp_score):
    for tup in in_out_cmp_score:
        assert round(string_label_similarity(tup[0], tup[2]), 2) == tup[3]
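# Sketch (my assumption, inferred from the fixture above rather than the
# actual chalicelib.checks.wrangler_checks source): the expected values are
# consistent with lowercasing and stripping spaces/hyphens, then comparing
# the token strings with difflib.SequenceMatcher.

import difflib


def get_tokens_to_string(s):
    # e.g. "Two one Four" -> "twoonefour", "22-33 44" -> "223344"
    return ''.join(ch for ch in s.lower() if ch not in ' -')


def string_label_similarity(s1, s2):
    # ratio() is 2*M/T over the token strings; "twoonefour" vs "tooonefour"
    # gives 18/20 = 0.9, matching the fixture
    return difflib.SequenceMatcher(
        None, get_tokens_to_string(s1), get_tokens_to_string(s2)).ratio()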
from setuptools import find_packages, setup

setup(
    name='thetaexif',
    version='0.2',
    author='Regen',
    author_email='git@exadge.com',
    description='THETA EXIF Library',
    long_description=(open('README.rst').read() + '\n\n' +
                      open('CHANGES.rst').read()),
    url='https://github.com/regen100/thetaexif',
    platforms='any',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Environment :: Console',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Utilities',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    packages=find_packages(exclude=['*.tests']),
    test_suite='thetaexif.tests',
    install_requires=['numpy', 'scipy', 'pillow'],
    entry_points={
        'console_scripts': ['theta-tool = thetaexif.cli:parse'],
    },
)
import warnings

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform
import pandas as pd

warnings.filterwarnings("ignore")


class Matcher:
    """
    Creates matched dataset based on propensity score.

    Class to create a matched dataset balanced for the control group to be
    the same size as the treatment group based on the variable of interest.
    ------
    Inputs:
        - data = pandas dataframe or csv file (depending on is_csv param)
          with fully cleaned data (see demo or README)
        - treatment_column = string of column corresponding to treatment
          group; values should be binary with 1 representing the minority
          treatment group
    """

    def __init__(self, data, treatment_column):
        self.data = data
        self.treatment_column = treatment_column
        predictors = list(data.columns)
        predictors.remove(self.treatment_column)
        self.predictors = predictors

    def compute_matched_data(self):
        """
        Creates and returns the matched dataset based on data & treatment column.

        This function runs all the logic to create the matched dataset, from
        creating an optimal Logistic Regression model to matching each
        treatment sample to a control sample.
        ----
        Inputs: None
        Outputs:
            - matched_data = pandas dataframe of treatment group and matched
              control group
        """
        print("Generating Logistic Regression Model...")
        self.__create_logistic_regression()
        print("Model Generated")
        self.__set_scores()
        print("Matching Propensity Scores...")
        matched_data = self.__match()
        print("Matching Complete")
        return matched_data

    def __create_logistic_regression(self):
        model = LogisticRegression()
        # define search space
        params = {}
        params['solver'] = ['liblinear', 'newton-cg', 'lbfgs', 'saga']
        params['class_weight'] = ['balanced']
        params['penalty'] = ['l1', 'l2', 'elasticnet', 'none']
        params['C'] = loguniform(1e-5, 100)
        search = RandomizedSearchCV(model, params, scoring="roc_auc",
                                    n_iter=100, cv=10, random_state=1)
        X = self.data[self.predictors]
        y = self.data[self.treatment_column]
        result = search.fit(X, y)
        final_model = LogisticRegression(**result.best_params_)
        final_model.fit(X, y)
        self._final_model = final_model

    def __set_scores(self):
        self.data['SCORE'] = [
            score[1] for score in
            self._final_model.predict_proba(self.data[self.predictors])
        ]

    def __match(self):
        treatment_scores = self.data[
            self.data[self.treatment_column] == 1][['SCORE']]
        control_scores = self.data[
            self.data[self.treatment_column] == 0][['SCORE']]
        match_indices = []
        # greedy nearest-neighbour matching without replacement
        for i in range(len(treatment_scores)):
            score = treatment_scores.iloc[i]
            temp_control = control_scores[
                ~control_scores.index.isin(match_indices)]
            match = abs(temp_control - score).sort_values(by='SCORE').index[0]
            match_indices.append(match)
        treatment_group = self.data[self.data[self.treatment_column] == 1]
        matched_control_group = self.data[self.data.index.isin(match_indices)]
        matched_data = pd.concat([treatment_group, matched_control_group],
                                 axis=0)
        return matched_data
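# Usage sketch (my addition; the dataframe and column names are illustrative,
# not from the original module):
#
#   import pandas as pd
#   df = pd.read_csv('cleaned.csv')   # numeric covariates + binary 'treated'
#   matcher = Matcher(df, treatment_column='treated')
#   matched = matcher.compute_matched_data()
#   # greedy 1:1 matching yields equally sized treatment and control groups
#   assert (matched['treated'] == 1).sum() == (matched['treated'] == 0).sum()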
# python3
class Solution:
    def knightProbability(self, N, K, r, c):
        # p maps a square to the probability that the knight sits there and
        # has stayed on the board so far; start with all mass on (r, c)
        p = {(r, c): 1}
        for _ in range(K):
            # (i, j) over (+-1, +-2) plus the swapped (j, i) over (+-2, +-1)
            # enumerate all 8 knight moves into each square; off-board
            # sources contribute 0 via dict.get
            p = {(r, c): sum(p.get((r + i, c + j), 0) + p.get((r + j, c + i), 0)
                             for i in (1, -1) for j in (2, -2)) / 8
                 for r in range(N) for c in range(N)}
        return sum(p.values())
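# Quick sanity checks (my addition): K=0 leaves all probability on the board,
# a 1x1 board loses the knight on the first move, and N=3, K=2 from a corner
# gives 0.0625 (the classic LeetCode 688 example).
sol = Solution()
assert sol.knightProbability(8, 0, 0, 0) == 1
assert sol.knightProbability(1, 1, 0, 0) == 0
assert abs(sol.knightProbability(3, 2, 0, 0) - 0.0625) < 1e-9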
from django.contrib.auth import get_user_model
from django.test import RequestFactory

from ralph.assets.models.components import Ethernet, EthernetSpeed
from ralph.networks.models import IPAddress
from ralph.networks.tests.factories import IPAddressFactory
from ralph.tests import RalphTestCase
from ralph.tests.models import PolymorphicTestModel


class NetworkInlineTestCase(RalphTestCase):
    def setUp(self):
        self.inline_prefix = 'ethernet_set-'
        self.obj1 = PolymorphicTestModel.objects.create(hostname='abc')
        self.obj2 = PolymorphicTestModel.objects.create(hostname='xyz')
        self.ip1 = IPAddressFactory(
            ethernet__base_object=self.obj1,
            address='127.0.0.1',
            is_management=True,
        )
        self.ip2 = IPAddressFactory(
            ethernet__base_object=self.obj2,
            address='127.1.0.1',
            is_management=True,
        )
        self.eth1 = self.ip1.ethernet
        self.user = get_user_model().objects.create_superuser(
            username='root', password='password', email='email@email.pl'
        )
        result = self.client.login(username='root', password='password')
        self.assertEqual(result, True)
        self.factory = RequestFactory()

    def _prepare_inline_data(self, d):
        return {
            '{}{}'.format(self.inline_prefix, k): v for (k, v) in d.items()
        }

    def test_adding_new_record_should_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-address': '127.0.0.2',
            '1-mac': '10:20:30:40:50:60',
            '1-label': 'eth1',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        ip = IPAddress.objects.get(address='127.0.0.2')
        self.assertEqual(ip.hostname, 'def')
        self.assertFalse(ip.is_management)
        self.assertFalse(ip.dhcp_expose)
        self.assertEqual(ip.ethernet.mac, '10:20:30:40:50:60')
        self.assertEqual(ip.ethernet.label, 'eth1')
        self.assertEqual(ip.ethernet.base_object.pk, self.obj1.pk)

    def test_adding_new_record_without_ip_and_hostname_should_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-mac': '10:20:30:40:50:60',
            '1-label': 'eth1',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        eth = Ethernet.objects.get(mac='10:20:30:40:50:60')
        self.assertEqual(eth.label, 'eth1')
        self.assertEqual(eth.base_object.pk, self.obj1.pk)
        # ip should not be created
        with self.assertRaises(IPAddress.DoesNotExist):
            eth.ipaddress

    def test_adding_new_record_without_mac_should_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-address': '127.0.0.2',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        ip = IPAddress.objects.get(address='127.0.0.2')
        self.assertEqual(ip.hostname, 'def')
        self.assertFalse(ip.is_management)
        self.assertFalse(ip.dhcp_expose)
        self.assertFalse(bool(ip.ethernet.mac))  # mac is either None or ''
        self.assertFalse(bool(ip.ethernet.label))
        self.assertEqual(ip.ethernet.base_object.pk, self.obj1.pk)

    def test_adding_multiple_new_record_without_mac_should_pass(self):
        # test for storing mac as null
        inline_data = {
            'TOTAL_FORMS': 3,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-address': '127.0.0.2',
            '2-base_object': self.obj1.id,
            '2-hostname': 'def',
            '2-address': '127.0.0.3',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)

    def test_adding_new_record_without_mac_and_ip_should_not_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-label': 'eth1',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        for err in response.context_data['errors']:
            self.assertIn('At least one of mac, address is required', err)

    def test_adding_new_record_with_existing_ip_should_not_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-mac': '11:12:13:14:15:16',
            '1-address': self.ip2.address,  # duplicated ip!
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Address {} already exist.'.format(self.ip2.address)
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_more_than_one_ip_is_management(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-hostname': 'def',
            '1-base_object': self.obj1.id,
            '1-address': '127.0.0.2',
            '1-mac': '',
            '1-label': '',
            '1-is_management': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            'Only one management IP address can be assigned to this asset',
            response.context_data['errors']
        )

    def test_empty_address_and_not_empty_hostname_should_not_pass(self):
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-hostname': self.ip1.hostname,
            '0-address': self.ip1.address,
            '0-mac': self.eth1.mac,
            '0-label': '',
            '0-is_management': 'on',
            '1-hostname': 'def',
            '1-base_object': self.obj1.id,
            '1-address': '',
            '1-mac': '10:20:30:40:50:60',
            '1-label': '',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = (
            'Address is required when one of hostname, is_management, '
            'dhcp_expose is filled'
        )
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )


class NetworkInlineWithDHCPExposeTestCase(RalphTestCase):
    def setUp(self):
        self.inline_prefix = 'ethernet_set-'
        self.obj1 = PolymorphicTestModel.objects.create(hostname='abc')
        self.ip1 = IPAddressFactory(
            ethernet__base_object=self.obj1,
            ethernet__mac='10:20:30:40:50:60',
            hostname='s11.dc.local',
            address='127.0.0.1',
            is_management=True,
        )
        self.eth1 = self.ip1.ethernet
        self.user = get_user_model().objects.create_superuser(
            username='root', password='password', email='email@email.pl'
        )
        result = self.client.login(username='root', password='password')
        self.assertEqual(result, True)
        self.factory = RequestFactory()

    def _prepare_inline_data(self, d):
        return {
            '{}{}'.format(self.inline_prefix, k): v for (k, v) in d.items()
        }

    def test_dhcp_expose_readonly_fields_should_not_change_their_value(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eth10',
            # readonly fields modification!
            '0-hostname': 's222.dc.local',
            '0-address': '127.1.1.1',
            '0-mac': '11:11:11:11:11:11',
            # notice missing dhcp_expose field
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        # besides 302, readonly fields are untouched
        self.ip1.refresh_from_db()
        self.assertEqual(self.ip1.address, '127.0.0.1')
        self.assertEqual(self.ip1.hostname, 's11.dc.local')
        self.assertEqual(self.ip1.dhcp_expose, True)
        self.eth1.refresh_from_db()
        self.assertEqual(self.eth1.mac, '10:20:30:40:50:60')
        # other fields could be changed
        self.assertEqual(self.eth1.label, 'eth10')

    def test_dhcp_expose_delete_should_not_work(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eth10',
            '0-hostname': 's222.dc.local',
            '0-address': '127.1.1.1',
            '0-mac': '11:11:11:11:11:11',
            '0-DELETE': 'on',  # deleting row with DHCP entry!
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot delete entry if its exposed in DHCP'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_for_new_record_should_pass(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eth10',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-address': '127.0.0.2',
            '1-mac': '10:10:10:10:10:10',
            '1-label': 'eth10',
            '1-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        ip = IPAddress.objects.get(address='127.0.0.2')
        self.assertEqual(ip.hostname, 'def')
        self.assertTrue(ip.dhcp_expose)
        self.assertEqual(ip.ethernet.mac, '10:10:10:10:10:10')
        self.assertEqual(ip.ethernet.label, 'eth10')
        self.assertEqual(ip.ethernet.base_object.pk, self.obj1.pk)

    def test_dhcp_expose_for_existing_record_should_pass(self):
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eth10',
            '0-hostname': 's11.dc.local',
            '0-address': '127.0.0.1',
            '0-mac': '10:20:30:40:50:60',
            '0-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 302)
        ip = IPAddress.objects.get(address='127.0.0.1')
        self.assertEqual(ip.hostname, 's11.dc.local')
        self.assertTrue(ip.dhcp_expose)
        self.assertEqual(ip.ethernet.mac, '10:20:30:40:50:60')
        self.assertEqual(ip.ethernet.label, 'eth10')
        self.assertEqual(ip.ethernet.base_object.pk, self.obj1.pk)

    def test_dhcp_expose_without_address_for_new_record_should_not_pass(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '1-base_object': self.obj1.id,
            '1-hostname': '',
            '1-address': '',
            '1-mac': '10:10:10:10:10:10',
            '1-label': 'eth10',
            '1-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without IP address'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_with_address_exist_for_new_record_should_not_pass(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '1-base_object': self.obj1.id,
            '1-hostname': '',
            '1-address': self.ip1.address,
            '1-mac': '10:10:10:10:10:10',
            '1-label': 'eth10',
            '1-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        error_messages = [
            'Cannot expose in DHCP without IP address',
            'Address {} already exist'.format(self.ip1.address)
        ]
        for msg in error_messages:
            self.assertTrue(
                any([msg in err for err in response.context_data['errors']])
            )

    def test_dhcp_expose_without_mac_for_new_record_should_not_pass(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '1-base_object': self.obj1.id,
            '1-hostname': 'def',
            '1-address': '127.0.0.2',
            '1-mac': '',
            '1-label': 'eth10',
            '1-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without MAC address'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_without_hostname_for_new_record_should_not_pass(self):
        self.ip1.dhcp_expose = True
        self.ip1.save()
        inline_data = {
            'TOTAL_FORMS': 2,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '1-base_object': self.obj1.id,
            '1-hostname': '',
            '1-address': '127.0.0.2',
            '1-mac': '10:10:10:10:10:10',
            '1-label': 'eth10',
            '1-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without hostname'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_without_address_for_existing_record_should_not_pass(self):  # noqa
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '0-hostname': '',
            '0-address': '',
            '0-mac': '10:10:10:10:10:10',
            '0-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without IP address'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_without_mac_for_existing_record_should_not_pass(self):
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '0-hostname': 'def',
            '0-address': '127.0.0.2',
            '0-mac': '',
            '0-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without MAC address'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )

    def test_dhcp_expose_without_hostname_for_existing_record_should_not_pass(self):  # noqa
        inline_data = {
            'TOTAL_FORMS': 1,
            'INITIAL_FORMS': 1,
            '0-id': self.eth1.id,
            '0-base_object': self.obj1.id,
            '0-label': 'eht10',
            '0-hostname': '',
            '0-address': '127.0.0.2',
            '0-mac': '10:10:10:10:10:10',
            '0-dhcp_expose': 'on',
        }
        data = {
            'hostname': self.obj1.hostname,
            'id': self.obj1.id,
        }
        data.update(self._prepare_inline_data(inline_data))
        response = self.client.post(self.obj1.get_absolute_url(), data)
        self.assertEqual(response.status_code, 200)
        msg = 'Cannot expose in DHCP without hostname'
        self.assertTrue(
            any([msg in err for err in response.context_data['errors']])
        )
from django.urls import path

from AT.dialogue import views

urlpatterns = [
    path('command', views.ATCommand.as_view(), name='command'),
    path('clear-history', views.ATClearHistory.as_view(), name='clear-history'),
]
# coding: utf-8

# Remove a specific judge entry from database
# Created by James Raphael Tiovalen (2021)

import slack
import ast

import settings
import config
from slackers.hooks import commands

conv_db = config.conv_handler


@commands.on("deletejudge")
def deletejudge(payload):
    return
import torch
import numpy as np

from Discriminator import Discriminator
from Generator import Generator
from viz import *
import torch.nn.functional as F
import pickle as pkl


def test(FLAGS):
    sample_size = FLAGS.eval_size
    z_size = FLAGS.zsize
    cuda = FLAGS.cuda
    g_path = FLAGS.gpath
    d_path = FLAGS.dpath

    map_location = 'cuda' if cuda else 'cpu'

    # Load the models
    dckpt = torch.load(d_path, map_location=map_location)
    gckpt = torch.load(g_path, map_location=map_location)

    D = Discriminator(784, 128, 1)
    G = Generator(100, 32, 784)

    D.load_state_dict(dckpt['state_dict'])
    G.load_state_dict(gckpt['state_dict'])

    # Define some latent vectors
    z = np.random.uniform(-1, 1, size=(sample_size, z_size))
    z = torch.from_numpy(z).float()
    if cuda:
        z = z.cuda()

    # Eval mode
    G.eval()
    rand_images = G(z)
    view_samples(0, [rand_images])
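# Sketch (my addition): test() expects a FLAGS object exposing eval_size,
# zsize, cuda, gpath and dpath; a plain argparse.Namespace works, e.g.:
#
#   from argparse import Namespace
#   FLAGS = Namespace(eval_size=16, zsize=100, cuda=False,
#                     gpath='G.ckpt', dpath='D.ckpt')
#   test(FLAGS)
#
# zsize must match the Generator's input size (100 in the constructor above).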
import sys

# help string for the built-in len() function; note that it's "len" not "len()",
# which is a call to the function, which we don't want
help(len)

help(sys)

# dir() is like help() but just gives a quick list of its defined symbols,
# or "attributes"
dir(sys)

# help string for the exit() function in the sys module
help(sys.exit)

# help string for the split() method for string objects.
# You can call help() with that object itself or an example of that object,
# plus its attribute.
# For example, calling help('xyz'.split) is the same as calling help(str.split).
help('xyz'.split)

# help string for list objects
help(list)

# displays list object attributes, including its methods
dir(list)

# help string for the append() method for list objects
help(list.append)
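# A small extension (my addition): dir() also returns dunder attributes; a
# list comprehension filters them out to show only the public methods.
public = [name for name in dir(list) if not name.startswith('_')]
print(public)  # ['append', 'clear', 'copy', 'count', 'extend', 'index',
               #  'insert', 'pop', 'remove', 'reverse', 'sort']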
# Copyright 2021 Waseda Geophysics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np base = np.array([3.03159441e-19 ,3.35042997e-19 ,3.70279777e-19 ,4.09222441e-19, 4.52260741e-19,4.99825418e-19,5.52392516e-19,6.10488144e-19, 6.74693743e-19,7.45651903e-19,8.24072799e-19,9.10741292e-19, 1.00652479e-18,1.11238193e-18,1.22937215e-18,1.35866635e-18, 1.50155854e-18,1.65947883e-18,1.83400774e-18,2.02689202e-18, 2.24006211e-18,2.47565150e-18,2.73601804e-18,3.02376757e-18, 3.34177999e-18,3.69323806e-18,4.08165929e-18,4.51093115e-18, 4.98534992e-18,5.50966375e-18,6.08912014e-18,6.72951849e-18, 7.43726813e-18,8.21945245e-18,9.08389981e-18,1.00392619e-17, 1.10951003e-17,1.22619822e-17,1.35515861e-17,1.49768188e-17, 1.65519446e-17,1.82927278e-17,2.02165908e-17,2.23427882e-17, 2.46925998e-17,2.72895432e-17,3.01596095e-17,3.33315233e-17, 3.68370302e-17,4.07112145e-17,4.49928503e-17,4.97247897e-17, 5.49543915e-17,6.07339953e-17,6.71214453e-17,7.41806693e-17, 8.19823184e-17,9.06044741e-17,1.00133430e-16,1.10664555e-16, 1.22303247e-16,1.35165992e-16,1.49381524e-16,1.65092116e-16, 1.82455005e-16,2.01643965e-16,2.22851046e-16,2.46288496e-16, 2.72190883e-16,3.00817448e-16,3.32454695e-16,3.67419260e-16, 4.06061081e-16,4.48766898e-16,4.95964125e-16,5.48125127e-16, 6.05771950e-16,6.69481542e-16,7.39891530e-16,8.17706602e-16, 9.03705556e-16,9.98749099e-16,1.10378846e-15,1.21987490e-15, 1.34817027e-15,1.48995857e-15,1.64665888e-15,1.81983951e-15, 2.01123370e-15,2.22275700e-15,2.45652639e-15,2.71488153e-15, 3.00040811e-15,3.31596379e-15,3.66470674e-15,4.05012731e-15, 4.47608292e-15,4.94683667e-15,5.46710003e-15,6.04207995e-15, 6.67753105e-15,7.37981312e-15,8.15595484e-15,9.01372410e-15, 9.96170574e-15,1.10093875e-14,1.21672549e-14,1.34468962e-14, 1.48611187e-14,1.64240761e-14,1.81514113e-14,2.00604119e-14, 2.21701838e-14,2.45018424e-14,2.70787237e-14,2.99266179e-14, 3.30740278e-14,3.65524537e-14,4.03967088e-14,4.46452677e-14, 4.93406515e-14,5.45298532e-14,6.02648079e-14,6.66029130e-14, 7.36076026e-14,8.13489817e-14,8.99045288e-14,9.93598706e-14, 1.09809639e-13,1.21358420e-13,1.34121796e-13,1.48227509e-13, 1.63816732e-13,1.81045488e-13,2.00086208e-13,2.21129459e-13, 2.44385847e-13,2.70088131e-13,2.98493547e-13,3.29886388e-13, 3.64580842e-13,4.02924144e-13,4.45300046e-13,4.92132661e-13, 5.43890705e-13,6.01092189e-13,6.64309607e-13,7.34175658e-13, 8.11389586e-13,8.96724174e-13,9.91033478e-13,1.09526138e-12, 1.21045102e-12,1.33775527e-12,1.47844822e-12,1.63393798e-12, 1.80578073e-12,1.99569635e-12,2.20558557e-12,2.43754903e-12, 2.69390830e-12,2.97722910e-12,3.29034702e-12,3.63639584e-12, 4.01883893e-12,4.44150391e-12,4.90862095e-12,5.42486512e-12, 5.99540317e-12,6.62594522e-12,7.32280197e-12,8.09294777e-12, 8.94409052e-12,9.88474873e-12,1.09243368e-11,1.20732594e-11, 1.33430151e-11,1.47463123e-11,1.62971955e-11,1.80111865e-11, 1.99054395e-11,2.19989129e-11,2.43125587e-11,2.68695329e-11, 2.96954263e-11,3.28185215e-11,3.62700756e-11,4.00846327e-11, 4.43003704e-11,4.89594810e-11,5.41085945e-11,5.97992451e-11, 
6.60883866e-11,7.30389629e-11,8.07205377e-11,8.92099907e-11, 9.85922874e-11,1.08961329e-10,1.20420892e-10,1.33085667e-10, 1.47082409e-10,1.62551201e-10,1.79646860e-10,1.98540486e-10, 2.19421171e-10,2.42497897e-10,2.68001623e-10,2.96187600e-10, 3.27337922e-10,3.61764351e-10,3.99811440e-10,4.41859977e-10, 4.88330796e-10,5.39688994e-10,5.96448581e-10,6.59177626e-10, 7.28503942e-10,8.05121371e-10,8.89796724e-10,9.83377463e-10, 1.08680017e-09,1.20109995e-09,1.32742073e-09,1.46702679e-09, 1.62131534e-09,1.79183056e-09,1.98027903e-09,2.18854679e-09, 2.41871827e-09,2.67309709e-09,2.95422916e-09,3.26492816e-09, 3.60830365e-09,3.98779225e-09,4.40719203e-09,4.87070046e-09, 5.38295650e-09,5.94908697e-09,6.57475791e-09,7.26623124e-09, 8.03042745e-09,8.87499488e-09,9.80838624e-09,1.08399432e-08, 1.19799900e-08,1.32399365e-08,1.46323928e-08,1.61712950e-08, 1.78720450e-08,1.97516643e-08,2.18289650e-08,2.41247373e-08, 2.66619581e-08,2.94660207e-08,3.25649891e-08,3.59898789e-08, 3.97749675e-08,4.39581374e-08,4.85812551e-08,5.36905903e-08, 5.93372789e-08,6.55778350e-08,7.24747161e-08,8.00969486e-08, 8.85208182e-08,9.78306339e-08,1.08119571e-07,1.19490606e-07, 1.32057543e-07,1.45946156e-07,1.61295447e-07,1.78259037e-07, 1.97006704e-07,2.17726080e-07,2.40624532e-07,2.65931234e-07, 2.93899466e-07,3.24809143e-07,3.58969619e-07,3.96722783e-07, 4.38446483e-07,4.84558302e-07,5.35519743e-07,5.91840846e-07, 6.54085291e-07,7.22876042e-07,7.98901579e-07,8.82922792e-07, 9.75780592e-07,1.07840433e-06,1.19182111e-06,1.31716603e-06, 1.45569359e-06,1.60879022e-06,1.77798816e-06,1.96498081e-06, 2.17163965e-06,2.40003298e-06,2.65244665e-06,2.93140690e-06, 3.23970566e-06,3.58042848e-06,3.95698543e-06,4.37314522e-06, 4.83307291e-06,5.34137163e-06,5.90312859e-06,6.52396604e-06, 7.21009754e-06,7.96839011e-06,8.80643302e-06,9.73261366e-06, 1.07562016e-05,1.18874412e-05,1.31376543e-05,1.45193534e-05, 1.60463672e-05,1.77339783e-05,1.95990771e-05,2.16603300e-05, 2.39383668e-05,2.64559869e-05,2.92383873e-05,3.23134153e-05, 3.57118469e-05,3.94676946e-05,4.36185483e-05,4.82059510e-05, 5.32758152e-05,5.88788816e-05,6.50712276e-05,7.19148283e-05, 7.94781769e-05,8.78369697e-05,9.70748644e-05,1.07284317e-04, 1.18567507e-04,1.31037361e-04,1.44818680e-04,1.60049394e-04, 1.76881936e-04,1.95484771e-04,2.16044084e-04,2.38765639e-04, 2.63876840e-04,2.91629010e-04,3.22299900e-04,3.56196477e-04, 3.93657987e-04,4.35059359e-04,4.80814951e-04,5.31382701e-04, 5.87268707e-04,6.49032297e-04,7.17291619e-04,7.92729837e-04, 8.76101962e-04,9.68242410e-04,1.07007335e-03,1.18261395e-03, 1.30699054e-03,1.44444794e-03,1.59636186e-03,1.76425270e-03, 1.94980077e-03,2.15486311e-03,2.38149204e-03,2.63195575e-03, 2.90876095e-03,3.21467801e-03,3.55276865e-03,3.92641659e-03, 4.33936142e-03,4.79573605e-03,5.30010801e-03,5.85752524e-03, 6.47356655e-03,7.15439748e-03,7.90683203e-03,8.73840082e-03, 9.65742645e-03,1.06731069e-02,1.17956073e-02,1.30361622e-02, 1.44071873e-02,1.59224044e-02,1.75969783e-02,1.94476687e-02, 2.14929978e-02,2.37534362e-02,2.62516068e-02,2.90125124e-02, 3.20637850e-02,3.54359627e-02,3.91627954e-02,4.32815826e-02, 4.78335464e-02,5.28642443e-02,5.84240255e-02,6.45685339e-02, 7.13592658e-02,7.88641853e-02,8.71584041e-02,9.63249335e-02, 1.06455515e-01,1.17651539e-01,1.30025060e-01,1.43699915e-01, 1.58812967e-01,1.75515472e-01,1.93974596e-01,2.14375082e-01, 2.36921106e-01,2.61838316e-01,2.89376092e-01,3.19810042e-01, 3.53444757e-01,3.90616867e-01,4.31698402e-01,4.77100519e-01, 5.27277618e-01,5.82731890e-01,6.44018338e-01,7.11750337e-01, 
7.86605774e-01,8.69333825e-01,9.60762462e-01,1.06180673e+00, 1.17347792e+00,1.29689367e+00,1.43328917e+00,1.58402951e+00, 1.75062334e+00,1.93473801e+00,2.13821618e+00,2.36309434e+00, 2.61162314e+00,2.88628994e+00,3.18984371e+00,3.52532250e+00, 3.89608390e+00,4.30583862e+00,4.75868762e+00,5.25916317e+00, 5.81227419e+00,6.42355640e+00,7.09912773e+00,7.84574951e+00, 8.67089419e+00,9.58282009e+00,1.05906541e+01,1.17044829e+01, 1.29354541e+01,1.42958877e+01,1.57993993e+01,1.74610366e+01, 1.92974299e+01,2.13269583e+01,2.35699341e+01,2.60488057e+01, 2.87883825e+01,3.18160831e+01,3.51622098e+01,3.88602517e+01, 4.29472201e+01,4.74640186e+01,5.24558530e+01,5.79726832e+01, 6.40697236e+01,7.08079952e+01,7.82549371e+01,8.64850807e+01, 9.55807960e+01,1.05633116e+02,1.16742648e+02,1.29020579e+02, 1.42589792e+02,1.57586091e+02,1.74159565e+02,1.92476087e+02, 2.12718973e+02,2.35090823e+02,2.59815541e+02,2.87140580e+02, 3.17339418e+02,3.50714296e+02,3.87599241e+02,4.28363409e+02, 4.73414782e+02,5.23204249e+02,5.78230120e+02,6.39043113e+02, 7.06251864e+02,7.80529020e+02,8.62617974e+02,9.53340298e+02, 1.05360397e+03,1.16441247e+03,1.28687480e+03,1.42221660e+03, 1.57179243e+03,1.73709928e+03,1.91979161e+03,2.12169785e+03, 2.34483876e+03,2.59144761e+03,2.86399253e+03,3.16520126e+03, 3.49808838e+03,3.86598555e+03,4.27257480e+03,4.72192541e+03, 5.21853464e+03,5.76737272e+03,6.37393260e+03,7.04428495e+03, 7.78513886e+03,8.60390906e+03,9.50879008e+03,1.05088383e+04, 1.16140624e+04,1.28355240e+04,1.41854479e+04,1.56773445e+04, 1.73261452e+04,1.91483518e+04,2.11622015e+04,2.33878497e+04, 2.58475713e+04,2.85659841e+04,3.15702949e+04,3.48905717e+04, 3.85600452e+04,4.26154406e+04,4.70973456e+04,5.20506166e+04, 5.75248278e+04,6.35747667e+04,7.02609833e+04,7.76503954e+04, 8.58169588e+04,9.48424072e+04,1.04817070e+05,1.15840778e+05, 1.28023859e+05,1.41488245e+05,1.56368694e+05,1.72814133e+05, 1.90989154e+05,2.11075659e+05,2.33274680e+05,2.57808392e+05, 2.84922337e+05,3.14887881e+05,3.48004929e+05,3.84604926e+05, 4.25054180e+05,4.69757518e+05,5.19162347e+05,5.73763128e+05, 6.34106323e+05,7.00795867e+05,7.74499212e+05,8.55954005e+05, 9.45975473e+05,1.04546458e+06,1.15541705e+06,1.27693332e+06, 1.41122957e+06,1.55964988e+06,1.72367969e+06,1.90496067e+06, 2.10530713e+06,2.32672422e+06,2.57142794e+06,2.84186738e+06, 3.14074918e+06,3.47106465e+06,3.83611971e+06,4.23956794e+06, 4.68544719e+06,5.17821998e+06,5.72281812e+06,6.32469216e+06, 6.98986584e+06,7.72499645e+06,8.53744142e+06,9.43533197e+06, 1.04276545e+07,1.15243405e+07,1.27363660e+07,1.40758613e+07, 1.55562325e+07,1.71922958e+07,1.90004253e+07,2.09987175e+07, 2.32071719e+07,2.56478914e+07,2.83453037e+07,3.13264053e+07, 3.46210322e+07,3.82621579e+07,4.22862242e+07,4.67335052e+07, 5.16485108e+07,5.70804321e+07,6.30836336e+07,6.97181972e+07, 7.70505240e+07,8.51539984e+07,9.41097226e+07,1.04007329e+08, 1.14945875e+08,1.27034838e+08,1.40395208e+08,1.55160701e+08, 1.71479095e+08,1.89513709e+08,2.09445039e+08,2.31472566e+08, 2.55816749e+08,2.82721231e+08,3.12455283e+08,3.45316491e+08, 3.81633744e+08,4.21770515e+08,4.66128507e+08,5.15151671e+08, 5.69330645e+08,6.29207671e+08,6.95382020e+08,7.68515985e+08, 8.49341517e+08,9.38667544e+08,1.03738807e+09,1.14649113e+09, 1.26706865e+09,1.40032742e+09,1.54760115e+09,1.71036378e+09, 1.89024431e+09,2.08904304e+09,2.30874961e+09,2.55156293e+09, 2.81991314e+09,3.11648600e+09,3.44424969e+09,3.80648459e+09, 4.20681607e+09,4.64925078e+09,5.13821675e+09,5.67860773e+09, 6.27583211e+09,6.93586714e+09,7.66531865e+09,8.47148726e+09, 
9.36244135e+09,1.03470979e+10,1.14353117e+10,1.26379739e+10, 1.39671212e+10,1.54360562e+10,1.70594804e+10,1.88536416e+10, 2.08364964e+10,2.30278899e+10,2.54497542e+10,2.81263282e+10, 3.10844000e+10,3.43535748e+10,3.79665718e+10,4.19595511e+10, 4.63724756e+10,5.12495114e+10,5.66394696e+10,6.25962946e+10, 6.91796043e+10,7.64552868e+10,8.44961595e+10,9.33826982e+10, 1.03203842e+11,1.14057885e+11,1.26053458e+11,1.39310616e+11, 1.53962041e+11,1.70154370e+11,1.88049661e+11,2.07827017e+11, 2.29684375e+11,2.53840492e+11,2.80537129e+11,3.10041477e+11, 3.42648823e+11,3.78685515e+11,4.18512218e+11,4.62527532e+11, 5.11171977e+11,5.64932404e+11,6.24346863e+11,6.90009996e+11, 7.62578981e+11,8.42780112e+11,9.31416070e+11,1.02937395e+12, 1.13763416e+12,1.25728019e+12,1.38950950e+12,1.53564549e+12, 1.69715073e+12,1.87564163e+12,2.07290459e+12,2.29091386e+12, 2.53185138e+12,2.79812851e+12,3.09241026e+12,3.41764188e+12, 3.77707842e+12,4.17431722e+12,4.61333400e+12,5.09852257e+12, 5.63473887e+12,6.22734953e+12,6.88228559e+12,7.60610189e+12, 8.40604261e+12,9.29011383e+12,1.02671636e+13,1.13469706e+13, 1.25403420e+13,1.38592212e+13,1.53168083e+13,1.69276911e+13, 1.87079919e+13,2.06755285e+13,2.28499929e+13,2.52531476e+13, 2.79090443e+13,3.08442641e+13,3.40881837e+13,3.76732693e+13, 4.16354016e+13,4.60142350e+13,5.08535943e+13,5.62019135e+13, 6.21127204e+13,6.86451722e+13,7.58646480e+13,8.38434027e+13, 9.26612903e+13,1.02406563e+14,1.13176756e+14,1.25079659e+14, 1.38234401e+14,1.52772640e+14,1.68839879e+14,1.86596924e+14, 2.06221494e+14,2.27909998e+14,2.51879502e+14,2.78369900e+14, 3.07646318e+14,3.40001764e+14,3.75760061e+14,4.15279092e+14, 4.58954375e+14,5.07223028e+14,5.60568140e+14,6.19523606e+14, 6.84679472e+14,7.56687841e+14,8.36269396e+14,9.24220616e+14, 1.02142175e+15,1.12884561e+15,1.24756734e+15,1.37877514e+15, 1.52378219e+15,1.68403976e+15,1.86115177e+15,2.05689081e+15, 2.27321590e+15,2.51229211e+15,2.77651217e+15,3.06852051e+15, 3.39123963e+15,3.74789941e+15,4.14206943e+15]) sin = np.array([ 4.519086190454403e-29, -9.339715413211410e-29, 1.907086488243319e-28, -2.926740419085585e-28, 4.016341837702778e-28, -5.205269186311504e-28, 6.517833605193130e-28, -7.967548954470954e-28, 9.568276718198612e-28, -1.135805242625555e-27, 1.341437536522928e-27, -1.584604744296628e-27, 1.876875146781600e-27, -2.228200512918392e-27, 2.645681468388641e-27, -3.133421673989624e-27, 3.693471763710930e-27, -4.327595342414010e-27, 5.039000943393479e-27, -5.833332055385210e-27, 6.719340442247252e-27, -7.710109799166923e-27, 8.824593601800077e-27, -1.008809737899913e-26, 1.153087819007531e-26, -1.318573428833282e-26, 1.508632848921759e-26, -1.726724501006653e-26, 1.976529598469166e-26, -2.262087271971451e-26, 2.587877027540831e-26, -2.958891513809359e-26, 3.380756725224869e-26, -3.859914775323363e-26, 4.403862178004457e-26, -5.021419292439053e-26, 5.722969551574750e-26, -6.520606418485730e-26, 7.428212579422239e-26, -8.461557977402292e-26, 9.638471499797232e-26, -1.097907018143775e-25, 1.250600971971666e-25, -1.424476805594983e-25, 1.622403124908015e-25, -1.847622752118429e-25, 2.103817835334522e-25, -2.395181391816518e-25, 2.726499236157535e-25, -3.103251036607772e-25, 3.531729413199891e-25, -4.019163393605676e-25, 4.573837905690991e-25, -5.205218550552940e-25, 5.924100002797128e-25, -6.742788104825517e-25, 7.675312493515405e-25, -8.737657967045221e-25, 9.948010684981015e-25, -1.132702852601557e-24, 1.289815274051798e-24, -1.468796997184372e-24, 1.672663454907744e-24, -1.904836740815919e-24, 2.169205882613385e-24, 
-2.470198700421404e-24, 2.812864533285517e-24, -3.202964869374407e-24, 3.647071555907292e-24, -4.152674762505081e-24, 4.728305934616842e-24, -5.383678610889903e-24, 6.129850575416668e-24, -6.979408353295871e-24, 7.946678501710549e-24, -9.047967050654569e-24, 1.030183273201298e-23, -1.172939535974881e-23, 1.335468970408732e-23, -1.520506989792332e-23, 1.731167873568262e-23, -1.970998238900646e-23, 2.244038375223756e-23, -2.554891109141771e-23, 2.908800212317214e-23, -3.311738161673479e-23, 3.770506652613824e-23, -4.292849755751231e-23, 4.887584419793589e-23, -5.564747348119901e-23, 6.335764555403829e-23, -7.213641884192625e-23, 8.213185708614141e-23, -9.351250590466155e-23, 1.064702652070505e-22, -1.212235989202909e-22, 1.380212637669216e-22, -1.571464627021063e-22, 1.789216833692896e-22, -2.037140677202572e-22, 2.319416886961351e-22, -2.640804920121902e-22, 3.006724483628496e-22, -3.423345344607581e-22, 3.897693304501770e-22, -4.437766344557916e-22, 5.052672367873663e-22, -5.752779264041773e-22, 6.549893990733216e-22, -7.457456427231128e-22, 8.490772387335222e-22, -9.667263925372798e-22, 1.100677264803472e-21, -1.253188268156373e-21, 1.426831572556448e-21, -1.624534745260255e-21, 1.849632235986410e-21, -2.105919014055614e-21, 2.397717720265674e-21, -2.729947694949692e-21, 3.108212647296628e-21, -3.538889390172954e-21, 4.029242408342459e-21, -4.587537765144882e-21, 5.223192986426113e-21, -5.946923044249243e-21, 6.770936697567055e-21, -7.709123239579199e-21, 8.777310068392340e-21, -9.993501047681958e-21, 1.137821494603412e-20, -1.295478887104638e-20, 1.474982378032821e-20, -1.679356954738640e-20, 1.912051261376801e-20, -2.176986279743765e-20, 2.478633017643540e-20, -2.822073770460957e-20, 3.213105082813427e-20, -3.658314411923333e-20, 4.165216959294550e-20, -4.742350946454843e-20, 5.399460006022600e-20, -6.147610566668155e-20, 6.999435769179613e-20, -7.969278499925461e-20, 9.073518741196294e-20, -1.033074536152729e-19, 1.176219711201683e-19, -1.339196499267872e-19, 1.524758875590042e-19, -1.736028891015458e-19, 1.976577702338448e-19, -2.250451123066827e-19, 2.562280190911125e-19, -2.917307540064815e-19, 3.321538976139149e-19, -3.781767593800005e-19, 4.305782565276538e-19, -4.902385383010367e-19, 5.581678864101238e-19, -6.355065796403268e-19, 7.235650952324198e-19, -8.238205902566872e-19, 9.379730988161989e-19, -1.067935983426557e-18, 1.215914876207947e-18, -1.384387860555074e-18, 1.576216891832665e-18, -1.794611006906676e-18, 2.043284318190411e-18, -2.326391762796314e-18, 2.648754138285296e-18, -3.015750110424166e-18, 3.433638103530407e-18, -3.909379456027323e-18, 4.451100704654134e-18, -5.067809360821606e-18, 5.770060327858650e-18, -6.569505440663543e-18, 7.479857530176959e-18, -8.516184129746393e-18, 9.696306653881282e-18, -1.103970228659850e-17, 1.256954070708544e-17, -1.431098828414357e-17, 1.629418091510444e-17, -1.855161949305473e-17, 2.112251982431457e-17, -2.404883030225413e-17, 2.738161098629106e-17, -3.117496715967868e-17, 3.549542289668629e-17, -4.041270829342227e-17, 4.601355794199069e-17, -5.238775618130511e-17, 5.964848186475663e-17, -6.791121691023222e-17, 7.732378583243061e-17, -8.803453656899252e-17, 1.002367506589228e-16, -1.141207151607239e-16, 1.299394517387315e-16, -1.479366207688721e-16, 1.684438994882969e-16, -1.917726554733372e-16, 2.183583387005876e-16, -2.485978764937954e-16, 2.830639416126730e-16, -3.222610765667943e-16, 3.669438668141347e-16, -4.177514390747528e-16, 4.756802050039284e-16, -5.415364164475966e-16, 6.166389337226809e-16, -7.019997076546317e-16, 
7.993689686576395e-16, -9.100089857117708e-16, 1.036249133688131e-15, -1.179651807131579e-15, 1.343326900422591e-15, -1.529189515686284e-15, 1.741405751368545e-15, -1.982293701749077e-15, 2.257454882110670e-15, -2.569648892095082e-15, 2.926436822402734e-15, -3.331029913939900e-15, 3.793676909909576e-15, -4.317994472774402e-15, 4.917935321484997e-15, -5.597373071100983e-15, 6.375390057182862e-15, -7.255792277807143e-15, 8.264801128606280e-15, -9.405537165055569e-15, 1.071420644035709e-14, -1.219214915931198e-14, 1.388960394313136e-14, -1.580427268552795e-14, 1.800620968515390e-14, -2.048641568507301e-14, 2.334305662501363e-14, -2.655548553367103e-14, 3.026192648607248e-14, -3.442221623150520e-14, 3.923190211789030e-14, -4.461893198803410e-14, 5.086121105139434e-14, -5.783551844666699e-14, 6.593852855730308e-14, -7.496602415439665e-14, 8.548655485671044e-14, -9.716902808177407e-14, 1.108315208765312e-13, -1.259458312388695e-13, 1.436933683252974e-13, -1.632417225266264e-13, 1.863027688321447e-13, -2.115771091886518e-13, 2.415529925720814e-13, -2.742172912312053e-13, 3.131970387082062e-13, -3.553922261911766e-13, 4.061035675187256e-13, -4.605809413353380e-13, 5.265892490959544e-13, -5.968795167949857e-13, 6.828504569587442e-13, -7.734770560272592e-13, 8.855241689907420e-13, -1.002271124202202e-12, 1.148416995714124e-12, -1.298663208549853e-12, 1.489453100122077e-12, -1.682586391404361e-12, 1.931907280346413e-12, -2.179832324535619e-12, 2.506009817027883e-12, -2.823763626714695e-12, 3.251036383068994e-12, -3.657522083511441e-12, 4.218031428263349e-12, -4.736873852220176e-12, 5.473359729916149e-12, -6.133871906007721e-12, 7.103341924139109e-12, -7.941565166210412e-12, 9.220310756445447e-12, -1.028004550802915e-11, 1.197053221213684e-11, -1.330420077896753e-11, 1.554457869729160e-11, -1.721363713771749e-11, 2.019093243336348e-11, -2.226535054417105e-11, 2.623385317715458e-11, -2.878986806971887e-11, 3.409688864363201e-11, -3.721174703331319e-11, 4.433387089687385e-11, -4.807551416154768e-11, 5.766987237954167e-11, -6.207834479976159e-11, 7.505545387359396e-11, -8.011101399321720e-11, 9.773871151888171e-11, -1.033088762718738e-10, 1.273612451487001e-10, -1.331148100865271e-10, 1.660864014336979e-10, -1.713561270888201e-10, 2.167712428480381e-10, -2.203372791851255e-10, 2.831980148718330e-10, -2.829496057001224e-10, 3.703869422887365e-10, -3.627980397893734e-10, 4.850207201792002e-10, -4.643421461158355e-10, 6.360231449851816e-10, -5.930443189593078e-10, 8.353515059977503e-10, -7.555102296535103e-10, 1.099086883072858e-09, -9.595938165682892e-10, 1.448941662513427e-09, -1.214418403431928e-09, 1.914354201982380e-09, -1.530232541828562e-09, 2.535413701041661e-09, -1.917967238636924e-09, 3.366964327985339e-09, -2.388280225383556e-09, 4.484391763894045e-09, -2.949747512489630e-09, 5.991820026480242e-09, -3.605669450820265e-09, 8.033774905885646e-09, -4.348662889559140e-09, 1.081185161881095e-08, -5.151760029833092e-09, 1.460863092797377e-08, -5.954048869440942e-09, 1.982212746243125e-08, -6.637850452739543e-09, 2.701558565884670e-08, -6.992855428994142e-09, 3.698969905307490e-08, -6.660268099150364e-09, 5.088766862234823e-08, -5.046427612646975e-09, 7.034845702641431e-08, -1.189990213788075e-09, 9.773091311662091e-08, 6.441339406826419e-09, 1.364422877792574e-07, 2.026069653953343e-08, 1.914207530941986e-07, 4.403297240671824e-08, 2.698454243003982e-07, 8.358850780049443e-08, 3.821828678435025e-07, 1.479022772000465e-07, 5.437318168094116e-07, 2.507226084953915e-07, 7.769062342581499e-07, 
4.130252690891796e-07, 1.114613518638701e-06, 6.667060493527094e-07, 1.605258236346765e-06, 1.060130280292916e-06, 2.320160162069900e-06, 1.666464612725961e-06, 3.364560133136435e-06, 2.596174981355472e-06, 4.893960156014256e-06, 4.015759771727380e-06, 7.138397535039002e-06, 6.175810373704275e-06, 1.043852386492282e-05, 9.453018910283816e-06, 1.529925306344126e-05, 1.441303290654501e-05, 2.246956262435983e-05, 2.190445818635350e-05, 3.306123161466435e-05, 3.319938341030514e-05, 4.872554987486263e-05, 5.020335754327977e-05, 7.191633170111086e-05, 7.576900218644445e-05, 1.062813859178838e-04, 1.141641643802547e-04, 1.572450969332647e-04, 1.717703221930222e-04, 2.328751169425117e-04, 2.581236364185096e-04, 3.451709156798280e-04, 3.874650245753196e-04, 5.119775374980107e-04, 5.810445069347065e-04, 7.598232475621811e-04, 8.705390306182757e-04, 1.128116358716784e-03, 1.303104717626917e-03, 1.675327916301618e-03, 1.948807411825891e-03, 2.488044976374601e-03, 2.911467293668464e-03, 3.694103557020186e-03, 4.344298949393989e-03, 5.481360712089968e-03, 6.472046368434359e-03, 8.123895409393892e-03, 9.621355456586993e-03, 1.201713561934862e-02, 1.426025794636169e-02, 1.772160768805842e-02, 2.104435746141294e-02, 2.600944034318140e-02, 3.085828026010451e-02, 3.789358111566572e-02, 4.481923933568410e-02, 5.458716136402632e-02, 6.416184799000865e-02, 7.727199941242613e-02, 8.982755932806265e-02, 1.064254421326244e-01, 1.214134956891936e-01, 1.402491646624913e-01, 1.549103199447484e-01, 1.715485184963992e-01, 1.786033886777877e-01, 1.827088851861087e-01, 1.675422102829242e-01, 1.407983439987153e-01, 8.162579672849707e-02, 2.858447717701069e-03, -1.152686206415223e-01, -2.421516386308682e-01, -3.812981622302894e-01, -4.639824707365586e-01, -4.676754754771468e-01, -2.983922369012484e-01, 1.962768518309340e-02, 4.615749449787655e-01, 7.571539594142682e-01, 6.725778299920618e-01, -2.282965939549971e-02, -8.636206091858209e-01, -1.015125148503644e+00, 2.432665171231830e-01, 1.368855569487176e+00, 2.856162774022951e-01, -1.804149507828080e+00, 4.533604735331026e-01, 1.511758555237049e+00, -1.909629765056948e+00, 1.069005333649147e+00, -1.178375735699469e-01, -4.119085433366451e-01, 5.558053315672025e-01, -5.042611820681206e-01, 3.952161415642411e-01, -2.910574896760750e-01, 2.096946427258310e-01, -1.509580770735921e-01, 1.097522132187282e-01, -8.093658545655293e-02, 6.057900864438948e-02, -4.595801396907676e-02, 3.526341496281775e-02, -2.730346497971181e-02, 2.128837318669908e-02, -1.668596224808419e-02, 1.312967661769313e-02, -1.036095586387300e-02, 8.193157054943619e-03, -6.488727811475892e-03, 5.144495374133109e-03, -4.081960541714881e-03, 3.240723341083809e-03, -2.573908860969868e-03, 2.044902564517464e-03, -1.624966594846773e-03, 1.291465382262761e-03, -1.026523739285122e-03, 8.159991714412245e-04, -6.486871353788918e-04, 5.157019263358405e-04, -4.099917485806924e-04, 3.259573396167529e-04, -2.591511272468608e-04, 2.060393933618002e-04, -1.638139563220660e-04, 1.302428944388295e-04, -1.035521166699833e-04, 8.233134695725269e-05, -6.545945702523444e-05, 5.204514861201607e-05, -4.137981961749043e-05, 3.290010486595251e-05, -2.615810167862320e-05, 2.079770185899022e-05, -1.653577691984197e-05, 1.314722074735249e-05, -1.045305829437006e-05, 8.310991516938568e-06, -6.607882958945550e-06, 5.253779852702802e-06, -4.177162948246520e-06, 3.321169005036239e-06, -2.640587391141323e-06, 2.099472167888885e-06, -1.669243535881050e-06, 1.327178346968277e-06, -1.055209941709689e-06, 8.389739249503043e-07, 
-6.670494862601122e-07, 5.303561942591048e-07, -4.216744016303114e-07, 3.352639284891773e-07, -2.665608855859320e-07, 2.119366259147424e-07, -1.685060930903601e-07, 1.339754432660642e-07, -1.065208923693739e-07, 8.469239020916003e-08, -6.733703409093301e-08, 5.353817677449518e-08, -4.256701249734076e-08, 3.384408402893838e-08, -2.690867780042694e-08, 2.139449069787337e-08, -1.701028327972431e-08, 1.352449755955155e-08, -1.075302693551076e-08, 8.549492340901277e-09, -6.797511037206932e-09, 5.404549702807423e-09, -4.297037155315087e-09, 3.416478584420968e-09, -2.716366067339561e-09, 2.159722190085306e-09, -1.717147034705988e-09, 1.365265381135002e-09, -1.085492111933322e-09, 8.630506137215972e-10, -6.861923302473438e-10, 5.455762461365737e-10, -4.337755280138903e-10, 3.448852659061754e-10, -2.742105973115728e-10, 2.180187416052945e-10, -1.733418480408352e-10, 1.378202445493526e-10, -1.095778083781342e-10, 8.712287608917021e-11, -6.926945929969819e-11, 5.507460505331545e-11, -4.378859244194077e-11, 3.481533505726639e-11, -2.768089786642390e-11, 2.200846567772674e-11, -1.749844112121302e-11, 1.391262099629252e-11, -1.106161524020212e-11, 8.794844030523375e-12, -6.992584703096528e-12, 5.559648432809331e-12, -4.420352703548207e-12, 3.514524031458616e-12, -2.794319819206459e-12, 2.221701482790797e-12, -1.766425390825141e-12, 1.404445505192163e-12, -1.116643356280387e-12, 8.878182745412279e-13, -7.058845460186685e-13, 5.612330885763100e-13, -4.462239349060042e-13, 3.547827170807447e-13, -2.820798403939499e-13, 2.242754016089481e-13, -1.783163791396228e-13, 1.417753834852610e-13, -1.127224512924624e-13, 8.962311166429105e-14, -7.125734094978236e-14, 5.665512550244704e-14, -4.504522906558543e-14, 3.581445886076434e-14, -2.847527896066489e-14, 2.264006040254001e-14, -1.800060802704875e-14, 1.431188272386055e-14, -1.137905935138482e-14, 9.047236776658255e-15, -7.193256557121187e-15, 5.719198156731723e-15, -4.547207137145127e-15, 3.615383167608584e-15, -2.874510673128837e-15, 2.285459445620894e-15, -1.817117927723045e-15, 1.444750012769246e-15, -1.148688573014634e-15, 9.132967130053839e-16, -7.261418852616671e-16, 5.773392480475140e-16, -4.590295837506217e-16, 3.649642034053721e-16, -2.901749135190327e-16, 2.307116140436942e-16, -1.834336683660563e-16, 1.458440262302222e-16, -1.159573385656211e-16, 9.219509852266826e-17, -7.330227044488768e-17, 5.828100342071381e-17, -4.633792840399525e-17, 3.684225532762789e-17, -2.929245705146663e-17, 2.328978051105255e-17, -1.851718602164869e-17, 1.472260238767692e-17, -1.170561341297906e-17, 9.306872641520070e-18, -7.399687253379369e-18, 5.883326607803712e-18, -4.677702014739752e-18, 3.719136739613880e-18, -2.957002828302571e-18, 2.351047121536452e-18, -1.869265228476306e-18, 1.486211170428986e-18, -1.181653416196487e-18, 9.395063257031646e-19, -7.469805646126344e-19, 5.939076179015729e-19, -4.722027256379258e-19, 3.754378751739167e-19, -2.985022967459849e-19, 2.373325310854237e-19, -1.886978121843854e-19, 1.500294299097576e-19, -1.192850600158091e-19, 9.484089595818352e-20, -7.540588530116191e-20, 5.995354099387067e-20, -4.766772603294524e-20, 3.789954805412417e-20, -3.013308718206562e-20, 2.395814700817093e-20, -1.904858950112549e-20, 1.514510957672512e-20, -1.204153954009819e-20, 9.573960050057665e-21, -7.612042487983742e-21, 6.052165469011701e-21, -4.811941935554926e-21, 3.825867772694447e-21, -3.041862118819870e-21, 2.418516642409082e-21, -1.922908502426471e-21, 1.528861481986123e-21, -1.215563454903229e-21, 9.664671642003589e-22, -7.684162553978440e-22, 
6.109504047471497e-22, -4.857528416823409e-22, 3.862110855330143e-22, -3.070676966569801e-22, 2.441425971113178e-22, -1.941122955877971e-22, 1.543343556015419e-22, -1.227078345565842e-22, 9.756232141971099e-23, -7.756971208813704e-23, 6.167406266892664e-23, -4.903581613923544e-23, 3.898745781389350e-23, -3.099825880130062e-23, 2.464624570708451e-23, -1.959591536406367e-23, 1.558051651712082e-23, -1.238796173479821e-23, 9.849625195167609e-24, -7.831435654861331e-24, 6.226795451209084e-24, -4.950951685405124e-24, 3.936520905478440e-24, -3.129929802298849e-24, 2.488584770157100e-24, -1.978622297628744e-24, 1.573121213803113e-24, -1.250680505229501e-24, 9.942866354722078e-25, -7.904119407074957e-25, 6.282983380266727e-25, -4.993888550242444e-25, 3.968764501205991e-25, -3.153465565806142e-25, 2.504952335417932e-25, -1.989067071293495e-25, 1.578744037087885e-25, -1.252528942885154e-25, 9.933547263601893e-26, -7.875756004541075e-26, 6.242484682723995e-26, -4.946074258240189e-26, 3.916606766138584e-26, -3.098609190290296e-26, 2.448266612392087e-26, -1.931034281011451e-26, 1.519618180226741e-26, -1.192353131496834e-26, 9.319757733847061e-27, -7.247010142017766e-27, 5.595281281317008e-27, -4.277815801389812e-27, 3.228199604942252e-27, -2.397244120563833e-27, 1.748239591243191e-27, -1.251355005015151e-27, 8.794257877849950e-28, -6.065291790558063e-28, 4.090426687827314e-28, -2.676669295980934e-28, 1.687971022918484e-28, -1.044337763175647e-28, 7.050578391076381e-29, -6.406578964496667e-29, 8.047966445795522e-29, -1.121534977637725e-28, 1.496948518403447e-28, -1.846610467538164e-28, 2.120865254113486e-28, -2.314246444782675e-28, 2.456355516626332e-28, -2.590895710468579e-28, 2.753571339247125e-28, -2.961734255384709e-28, 3.222852751815456e-28, -3.554096868418778e-28, 3.994470369697907e-28, -4.595315984451220e-28, 5.391210400875100e-28, -6.364075603215328e-28, 7.416537866632483e-28, -8.373375968928217e-28, 9.020712825598172e-28, -9.167392508097227e-28, 8.694251225012477e-28, -7.570921079500922e-28, 5.849601896353324e-28, -3.650523966203355e-28, 1.141119602484722e-28, 1.491703156191346e-28, -4.067578199248336e-28, 6.427512747502163e-28, -8.440676076304842e-28, 1.001129119104450e-27, -1.108757116989416e-27, 1.166928305547677e-27, -1.180901419514423e-27, 1.160312647193067e-27, -1.116872557002111e-27, 1.060982755116985e-27, -9.988396142394386e-28, 9.317938546853034e-28, -8.581732798065989e-28, 7.758565868660186e-28, -6.839003286369139e-28, 5.828348637958973e-28, -4.740105434766476e-28, 3.587009166849001e-28, -2.377198496417097e-28, 1.119160774411077e-28, 1.670017331897950e-29, -1.438060858130858e-28, 2.622750906188666e-28, -3.630328238235802e-28, 4.369496766662814e-28, -4.761183982698324e-28, -3.708525899969828e-28]) cos = np.array([7.214369775966785e-20, 5.997984537445829e-20, 1.383536819510307e-20, 6.127201193993877e-20, 2.735622069700930e-20, 6.567948836420383e-20, 4.144963335850363e-20, 7.316414067200350e-20, 5.682375914662966e-20, 8.391977074915078e-20, 7.418756524583309e-20, 9.829637687190485e-20, 9.430643800653847e-20, 1.168146262188112e-19, 1.180370735968097e-19, 1.401723019040171e-19, 1.463726071463266e-19, 1.692722072070252e-19, 1.804796158499069e-19, 2.052560499147526e-19, 2.217507732438609e-19, 2.495469564846162e-19, 2.718603842873614e-19, 3.039069705922034e-19, 3.328334008394297e-19, 3.705052796297763e-19, 4.071277819975917e-19, 4.520053409594589e-19, 4.977334107366132e-19, 5.516707191291291e-19, 6.082931168675559e-19, 6.734956703766505e-19, 7.432489554623685e-19, 8.223651399147256e-19, 
9.080210233648037e-19, 1.004250388267800e-18, 1.109225156214032e-18, 1.226448534750949e-18, 1.354938655056596e-18, 1.497875155579711e-18, 1.655024636692164e-18, 1.829422009902478e-18, 2.021527957180686e-18, 2.234394042862191e-18, 2.469158736824458e-18, 2.729043278909879e-18, 3.015882778812807e-18, 3.333221019045560e-18, 3.683642665131121e-18, 4.071174485366807e-18, 4.499238428427072e-18, 4.972519918024098e-18, 5.495403162992602e-18, 6.073431145514256e-18, 6.712116746365455e-18, 7.418091347704607e-18, 8.198210388921290e-18, 9.060466264497684e-18, 1.001332641867938e-17, 1.106647001686341e-17, 1.223031194783507e-17, 1.351661046246575e-17, 1.493814249254853e-17, 1.650922025025269e-17, 1.824549287949245e-17, 2.016440324953847e-17, 2.228509875325462e-17, 2.462885473506622e-17, 2.721908372832262e-17, 3.008174877960754e-17, 3.324546598231868e-17, 3.674192913569353e-17, 4.060610542324258e-17, 4.487669220181069e-17, 4.959641037849226e-17, 5.481251456381401e-17, 6.057719336989671e-17, 6.694815564512041e-17, 7.398915178848498e-17, 8.177066132132114e-17, 9.037055462918574e-17, 9.987491078055815e-17, 1.103788451159722e-16, 1.219874911140742e-16, 1.348170262066998e-16, 1.489958578076007e-16, 1.646658879212839e-16, 1.819839514458913e-16, 2.011233698894207e-16, 2.222757000537238e-16, 2.456526388749016e-16, 2.714881529754608e-16, 3.000408107960083e-16, 3.315963787425073e-16, 3.664706739627943e-16, 4.050127315080793e-16, 4.476082920363670e-16, 4.946836672898304e-16, 5.467100025245505e-16, 6.042079955957903e-16, 6.677531050397348e-16, 7.379813122861424e-16, 8.155954842977402e-16, 9.013724102689123e-16, 9.961705740887021e-16, 1.100938748010566e-15, 1.216725486808607e-15, 1.344689623369201e-15, 1.486111865526057e-15, 1.642407614840039e-15, 1.815141131499014e-15, 2.006041190779248e-15, 2.217018384471440e-15, 2.450184243392977e-15, 2.707872369692257e-15, 2.992661792874233e-15, 3.307402781094011e-15, 3.655245368051253e-15, 4.039670879180488e-15, 4.464526774284602e-15, 4.934065153895433e-15, 5.452985315986473e-15, 6.026480787914038e-15, 6.660291305149181e-15, 7.360760256360466e-15, 8.134898170257041e-15, 8.990452879276204e-15, 9.935987062502841e-15, 1.098096394385775e-14, 1.213584200318437e-14, 1.341217964828528e-14, 1.482275089528562e-14, 1.638167321535499e-14, 1.810454882702344e-14, 2.000862084851265e-14, 2.211294587257239e-14, 2.443858469135401e-14, 2.700881307980678e-14, 2.984935474755050e-14, 3.298863879030854e-14, 3.645808421795958e-14, 4.029241440643229e-14, 4.453000462105175e-14, 4.921326608894885e-14, 5.438907046503769e-14, 6.010921893911273e-14, 6.643096067976429e-14, 7.341756580308676e-14, 8.113895860149252e-14, 8.967241736929777e-14, 9.910334783010448e-14, 1.095261379057530e-13, 1.210451023825933e-13, 1.337755269287210e-13, 1.478448219118764e-13, 1.633937975650728e-13, 1.805780732628623e-13, 1.995696350122467e-13, 2.205585567465074e-13, 2.437549026489779e-13, 2.693908295460095e-13, 2.977229104105259e-13, 3.290347022305518e-13, 3.636395839428896e-13, 4.018838928348062e-13, 4.441503908040617e-13, 4.908620951685787e-13, 5.424865123659980e-13, 5.995403169151822e-13, 6.625945224685207e-13, 7.322801967084261e-13, 8.092947772848716e-13, 8.944090520057436e-13, 9.884748731403624e-13, 1.092433683043238e-12, 1.207325936425662e-12, 1.334301513576084e-12, 1.474631228748613e-12, 1.629719548899119e-12, 1.801118650062676e-12, 1.990543952052933e-12, 2.199891286960273e-12, 2.431255873276498e-12, 2.686953285545802e-12, 2.969542629413028e-12, 3.281852154013172e-12, 3.627007558039277e-12, 4.008463272785582e-12, 
4.430037035256956e-12, 4.895948097364050e-12, 5.410859453614547e-12, 5.979924509929487e-12, 6.608838660661838e-12, 7.303896290017477e-12, 8.072053768367932e-12, 8.920999073943177e-12, 9.859228736701785e-12, 1.089613287445852e-11, 1.204208917233957e-11, 1.330856674614333e-11, 1.470824092910627e-11, 1.625512013089818e-11, 1.796468603849469e-11, 1.985404856210394e-11, 2.194211707689892e-11, 2.424978967439970e-11, 2.680016231759770e-11, 2.961875999311579e-11, 3.273379217385409e-11, 3.617643514887572e-11, 3.998114404618718e-11, 4.418599767123930e-11, 4.883307961241208e-11, 5.396889942771051e-11, 5.964485812805529e-11, 6.591776261587440e-11, 7.285039422767879e-11, 8.051213707077629e-11, 8.897967244274265e-11, 9.833774628361575e-11, 1.086800173417544e-10, 1.201099945420632e-10, 1.327420729381141e-10, 1.467026786162787e-10, 1.621315340105112e-10, 1.791830562914075e-10, 1.980279028251780e-10, 2.188546791698937e-10, 2.418718267033471e-10, 2.673097087743666e-10, 2.954229162567076e-10, 3.264928155800021e-10, 3.608303647396648e-10, 3.987792254688925e-10, 4.407192027209688e-10, 4.870700458846789e-10, 5.382956497775456e-10, 5.949086974607432e-10, 6.574757913439202e-10, 7.266231239320192e-10, 8.030427449710128e-10, 8.874994877135167e-10, 9.808386236281220e-10, 1.083994322159010e-09, 1.197999000209434e-09, 1.323993654914953e-09, 1.463239283128961e-09, 1.617129501899646e-09, 1.787204496262075e-09, 1.975166433922344e-09, 2.182896501130837e-09, 2.412473730218034e-09, 2.666195807259519e-09, 2.946602068077095e-09, 3.256498912782063e-09, 3.598987893149563e-09, 3.977496754017933e-09, 4.395813739277522e-09, 4.858125505931142e-09, 5.369059025511281e-09, 5.933727892433384e-09, 6.557783502483194e-09, 7.247471613991360e-09, 8.009694857348590e-09, 8.852081819018630e-09, 9.783063390784292e-09, 1.081195714921208e-08, 1.194906060875559e-08, 1.320575428316232e-08, 1.459461558495058e-08, 1.612954470504804e-08, 1.782590372973567e-08, 1.970067039062624e-08, 2.177260798218037e-08, 2.406245315273551e-08, 2.659312344174916e-08, 2.938994664888302e-08, 3.248091431980495e-08, 3.589696189917651e-08, 3.967227833770833e-08, 4.384464827330457e-08, 4.845583018407081e-08, 5.355197433170284e-08, 5.918408463559961e-08, 6.540852915386353e-08, 7.228760421284378e-08, 7.989015791604288e-08, 8.829227916594097e-08, 9.757805922900159e-08, 1.078404332968648e-07, 1.191821106789995e-07, 1.317166026689236e-07, 1.455693587079098e-07, 1.608790217936311e-07, 1.777988162313823e-07, 1.964980809461758e-07, 2.171639645456637e-07, 2.400032980365736e-07, 2.652446652738443e-07, 2.931406901825997e-07, 3.239705657602287e-07, 3.580428475071237e-07, 3.956985425939703e-07, 4.373145214673157e-07, 4.833072913425415e-07, 5.341371626757850e-07, 5.903128587132423e-07, 6.523966036832935e-07, 7.210097538541495e-07, 7.968390110811429e-07, 8.806433020866372e-07, 9.732613658282036e-07, 1.075620158230134e-06, 1.188744116446123e-06, 1.313765428158270e-06, 1.451935342270991e-06, 1.604636717777632e-06, 1.773397831228256e-06, 1.959907713317686e-06, 2.166033001576880e-06, 2.393836687356070e-06, 2.645598681084377e-06, 2.923838733370935e-06, 3.231341523918154e-06, 3.571184694601016e-06, 3.946769446344899e-06, 4.361854837678969e-06, 4.820595081762782e-06, 5.327581531949061e-06, 5.887888119174313e-06, 6.507122780830562e-06, 7.191482772393097e-06, 7.947817716468041e-06, 8.783696866498923e-06, 9.707486485040472e-06, 1.072843153422521e-05, 1.185675077161778e-05, 1.310373578995573e-05, 1.448186809502301e-05, 1.600493890578862e-05, 1.768819362222417e-05, 1.954847630132444e-05, 
2.160440843572022e-05, 2.387656249074371e-05, 2.638768394666778e-05, 2.916289862392297e-05, 3.222998971512441e-05, 3.561964367629314e-05, 3.936579782365431e-05, 4.350592904974602e-05, 4.808149299156779e-05, 5.313825827671661e-05, 5.872686606041739e-05, 6.490320915255368e-05, 7.172915206849267e-05, 7.927294798468421e-05, 8.761017620761336e-05, 9.682417843295337e-05, 1.070072955978771e-04, 1.182612851235724e-04, 1.306989769939818e-04, 1.444446003274482e-04, 1.596360362963627e-04, 1.764249271609239e-04, 1.949797924244976e-04, 2.154857030671910e-04, 2.381486646105023e-04, 2.631944925246626e-04, 2.908750792099106e-04, 3.214658697246949e-04, 3.552749625435381e-04, 3.926382043270680e-04, 4.339325952975191e-04, 4.795674127479124e-04, 5.300042093562213e-04, 5.857414026355948e-04, 6.473444397414629e-04, 7.154197401707392e-04, 7.906606243262904e-04, 8.738040302727717e-04, 9.657009935888906e-04, 1.067245638145834e-03, 1.179484028621435e-03, 1.303498707764836e-03, 1.440577691237741e-03, 1.592027938865682e-03, 1.759438818176274e-03, 1.944382214020240e-03, 2.148824632015574e-03, 2.374646777242952e-03, 2.624289840901410e-03, 2.899987938462482e-03, 3.204783728012370e-03, 3.541304571287609e-03, 3.913361077715114e-03, 4.323998734848948e-03, 4.778017035442578e-03, 5.278871213895021e-03, 5.832645828904957e-03, 6.443132211847618e-03, 7.118100704687155e-03, 7.861484687059508e-03, 8.683286454219962e-03, 9.587172959576953e-03, 1.058612645311708e-02, 1.168276512339872e-02, 1.289407692301174e-02, 1.422020567085629e-02, 1.568354709989395e-02, 1.727924763496293e-02, 1.903701004445868e-02, 2.094259894090355e-02, 2.303555498203885e-02, 2.528473397535577e-02, 2.774280095909549e-02, 3.034889679856765e-02, 3.317292189089636e-02, 3.610269051747732e-02, 3.923023471609136e-02, 4.235591398256915e-02, 4.559945470018810e-02, 4.861418172220856e-02, 5.155399423688033e-02, 5.382905665985834e-02, 5.563737547309198e-02, 5.599656739496778e-02, 5.517328802198061e-02, 5.157565446188783e-02, 4.561585237274122e-02, 3.481744626013846e-02, 1.997678484763328e-02, -2.511444299727086e-03, -3.078890380569448e-02, -6.952663437748715e-02, -1.140926319655417e-01, -1.692861783153246e-01, -2.240265004914591e-01, -2.809223452446239e-01, -3.165386782849084e-01, -3.295050746499982e-01, -2.805919713655642e-01, -1.744060875765448e-01, 2.722628846693606e-02, 2.668949880744598e-01, 5.262102231394616e-01, 6.256684356927903e-01, 4.995016301447683e-01, -1.002368152582941e-02, -6.114010724740713e-01, -9.727382503860407e-01, -3.838420705230950e-01, 7.198704705669955e-01, 1.262041888009595e+00, -2.998397076312483e-01, -1.479978761932394e+00, 1.886890549669046e-01, 1.961538671802124e+00, -2.104506074490929e+00, 7.701373097387101e-01, 4.062497351127477e-01, -8.229740504000808e-01, 7.307456920106093e-01, -4.903037312539515e-01, 2.839808721720737e-01, -1.517915989046718e-01, 7.860615976683388e-02, -4.139435902417716e-02, 2.340179865400356e-02, -1.488928090494461e-02, 1.080204283974104e-02, -8.695630540330540e-03, 7.448774255862835e-03, -6.571253694245813e-03, 5.859704720178251e-03, -5.235154219023063e-03, 4.669537109654244e-03, -4.153880559277143e-03, 3.685278478886407e-03, -3.262012231674279e-03, 2.882025619739767e-03, -2.542670610556139e-03, 2.240859550470028e-03, -1.973292341858488e-03, 1.736649256291777e-03, -1.527725614465373e-03, 1.343513590939351e-03, -1.181244115916277e-03, 1.038401885876272e-03, -9.127236961818876e-04, 8.021869803583510e-04, -7.049929363136232e-04, 6.195471678105551e-04, -5.444398377266471e-04, 4.784265058211163e-04, -4.204101656165671e-04, 
3.694246665626042e-04, -3.246196272200836e-04, 2.852468930079681e-04, -2.506484828993674e-04, 2.202458813636377e-04, -1.935305291014704e-04, 1.700554065180346e-04, -1.494276181460851e-04, 1.313018693894386e-04, -1.153747197310416e-04, 1.013795159657149e-04, -8.908193308740761e-05, 7.827605834070905e-05, -6.878095175364698e-05, 6.043762035968366e-05, -5.310635544925448e-05, 4.666439257514449e-05, -4.100385733848758e-05, 3.602996086454923e-05, -3.165941281357187e-05, 2.781902585783293e-05, -2.444448983707277e-05, 2.147929539928508e-05, -1.887378820764930e-05, 1.658433732127529e-05, -1.457260421932255e-05, 1.280490076321861e-05, -1.125162529600942e-05, 9.886767060922143e-06, -8.687470528401389e-06, 7.633652544743499e-06, -6.707666049011719e-06, 5.894004649462370e-06, -5.179042976527735e-06, 4.550808463155193e-06, -3.998780821517730e-06, 3.513715894699684e-06, -3.087490887193285e-06, 2.712968339794580e-06, -2.383876585436306e-06, 2.094704718364733e-06, -1.840610321658703e-06, 1.617338386451554e-06, -1.421150054108675e-06, 1.248759998169063e-06, -1.097281408784448e-06, 9.641776560691427e-07, -8.472198144303782e-07, 7.444493348026655e-07, -6.541452446960252e-07, 5.747953299561648e-07, -5.050708135463763e-07, 4.438041043830051e-07, -3.899692438669907e-07, 3.426647247474678e-07, -3.010983952488172e-07, 2.645741945310525e-07, -2.324804964806138e-07, 2.042798670572760e-07, -1.795000644284691e-07, 1.577261311964345e-07, -1.385934457978807e-07, 1.217816165104314e-07, -1.070091160694850e-07, 9.402856728542481e-08, -8.262260063861433e-08, 7.260021429510923e-08, -6.379357556568284e-08, 5.605521036390507e-08, -4.925553366022761e-08, 4.328067952283362e-08, -3.803059434473390e-08, 3.341736133872807e-08, -2.936372828943425e-08, 2.580181391877248e-08, -2.267197117462836e-08, 1.992178838990867e-08, -1.750521159700888e-08, 1.538177331647477e-08, -1.351591490408599e-08, 1.187639109711096e-08, -1.043574678473461e-08, 9.169857246988264e-09, -8.057524168015392e-09, 7.080120656351000e-09, -6.221279323637567e-09, 5.466618198282681e-09, -4.803499887007487e-09, 4.220819952418786e-09, -3.708820961440479e-09, 3.258929089378474e-09, -2.863610543832675e-09, 2.516245405206862e-09, -2.211016771314404e-09, 1.942813349072686e-09, -1.707143861761559e-09, 1.500061838825614e-09, -1.318099529115311e-09, 1.158209830829835e-09, -1.017715265474667e-09, 8.942631413031228e-10, -7.857861555682931e-10, 6.904677759378704e-10, -6.067118212948776e-10, 5.331157324405220e-10, -4.684470851019305e-10, 4.116229519995125e-10, -3.616917683963526e-10, 3.178173974200357e-10, -2.792651282909316e-10, 2.453893729983475e-10, -2.156228554187546e-10, 1.894671118403349e-10, -1.664841438026135e-10, 1.462890834648734e-10, -1.285437486867098e-10, 1.129509799028343e-10, -9.924966395649082e-11, 8.721036155589632e-11, -7.663146513121262e-11, 6.733582275589511e-11, -5.916777159927192e-11, 5.199053123157253e-11, -4.568391312831626e-11, 4.014230801823632e-11, -3.527291737272422e-11, 3.099419942203976e-11, -2.723450367478780e-11, 2.393087107406673e-11, -2.102797969829416e-11, 1.847721835214320e-11, -1.623587253411553e-11, 1.426640914879302e-11, -1.253584798559500e-11, 1.101520943914518e-11, -9.679029223044069e-12, 8.504931950483330e-12, -7.473256440847272e-12, 6.566726477759826e-12, -5.770161505244737e-12, 5.070222417415932e-12, -4.455188184705872e-12, 3.914759576021876e-12, -3.439886690015256e-12, 3.022617407370815e-12, -2.655964226336168e-12, 2.333787251527801e-12, -2.050691376558676e-12, 1.801935938738539e-12, -1.583355332954674e-12, 1.391289255348560e-12, 
-1.222521408656179e-12, 1.074225642782795e-12, -9.439186286983481e-13, 8.294182731437524e-13, -7.288071777679118e-13, 6.404005307872462e-13, -5.627178934884672e-13, 4.944584091193514e-13, -4.344790190215893e-13, 3.817753212167385e-13, -3.354647509064286e-13, 2.947718012314345e-13, -2.590150368003291e-13, 2.275956825191015e-13, -1.999875966322778e-13, 1.757284600660376e-13, -1.544120345321836e-13, 1.356813597490631e-13, -1.192227758615040e-13, 1.047606709603187e-13, -9.205286574443716e-14, 8.088655803832021e-14, -7.107475925247343e-14, 6.245316311274798e-14, -5.487739422278246e-14, 4.822059038461202e-14, -4.237127819154857e-14, 3.723150631847816e-14, -3.271520525003134e-14, 2.874674597896990e-14, -2.525967353907224e-14, 2.219559416454687e-14, -1.950319744058413e-14, 1.713739707017873e-14, -1.505857586868545e-14, 1.323192234295437e-14, -1.162684774554722e-14, 1.021647384214807e-14, -8.977182814427699e-15, 7.888221761131355e-15, -6.931355174452619e-15, 6.090559572138626e-15, -5.351755171700232e-15, 4.702570113399845e-15, -4.132133283746073e-15, 3.630892270162917e-15, -3.190453398341683e-15, 2.803441173574897e-15, -2.463374772306652e-15, 2.164559515653694e-15, -1.901991507536473e-15, 1.671273840510759e-15, -1.468542966100438e-15, 1.290403996644843e-15, -1.133873855239388e-15, 9.963313217707399e-16, -8.754731385280971e-16, 7.692754403444974e-16, -6.759598633855865e-16, 5.939637650509565e-16, -5.219140562969427e-16, 4.586042081825544e-16, -4.029740475950767e-16, 3.540920038189227e-16, -3.111395086526072e-16, 2.733972888415509e-16, -2.402333212827215e-16, 2.110922493015534e-16, -1.854860827684001e-16, 1.629860263206755e-16, -1.432152988478505e-16, 1.258428239959391e-16, -1.105776860340182e-16, 9.716425824191095e-17, -8.537792224005708e-17, 7.502130657839168e-17, -6.592098159645411e-17, 5.792455520756561e-17, -5.089812097369260e-17, 4.472401573699795e-17, -3.929884925786503e-17, 3.453177286415005e-17, -3.034295811884372e-17, 2.666226003023219e-17, -2.342804241895063e-17, 2.058614577177349e-17, -1.808898029804633e-17, 1.589472900128195e-17, -1.396664742072978e-17, 1.227244831653922e-17, -1.078376099458355e-17, 9.475656216910048e-18, -8.326228742065685e-18, 7.316230504610306e-18, -6.428748291129759e-18, 5.648920515191333e-18, -4.963688348418389e-18, 4.361577040171507e-18, -3.832503763852548e-18, 3.367608772061774e-18, -2.959107033168789e-18, 2.600157864838119e-18, -2.284750381424515e-18, 2.007602836968745e-18, -1.764074178215185e-18, 1.550086326535024e-18, -1.362055887301009e-18, 1.196834143131819e-18, -1.051654326148005e-18, 9.240852862763527e-19, -8.119907797435101e-19, 7.134936960083838e-19, -6.269446240781204e-19, 5.508942318228495e-19, -4.840689957627215e-19, 4.253498749090647e-19, -3.737535715383304e-19, 3.284160650943604e-19, -2.885781434802982e-19, 2.535726894517719e-19, -2.228135092144265e-19, 1.957855161528666e-19, -1.720361053077579e-19, 1.511675741544441e-19, -1.328304627571508e-19, 1.167177017717951e-19, -1.025594703000911e-19, 9.011867747602604e-20, -7.918699208456320e-20, 6.958135363559505e-20, -6.114090626414241e-20, 5.372430364847189e-20, -4.720733874362162e-20, 4.148085614846149e-20, -3.644890635898519e-20, 3.202709755606534e-20, -2.814108611035396e-20, 2.472510802483146e-20, -2.172035832750181e-20, 1.907280017594962e-20, -7.276969157651721e-21])
"""Test parsing imports from a Python file.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest from pazel.parse_imports import get_imports class TestParseImports(unittest.TestCase): """Test helper functions.""" def test_get_imports(self): """Test parse_enclosed_expression.""" script_source = """ import ast from ast import parse from foo import bar as abc from asd import \ wasd """ packages, from_imports = get_imports(script_source, ".", ".") expected_packages = [("ast", None)] expected_from_imports = [("ast", "parse"), ("foo", "bar"), ("asd", "wasd")] self.assertEqual(packages, expected_packages) self.assertEqual(from_imports, expected_from_imports) if __name__ == "__main__": unittest.main()
#####
# This script creates the data for the facet grid to be created in R
#####
import numpy as np
import sys
import os

p1 = sys.argv[1]  # phage environment
p2 = sys.argv[2]  # bacterial environment
base_folder = 'alt_run/'
file_name = 'facet_plot/facet_data.txt'

# Write the header only once, when the output file does not exist yet.
if not os.path.exists(file_name):
    with open(file_name, 'w') as f:
        f.write('p1\tp2\tMoI\tP(lyso)\n')

moi_range = 100
opt_plyso = [0.0 for x in range(moi_range)]

for x in range(moi_range):
    fname = base_folder + '/p1_' + str(p1) + ',p2_' + str(p2) + '/moi_' + str(0.01 * (x + 1)) + '.csv'
    with open(fname) as f:
        diff = 1000000
        for line in f:
            plyso = float(line.rstrip().split(',')[0])
            final_moi = float(line.rstrip().split(',')[1])
            # Keep the P(lyso) whose final MoI is closest to 1.
            if np.abs(1 - final_moi) < diff:
                opt_plyso[x] = plyso
                diff = np.abs(1 - final_moi)
    with open(file_name, 'a') as f:
        f.write('p1=' + str(p1) + '\tp2=' + str(p2) + '\t' + str(0.01 * (x + 1)) + '\t' + str(opt_plyso[x]) + '\n')
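A hedged usage note for the script above; the script name below is hypothetical, since the source does not name itself:

# Hypothetical invocation (script name and parameter values are placeholders):
#   python make_facet_data.py 0.5 0.9
# For each MoI step 0.01, 0.02, ..., 1.0 this reads
# alt_run/p1_0.5,p2_0.9/moi_<MoI>.csv and appends one row
# (p1, p2, MoI, optimal P(lyso)) to facet_plot/facet_data.txt.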
""" Functional Principal Component Analysis ======================================= Explores the two possible ways to do functional principal component analysis. """ # Author: Yujian Hong # License: MIT import skfda from skfda.datasets import fetch_growth from skfda.exploratory.visualization import plot_fpca_perturbation_graphs from skfda.preprocessing.dim_reduction.projection import FPCA from skfda.representation.basis import BSpline, Fourier, Monomial import matplotlib.pyplot as plt import numpy as np ############################################################################## # In this example we are going to use functional principal component analysis to # explore datasets and obtain conclusions about said dataset using this # technique. # # First we are going to fetch the Berkeley Growth Study data. This dataset # correspond to the height of several boys and girls measured from birth to # when they are 18 years old. The number and time of the measurements are the # same for each individual. To better understand the data we plot it. dataset = skfda.datasets.fetch_growth() fd = dataset['data'] y = dataset['target'] fd.plot() ############################################################################## # FPCA can be done in two ways. The first way is to operate directly with the # raw data. We call it discretized FPCA as the functional data in this case # consists in finite values dispersed over points in a domain range. # We initialize and setup the FPCADiscretized object and run the fit method to # obtain the first two components. By default, if we do not specify the number # of components, it's 3. Other parameters are weights and centering. For more # information please visit the documentation. fpca_discretized = FPCA(n_components=2) fpca_discretized.fit(fd) fpca_discretized.components_.plot() ############################################################################## # In the second case, the data is first converted to use a basis representation # and the FPCA is done with the basis representation of the original data. # We obtain the same dataset again and transform the data to a basis # representation. This is because the FPCA module modifies the original data. # We also plot the data for better visual representation. dataset = fetch_growth() fd = dataset['data'] basis = skfda.representation.basis.BSpline(n_basis=7) basis_fd = fd.to_basis(basis) basis_fd.plot() ############################################################################## # We initialize the FPCABasis object and run the fit function to obtain the # first 2 principal components. By default the principal components are # expressed in the same basis as the data. We can see that the obtained result # is similar to the discretized case. fpca = FPCA(n_components=2) fpca.fit(basis_fd) fpca.components_.plot() ############################################################################## # To better illustrate the effects of the obtained two principal components, # we add and subtract a multiple of the components to the mean function. # We can then observe now that this principal component represents the # variation in the mean growth between the children. # The second component is more interesting. The most appropriate explanation is # that it represents the differences between girls and boys. Girls tend to grow # faster at an early age and boys tend to start puberty later, therefore, their # growth is more significant later. 
# Girls also stop growing earlier.
plot_fpca_perturbation_graphs(basis_fd.mean(),
                              fpca.components_,
                              30,
                              fig=plt.figure(figsize=(6, 2 * 4)))

##############################################################################
# We can also specify another basis for the principal components as an
# argument when creating the FPCA object. For example, if we use the Fourier
# basis for the obtained principal components, we can see that the components
# are periodic. This example is only meant to illustrate the effect; since
# the functions in this dataset are not periodic, it does not make sense to
# use the Fourier basis.
dataset = fetch_growth()
fd = dataset['data']
basis_fd = fd.to_basis(BSpline(n_basis=7))
fpca = FPCA(n_components=2, components_basis=Fourier(n_basis=7))
fpca.fit(basis_fd)
fpca.components_.plot()

##############################################################################
# We can observe that if we switch to the Monomial basis, we also lose the
# key features of the first principal components, because it distorts them by
# adding extra maxima and minima. Therefore, in this case the best option is
# to use the BSpline basis as the basis for the principal components.
dataset = fetch_growth()
fd = dataset['data']
basis_fd = fd.to_basis(BSpline(n_basis=7))
fpca = FPCA(n_components=2, components_basis=Monomial(n_basis=4))
fpca.fit(basis_fd)
fpca.components_.plot()
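As a follow-up, one may want to check how much variability each component captures. A minimal sketch, assuming the fitted FPCA exposes an `explained_variance_ratio_` attribute (treat the name as an assumption and verify against your installed skfda version):

# Sketch: fraction of variance captured by each component
# (`explained_variance_ratio_` is assumed to be available).
fpca = FPCA(n_components=2)
fpca.fit(basis_fd)
print(fpca.explained_variance_ratio_)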
token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" respect_admin = False logchannel = 000000000000000000 reportchannel = 000000000000000000
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.core.platform.profiler import iprofiler_profiler
from telemetry.core.platform.profiler import java_heap_profiler
from telemetry.core.platform.profiler import perf_profiler
from telemetry.core.platform.profiler import sample_profiler
from telemetry.core.platform.profiler import tcmalloc_heap_profiler

_PROFILERS = [
    iprofiler_profiler.IprofilerProfiler,
    java_heap_profiler.JavaHeapProfiler,
    perf_profiler.PerfProfiler,
    sample_profiler.SampleProfiler,
    tcmalloc_heap_profiler.TCMallocHeapProfiler,
]


def FindProfiler(name):
  for profiler in _PROFILERS:
    if profiler.name() == name:
      return profiler
  return None


def GetAllAvailableProfilers():
  return [p.name() for p in _PROFILERS]
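A short usage sketch for the registry above (hypothetical, not from the Chromium tree): look a profiler up by name and list what is available.

# Hypothetical usage of the profiler registry.
profiler_cls = FindProfiler('perf')
if profiler_cls is not None:
  print('Found a profiler named: ' + profiler_cls.name())
print('Available profilers: ' + ', '.join(GetAllAvailableProfilers()))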
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.template import loader
from django.http import HttpResponse
from .forms import Permiso
import datetime
import locale

# Create your views here.
formato_local = "%x "
locale.setlocale(locale.LC_ALL, "es_GT.utf8")


def procesar_permiso(request):
    if request.method == 'POST':
        form = Permiso(request.POST)
        if form.is_valid():
            # Write the form fields as LaTeX \def macros for the template
            # (machote); the file is closed before the template is rendered.
            with open('permiso/machote/datos.tex', 'w') as fichero:
                registro = form.cleaned_data['registro_personal']
                fichero.write('\\def \\registro : { ' + str(registro) + ' }\n')
                nombre = form.cleaned_data['nombre_profesional']
                fichero.write('\\def \\nombre : { ' + str(nombre) + ' }\n')
                nombre_evento = form.cleaned_data['nombre_evento']
                fichero.write('\\def \\evento : { ' + str(nombre_evento) + ' }\n')
                fecha = form.cleaned_data['fecha_evento']
                fichero.write('\\def \\dias : { ' + datetime.datetime.strftime(fecha, "%d %B de %Y") + ' }\n')
                lugar = form.cleaned_data['lugar_evento']
                fichero.write('\\def \\lugar : { ' + str(lugar) + ' }\n')
            template = loader.get_template('base.html')
            context = {}
            # Pass the context explicitly; template.render(request) would
            # treat the request object as the context.
            return HttpResponse(template.render(context, request))
    else:
        form = Permiso()
    return render(request, 'permiso/permiso.html', {'form': form})
import numpy as np
import xarray as xr

#########################
# Density layer version #
#########################

# For later: handle the intervals just like groupby. For now this is a
# workaround. It does not handle decreasing values, which is probably
# related to the above.


def _groupby_vert(data, group_data, bins):
    # Replicates the behaviour of xarray's `groupby_bins` along one
    # dimension with numpy.
    axis = -1  # xr.apply_ufunc transposes core dims to the end
    layers = []
    for b in range(len(bins) - 1):
        bb = [bins[b], bins[b + 1]]
        # This should be customizable like in groupby.
        mask = np.logical_and(bb[0] < group_data, bb[1] >= group_data)
        data_masked = data.copy()
        data_masked[~mask] = np.nan
        nanmask = np.all(~mask, axis=axis)
        # There were some problems with passing the function as a func=...
        # kwarg, so for now the reduction is hardcoded:
        # layer = func(data_masked, axis=axis)
        layer = np.nansum(data_masked, axis=axis)
        # There might be an exception when this is run on a 1d vector,
        # but for now this works. This was formerly done with
        # ma.masked_array, but somehow that did not work with apply_ufunc.
        # Special treatment for 1d input arrays:
        if not isinstance(layer, np.ndarray):
            layer = np.array(layer)
        layer[nanmask] = np.nan
        layers.append(layer)
    return np.stack(layers, axis=-1)


def xr_1d_groupby(data, group_data, bins, dim):
    """Bin `data` into layers of `group_data` along `dim`.

    Parameters
    ----------
    data : xr.DataArray
        Data to be binned.
    group_data : xr.DataArray
        Values to bin by (e.g. potential density); must have a name.
    bins : array-like
        Bin edges in units of `group_data`.
    dim : str
        Dimension along which the binning is performed.

    Returns
    -------
    xr.DataArray
        Remapped data
    """
    bin_name = group_data.name
    bin_dim = "%s_layer" % bin_name
    bin_center = (bins[:-1] + bins[1:]) / 2
    name = group_data.name
    if name is None:
        raise ValueError("`group_data` array must have name")
    remapped = xr.apply_ufunc(
        _groupby_vert,
        data,
        group_data,
        bins,
        input_core_dims=[[dim], [dim], ["bins"]],
        output_core_dims=[[bin_dim]],
        dask="parallelized",
        output_dtypes=[data.dtype],
        output_sizes={bin_dim: len(bins) - 1},
    )
    remapped.coords[bin_dim] = bin_center
    remapped.coords[bin_dim + "_lower"] = (bin_dim, bins[:-1])
    remapped.coords[bin_dim + "_upper"] = (bin_dim, bins[1:])
    return remapped


###############
# Needs tests #
###############


def xr_remapping(
    da_data, da_group, bins, dim, distance_coord, content_var=False, return_average=True
):
    """Performs conservative remapping into another tracer coordinate system.

    Parameters
    ----------
    da_data : xr.DataArray
        Data array to be remapped.
    da_group : xr.DataArray
        Data array of values to remap onto (e.g. potential density).
    bins : array-like
        Spacing for new coordinates, in units of `da_group`. `da_data`
        values are binned between values of `bins`.
    dim : str
        Dimension along which remapping is performed (e.g. depth).
    distance_coord : str
        Name of coordinate in `da_data` which contains distances along
        `dim`. Required to accurately weight data points.
    content_var : bool
        Option for preweighted values. If True, `distance_coord` will not
        be multiplied with `da_data` before binning (the default is False).
    return_average : bool
        Option to return layer averages (True) or layer integrals (False).
    Returns
    -------
    xr.DataArray
        Remapped data with additional coordinates:
        `{da_group.name}_layer_lower/upper` (lower/upper bound of remapped
        layer), `{da_group.name}_layer_{distance_coord}` (thickness of
        remapped layer) and `{da_group.name}_layer_{dim}` (mean position of
        layer along `dim`, e.g. mean depth of isopycnal layer).
    """
    da_data = da_data.copy()
    da_group = da_group.copy()
    if not (set(da_data.dims) == set(da_group.dims)):
        raise ValueError(
            "`da_data` and `da_group` do not have identical dims. "
            "Please interpolate/broadcast appropriately before remapping"
        )
    da_thick = da_data.coords[distance_coord].copy()
    da_dim = da_data.coords[dim].copy()
    # Make sure that the thickness data is not counted
    # anywhere else but where there is data.
    thick_name = da_thick.name  # seems to be overwritten by the line below
    da_thick = da_thick * ((da_data * 0) + 1)
    # Same for the layer position.
    da_dim = da_dim * ((da_data * 0) + 1)
    # Weight da_dim and da_data (only for content_var=False) with da_thick.
    da_dim = da_dim * da_thick
    if not content_var:
        da_data = da_data * da_thick

    data_remapped = xr_1d_groupby(da_data, da_group, bins, dim)
    thickness = xr_1d_groupby(da_thick, da_group, bins, dim)
    layer_pos = xr_1d_groupby(da_dim, da_group, bins, dim)

    if return_average:
        data_remapped = data_remapped / thickness

    data_remapped.coords["%s_layer_%s" % (da_group.name, thick_name)] = thickness
    # Calculate the mean depth of the layer.
    data_remapped.coords["%s_layer_%s" % (da_group.name, dim)] = layer_pos / thickness
    data_remapped.name = da_data.name
    return data_remapped
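Since the module is marked "Needs tests", here is a minimal, self-contained usage sketch under assumed synthetic data (all variable names and values below are hypothetical, not from the source): remap a linear temperature profile into two density layers.

# Hypothetical usage sketch for xr_remapping with synthetic data.
import numpy as np
import xarray as xr

depth = np.arange(10)
temp = xr.DataArray(
    np.linspace(20.0, 5.0, 10),
    dims=["depth"],
    coords={"depth": depth, "dz": ("depth", np.ones(10))},  # dz = cell thickness
    name="temp",
)
rho = xr.DataArray(
    np.linspace(1020.0, 1028.0, 10),  # monotonically increasing density
    dims=["depth"],
    coords={"depth": depth},
    name="rho",
)
bins = np.array([1020.0, 1024.0, 1028.0])
# Thickness-weighted mean temperature within each density layer:
remapped = xr_remapping(temp, rho, bins, "depth", "dz")
print(remapped)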
import click

from ..utils import Spotify
from ..utils.exceptions import AuthScopeError


@click.command(options_metavar='[<options>]')
@click.option(
    '--track', 'browse_type', flag_value='track', default=True,
    help='(default) Open the current track in your browser.'
)
@click.option(
    '--album', 'browse_type', flag_value='album',
    help='Open the current album in your browser.'
)
@click.option(
    '--artist', 'browse_type', flag_value='artist',
    help='Open the current artist in your browser.'
)
@click.option(
    '--playlist', 'browse_type', flag_value='playlist',
    help='Open the current playlist in your browser.'
)
@click.option(
    '-q', '--quiet', is_flag=True,
    help='Suppress output.'
)
def browse(browse_type, verbose=0, quiet=False):
    """Open the current track, album, artist, or playlist in the browser.

    Specify one of the above options to change what to browse
    (default: track).
    """
    import webbrowser
    from cli.commands.status import status

    playback_data = status.callback(_return_parsed=True)
    music = playback_data['music']

    # parse command and playback context
    if browse_type in ['track', 'album', 'artist']:
        url = music[browse_type]['url']
        name = music[browse_type]['name']
        if browse_type != 'artist':
            name = '"{}" by {}'.format(name, music['artist']['name'])
    elif browse_type == 'playlist':
        # playlist and radio are both type 'playlist'
        if music['context']['type'] != 'playlist':
            click.echo('Error: Current session is not a playlist.', err=True)
            return

        url = music['context']['url']
        id_str = music['context']['id']
        name = Spotify.request('playlists/' + id_str)['name']

    if not quiet:
        click.echo(
            '{} - {}\n'
            '{}'
            .format(browse_type.title(), name, url)
        )

    webbrowser.open(url)
    return
""" Check SFP status using sfpshow. This script covers test case 'Check SFP status and configure SFP' in the SONiC platform test plan: https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md """ import logging import pytest from util import parse_eeprom from util import parse_output from util import get_dev_conn cmd_sfp_presence = "sudo sfpshow presence" cmd_sfp_eeprom = "sudo sfpshow eeprom" pytestmark = [ pytest.mark.disable_loganalyzer, # disable automatic loganalyzer pytest.mark.topology('any') ] def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] global ans_host ans_host = duthost portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index) logging.info("Check output of '{}'".format(cmd_sfp_presence)) sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in dev_conn: if intf not in xcvr_skip_list[duthost.hostname]: assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence) assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list): """ @summary: Check SFP presence using 'sfputil show presence' """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] global ans_host ans_host = duthost portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index) logging.info("Check output of '{}'".format(cmd_sfp_eeprom)) sfp_eeprom = duthost.command(cmd_sfp_eeprom) parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"]) for intf in dev_conn: if intf not in xcvr_skip_list[duthost.hostname]: assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" assert parsed_eeprom[intf] == "SFP EEPROM detected"
#!/usr/bin/env python
"""Run the bottle development server."""

if __name__ == "__main__":
    from spin import spin
    from bottle import run, default_app

    run(host="localhost", port=8080, debug=True)
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

from . search_command_internals import ConfigurationSettingsType
from . streaming_command import StreamingCommand
from . search_command import SearchCommand
from . import csv


class ReportingCommand(SearchCommand):
    """ Processes search results and generates a reporting data structure.

    Reporting search commands run as either reduce or map/reduce operations.
    The reduce part runs on a search head and is responsible for processing a
    single chunk of search results to produce the command's reporting data
    structure. The map part is called a streaming preop. It feeds the reduce
    part with partial results and by default runs on the search head and/or
    one or more indexers.

    You must implement a :meth:`reduce` method as a generator function that
    iterates over a set of event records and yields a reporting data
    structure. You may implement a :meth:`map` method as a generator function
    that iterates over a set of event records and yields :class:`dict` or
    :class:`list(dict)` instances.

    **ReportingCommand configuration**

    Configure the :meth:`map` operation using a Configuration decorator on
    your :meth:`map` method. Configure it like you would a
    :class:`StreamingCommand`. Configure the :meth:`reduce` operation using a
    Configuration decorator on your :meth:`ReportingCommand` class.

    :ivar input_header: :class:`InputHeader`: Collection representing the
        input header associated with this command invocation.

    :ivar messages: :class:`MessagesHeader`: Collection representing the
        output messages header associated with this command invocation.

    """
    #region Methods

    def map(self, records):
        """ Override this method to compute partial results.

        You must override this method, if :code:`requires_preop=True`.

        """
        self  # Turns off ide guidance that method may be static
        return NotImplemented

    def reduce(self, records):
        """ Override this method to produce a reporting data structure.

        You must override this method.

        """
        raise NotImplementedError('reduce(self, records)')

    def _execute(self, operation, reader, writer):
        for record in operation(SearchCommand.records(reader)):
            writer.writerow(record)
        return

    def _prepare(self, argv, input_file):
        if len(argv) >= 3 and argv[2] == '__map__':
            ConfigurationSettings = type(self).map.ConfigurationSettings
            operation = self.map
            argv = argv[3:]
        else:
            ConfigurationSettings = type(self).ConfigurationSettings
            operation = self.reduce
            argv = argv[2:]
        if input_file is None:
            reader = None
        else:
            reader = csv.DictReader(input_file)
        return ConfigurationSettings, operation, argv, reader

    #endregion

    #region Types

    class ConfigurationSettings(SearchCommand.ConfigurationSettings):
        """ Represents the configuration settings for a
        :code:`ReportingCommand`.

        """
        #region Properties

        @property
        def clear_required_fields(self):
            """ Specifies whether `required_fields` are the only fields
            required by subsequent commands.

            If :const:`True`, :attr:`required_fields` are the *only* fields
            required by subsequent commands.
            If :const:`False`, :attr:`required_fields` are additive to any
            fields that may be required by subsequent commands. In most cases
            :const:`False` is appropriate for streaming commands and
            :const:`True` is appropriate for reporting commands.

            Default: :const:`True`

            """
            return type(self)._clear_required_fields

        _clear_required_fields = True

        @property
        def requires_preop(self):
            """ Indicates whether :meth:`ReportingCommand.map` is required
            for proper command execution.

            If :const:`True`, :meth:`ReportingCommand.map` is guaranteed to
            be called. If :const:`False`, Splunk considers it to be an
            optimization that may be skipped.

            Default: :const:`False`

            """
            return type(self)._requires_preop

        _requires_preop = False

        @property
        def retainsevents(self):
            """ Signals that :meth:`ReportingCommand.reduce` transforms _raw
            events to produce a reporting data structure.

            Fixed: :const:`False`

            """
            return False

        @property
        def streaming(self):
            """ Signals that :meth:`ReportingCommand.reduce` runs on the
            search head.

            Fixed: :const:`False`

            """
            return False

        @property
        def streaming_preop(self):
            """ Denotes the requested streaming preop search string.

            Computed.

            """
            command = type(self.command)
            if command.map == ReportingCommand.map:
                return ""
            command_line = str(self.command)
            command_name = type(self.command).name
            text = ' '.join([
                command_name, '__map__', command_line[len(command_name) + 1:]])
            return text

        #endregion

        #region Methods

        @classmethod
        def fix_up(cls, command):
            """ Verifies :code:`command` class structure and configures the
            :code:`command.map` method.

            Verifies that :code:`command` derives from
            :code:`ReportingCommand` and overrides
            :code:`ReportingCommand.reduce`. It then configures
            :code:`command.reduce`, if an overriding implementation of
            :code:`ReportingCommand.reduce` has been provided.

            :param command: :code:`ReportingCommand` class

            Exceptions:

            :code:`TypeError`
                :code:`command` class is not derived from
                :code:`ReportingCommand`

            :code:`AttributeError`
                No :code:`ReportingCommand.reduce` override

            """
            if not issubclass(command, ReportingCommand):
                raise TypeError('%s is not a ReportingCommand' % command)

            if command.reduce == ReportingCommand.reduce:
                raise AttributeError('No ReportingCommand.reduce override')

            if command.map == ReportingCommand.map:
                cls._requires_preop = False
                return

            f = vars(command)['map']  # Function backing the map method

            # There is no way to add custom attributes to methods. See
            # [Why does setattr fail on a method](http://goo.gl/aiOsqh)
            # for an explanation.

            try:
                settings = f._settings
            except AttributeError:
                f.ConfigurationSettings = StreamingCommand.ConfigurationSettings
                return

            # Create new `StreamingCommand.ConfigurationSettings` class

            module = '.'.join([command.__module__, command.__name__, 'map'])
            name = 'ConfigurationSettings'
            bases = (StreamingCommand.ConfigurationSettings,)

            f.ConfigurationSettings = ConfigurationSettingsType(
                module, name, bases, settings)
            del f._settings
            return

        #endregion

    #endregion
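To make the map/reduce contract concrete, a minimal sketch of a subclass follows (hypothetical, not part of the SDK; a real command would also carry the Configuration decorators described above). The `count` field name is illustrative:

# Hypothetical ReportingCommand subclass: sums an illustrative `count`
# field across all incoming event records and emits a single result row.
class SumCommand(ReportingCommand):

    def reduce(self, records):
        total = 0.0
        for record in records:
            try:
                total += float(record.get('count', 0))
            except ValueError:
                # Skip events whose `count` value is not numeric.
                continue
        yield {'total': total}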
from redis import Redis
from rq import Queue

import tasks
import time

c = Redis(host=tasks.REDIS_HOST)
q = Queue(connection=c)

t0 = time.time()

# Enqueue 32 key-generation jobs, then poll until all have finished.
jobs = []
for i in range(32):
    jobs.append(q.enqueue(tasks.newkeys, 1024))

while any(not job.is_finished for job in jobs):
    time.sleep(0.1)

t1 = time.time()
print(t1 - t0)
from string import ascii_uppercase
from string import digits
import random


class Robot:

    previous_names = []

    def __init__(self):
        self.name = None
        self.reset()

    def reset(self):
        robot_name = self.__make_name()
        while robot_name in Robot.previous_names:
            robot_name = self.__make_name()
        Robot.previous_names.append(robot_name)
        self.name = robot_name

    @staticmethod
    def __make_name():
        name_as_list = random.choices(ascii_uppercase, k=2) + random.choices(digits, k=3)
        return "".join(name_as_list)
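A brief usage sketch: each generated name is two uppercase letters followed by three digits, and reset() keeps drawing until it finds a name not issued before.

# Usage sketch: exercising the uniqueness guarantee of reset().
robot = Robot()
first_name = robot.name          # e.g. "AB123"
robot.reset()
assert robot.name != first_name  # a fresh, never-before-issued name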
from typing import List

from libs.configuration import Configuration
from libs.device import Device
from libs.group import Group
from libs.script import Script


class ScriptMenu:
    def __init__(
        self,
        config: Configuration,
        devices: List[Device],
        groups: List[Group],
        scripts: List[Script],
    ) -> None:
        self.config = config
        self.devices = devices
        self.groups = groups
        self.scripts = scripts
        self.commands = {
            'quit': ['q', 'quit', 'exit'],
            'help': ['h', 'help'],
            'list scripts': ['l'],
            'create script': ['c'],
            'run script': ['r'],
            'edit script': ['e'],
        }

    def run(self) -> bool:
        command_line = input('scr: ')
        if len(command_line) == 0:
            return True
        command = command_line.lower().split()[0]

        if command in self.commands['quit']:
            return False
        elif command in self.commands['help']:
            for c in self.commands:
                print(f'{c}: {self.commands[c]}')
        elif command in self.commands['list scripts']:
            if len(self.scripts) > 0:
                for script in self.scripts:
                    print(f'name : {script.name}')
                    print(f'devices: {script.devices}')
                    print(f'groups : {script.groups}')
                    print(f'scripts: {script.scripts}')
                    print(f'actions: {script.actions}')
                    print('-' * 20)
            else:
                print('No scripts defined.')
        elif command in self.commands['create script']:
            ...
        elif command in self.commands['run script']:
            ...
        elif command in self.commands['edit script']:
            ...
        else:
            print('Unknown command')

        print('')
        return True
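A hypothetical driver loop for the menu (the None configuration and empty lists are placeholders for illustration); run() returns False only for a quit command, so the loop below keeps the menu alive until the user exits:

# Hypothetical REPL driver; a real caller would pass a loaded Configuration
# and the actual device/group/script lists.
menu = ScriptMenu(config=None, devices=[], groups=[], scripts=[])
while menu.run():
    pass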
# Copyright 2019 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test CounterCallback.""" import pytest from ashpy.callbacks import CounterCallback, Event from ashpy.models.gans import ConvDiscriminator, ConvGenerator from tests.utils.fake_training_loop import FakeAdversarialTraining class FakeCounterCallback(CounterCallback): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fake_counter = 0 def on_event(self, event, context): if event == self._event: self.fake_counter += 1 super().on_event(event, context) @pytest.fixture() def _models(): image_resolution = (28, 28) layer_spec_input_res = (7, 7) layer_spec_target_res = (7, 7) kernel_size = 5 channels = 1 # Model definition generator = ConvGenerator( layer_spec_input_res=layer_spec_input_res, layer_spec_target_res=image_resolution, kernel_size=kernel_size, initial_filters=32, filters_cap=16, channels=channels, ) discriminator = ConvDiscriminator( layer_spec_input_res=image_resolution, layer_spec_target_res=layer_spec_target_res, kernel_size=kernel_size, initial_filters=16, filters_cap=32, output_shape=1, ) return generator, discriminator def test_counter_callback_multiple_events(): """Counter Callback should not receive multiple events.""" with pytest.raises(TypeError): FakeCounterCallback( event=[Event.ON_EPOCH_END], name="TestCounterCallbackMultipleEvents", fn=lambda context: print("Bloop"), ) # TODO: parametrize tests following test_save_callback.py def test_counter_callback(_models, tmpdir): clbk = FakeCounterCallback( event=Event.ON_EPOCH_END, name="TestCounterCallback", fn=lambda context: print("Bloop"), ) callbacks = [clbk] generator, discriminator = _models FakeAdversarialTraining( logdir=tmpdir, callbacks=callbacks, generator=generator, discriminator=discriminator, epochs=1, )() assert clbk.fake_counter == 1
# File: generators.py
# Description: Examples on how to create and use generators in Python
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# [1] Valentyn N Sichkar. Examples on how to create and use generators in Python // GitHub platform [Electronic resource]. URL: https://github.com/sichkar-valentyn/Generators_in_Python (date of access: XX.XX.XXXX)

from random import random


# Creating the class for iterations
class RandomIterator:
    # We add the method __iter__ if we want to iterate over this class
    def __iter__(self):
        return self

    def __init__(self, k):
        self.k = k
        self.i = 0

    def __next__(self):
        if self.i < self.k:
            self.i += 1
            return random()
        else:
            raise StopIteration


# Creating a function as a generator
# Instead of return we use yield
# This way the function remembers where it stopped at yield between calls
def random_generator(k):
    for i in range(k):
        yield random()


# Creating an instance of the generator
gen = random_generator(3)
print(type(gen))

for i in gen:
    print(i)


# A clearer example of how a generator resumes after each yield
def simple_gen():
    print('Checkpoint 1')
    yield 1
    print('Checkpoint 2')
    # return 'No more elements'
    yield 2
    print('Checkpoint 3')


g = simple_gen()
x = next(g)
print(x)
y = next(g)
print(y)
# The third next() runs the code after the last yield and then raises
# StopIteration, because the generator is exhausted
try:
    z = next(g)
except StopIteration:
    print('Generator is exhausted')


# Implementing the task - creating the methods for prime numbers
def is_prime(n):
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for i in range(3, n // 2, 2):
        if n % i == 0:
            return False
    return True


def primes():
    n = 2
    while True:
        if is_prime(n):
            yield n
        n += 1
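# Usage sketch for the infinite primes() generator above: bound it
# externally, e.g. with itertools.islice, to take just the first ten.
from itertools import islice

print(list(islice(primes(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]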
# Tasks on loops and the conditional statement ------
# ----------------------------------------

'''
Task 1
Print five lines of zeros in a loop, numbering each line.
'''
for i in range(1, 6):
    print(i, ' 0')

'''
Task 2
The user enters 10 digits in a loop.
Count how many times the user entered the digit 5.
'''
s = 0
for i in range(10):
    a = int(input('Enter a number '))
    if a == 5:
        s += 1
print('Number of fives entered = ', s)

'''
Task 3
Find the sum of the numbers from 1 to 100 and print the result.
'''
abc = 0
for i in range(1, 101):
    abc += i
print('Sum of the numbers from 1 to 100 = ', abc)

'''
Task 4
Find the product of the numbers from 1 to 10 and print the result.
'''
p = 1
for i in range(1, 11):
    p *= i
print('Product of the numbers from 1 to 10 = ', p)

'''
Task 5
Print the digits of a number, one per line.
'''
a = int(input('Enter an integer '))
while a != 0:
    print(a % 10)
    a = a // 10

'''
Task 6
Find the sum of the digits of a number.
'''
a = int(input('Enter an integer '))
digit_sum = 0
while a != 0:
    digit_sum += (a % 10)
    a = a // 10
print('Sum of the digits = ', digit_sum)

'''
Task 7
Find the product of the digits of a number.
'''
a = int(input('Enter an integer '))
pr = 1
while a != 0:
    pr *= (a % 10)
    a = a // 10
print('Product of the digits = ', pr)

'''
Task 8
Answer the question: does the number contain the digit 5?
'''
a = int(input('Enter an integer '))
while a > 0:
    if a % 10 == 5:
        print('There is a 5')
        break
    a = a // 10
else:
    print('No fives')

'''
Task 9
Find the largest digit of a number.
'''
a = int(input('Enter an integer '))
max_digit = 0
while a != 0:
    if max_digit < a % 10:
        max_digit = a % 10
    a = a // 10
print('The largest digit of the number is ', max_digit)

'''
Task 10
Count how many times the digit 5 occurs in a number.
'''
a = int(input('Enter an integer '))
b = 0
while a > 0:
    if a % 10 == 5:
        b += 1
    a = a // 10
print('Number of fives in the number = ', b)
# Basic utility library for Python learning
EXERCISE_VERSION = 1.0
print('-' * 50)
print('Initializing Exercise module ver:', EXERCISE_VERSION)
print('-' * 50)
import socket


def getMAC(interface='eth0'):
    # Read the MAC address of the given interface from sysfs
    try:
        with open('/sys/class/net/%s/address' % interface) as f:
            mac = f.read()
    except (IOError, OSError):
        mac = "00:00:00:00:00:00"
    return mac[0:17]


def getserial():
    # Extract serial from cpuinfo file
    cpu_serial = "0000000000000000"
    try:
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:6] == 'Serial':
                    cpu_serial = line[10:26]
    except (IOError, OSError):
        cpu_serial = "ERROR000000000"
    return cpu_serial


def getRevision():
    # Extract board revision from cpuinfo file
    myrevision = "0000"
    try:
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:8] == 'Revision':
                    myrevision = line[11:len(line) - 1]
    except (IOError, OSError):
        myrevision = "0000"
    return myrevision


def getHardware():
    # Extract hardware name from cpuinfo file
    myrevision = "0000"
    try:
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:8] == 'Hardware':
                    myrevision = line[11:len(line) - 1]
    except (IOError, OSError):
        myrevision = "0000"
    return myrevision


def getModel():
    # Extract board model from cpuinfo file
    myrevision = "0000"
    try:
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:5] == 'Model':
                    myrevision = line[9:len(line) - 1]
    except (IOError, OSError):
        myrevision = "0000"
    return myrevision


def get_ip_address():
    # Determine the outward-facing IP by opening a UDP socket towards 8.8.8.8
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip_address = s.getsockname()[0]
    s.close()
    return ip_address
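# Usage sketch tying the helpers above together; the values printed depend
# on the host, and on a non-Pi machine the cpuinfo fields fall back to
# their defaults.
if __name__ == '__main__':
    print('MAC     :', getMAC('eth0'))
    print('Serial  :', getserial())
    print('Revision:', getRevision())
    print('Hardware:', getHardware())
    print('Model   :', getModel())
    print('IP      :', get_ip_address())  # needs a route to 8.8.8.8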
"""Tests for marker sizing.""" from pathlib import Path from typing import Dict, Tuple from unittest import mock from pytest import approx from sb_vision import FileCamera, Vision from sb_vision.camera_base import CameraBase TEST_DATA = Path(__file__).parent / 'test_data' # The expected error for the distances of markers should be within tolerances EXPECTED_LARGE_DISTANCE = 2.5 EXPECTED_SMALL_DISTANCE = 1 EXPECTED_TOLERANCE = 0.15 def assertMarkerDistance( camera: CameraBase, *, marker_sizes: Dict[int, Tuple[float, float]], expected_distance: float ) -> None: """Assert that the processed distance is as expected for a marker size.""" vision = Vision(camera) with mock.patch('sb_vision.tokens.MARKER_SIZES', marker_sizes): token, = vision.snapshot() dist = token.spherical.dist assert dist == approx(expected_distance, rel=EXPECTED_TOLERANCE) def test_unknown_marker_size(): """Test an unknown marker size defaults to the trained size.""" # The c270 model is trained on 25cm markers; so it assume that all markers # are that size unless told otherwise. assertMarkerDistance( FileCamera(TEST_DATA / 'tecknet-10cm-at-1m.jpg', camera_model='C016'), marker_sizes={}, expected_distance=EXPECTED_LARGE_DISTANCE, ) def test_large_marker_large_size(): """Test a marker matching the trained size has the right distance.""" assertMarkerDistance( FileCamera(TEST_DATA / 'tecknet-25cm-at-2.5m.jpg', camera_model='C016'), marker_sizes={23: (0.25, 0.25)}, expected_distance=EXPECTED_LARGE_DISTANCE, ) def test_large_marker_small_size(): """ Test image with large marker gives small distance when configured for a small marker. """ assertMarkerDistance( FileCamera(TEST_DATA / 'tecknet-25cm-at-2.5m.jpg', camera_model='C016'), marker_sizes={23: (0.1, 0.1)}, expected_distance=EXPECTED_SMALL_DISTANCE, ) def test_small_marker_large_size(): """ Test image with small marker gives large distance when configured for a large marker. """ assertMarkerDistance( FileCamera(TEST_DATA / 'tecknet-10cm-at-1m.jpg', camera_model='C016'), marker_sizes={44: (0.25, 0.25)}, expected_distance=EXPECTED_LARGE_DISTANCE, ) def test_small_marker_small_size(): """ Test image with small marker gives small distance when configured for a small marker. """ assertMarkerDistance( FileCamera(TEST_DATA / 'tecknet-10cm-at-1m.jpg', camera_model='C016'), marker_sizes={44: (0.1, 0.1)}, expected_distance=EXPECTED_SMALL_DISTANCE, )
import getDataAA
import numpy as np
import os


def run():
    print('AA miles scanner started...')
    data = getDataAA.get()

    pMiles = np.array(data.millas[0])  # promotional class
    tMiles = np.array(data.millas[1])  # economy (tourist) class
    bMiles = np.array(data.millas[2])  # business class

    pMiles[pMiles == 'N/A'] = '-1'
    tMiles[tMiles == 'N/A'] = '-1'
    bMiles[bMiles == 'N/A'] = '-1'

    pMiles = pMiles.astype(float)
    tMiles = tMiles.astype(float)
    bMiles = bMiles.astype(float)

    # Check whether the previous tables exist; otherwise use the current ones and save them.
    if os.path.isfile('pMiles_old.npy'):
        pMiles_old = np.load('pMiles_old.npy')
    else:
        pMiles_old = pMiles
        np.save('pMiles_old.npy', pMiles)

    if os.path.isfile('tMiles_old.npy'):
        tMiles_old = np.load('tMiles_old.npy')
    else:
        tMiles_old = tMiles
        np.save('tMiles_old.npy', tMiles)

    if os.path.isfile('bMiles_old.npy'):
        bMiles_old = np.load('bMiles_old.npy')
    else:
        bMiles_old = bMiles
        np.save('bMiles_old.npy', bMiles)

    if bool((pMiles - pMiles_old).any()) or bool((tMiles - tMiles_old).any()) or bool((bMiles - bMiles_old).any()):
        print('\n¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡¡')
        print('The Miles table has changed.')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
        print('Replace the previous miles table?')
        val = input('(S/N): ')  # S = yes, N = no
        if val == 'S' or val == 's':
            np.save('pMiles_old.npy', pMiles)
            np.save('tMiles_old.npy', tMiles)
            np.save('bMiles_old.npy', bMiles)
    else:
        print('No change in the Miles table.')
        val = 'N'

    # print('Scan again?')
    # val = input('(S/N): ')

    return val
'''Thread-safe version of Tkinter. Copyright (c) 2009, Allen B. Taylor This module is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser Public License for more details. You should have received a copy of the GNU Lesser Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Usage: import mtTkinter as Tkinter # Use "Tkinter." as usual. or from mtTkinter import * # Use Tkinter module definitions as usual. This module modifies the original Tkinter module in memory, making all functionality thread-safe. It does this by wrapping the Tk class' tk instance with an object that diverts calls through an event queue when the call is issued from a thread other than the thread in which the Tk instance was created. The events are processed in the creation thread via an 'after' event. The modified Tk class accepts two additional keyword parameters on its __init__ method: mtDebug: 0 = No debug output (default) 1 = Minimal debug output ... 9 = Full debug output mtCheckPeriod: Amount of time in milliseconds (default 100) between checks for out-of-thread events when things are otherwise idle. Decreasing this value can improve GUI responsiveness, but at the expense of consuming more CPU cycles. Note that, because it modifies the original Tkinter module (in memory), other modules that use Tkinter (e.g., Pmw) reap the benefits automagically as long as mtTkinter is imported at some point before extra threads are created. Author: Allen B. Taylor, a.b.taylor@gmail.com ''' # from Tkinter import * from tkinter import * import threading import queue class _Tk(object): """ Wrapper for underlying attribute tk of class Tk. """ def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10): self._tk = tk # Create the incoming event queue. self._eventQueue = queue.Queue(1) # Identify the thread from which this object is being created so we can # tell later whether an event is coming from another thread. self._creationThread = threading.currentThread() # Store remaining values. self._debug = mtDebug self._checkPeriod = mtCheckPeriod def __getattr__(self, name): # Divert attribute accesses to a wrapper around the underlying tk # object. return _TkAttr(self, getattr(self._tk, name)) class _TkAttr(object): """ Thread-safe callable attribute wrapper. """ def __init__(self, tk, attr): self._tk = tk self._attr = attr def __call__(self, *args, **kwargs): """ Thread-safe method invocation. Diverts out-of-thread calls through the event queue. Forwards all other method calls to the underlying tk object directly. """ # Check if we're in the creation thread. if threading.currentThread() == self._tk._creationThread: # We're in the creation thread; just call the event directly. if self._tk._debug >= 8 or \ self._tk._debug >= 3 and self._attr.__name__ == 'call' and \ len(args) >= 1 and args[0] == 'after': print(('Calling event directly:', \ self._attr.__name__, args, kwargs)) return self._attr(*args, **kwargs) else: # We're in a different thread than the creation thread; enqueue # the event, and then wait for the response. 
responseQueue = queue.Queue(1) if self._tk._debug >= 1: print(('Marshalling event:', self._attr.__name__, args, kwargs)) self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue)) isException, response = responseQueue.get() # Handle the response, whether it's a normal return value or # an exception. if isException: exType, exValue, exTb = response raise exType(exValue).with_traceback(exTb) else: return response # Define a hook for class Tk's __init__ method. def _Tk__init__(self, *args, **kwargs): # We support some new keyword arguments that the original __init__ method # doesn't expect, so separate those out before doing anything else. new_kwnames = ('mtCheckPeriod', 'mtDebug') new_kwargs = {} for name, value in list(kwargs.items()): if name in new_kwnames: new_kwargs[name] = value del kwargs[name] # Call the original __init__ method, creating the internal tk member. self.__original__init__mtTkinter(*args, **kwargs) # Replace the internal tk member with a wrapper that handles calls from # other threads. self.tk = _Tk(self.tk, **new_kwargs) # Set up the first event to check for out-of-thread events. self.after_idle(_CheckEvents, self) # Replace Tk's original __init__ with the hook. Tk.__original__init__mtTkinter = Tk.__init__ Tk.__init__ = _Tk__init__ def _CheckEvents(tk): "Event checker event." used = False try: # Process all enqueued events, then exit. while True: try: # Get an event request from the queue. method, args, kwargs, responseQueue = \ tk.tk._eventQueue.get_nowait() except: # No more events to process. break else: # Call the event with the given arguments, and then return # the result back to the caller via the response queue. used = True if tk.tk._debug >= 2: print(('Calling event from main thread:', \ method.__name__, args, kwargs)) try: responseQueue.put((False, method(*args, **kwargs))) except SystemExit as ex: raise SystemExit(ex) except Exception as ex: # Calling the event caused an exception; return the # exception back to the caller so that it can be raised # in the caller's thread. from sys import exc_info exType, exValue, exTb = exc_info() responseQueue.put((True, (exType, exValue, exTb))) finally: # Schedule to check again. If we just processed an event, check # immediately; if we didn't, check later. if used: tk.after_idle(_CheckEvents, tk) else: tk.after(tk.tk._checkPeriod, _CheckEvents, tk) # Test thread entry point. def _testThread(root): text = "This is Tcl/Tk version %s" % TclVersion if TclVersion >= 8.1: try: text = text + str("\nThis should be a cedilla: \347", "iso-8859-1") except NameError: pass # no unicode support try: if root.globalgetvar('tcl_platform(threaded)'): text = text + "\nTcl is built with thread support" else: raise RuntimeError except: text = text + "\nTcl is NOT built with thread support" text = text + "\nmtTkinter works with or without Tcl thread support" label = Label(root, text=text) label.pack() button = Button(root, text="Click me!", command=lambda root=root: root.button.configure( text="[%s]" % root.button['text'])) button.pack() root.button = button quit = Button(root, text="QUIT", command=root.destroy) quit.pack() # The following three commands are needed so the window pops # up on top on Windows... root.iconify() root.update() root.deiconify() # Simulate button presses... button.invoke() root.after(1000, _pressOk, root, button) # Test button continuous press event. def _pressOk(root, button): button.invoke() try: root.after(1000, _pressOk, root, button) except: pass # Likely we're exiting # Test. 
# Mostly borrowed from the Tkinter module, but the important bits moved
# into a separate thread.

if __name__ == '__main__':
    import threading
    root = Tk(mtDebug=1)
    thread = threading.Thread(target=_testThread, args=(root,))
    thread.start()
    root.mainloop()
    thread.join()
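# Beyond the built-in demo above, the typical pattern is this hedged
# sketch: a worker thread calls widget methods directly, and mtTkinter
# marshals the calls onto the GUI thread through the event queue.
import time as _time

def _mt_worker(label):
    for i in range(5):
        _time.sleep(0.5)
        label.configure(text='tick %d' % i)  # marshalled to the GUI thread

def _mt_demo():
    root = Tk()
    label = Label(root, text='waiting...')
    label.pack()
    threading.Thread(target=_mt_worker, args=(label,), daemon=True).start()
    root.mainloop()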
class CheckError(AssertionError): pass
import hashlib
import os
import pickle
import random
import yaml
import torch
import logging
import git
import numpy as np


def set_seed(seed, cudnn=True):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    if (seed is not None) and cudnn:
        torch.backends.cudnn.deterministic = True


def save_obj(obj, name):
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)


def load_obj(name):
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)


def get_path_from_args(args):
    """ Returns a unique hash for an argparse object. """
    args_str = str(args)
    path = hashlib.md5(args_str.encode()).hexdigest()
    return path


def get_base_path():
    p = os.path.dirname(os.path.realpath(__file__))
    if os.path.exists(p):
        return p
    raise RuntimeError("I don't know where I am; please specify a path for saving results.")


def load_config(args, path="."):
    """Loads and replaces default parameters with experiment specific parameters

    Args:
        args (argparse): Python argparse that contains arguments
        path (str): Root directory to load config from. Default: "."
    """
    with open(path + "/config/" + args.config, 'r') as f:
        config = yaml.safe_load(f)

    for key, value in config.items():
        args.__dict__[key] = value


def set_logger(logger_name, log_file, level=logging.INFO):
    """Sets up python logging

    Args:
        logger_name (str): Specifies logging name
        log_file (str): Specifies path to save logging
        level (int): Logs messages at or above the specified level. Default: logging.INFO
    """
    log = logging.getLogger(logger_name)
    if not log.handlers:
        formatter = logging.Formatter('%(asctime)s : %(message)s')
        fileHandler = logging.FileHandler(log_file, mode='a')
        fileHandler.setFormatter(formatter)
        streamHandler = logging.StreamHandler()
        streamHandler.setFormatter(formatter)

        log.setLevel(level)
        log.addHandler(fileHandler)
        log.addHandler(streamHandler)


def set_log(args, path="../"):
    """Sets up logging, records all arguments, and logs the Git branch and commit

    Args:
        args (argparse): Python argparse that contains arguments
        path (str): Root directory of the Git repository. Default: "../"

    Examples:
        log[args.log_name].info("Hello {}".format("world"))

    Returns:
        log (dict): Dictionary that contains python logging
    """
    log = {}
    set_logger(
        logger_name=args.log_name,
        log_file=r'{0}{1}'.format("./log/", args.log_name))
    log[args.log_name] = logging.getLogger(args.log_name)

    for arg, value in sorted(vars(args).items()):
        log[args.log_name].info("%s: %r", arg, value)

    repo = git.Repo(path)
    log[args.log_name].info("Branch: {}".format(repo.active_branch))
    log[args.log_name].info("Commit: {}".format(repo.head.commit))
    return log
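# A quick, hedged check of what set_seed above buys: two identically-seeded
# draws from torch's RNG agree exactly (assumes torch is installed).
if __name__ == '__main__':
    set_seed(42)
    first = torch.rand(3)
    set_seed(42)
    second = torch.rand(3)
    assert torch.equal(first, second)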
import os
import shutil
import tempfile
from itertools import chain
from typing import Text, List, Dict
import uuid

import requests
from fastapi import File
from fastapi.background import BackgroundTasks
from fastapi.security import OAuth2PasswordBearer
from loguru import logger
from mongoengine.errors import ValidationError
from rasa.shared.constants import DEFAULT_DATA_PATH
from rasa.shared.nlu.constants import TEXT
from rasa.shared.nlu.training_data import entities_parser
from rasa.shared.nlu.training_data.formats.markdown import MarkdownReader

from .constant import ALLOWED_NLU_FORMATS, ALLOWED_STORIES_FORMATS, \
    ALLOWED_DOMAIN_FORMATS, ALLOWED_CONFIG_FORMATS, EVENT_STATUS, ALLOWED_RULES_FORMATS, ALLOWED_HTTP_ACTIONS_FORMATS, \
    REQUIREMENTS
from .constant import RESPONSE
from .training_data_generation_processor import TrainingDataGenerationProcessor
from ...exceptions import AppException
from ...shared.models import StoryStepType
from ...shared.utils import Utility


class DataUtility:
    """Class contains logic for various utilities"""
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
    oauth2_scheme_non_strict = OAuth2PasswordBearer(tokenUrl="/api/auth/login", auto_error=False)
    markdown_reader = MarkdownReader()

    @staticmethod
    def prepare_nlu_text(example: Text, entities: List[Dict]):
        """
        combines plain text and entities into training example format

        :param example: training example plain text
        :param entities: list of entities
        :return: training example combined with entities
        """
        if not Utility.check_empty_string(example):
            if entities:
                from rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter
                example = RasaYAMLWriter.generate_message({'text': example, "entities": entities})
        return example

    @staticmethod
    async def save_uploaded_data(bot: Text, training_files: List[File]):
        if not training_files:
            raise AppException("No files received!")

        if training_files[0].filename.endswith('.zip'):
            bot_data_home_dir = await DataUtility.save_training_files_as_zip(bot, training_files[0])
        else:
            bot_data_home_dir = os.path.join('training_data', bot, str(uuid.uuid4()))
            data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH)
            Utility.make_dirs(data_path)

            for file in training_files:
                if file.filename in ALLOWED_NLU_FORMATS.union(ALLOWED_STORIES_FORMATS).union(ALLOWED_RULES_FORMATS):
                    path = os.path.join(data_path, file.filename)
                    Utility.write_to_file(path, await file.read())
                elif file.filename in ALLOWED_CONFIG_FORMATS.union(ALLOWED_DOMAIN_FORMATS).union(
                        ALLOWED_HTTP_ACTIONS_FORMATS):
                    path = os.path.join(bot_data_home_dir, file.filename)
                    Utility.write_to_file(path, await file.read())

        return bot_data_home_dir

    @staticmethod
    async def save_training_files_as_zip(bot: Text, training_file: File):
        tmp_dir = tempfile.mkdtemp()
        try:
            zipped_file = os.path.join(tmp_dir, training_file.filename)
            Utility.write_to_file(zipped_file, await training_file.read())
            unzip_path = os.path.join('training_data', bot, str(uuid.uuid4()))
            shutil.unpack_archive(zipped_file, unzip_path, 'zip')
            return unzip_path
        except Exception as e:
            logger.error(e)
            raise AppException("Invalid zip")
        finally:
            Utility.delete_directory(tmp_dir)

    @staticmethod
    def validate_and_get_requirements(bot_data_home_dir: Text, delete_dir_on_exception: bool = False):
        """
        Checks whether at least one of the required files is present and finds
        other files required for validation during import.

        @param bot_data_home_dir: path where data exists
        @param delete_dir_on_exception: whether directory needs to be deleted in case of exception.
""" requirements = set() data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH) if not os.path.exists(bot_data_home_dir): raise AppException("Bot data home directory not found") files_received = set(os.listdir(bot_data_home_dir)) if os.path.exists(data_path): files_received = files_received.union(os.listdir(data_path)) if ALLOWED_NLU_FORMATS.intersection(files_received).__len__() < 1: requirements.add('nlu') if ALLOWED_STORIES_FORMATS.intersection(files_received).__len__() < 1: requirements.add('stories') if ALLOWED_DOMAIN_FORMATS.intersection(files_received).__len__() < 1: requirements.add('domain') if ALLOWED_CONFIG_FORMATS.intersection(files_received).__len__() < 1: requirements.add('config') if ALLOWED_RULES_FORMATS.intersection(files_received).__len__() < 1: requirements.add('rules') if ALLOWED_HTTP_ACTIONS_FORMATS.intersection(files_received).__len__() < 1: requirements.add('http_actions') if requirements == REQUIREMENTS: if delete_dir_on_exception: Utility.delete_directory(bot_data_home_dir) raise AppException('Invalid files received') return requirements @staticmethod async def save_training_files(nlu: File, domain: File, config: File, stories: File, rules: File = None, http_action: File = None): """ convert mongo data to individual files :param nlu: nlu data :param domain: domain data :param stories: stories data :param config: config data :param rules: rules data :param http_action: http actions data :return: files path """ training_file_loc = {} tmp_dir = tempfile.mkdtemp() data_path = os.path.join(tmp_dir, DEFAULT_DATA_PATH) os.makedirs(data_path) nlu_path = os.path.join(data_path, nlu.filename) domain_path = os.path.join(tmp_dir, domain.filename) stories_path = os.path.join(data_path, stories.filename) config_path = os.path.join(tmp_dir, config.filename) Utility.write_to_file(nlu_path, await nlu.read()) Utility.write_to_file(domain_path, await domain.read()) Utility.write_to_file(stories_path, await stories.read()) Utility.write_to_file(config_path, await config.read()) training_file_loc['rules'] = await DataUtility.write_rule_data(data_path, rules) training_file_loc['http_action'] = await DataUtility.write_http_data(tmp_dir, http_action) training_file_loc['nlu'] = nlu_path training_file_loc['config'] = config_path training_file_loc['stories'] = stories_path training_file_loc['domain'] = domain_path training_file_loc['root'] = tmp_dir return training_file_loc @staticmethod async def write_rule_data(data_path: str, rules: File = None): """ writes the rule data to file and returns the file path :param data_path: path of the data files :param rules: rules data :return: rule file path """ if rules and rules.filename: rules_path = os.path.join(data_path, rules.filename) Utility.write_to_file(rules_path, await rules.read()) return rules_path else: return None @staticmethod async def write_http_data(temp_path: str, http_action: File = None): """ writes the http_actions data to file and returns the file path :param temp_path: path of the temporary directory :param http_action: http_action data :return: http_action file path """ if http_action and http_action.filename: http_path = os.path.join(temp_path, http_action.filename) Utility.write_to_file(http_path, await http_action.read()) return http_path else: return None @staticmethod def extract_text_and_entities(text: Text): """ extract entities and plain text from markdown intent example :param text: markdown intent example :return: plain intent, list of extracted entities """ example = 
entities_parser.parse_training_example(text) return example.get(TEXT), example.get('entities', None) @staticmethod def __extract_response_button(buttons: Dict): """ used to prepare ResponseButton by extracting buttons configuration from bot utterance :param buttons: button configuration in bot response :return: yields ResponseButton """ from .data_objects import ResponseButton for button in buttons: yield ResponseButton._from_son(button) @staticmethod def prepare_response(value: Dict): """ used to prepare bot utterance either Text or Custom for saving in Mongo :param value: utterance value :return: response type, response object """ from .data_objects import ResponseText, ResponseCustom if RESPONSE.Text.value in value: response_text = ResponseText() response_text.text = str(value[RESPONSE.Text.value]).strip() if RESPONSE.IMAGE.value in value: response_text.image = value[RESPONSE.IMAGE.value] if RESPONSE.CHANNEL.value in value: response_text.channel = value["channel"] if RESPONSE.BUTTONS.value in value: response_text.buttons = list( DataUtility.__extract_response_button(value[RESPONSE.BUTTONS.value]) ) data = response_text response_type = "text" elif RESPONSE.CUSTOM.value in value: data = ResponseCustom._from_son( {RESPONSE.CUSTOM.value: value[RESPONSE.CUSTOM.value]} ) response_type = "custom" else: response_type = None data = None return response_type, data @staticmethod def get_rasa_core_policies(): from rasa.core.policies import registry return list(Utility.get_imports(registry.__file__)) @staticmethod def trigger_data_generation_event(bot: str, user: str, token: str): try: event_url = Utility.environment['data_generation']['event_url'] logger.info("Training data generator event started") response = requests.post(event_url, headers={'content-type': 'application/json'}, json={'user': user, 'token': token}) logger.info("Training data generator event completed" + response.content.decode('utf8')) except Exception as e: logger.error(str(e)) TrainingDataGenerationProcessor.set_status(bot=bot, user=user, status=EVENT_STATUS.FAIL.value, exception=str(e)) @staticmethod def get_interpreter(model_path): from rasa.model import get_model, get_model_subdirectories from rasa.core.interpreter import create_interpreter try: with get_model(model_path) as unpacked_model: _, nlu_model = get_model_subdirectories(unpacked_model) _interpreter = create_interpreter( nlu_model ) except Exception: logger.debug(f"Could not load interpreter from '{model_path}'.") _interpreter = None return _interpreter @staticmethod def train_model(background_tasks: BackgroundTasks, bot: Text, user: Text, email: Text, process_type: Text): """ train model common code when uploading files or training a model :param background_tasks: fast api background task :param bot: bot id :param user: user id :param email: user email for generating token for reload :param process_type: either upload or train """ from ...shared.data.model_processor import ModelProcessor from ...shared.auth import Authentication from ...shared.data.constant import MODEL_TRAINING_STATUS from ...train import start_training exception = process_type != 'upload' ModelProcessor.is_training_inprogress(bot, raise_exception=exception) ModelProcessor.is_daily_training_limit_exceeded(bot, raise_exception=exception) ModelProcessor.set_training_status( bot=bot, user=user, status=MODEL_TRAINING_STATUS.INPROGRESS.value, ) token = Authentication.create_access_token(data={"sub": email}, token_expire=180) background_tasks.add_task( start_training, bot, user, token ) @staticmethod def 
validate_flow_events(events, type, name):
        from rasa.shared.core.constants import RULE_SNIPPET_ACTION_NAME

        Utility.validate_document_list(events)
        if type == "STORY" and events[0].type != "user":
            raise ValidationError("First event should be a user event")
        if type == "RULE":
            if events[0].name == RULE_SNIPPET_ACTION_NAME and events[0].type == "action":
                if events[1].type != "user":
                    raise ValidationError('First event should be a user event or conversation_start action')
            else:
                if events[0].type != "user":
                    raise ValidationError('First event should be a user event or conversation_start action')
        if events[len(events) - 1].type == "user":
            raise ValidationError("A user event should be followed by an action")
        intents = 0
        for i, j in enumerate(range(1, len(events))):
            if events[i].type == "user":
                intents = intents + 1
            if events[i].type == "user" and events[j].type == "user":
                raise ValidationError("Found 2 consecutive user events")
        if type == "RULE" and intents > 1:
            raise ValidationError(
                f"""Found rule '{name}' that contains more than one user event.\nPlease use stories for this case""")

    @staticmethod
    def load_fallback_actions(bot: Text):
        from .processor import MongoProcessor

        mongo_processor = MongoProcessor()
        config = mongo_processor.load_config(bot)
        fallback_action = DataUtility.parse_fallback_action(config)
        nlu_fallback_action = MongoProcessor.fetch_nlu_fallback_action(bot)
        return fallback_action, nlu_fallback_action

    @staticmethod
    def parse_fallback_action(config: Dict):
        fallback_action = "action_default_fallback"
        action_fallback = next((comp for comp in config['policies'] if comp["name"] == "RulePolicy"), None)
        if action_fallback:
            fallback_action = action_fallback.get("core_fallback_action_name", fallback_action)
        return fallback_action

    @staticmethod
    def load_default_actions():
        from kairon.importer.validator.file_validator import DEFAULT_ACTIONS

        return list(DEFAULT_ACTIONS - {"action_default_fallback", "action_two_stage_fallback"})

    @staticmethod
    def get_template_type(story: Dict):
        steps = story['steps']
        if len(steps) == 2 and steps[0]['type'] == StoryStepType.intent and steps[1]['type'] == StoryStepType.bot:
            template_type = 'Q&A'
        else:
            template_type = 'CUSTOM'
        return template_type

    @staticmethod
    def augment_sentences(sentences: list, stopwords: list = None, num_variations: int = 5):
        from nlpaug.augmenter.char import KeyboardAug
        from nlpaug.augmenter.word import SynonymAug
        from nlpaug.flow import Sometimes
        from nlpaug.augmenter.word import SpellingAug
        from nlpaug.augmenter.word import AntonymAug

        keyboard_aug = KeyboardAug(aug_char_min=1, aug_char_max=10, aug_char_p=0.3, aug_word_p=0.3, aug_word_min=1,
                                   aug_word_max=10, stopwords=stopwords, include_special_char=False,
                                   include_numeric=False, include_upper_case=True, lang='en', min_char=4)
        synonym_aug = SynonymAug(aug_src='wordnet', aug_min=1, aug_max=4, aug_p=0.3, stopwords=stopwords, lang='eng')
        antonym_aug = AntonymAug(aug_min=1, aug_max=10, aug_p=0.3, stopwords=stopwords, lang='eng')
        spelling_aug = SpellingAug(aug_min=1, aug_max=10, aug_p=0.3, stopwords=stopwords, include_reverse=False)
        aug = Sometimes([keyboard_aug, synonym_aug, spelling_aug, antonym_aug], aug_p=0.25)
        augmented_text = aug.augment(sentences, n=num_variations)
        return set(chain.from_iterable(augmented_text))

    @staticmethod
    def generate_synonym(entity: str, num_variations: int = 3):
        from nltk.corpus import wordnet

        synonyms = []
        syn_sets = wordnet.synsets(entity)
        for syn in syn_sets:
            for word in syn.lemma_names():
                if word != entity:
                    synonyms.append(word)
                    num_variations -= 1
                if num_variations <= 0:
                    return synonyms
return synonyms class ChatHistoryUtils: @staticmethod def unique_user_input(month, current_user_bot): from ...shared.data.processor import MongoProcessor response = Utility.trigger_history_server_request( current_user_bot, f'/api/history/{current_user_bot}/metrics/users/input', {'month': month} ) user_input = response['data'] processor = MongoProcessor() training_examples = processor.get_all_training_examples(bot=current_user_bot) queries_not_present = [query for query in user_input if query['_id'] not in training_examples[0]] return queries_not_present
from django.urls import path from . import views urlpatterns = [ path('restaurant/', views.my_restaurant, name='my_restaurant'), path('restaurant/new/', views.new_restaurant, name='new_restaurant'), path('restaurant/detail/<pk>/', views.restaurant_detail, name='restaurant_detail'), path('<slug>/', views.restaurant_main, name='restaurant_main'), path('<restaurant_slug>/<menu_slug>/', views.restaurant_menu, name='restaurant_menu'), ]
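# Hedged sketch of how the routes above resolve by name, assuming this
# URLConf is mounted at the site root; the slug values are illustrative.
def example_reverse_lookups():
    from django.urls import reverse
    detail = reverse('restaurant_detail', kwargs={'pk': 1})
    # -> '/restaurant/detail/1/'
    menu = reverse('restaurant_menu', kwargs={'restaurant_slug': 'cafe',
                                              'menu_slug': 'lunch'})
    # -> '/cafe/lunch/'
    return detail, menu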
from rest_framework import serializers from apps.ticket.models import Ticket from apps.ticket.ticket_utils import ACCESIBILITY_DICT CONTACT_PHONES = {"PKP Intercity (Grupa PKP)": "703 200 200", "PKP TLK": "703 200 200", "PKP IC": "703 200 200", "PKP Szybka Kolej Miejska w Trójmieście (Grupa PKP)": "(58) 721 21 70", "Przewozy Regionalne": "703 20 20 20", "Koleje Mazowieckie": "(22) 364 44 44", "Koleje Dolnośląskie": "(76) 753 52 05", "Koleje Wielkopolskie": "(61) 279 27 78", "Koleje Śląskie": "(32) 428 88 88", "Arriva": "801 081 515", "Szybka Kolej Miejska": "801 044 484", "Warszawska Kolej Dojazdowa": "(22) 758 00 12", "Łódzka Kolej Aglomeracyjna": "(42) 205 55 15", "Koleje Małopolskie": "703 20 20 25"} class ConnectionInfoSerializer(serializers.Serializer): status = serializers.CharField() late = serializers.CharField() late_reason = serializers.CharField() class TicketFileUploadSerializer(serializers.Serializer): ticket = serializers.FileField() class TicketModelSerializer(serializers.ModelSerializer): carrier_contact_phone = serializers.SerializerMethodField() start_place_accessibility = serializers.SerializerMethodField() finish_place_accessibility = serializers.SerializerMethodField() connection_current_info = ConnectionInfoSerializer() car_info = serializers.JSONField() def get_carrier_contact_phone(self, obj: Ticket): carrier = obj.carrier return CONTACT_PHONES[carrier] def _get_place_accessibility(self, name): acc = ACCESIBILITY_DICT[name] return acc def get_start_place_accessibility(self, obj: Ticket): return self._get_place_accessibility(obj.start_place) def get_finish_place_accessibility(self, obj: Ticket): return self._get_place_accessibility(obj.finish_place) class Meta: model = Ticket fields = '__all__' class TicketListSerializer(serializers.ListSerializer): child = TicketModelSerializer()
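# Hedged usage sketch (assumes a configured Django environment and at least
# one saved Ticket): serializing exposes the computed phone and
# accessibility fields alongside the model fields.
def example_serialize_ticket():
    ticket = Ticket.objects.first()
    data = TicketModelSerializer(ticket).data
    print(data['carrier_contact_phone'])      # looked up in CONTACT_PHONES
    print(data['start_place_accessibility'])  # from ACCESIBILITY_DICT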
#!/usr/bin/env python from .models import Tweet, VerificationTask, FirstresponderTask
import sys
import json
sys.dont_write_bytecode = True
import atexit

from models.Connection import Connection
from models.Log import LogFile
from models.Database import DataBase
from controllers.utils import do_at_exit


def main():
    try:
        cfg_file = open("config.json", "r")
        config = json.loads(cfg_file.read())
        ip = config['server_ip']
        port = config['server_port']
        log_dir_name = config['log_directory_name']
        log_file_name = config['log_file_name']
        db_user = config['db_user']
        db_passwd = config['db_user_passwd']
        db_name = config['db_name']
    except FileNotFoundError:
        cfg_file = open("config.json", "w")
        demo = {"server_ip": "0.0.0.0",
                "server_port": 65432,
                "log_directory_name": "Logs",
                "log_file_name": "log.txt",
                "db_user": "",
                "db_user_passwd": "",
                "db_name": ""}
        cfg_file.write(json.dumps(demo))
        print("A default config has been written to config.json")
        input("")
        sys.exit()
    except (KeyError, json.decoder.JSONDecodeError):
        print("Malformed config!\nDelete config.json and run the app again to generate a default config.json :)")
        input("")
        sys.exit()

    log = LogFile(log_file_name, log_dir_name)
    atexit.register(do_at_exit, log)
    db = DataBase(db_user, db_passwd, db_name, log)
    try:
        server = Connection(log, db, (ip, port))
        server.run()
    except KeyboardInterrupt:
        log.write("Server Closed", "warning")


if __name__ == "__main__":
    main()
import time import unittest from pysmartcache.clients import CacheClient, DjangoClient, MemcachedClient, RedisClient from pysmartcache.constants import CACHE_MISS from pysmartcache.exceptions import ImproperlyConfigured from tests.base import override_env class CacheClientTestCase(unittest.TestCase): def test_instantiate(self): with override_env(PYSMARTCACHE_CLIENT='REDIS', PYSMARTCACHE_HOST='127.0.0.1:6379'): client = CacheClient.instantiate() self.assertTrue(isinstance(client, RedisClient)) with override_env(PYSMARTCACHE_CLIENT='MEMCACHED', PYSMARTCACHE_HOST='127.0.0.1:11211'): client = CacheClient.instantiate() self.assertTrue(isinstance(client, MemcachedClient)) with override_env(PYSMARTCACHE_CLIENT='DJANGO'): client = CacheClient.instantiate() self.assertTrue(isinstance(client, DjangoClient)) with override_env(PYSMARTCACHE_CLIENT='django'): client = CacheClient.instantiate() self.assertTrue(isinstance(client, DjangoClient)) with override_env(PYSMARTCACHE_CLIENT='dJaNgO'): client = CacheClient.instantiate() self.assertTrue(isinstance(client, DjangoClient)) with override_env(PYSMARTCACHE_CLIENT=None): # This is mandatory. self.assertRaises(ImproperlyConfigured, CacheClient.instantiate) with override_env(PYSMARTCACHE_CLIENT='HAMSTER', PYSMARTCACHE_HOST='1.1.1.1'): # Invalid client. self.assertRaises(ImproperlyConfigured, CacheClient.instantiate) with override_env(PYSMARTCACHE_CLIENT='REDIS'): # Host is mandatory for REDIS. self.assertRaises(ImproperlyConfigured, CacheClient.instantiate) with override_env(PYSMARTCACHE_CLIENT='MEMCACHED'): # Host is mandatory for MEMCACHED. self.assertRaises(ImproperlyConfigured, CacheClient.instantiate) class ClientBaseTestCase(object): def tearDown(self): with override_env(PYSMARTCACHE_CLIENT=self.client_name, PYSMARTCACHE_HOST=self.client_host): super(ClientBaseTestCase, self).tearDown() CacheClient.instantiate().purge() def test_common(self): with override_env(PYSMARTCACHE_CLIENT=self.client_name, PYSMARTCACHE_HOST=self.client_host): client = CacheClient.instantiate() self.assertEqual(client.get('answer'), CACHE_MISS) client.set('answer', '42', 1) self.assertEqual(client.get('answer'), '42') client.set('impulse', '101', 3) self.assertEqual(client.get('impulse'), '101') self.assertEqual(client.get('answer'), '42') time.sleep(1) self.assertEqual(client.get('impulse'), '101') self.assertEqual(client.get('answer'), CACHE_MISS) # Expired client.purge() self.assertEqual(client.get('impulse'), CACHE_MISS) self.assertEqual(client.get('answer'), CACHE_MISS) class RedisClientTestCase(ClientBaseTestCase, unittest.TestCase): client_name = 'REDIS' client_host = '127.0.0.1:6379' class MemcachedClientTestCase(ClientBaseTestCase, unittest.TestCase): client_name = 'MEMCACHED' client_host = '127.0.0.1:11211'
# Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Print out second element from areas print(areas[1]) # Print out last element from areas print(areas[-1]) # Print out the area of the living room print(areas[5]) # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Sum of kitchen and bedroom area: eat_sleep_area eat_sleep_area = areas[3] + areas[-3] # Print the variable eat_sleep_area print(eat_sleep_area) # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Use slicing to create downstairs downstairs = areas[0:6] # Use slicing to create upstairs upstairs = areas[-4:] # Print out downstairs and upstairs print(downstairs) print(upstairs) # Create the areas list areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50] # Alternative slicing to create downstairs downstairs = areas[:6] # Alternative slicing to create upstairs upstairs = areas[-4:]
import os
import re
import sys
import json
import time
import argparse
import requests
import logging
from typing import List

import demisto_sdk.commands.common.tools as tools
from Tests.scripts.utils.log_util import install_logging

# disable insecure warnings
requests.packages.urllib3.disable_warnings()

PRIVATE_BUILD_INFRA_SCRIPTS = ['Tests/scripts/validate_premium_packs.sh',
                               'Tests/scripts/validate_premium_packs.py',
                               'Tests/scripts/validate_index.py']

PRIVATE_BUILD_INFRA_FOLDERS = ['Tests/private_build', 'Tests/Marketplace']

TRIGGER_BUILD_URL = 'https://api.github.com/repos/demisto/content-private/dispatches'
GET_DISPATCH_WORKFLOWS_URL = 'https://api.github.com/repos/demisto/content-private/actions/runs'
WORKFLOW_HTML_URL = 'https://github.com/demisto/content-private/actions/runs'
GET_WORKFLOW_URL = 'https://api.github.com/repos/demisto/content-private/actions/runs/{:s}/jobs'

PRIVATE_REPO_WORKFLOW_ID_FILE = 'PRIVATE_REPO_WORKFLOW_ID.txt'

GET_WORKFLOWS_MAX_RETRIES = 3

GET_WORKFLOWS_TIMEOUT_THRESHOLD = 3600  # one hour


def get_modified_files(branch_name: str = None) -> List[str]:
    """
    Gets the modified files between the master branch and the given branch_name.
    If branch_name is empty, the method compares master with the commit sha1 from
    the environment variable CIRCLE_SHA1.
    Args:
        branch_name: The branch name to compare with master.

    Returns:
        A list of modified files.
    """
    if not branch_name:
        branch_name = os.environ.get('CIRCLE_SHA1')

    files = []
    files_string = tools.run_command(f'git diff --name-only origin/master...{branch_name}')
    for line in files_string.split("\n"):
        if line:
            files.append(line)
    return files


def branch_has_private_build_infra_change(branch_name: str = None) -> bool:
    """
    Checks whether the modified files in the branch are private build infrastructure files.
    Args:
        branch_name: The branch name to compare with master.

    Returns:
        True if private build infrastructure files were modified, False otherwise.
    """
    modified_files = get_modified_files(branch_name)
    for infra_file in modified_files:
        if infra_file in PRIVATE_BUILD_INFRA_SCRIPTS:
            return True
        path = os.path.dirname(infra_file)
        for infra_code_dir_path in PRIVATE_BUILD_INFRA_FOLDERS:
            if path.startswith(infra_code_dir_path):
                return True
    return False


def get_dispatch_workflows_ids(github_token: str, branch: str) -> List[int]:
    """
    Gets the private repo dispatch workflows on the given branch.
    Args:
        github_token: Github bearer token.
        branch: The branch to get the workflows from.

    Returns:
        A list of workflow ids.
""" res = requests.get(GET_DISPATCH_WORKFLOWS_URL, headers={'Authorization': f'Bearer {github_token}'}, params={'branch': branch, 'event': 'repository_dispatch'}, verify=False) if res.status_code != 200: logging.error(f'Failed to get private repo workflows, request to ' f'{GET_DISPATCH_WORKFLOWS_URL} failed with error: {str(res.content)}') sys.exit(1) try: workflows = json.loads(res.content) except ValueError: logging.error('Enable to parse private repo workflows response') sys.exit(1) workflows = workflows.get('workflow_runs', []) return [workflow.get('id') for workflow in workflows] def main(): install_logging("TriggerPrivateBuild.log") # get github token parameter arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--github-token', help='Github token') args = arg_parser.parse_args() github_token = args.github_token # get branch name branches = tools.run_command("git branch") branch_name_regex = re.search(r"\* (.*)", branches) branch_name = branch_name_regex.group(1) if branch_has_private_build_infra_change(branch_name): # get the workflows ids before triggering the build pre_existing_workflow_ids = get_dispatch_workflows_ids(github_token, 'master') # trigger private build payload = {'event_type': f'Trigger private build from content/{branch_name}', 'client_payload': {'commit_sha1': branch_name, 'is_infra_build': 'True'}} res = requests.post(TRIGGER_BUILD_URL, headers={'Accept': 'application/vnd.github.everest-preview+json', 'Authorization': f'Bearer {github_token}'}, data=json.dumps(payload), verify=False) if res.status_code != 204: logging.critical(f'Failed to trigger private repo build, request to ' f'{TRIGGER_BUILD_URL} failed with error: {str(res.content)}') sys.exit(1) workflow_ids_diff = [] for i in range(GET_WORKFLOWS_MAX_RETRIES): # wait 5 seconds and get the workflow ids again time.sleep(5) workflow_ids_after_dispatch = get_dispatch_workflows_ids(github_token, 'master') # compare with the first workflows list to get the current id workflow_ids_diff = [x for x in workflow_ids_after_dispatch if x not in pre_existing_workflow_ids] if workflow_ids_diff: break if len(workflow_ids_diff) == 1: workflow_id = workflow_ids_diff[0] logging.success(f'Private repo build triggered successfully, workflow id: {workflow_id}\n URL:' f' {WORKFLOW_HTML_URL}/{workflow_id}') # write the workflow id to text file to use it in get_private_build_status.py with open(PRIVATE_REPO_WORKFLOW_ID_FILE, "w") as f: f.write(str(workflow_id)) sys.exit(0) else: logging.critical('Could not found the private repo workflow') sys.exit(1) else: logging.info('Build private repo skipped') if __name__ == "__main__": main()
import spacy
import scispacy  # noqa: F401  (registers the scientific spaCy models)
from allennlp.predictors.predictor import Predictor


def identify_cancer(title, abstract):
    '''Returns the answer to a query string after running through 2 APIs (Scispacy and AllenNLP)'''
    # run title through spacy and retrieve UMLS entities
    nlp = spacy.load("en_core_sci_sm")
    predictor = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/naqanet-2019.04.29-fixed-weight-names-allennlpv1.0.tar.gz")

    def find_cancer_type(abstract, query_str):
        study_type = predictor.predict(passage=abstract, question=query_str)
        return study_type['answer']['value']

    # convert UMLS (biomedical vocab) entities into elements of the AllenNLP query string
    doc = nlp(title)
    query_list = [str(w) for w in doc.ents]
    query_str = "Of " + " ".join(query_list) + " , which is a cancer?"

    # return best answer for which of those terms reveals the cancer type
    return find_cancer_type(abstract, query_str)
import os

EXAMPLES_DIR = "./examples/"
EXAMPLE_NAME_TEMPLATE = "example-policy{}.yml"
VALID_BEGINNINGS = ["scenarios", "config"]

cwd = os.path.dirname(os.path.realpath(__file__)) + "/../docs/"

files = []
for (dirpath, dirnames, filenames) in os.walk(cwd):
    for filename in filenames:
        if filename.endswith(".md"):
            # print(dirpath + "/" + filename)
            files.append(dirpath + "/" + filename)

counter = 0
for filename in files:
    # print("Reading " + filename)
    with open(filename) as f:
        content = ""
        inside = False
        for line in f.readlines():
            if line.startswith("```yaml"):
                # print("Starting " + line)
                inside = True
            elif line.startswith("```"):
                # Closing fence: write the block out only if it looks like a policy
                if inside:
                    is_valid = any(content.startswith(beg) for beg in VALID_BEGINNINGS)
                    if is_valid:
                        # print("Finishing " + line)
                        output = EXAMPLES_DIR + EXAMPLE_NAME_TEMPLATE.format(counter)
                        print(output)
                        with open(output, "w") as policy_file:
                            policy_file.write(content)
                        counter += 1
                    # Reset state even for skipped blocks so later fences parse cleanly
                    inside = False
                    content = ""
            elif inside:
                content += line
# file for miscellaneous tests
"""
import WordProcessing as wp
import meteo

# wp.CentreTxt(wp.gotheng, "test", wp.st7789.GREEN, wp.st7789.BLACK)
print("-start test-")
wp.tft.fill(wp.st7789.BLACK)
txt = "test"
wp.tft.draw(meteo, txt, 0, 16, 0x00d3, 0x7030)
# wp.tft.rect(0, 0, 240, 135, wp.st7789.BLUE)
# wp.draw_circle(120, 68, 50, wp.st7789.RED)
# wp.draw_ellipse(120, 68, 50, 20, wp.st7789.BLUE)
# wp.fill_circle(120, 68, 20, wp.st7789.RED)
# wp.fill_ellipse(120, 68, 50, 20, wp.st7789.BLUE)
# wp.draw_polygon(5, 120, 68, 50, wp.st7789.RED, 5)
# wp.fill_polygon(6, 120, 68, 20, wp.st7789.RED, 0)
print("-STOP-")"""

import time
from machine import Pin, SPI
import st7789

import inconsolata_16 as font_16
import inconsolata_32 as font_32
import inconsolata_64 as font_64


def main():
    fast = False

    def display_font(font):
        tft.fill(st7789.BLUE)
        column = 0
        row = 0

        for char in font.MAP:
            tft.bitmap(font, column, row, font.MAP.index(char))
            column += font.WIDTH
            if column >= tft.width() - font.WIDTH:
                row += font.HEIGHT
                column = 0

                if row > tft.height() - font.HEIGHT:
                    row = 0

            if not fast:
                time.sleep(0.05)

    tft = st7789.ST7789(
        SPI(2, baudrate=30000000, sck=Pin(18), mosi=Pin(19)),
        135,
        240,
        reset=Pin(23, Pin.OUT),
        cs=Pin(5, Pin.OUT),
        dc=Pin(16, Pin.OUT),
        backlight=Pin(4, Pin.OUT),
        rotation=3)

    tft.init()

    while True:
        for font in [font_16, font_32, font_64]:
            display_font(font)
        fast = not fast


main()
# Generated by Django 2.2.6 on 2019-12-25 07:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('study', '0003_auto_20191225_1429'), ] operations = [ migrations.RemoveField( model_name='blogcontent', name='content', ), migrations.AddField( model_name='blogcontent', name='detail', field=models.TextField(default=0), preserve_default=False, ), ]
'''
8/01 22:35 - 23:30

input: []  output: [[..],[..]]
assumption: all int, may be duplicate

DFS + de-dup

idx   0  1  2
      1, 2, 2

            /      \
0          1        []
          / \       / \
1      1,2   1     2    []
       / |   / |   / |   / \
2  1,2,2 1,2 1,2 1  2,2 2  2  []
3

test: nums = [1, 2, 2], len = 3
res = [[1, 2, 2], [1, 2], [1], [2, 2], [2], []]
path = []
idx =
'''


class Solution:
    def subsets_with_dup(self, nums):
        if not nums or len(nums) == 0:
            return []
        res = []
        self.dfs_with_index(sorted(nums), [], 0, res)
        return res

    # [1,2,2] [] 0 [] {}
    # Index-based DFS: at each index, either take the element or skip it
    # together with all of its duplicates.
    def dfs_with_index(self, nums, path, idx, res):
        if idx == len(nums):
            res.append(path[:])
            return
        path.append(nums[idx])
        self.dfs_with_index(nums, path, idx + 1, res)
        path.pop()
        while idx < len(nums) - 1 and nums[idx + 1] == nums[idx]:
            idx += 1
        self.dfs_with_index(nums, path, idx + 1, res)

    def subsetsWithDup(self, nums):
        ret = []
        self.dfs(sorted(nums), [], ret)
        return ret

    def dfs(self, nums, path, ret):
        ret.append(path)
        for i in range(len(nums)):
            if i > 0 and nums[i] == nums[i-1]:
                continue
            self.dfs(nums[i+1:], path+[nums[i]], ret)


if __name__ == '__main__':
    sol = Solution()
    print(sol.subsets_with_dup([1, 2, 2]))
    print(sol.subsetsWithDup([1, 2, 2]))
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT """This example zeros the joystick, and prints when the joystick moves or the buttons are pressed.""" import time from adafruit_featherwing import joy_featherwing wing = joy_featherwing.JoyFeatherWing() last_x = 0 last_y = 0 while True: x, y = wing.joystick if (abs(x - last_x) > 3) or (abs(y - last_y) > 3): last_x = x last_y = y print(x, y) if wing.button_a: print("Button A!") if wing.button_b: print("Button B!") if wing.button_x: print("Button X!") if wing.button_y: print("Button Y!") if wing.button_select: print("Button SELECT!") time.sleep(0.01)
""" Commands for the 'Character' app that handles the roster, stories, the timeline, etc. """ from datetime import datetime from django.db.models import Q from evennia.utils.evtable import EvTable from server.utils.arx_utils import inform_staff, check_break, list_to_string from commands.base import ArxCommand, ArxPlayerCommand from commands.mixins import FormCommandMixin from server.utils.exceptions import CommandError from server.utils.prettytable import PrettyTable from web.character.models import (Investigation, Clue, InvestigationAssistant, ClueDiscovery, Theory, RevelationDiscovery, Revelation, get_random_clue, SearchTag) from web.character.forms import ClueCreateForm, RevelationCreateForm from world.dominion.models import Agent, Plot from world.stats_and_skills import VALID_STATS, VALID_SKILLS class InvestigationFormCommand(ArxCommand): """ ABC for creating commands based on investigations that process a form. """ form_verb = "Creating" form_switches = ("topic", "target", "tag", "tags", "story", "stat", "skill", "cancel", "finish") ap_cost = 10 new_clue_cost = 100 def check_ap_cost(self, cost=None): if not cost: cost = self.ap_cost if cost < 0: cost = 0 if self.caller.player_ob.pay_action_points(cost): return True else: self.msg("You cannot afford to do that action.") return False @property def form_attr(self): return "investigation_form" @property def investigation_form(self): return getattr(self.caller.ndb, self.form_attr) @investigation_form.setter def investigation_form(self, val): setattr(self.caller.ndb, self.form_attr, val) @investigation_form.deleter def investigation_form(self): self.caller.nattributes.remove(self.form_attr) @property def related_manager(self): return self.caller.roster.investigations def disp_investigation_form(self): form = self.investigation_form if not form: return story, stat, skill = form[1], form[2], form[3] msg = "|w%s an investigation:|n %s" % (self.form_verb, self.topic_string(color=True)) msg += self.display_target_string() msg += "\n%s" % (story or "Story unfinished.") msg += "\n|wStat:|n %s - |wSkill:|n %s" % (stat or "???", skill or "???") self.msg(msg) def display_target_string(self): return "\n|w%s:|n %s" % (self.target_type.capitalize(), self.investigation_form[0]) def topic_string(self, color=False): """Joins tag-requirements and tag-omissions into a string""" def colorize(val, col="|r"): col = (col + "-") if col == "|r" else col val = ("%s%s|n" % (col, val)) if color else str(val) return val source_clue = self.investigation_form[6] if source_clue: return str(source_clue) tags_list = self.investigation_form[5] if not tags_list: return "" topic = "; ".join(colorize(ob, col="|235") for ob in tags_list[0]) if tags_list[1]: topic += "; " topic += "; ".join(colorize(ob) for ob in tags_list[1]) return topic @property def target_type(self): return "topic" @property def finished_form(self): """Property that validates the form that has been created.""" try: form = self.investigation_form topic, story, stat, skill = form[0], form[1], form[2], form[3] if not topic: self.msg("You must have %s defined." 
% self.target_type.lower()) return if not story: self.msg("You must have a story defined.") return return topic, story, stat, skill except (TypeError, ValueError, IndexError, AttributeError): self.msg("Your investigation form is not yet filled out.") return False @property def start_cost(self): return 0 def pay_costs(self): dompc = self.caller.player_ob.Dominion amt = dompc.assets.social amt -= self.start_cost if amt < 0: self.msg("It costs %s social resources to start a new investigation." % self.start_cost) return False if self.need_new_clue_written and not self.offer_placeholder_clue(): return False self.msg("You spend %s social resources to start a new investigation." % self.start_cost) dompc.assets.social = amt dompc.assets.save() return True def refuse_new_clue(self, reason): msg = reason + " Try different tags or abort." self.msg(msg) def offer_placeholder_clue(self): """ Allows investigator to request a newly written clue """ ap = self.new_clue_cost topic = self.topic_string(color=True) attr = "new_clue_write" prompt = "An opportunity has arisen to pursue knowledge previously unseen by mortal eyes. " prompt += "It will require a great deal of energy (|c%s|n action points) to investigate. " % ap prompt += "Your tag requirements: %s\n" % topic prompt += "|yRepeat the command to confirm and continue.|n" if not self.caller.confirmation(attr, topic, prompt): return False if not self.caller.player.pay_action_points(ap): # TODO: check command name self.refuse_new_clue("You're too busy for such an investigation. (low AP)") return False return True def mark_active(self, created_object): """ Finishes setting up the created object with any fields that need to be filled out, and informs the caller of what was done, as well as announces to staff. Saves the created object. """ pass def create_obj_from_form(self, form): """ Create a new object from our related manager with the form we were given from finished form, with appropriate kwargs """ kwargs = {self.target_type: form[0], "actions": form[1], "stat_used": form[2], "skill_used": form[3]} return self.related_manager.create(**kwargs) def do_finish(self): """ the finished_form property checks if all the fields are valid. Further checks on whether the fields can be used are done by pay_costs. The object to be created is then created using our related_manager property, and the target is populated with add_target_to_obj. 
It's then setup with mark_active """ form = self.finished_form if not form: return if not self.check_enough_time_left(): return if not self.pay_costs(): return if self.check_too_busy_to_finish(): return inv_ob = self.create_obj_from_form(form) if self.need_new_clue_written: form = self.investigation_form source_clue = form[6] clue_name = "PLACEHOLDER for Investigation #%s" % inv_ob.id if source_clue: gm_notes = "Trying to find things related to Clue #%s: %s" % (source_clue.id, source_clue) search_tags = list(source_clue.search_tags.all()) else: search_tags, omit_tags = form[5] gm_notes = "Added tags: %s\n" % list_to_string(search_tags) gm_notes += "Exclude tags: %s" % list_to_string([("-%s" % ob) for ob in omit_tags]) clue = Clue.objects.create(name=clue_name, gm_notes=gm_notes, allow_investigation=True, rating=30) for tag in search_tags: clue.search_tags.add(tag) inv_ob.clue_target = clue inv_ob.save() self.mark_active(inv_ob) del self.investigation_form def check_too_busy_to_finish(self): """Checks whether we're too busy to finish the form""" return @property def initial_form_values(self): return ['', '', '', '', '', [], None] # noinspection PyAttributeOutsideInit def create_form(self): """ Initially populates the form we use. Other switches will populate the fields, which will be used in do_finish() """ self.investigation_form = self.initial_form_values self.disp_investigation_form() def get_target(self): """Sets the target of the object we'll create.""" self.disp_investigation_form() def check_skill(self): if self.args.lower() not in self.caller.db.skills: self.msg("You have no skill by the name of %s." % self.args) return return True @property def need_new_clue_written(self): return def func(self): """ Base version of the command that can be inherited. It allows for creation of the form with the 'new' switch, is populated with 'target', 'story', 'stat', and 'skill', aborted with 'cancel', and finished with 'finish'. """ investigation = self.investigation_form if "new" in self.switches: self.create_form() return True if self.check_switches(self.form_switches): if not investigation: self.msg("You need to create a form first with /new.") return True if self.check_switches(("target", "topic", "tag", "tags")): self.get_target() return True if "story" in self.switches: investigation[1] = self.args self.disp_investigation_form() return True if "stat" in self.switches: if not self.caller.attributes.get(self.args.lower()): self.msg("No stat by the name of %s." 
% self.args) return investigation[2] = self.args self.disp_investigation_form() return True if "skill" in self.switches: if not self.check_skill(): return investigation[3] = self.args self.disp_investigation_form() return True if "cancel" in self.switches: del self.investigation_form self.msg("Investigation creation cancelled.") return True if "finish" in self.switches: self.do_finish() return True def check_enough_time_left(self): """Returns True if they have enough time left to create/modify an investigation, False otherwise.""" from evennia.scripts.models import ScriptDB from datetime import timedelta script = ScriptDB.objects.get(db_key="Weekly Update") day = timedelta(hours=24, minutes=5) if script.time_remaining < day: self.msg("It is too close to the end of the week to do that.") return False return True class CmdAssistInvestigation(InvestigationFormCommand): """ @helpinvestigate Usage: @helpinvestigate @helpinvestigate/history @helpinvestigate/new @helpinvestigate/retainer <retainer ID> @helpinvestigate/target <investigation ID #> @helpinvestigate/story <text of how you/your retainer help> @helpinvestigate/stat <stat to use for the check> @helpinvestigate/skill <additional skill besides investigation> @helpinvestigate/cancel @helpinvestigate/finish @helpinvestigate/stop @helpinvestigate/resume <id #> @helpinvestigate/changestory <id #>=<new story> @helpinvestigate/changestat <id #>=<new stat> @helpinvestigate/changeskill <id #>=<new skill> @helpinvestigate/actionpoints <id #>=<AP amount> @helpinvestigate/silver <id #>=<additional silver to spend> @helpinvestigate/resource <id #>=<resource type>,<amount> @helpinvestigate/retainer/stop <retainer ID> @helpinvestigate/retainer/resume <id #>=<retainer ID> @helpinvestigate/retainer/changestory <retainer ID>/<id #>=<story> @helpinvestigate/retainer/changestat <retainer ID>/<id #>=<stat> @helpinvestigate/retainer/changeskill <retainer ID>/<id #>=<skill> @helpinvestigate/retainer/silver, or /resource, etc., as above Helps with an investigation, or orders a retainer to help with the investigation. You may only help with one investigation at a time, and only if you are not actively investigating something yourself. You may stop helping an investigation with /stop, and resume it with /resume. To set a retainer to help the investigation, use the /retainer switch and supply their number. Entering an invalid retainer ID will switch back to you as being the investigation's helper. 
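
# --- Illustrative sketch (not part of the game code) -------------------------
# Subclasses customize the ABC above mainly through a handful of hooks. A
# hypothetical minimal subclass might look like the commented example below;
# "CmdExampleInvestigation" and its key are invented for illustration only.
#
# class CmdExampleInvestigation(InvestigationFormCommand):
#     key = "@exampleinvestigate"
#     form_verb = "Sketching"          # shown by disp_investigation_form
#
#     @property
#     def target_type(self):
#         return "topic"               # label used in form display and errors
#
#     @property
#     def related_manager(self):
#         # manager that create_obj_from_form() will call .create() on
#         return self.caller.roster.investigations
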
""" key = "@helpinvestigate" aliases = ["+helpinvestigate", "helpinvestigate"] locks = "cmd:all()" help_category = "Investigation" form_verb = "Helping" change_switches = ("changestory", "changestat", "changeskill", "actionpoints", "silver", "resource", "resources") def pay_costs(self): """No resource cost for helping investigations""" return True @property def related_manager(self): return self.helper.assisted_investigations @property def form_attr(self): return "assist_investigation_form" @property def initial_form_values(self): return ['', '', '', '', self.caller, [], None] @property def helper(self): """Returns caller or their retainer who they are using in the investigation""" try: return self.investigation_form[4] or self.caller except IndexError: return self.caller def disp_investigation_form(self): super(CmdAssistInvestigation, self).disp_investigation_form() self.msg("{wAssisting Character:{n %s" % self.helper) def check_eligibility(self, helper): helping = helper.assisted_investigations.filter(currently_helping=True) if helping: self.msg("%s is already helping an investigation: %s" % (helper, ", ".join(str(ob.investigation.id) for ob in helping))) return False formid = self.investigation_form[0] if helper == self.caller: try: if self.caller.roster.investigations.filter(active=True): self.msg("You cannot assist an investigation while having an active investigation.") return False if self.caller.roster.investigations.get(id=formid): self.msg("You cannot assist one of your own investigations. You must use a retainer.") return False except (TypeError, ValueError, AttributeError, Investigation.DoesNotExist): pass return True def set_helper(self): if not self.investigation_form: self.msg("No form found. Use /new.") return try: helper = self.caller.player_ob.retainers.get(id=self.args).dbobj if not helper.db.abilities or helper.db.abilities.get("investigation_assistant", 0) < 1: self.msg("%s is not able to assist investigations." % helper) return except (AttributeError, ValueError, Agent.DoesNotExist): self.msg("No retainer by that number. Setting it to be you instead.") helper = self.caller if not self.check_eligibility(helper): return self.investigation_form[4] = helper self.disp_investigation_form() def disp_invites(self): invites = self.caller.db.investigation_invitations or [] # check which are valid investigations = Investigation.objects.filter(id__in=invites, ongoing=True, active=True) investigations = investigations | self.caller.roster.investigations.filter(ongoing=True) self.msg("You are permitted to help the following investigations:\n%s" % "\n".join( " %s (ID: %s)" % (str(ob), ob.id) for ob in investigations)) invest_ids = [ob.id for ob in investigations] # prune out invitations to investigations that are not active invites = [num for num in invites if num in invest_ids] self.caller.db.investigation_invitations = invites @property def valid_targ_ids(self): invites = self.caller.db.investigation_invitations or [] if self.helper != self.caller: # if it's a retainer, we add IDs of investigations we're running or assisting as valid for them invites.extend(list(Investigation.objects.filter(Q(character=self.caller.roster) | Q(assistants__char=self.caller) ).exclude(ongoing=False).values_list('id', flat=True))) return invites def get_target(self): """ Sets the target of the object we'll create. For an assisting investigation, it'll be the ID of the investigation. 
""" if not self.args: self.disp_invites() return try: targ = int(self.args) except ValueError: self.msg("You must supply the ID of an investigation.") return if targ not in self.valid_targ_ids: self.msg("No investigation by that ID.") return # check that we can't do our own unless it's a retainer helper = self.investigation_form[4] if helper == self.caller: if self.caller.roster.investigations.filter(ongoing=True, id=targ): self.msg("You cannot assist your own investigation.") return if helper.assisted_investigations.filter(investigation_id=targ): phrase = "%s is" % str(helper) if helper != self.caller else "You are" self.msg("%s already helping that investigation. You can /resume helping it." % phrase) return self.investigation_form[0] = targ super(CmdAssistInvestigation, self).get_target() def check_too_busy_to_finish(self): """Checks if helper is too busy""" try: if self.helper.roster.investigations.filter(active=True): already_investigating = True self.msg("You already have active investigations.") else: already_investigating = False except AttributeError: already_investigating = False return already_investigating def mark_active(self, created_object): """After the InvestigationAssistant has been created, check to see if we can mark it helping""" already_investigating = self.check_too_busy_to_finish() if not already_investigating and not self.check_enough_time_left(): return if not already_investigating and not self.check_ap_cost(): return current_qs = self.helper.assisted_investigations.filter(currently_helping=True).exclude(id=created_object.id) if current_qs: for ob in current_qs: ob.currently_helping = False ob.save() self.msg("%s was currently helping another investigation. Switching." % self.helper) if not already_investigating: created_object.currently_helping = True created_object.save() created_object.investigation.do_roll() self.msg("%s is now helping %s." % (self.helper, created_object.investigation)) else: self.msg("You already have an active investigation. That must stop before you help another.\n" "Once that investigation is no longer active, you may resume helping this investigation.") self.caller.attributes.remove(self.form_attr) @property def target_type(self): return "investigation" @property def finished_form(self): form = super(CmdAssistInvestigation, self).finished_form if not form: return invest_id, actions, stat, skill = form valid_investigations = self.valid_targ_ids if invest_id not in valid_investigations: self.msg("That is not a valid ID of an investigation for %s to assist." 
% self.helper) self.msg("Valid IDs: %s" % ", ".join(str(ob) for ob in valid_investigations)) return try: investigation = Investigation.objects.get(id=invest_id) except Investigation.DoesNotExist: self.msg("No investigation by that ID found.") return return investigation, actions, stat, skill def disp_currently_helping(self, char): self.msg("%s and retainers is helping the following investigations:" % char) table = PrettyTable(["ID", "Character", "Investigation Owner", "Currently Helping"]) if "history" in self.switches: investigations = char.assisted_investigations.filter(investigation__ongoing=False) else: investigations = char.assisted_investigations.filter(investigation__ongoing=True) retainers = [retainer.dbobj.id for retainer in char.player_ob.retainers.all() if retainer.dbobj] if "history" in self.switches: retainer_investigations = InvestigationAssistant.objects.filter(char__in=retainers, investigation__ongoing=False) else: retainer_investigations = InvestigationAssistant.objects.filter(char__in=retainers, investigation__ongoing=True) investigations = list(investigations) + list(retainer_investigations) for ob in investigations: def apply_color(object_to_format): if ob.investigation.active: return "{w%s{n" % object_to_format return "{r%s{n" % object_to_format row = [apply_color(column) for column in (ob.investigation.id, ob.char, ob.investigation.char, ob.currently_helping)] table.add_row(row) self.msg(table) def check_skill(self): if self.args.lower() not in self.helper.db.skills: self.msg("%s has no skill by the name of %s." % (self.helper, self.args)) return return True def view_investigation(self): try: character_ids = [self.caller.id] + [ob.dbobj.id for ob in self.caller.player_ob.retainers] ob = Investigation.objects.filter(assistants__char_id__in=character_ids).distinct().get(id=self.args) except (Investigation.DoesNotExist, TypeError, ValueError): self.msg("Could not find an investigation you're helping by that number.") self.disp_currently_helping(self.caller) return self.msg(ob.display()) def get_retainer_from_args(self, args): try: if args.isdigit(): char = self.caller.player.retainers.get(id=args).dbobj else: char = self.caller.player.retainers.get(name=args).dbobj return char except (ValueError, TypeError, Agent.DoesNotExist): self.msg("Retainer not found by that name or number.") return def func(self): finished = super(CmdAssistInvestigation, self).func() if finished: return if not self.args and not self.switches or "history" in self.switches: if self.investigation_form: self.disp_investigation_form() self.disp_invites() self.disp_currently_helping(self.caller) return if "retainer" in self.switches and len(self.switches) == 1: self.set_helper() return if "view" in self.switches or not self.switches: self.view_investigation() return if "stop" in self.switches: if "retainer" in self.switches: char = self.get_retainer_from_args(self.args) if not char: return else: char = self.caller refund = 0 for ob in char.assisted_investigations.filter(currently_helping=True): ob.currently_helping = False ob.save() refund += self.ap_cost if refund: self.caller.player_ob.pay_action_points(-refund) self.msg("%s stopped assisting investigations." 
% char) return if "resume" in self.switches: if "retainer" in self.switches: try: if self.rhs.isdigit(): char = self.caller.player.retainers.get(id=self.rhs).dbobj else: char = self.caller.player.retainers.get(name=self.rhs).dbobj except (Agent.DoesNotExist, AttributeError): self.msg("No retainer found by that ID or number.") return else: # not a retainer, just the caller. So check if they have an active investigation char = self.caller if self.caller.roster.investigations.filter(active=True): self.msg("You currently have an active investigation, and cannot assist an investigation.") return # check if they already are assisting something if char.assisted_investigations.filter(currently_helping=True): self.msg("%s is already assisting an investigation." % char) return if not self.check_enough_time_left(): return try: ob = char.assisted_investigations.get(investigation__id=self.lhs) except (ValueError, TypeError, InvestigationAssistant.DoesNotExist): self.msg("Not helping an investigation by that number.") return except InvestigationAssistant.MultipleObjectsReturned: self.msg("Well, this is awkward. You are assisting that investigation multiple times. This shouldn't " "be able to happen, but here we are.") inform_staff("BUG: %s is assisting investigation %s multiple times." % (char, self.lhs)) return # check if they have action points to afford it if not self.check_ap_cost(): return # all checks passed, mark it as currently being helped if the investigation exists ob.currently_helping = True ob.save() self.msg("Now helping %s." % ob.investigation) return if set(self.change_switches) & set(self.switches): if "retainer" in self.switches: lhs = self.lhs.split("/") try: char = self.get_retainer_from_args(lhs[0]) if not char: return investigation_id = lhs[1] except (IndexError, TypeError, ValueError): self.msg("You must specify <retainer ID>/<investigation ID>.") return else: char = self.caller investigation_id = self.lhs try: ob = char.assisted_investigations.get(investigation__id=investigation_id) if not self.check_enough_time_left(): return if "changestory" in self.switches: ob.actions = self.rhs field = "story" elif "changestat" in self.switches: rhs = self.rhs.lower() if rhs not in VALID_STATS: self.msg("Not a valid stat.") return ob.stat_used = rhs field = "stat" elif "changeskill" in self.switches: rhs = self.rhs.lower() if rhs not in VALID_SKILLS: self.msg("Not a valid skill.") return ob.skill_used = rhs field = "skill" elif "silver" in self.switches: ob = ob.investigation amt = self.caller.db.currency or 0.0 try: val = int(self.rhs) amt -= val if amt < 0 or val <= 0: raise ValueError if val % 5000 or (ob.silver + val) > 50000: self.msg("Silver must be a multiple of 5000, 50000 max.") self.msg("Current silver: %s" % ob.silver) return except (TypeError, ValueError): self.msg("You must specify a positive amount that is less than your money on hand.") return self.caller.pay_money(val) ob.silver += val ob.save() # redo the roll with new difficulty ob.do_roll() self.msg("You add %s silver to the investigation." % val) return elif "resource" in self.switches or "resources" in self.switches: ob = ob.investigation dompc = self.caller.player_ob.Dominion try: rtype, val = self.rhslist[0].lower(), int(self.rhslist[1]) if val <= 0: raise ValueError oamt = getattr(ob, rtype) if oamt + val > 50: self.msg("Maximum of 50 per resource. Current value: %s" % oamt) return current = getattr(dompc.assets, rtype) current -= val if current < 0: self.msg("You do not have enough %s resources." 
% rtype) return setattr(dompc.assets, rtype, current) dompc.assets.save() except (TypeError, ValueError, IndexError, AttributeError): self.msg("Invalid syntax.") return oamt += val setattr(ob, rtype, oamt) ob.save() # redo the roll with new difficulty ob.do_roll() self.msg("You have added %s resources to the investigation." % val) return elif "actionpoints" in self.switches: ob = ob.investigation if not ob.active: self.msg("The investigation must be marked active to invest in it.") return # check if we can pay try: amt = int(self.rhs) if amt <= 0: raise ValueError if amt % 5: self.msg("Action points must be a multiple of 5") self.msg("Current action points allocated: %s" % ob.action_points) return if not self.check_ap_cost(amt): return except (TypeError, ValueError): self.msg("Amount of action points must be a positive number you can afford.") return # add action points and save ob.action_points += amt ob.save() self.msg("New action point total is %s." % ob.action_points) return else: self.msg("Unrecognized switch.") return ob.save() self.msg("Changed %s to: %s" % (field, self.rhs)) except (ValueError, InvestigationAssistant.DoesNotExist): self.msg("%s isn't helping an investigation by that number." % char) return self.msg("Unrecognized switch.") class CmdInvestigate(InvestigationFormCommand): """ @investigate Usage: @investigate @investigate/history @investigate/view <id #> @investigate/active <id #> @investigate/silver <id #>=<additional silver to spend> @investigate/resource <id #>=<resource type>,<amount> @investigate/actionpoints <id #>=<additional points to spend> @investigate/changestory <id #>=<new story> @investigate/changestat <id #>=<new stat> @investigate/changeskill <id #>=<new skill> @investigate/abandon <id #> @investigate/resume <id #> @investigate/pause <id #> @investigate/requesthelp <id #>=<player> Create Usage: @investigate/new @investigate/tags <tag to investigate>[/-<tag to omit>...] @investigate/story <text of how you do the investigation> @investigate/stat <stat to use for the check> @investigate/skill <additional skill to use besides investigation> @investigate/cancel @investigate/finish Investigation allows characters to research secrets and unravel some of the world's mysteries. To start, use @investigate/new and fill out required fields with /tags and /story switches, then use /finish to finalize your investigation for GMs to see. A tag is a word defining the topic of research, while story tells how it will be accomplished. The /stat and /skill switches let you set the appropriate roll used by your story. The 'investigation' skill will always be taken into account. Use /cancel to cancel the form. While you can have many ongoing investigations, one advances weekly. Determine which by selecting the /active investigation. Spend silver and resources to attempt to help your investigation progress. Use /pause switch to mark an investigation inactive, or /abandon it altogether. About topic/tags: Using multiple tags results in very specific research on a clue involving ALL those topics. You may place '-' in front to omit clues with that tag. ex: "@investigate/tags primum/tyrval/-adept" Alternately, you may specify an existing clue to try to find out things related to it, by setting the topic with 'Clue: <id or name>'. So if you want to find more things related to clue 50, it would be 'Clue: 50'. Be aware that specificity may result in nothing found, but you might be offered the chance to expend great effort (100 AP) into researching a clue that no one has found before. 
""" key = "@investigate" locks = "cmd:all()" help_category = "Investigation" aliases = ["+investigate", "investigate"] base_cost = 25 model_switches = ("view", "active", "silver", "resource", "pause", "actionpoints", "changestory", "abandon", "resume", "requesthelp", "changestat", "changeskill") # noinspection PyAttributeOutsideInit def get_help(self, caller, cmdset): doc = self.__doc__ if caller.db.char_ob: caller = caller.db.char_ob self.caller = caller doc += "\n\nThe cost to make an investigation active is %s action points and %s resources." % ( self.ap_cost, self.start_cost) return doc @property def ap_cost(self): try: cost = 50 - (self.caller.db.skills.get('investigation', 0) * 5) if cost < 0: cost = 0 return cost except AttributeError: return 50 def list_ongoing_investigations(self): qs = self.related_manager.filter(ongoing=True) table = PrettyTable(["ID", "Tag/Topic", "Active"]) for ob in qs: table.add_row([ob.id, ob.topic, "{wX{n" if ob.active else ""]) self.msg("Ongoing investigations:\n%s" % table) def list_old_investigations(self): qs = self.related_manager.filter(ongoing=False) table = PrettyTable(["ID", "Tag/Topic"]) for ob in qs: table.add_row([ob.id, ob.topic]) self.msg("Old investigations:\n%s" % table) @property def start_cost(self): caller = self.caller try: skill = caller.db.skills.get("investigation", 0) cost = self.base_cost - (5 * skill) if cost < 0: cost = 0 return cost except AttributeError: return self.base_cost def display_target_string(self): return "" def mark_active(self, created_object): if not (self.related_manager.filter(active=True) or self.caller.assisted_investigations.filter(currently_helping=True)): if not self.caller.assisted_investigations.filter(currently_helping=True): if self.caller.player_ob.pay_action_points(self.ap_cost): created_object.active = True self.msg("New investigation created. This has been set as your active investigation " + "for the week, and you may add resources/silver to increase its chance of success.") else: self.msg("New investigation created. You could not afford the action points to mark it active.") else: self.msg("New investigation created. This investigation is not active because you are " + "currently assisting an investigation already.") else: self.msg("New investigation created. You already are participating in an active investigation " + "for this week, but may still add resources/silver to increase its chance of success " + "for when you next mark this as active.") self.msg("You may only have one active investigation per week, and cannot change it once " + "it has received GM attention. Only the active investigation can progress.") created_object.save() staffmsg = "%s has started an investigation on %s." % (self.caller, created_object.topic) if created_object.targeted_clue: staffmsg += " They will roll to find clue %s." % created_object.targeted_clue created_object.setup_investigation_for_clue(created_object.targeted_clue) else: staffmsg += " Their topic does not target a clue, and will automatically fail unless GM'd." inform_staff(staffmsg) def create_form(self): if not self.check_enough_time_left(): return super(CmdInvestigate, self).create_form() def get_target(self): """Sets the target of the object we'll create. 
For an investigation, this will be the topic.""" no_tags_msg = "You must include a tag or clue to investigate" if not self.args: return self.msg(no_tags_msg + ".") try: search_tags, omit_tags, source_clue = self.get_tags_or_clue_from_args() except CommandError as err: return self.msg(err) if not search_tags and not source_clue: return self.msg(no_tags_msg + ", not just tags you want to omit.") clue = get_random_clue(self.caller.roster, search_tags, omit_tags, source_clue) if not clue: if check_break(): return self.refuse_new_clue("Investigations that require new writing are not " + "allowed during staff break.") if len(search_tags) + len(omit_tags) > 6: return self.refuse_new_clue("That investigation would be too specific.") self.msg("The tag(s) or clue specified does not match an existing clue, and will be much more difficult and" " more expensive to look into than normal. Try other tags for an easier investigation, or " "proceed to /finish for a much more difficult one.") self.investigation_form[5] = [search_tags, omit_tags] self.investigation_form[4] = clue self.investigation_form[0] = self.args self.investigation_form[6] = source_clue super(CmdInvestigate, self).get_target() def get_tags_or_clue_from_args(self): args = self.args.split("/") search_tags = [] omit_tags = [] source_clue = None if args[0].lower().startswith("clue:"): args = args[0].lower() name = args.lstrip("clue:").strip() q_args = Q(characters=self.caller.roster) source_clue = self.get_by_name_or_id(Clue, name, q_args=q_args) return search_tags, omit_tags, source_clue for tag_txt in args: tag = self.get_by_name_or_id(SearchTag, tag_txt.lstrip("-")) if tag_txt.startswith("-"): omit_tags.append(tag) else: search_tags.append(tag) return search_tags, omit_tags, source_clue @property def need_new_clue_written(self): return not bool(self.investigation_form[4]) def func(self): finished = super(CmdInvestigate, self).func() if finished: return caller = self.caller entry = caller.roster dompc = caller.player_ob.Dominion investigation = self.investigation_form if not self.args and not self.switches: if investigation: self.disp_investigation_form() self.list_ongoing_investigations() return if "history" in self.switches: # display history self.list_old_investigations() return if (set(self.switches) & set(self.model_switches)) or not self.switches: try: ob = self.related_manager.get(id=int(self.lhs)) except (TypeError, ValueError): caller.msg("Must give ID of investigation.") return except Investigation.DoesNotExist: caller.msg("Investigation not found.") return if "resume" in self.switches: msg = "To mark an investigation as active, use /active." if ob.ongoing: self.msg("Already ongoing. %s" % msg) return if ob.clue_discoveries.exists(): self.msg("This investigation has found something already. Start another.") return if not self.check_enough_time_left(): return ob.ongoing = True ob.save() caller.msg("Investigation has been marked to be ongoing. 
%s" % msg) return if "pause" in self.switches: if not ob.active: self.msg("It was already inactive.") return self.caller.player_ob.pay_action_points(-self.ap_cost) ob.active = False ob.save() caller.msg("Investigation is no longer active.") return if "abandon" in self.switches or "stop" in self.switches: ob.ongoing = False if ob.active: self.caller.player_ob.pay_action_points(-self.ap_cost) ob.active = False ob.save() asslist = [] for ass in ob.active_assistants: ass.currently_helping = False ass.save() asslist.append(str(ass.char)) caller.msg("Investigation has been marked to no longer be ongoing nor active.") caller.msg("You can resume it later with /resume.") if asslist: caller.msg("The following assistants have stopped helping: %s" % ", ".join(asslist)) return if "view" in self.switches or not self.switches: caller.msg(ob.display()) return if "active" in self.switches: if ob.active: self.msg("It is already active.") return if not ob.ongoing: self.msg("That investigation is finished.") return try: current_active = entry.investigations.get(active=True) except Investigation.DoesNotExist: current_active = None if caller.assisted_investigations.filter(currently_helping=True): self.msg("You are currently helping an investigation, and must stop first.") return if check_break() and not ob.targeted_clue: self.msg("Investigations that do not target a clue cannot be marked active during the break.") return if not self.check_enough_time_left(): return if current_active: if not current_active.automate_result: caller.msg("You already have an active investigation " + "that has received GMing this week, and cannot be switched.") return if not self.check_ap_cost(): return current_active.active = False current_active.save() else: # check cost if we don't have a currently active investigation if not self.check_ap_cost(): return # can afford it, proceed to turn off assisted investigations and mark active for ass in caller.assisted_investigations.filter(currently_helping=True): ass.currently_helping = False ass.save() self.msg("No longer assisting in %s" % ass.investigation) ob.active = True ob.save() caller.msg("%s set to active." % ob) return if "silver" in self.switches: if not self.check_enough_time_left(): return amt = caller.db.currency or 0.0 try: val = int(self.rhs) amt -= val if amt < 0 or val <= 0: raise ValueError if val % 5000 or (ob.silver + val) > 50000: caller.msg("Silver must be a multiple of 5000, 50000 max.") caller.msg("Current silver: %s" % ob.silver) return except (TypeError, ValueError): caller.msg("You must specify a positive amount that is less than your money on hand.") return caller.pay_money(val) ob.silver += val ob.save() # redo the roll with new difficulty ob.do_roll() caller.msg("You add %s silver to the investigation." % val) return if "actionpoints" in self.switches: if not self.check_enough_time_left(): return if not ob.active: self.msg("The investigation must be marked active to invest AP in it.") return try: val = int(self.rhs) if val <= 0: raise ValueError if val % 5: caller.msg("Action points must be a multiple of 5") caller.msg("Current action points allocated: %s" % ob.action_points) return if not self.check_ap_cost(val): return except (TypeError, ValueError): caller.msg("You must specify a positive amount that you can afford.") return ob.action_points += val ob.save() # redo the roll with new difficulty ob.do_roll() caller.msg("You add %s action points to the investigation." 
% val) return if "resource" in self.switches or "resources" in self.switches: if not self.check_enough_time_left(): return try: rtype, val = self.rhslist[0].lower(), int(self.rhslist[1]) if val <= 0: raise ValueError oamt = getattr(ob, rtype) if oamt + val > 50: caller.msg("Maximum of 50 per resource. Current value: %s" % oamt) return current = getattr(dompc.assets, rtype) current -= val if current < 0: caller.msg("You do not have enough %s resources." % rtype) return setattr(dompc.assets, rtype, current) dompc.assets.save() except (TypeError, ValueError, IndexError, AttributeError): caller.msg("Invalid syntax.") return oamt += val setattr(ob, rtype, oamt) ob.save() # redo the roll with new difficulty ob.do_roll() caller.msg("You have added %s resources to the investigation." % val) return if "changestory" in self.switches: ob.actions = self.rhs ob.save() caller.msg("The new story of your investigation is:\n%s" % self.rhs) return if "changestat" in self.switches: if self.rhs not in VALID_STATS: self.msg("That is not a valid stat name.") return ob.stat_used = self.rhs ob.save() caller.msg("The new stat is: %s" % self.rhs) return if "changeskill" in self.switches: if self.rhs not in VALID_SKILLS: self.msg("That is not a valid skill name.") return ob.skill_used = self.rhs ob.save() caller.msg("The new skill is: %s" % self.rhs) return if "requesthelp" in self.switches: from typeclasses.characters import Character try: char = Character.objects.get(db_key__iexact=self.rhs, roster__roster__name="Active") except Character.DoesNotExist: self.msg("No active player found by that name.") return if char == caller: self.msg("You cannot invite yourself.") return if char.assisted_investigations.filter(investigation=ob): self.msg("They are already able to assist the investigation.") return current = char.db.investigation_invitations or [] if ob.id in current: self.msg("They already have an invitation to assist this investigation.") return if not (ob.active and ob.ongoing): self.msg("You may only invite others to active investigations.") return self.msg("Asking %s to assist with %s." % (char.key, ob)) current.append(ob.id) char.db.investigation_invitations = current name = caller.key inform_msg = "%s has requested your help in their investigation, ID %s.\n" % (name, ob.id) inform_msg += "To assist them, use the {w@helpinvestigate{n command, creating a " inform_msg += "form with {w@helpinvestigate/new{n, setting the target with " inform_msg += "{w@helpinvestigate/target %s{n, and filling in the other fields." % ob.id inform_msg += "\nThe current actions of their investigation are: %s" % ob.actions char.player_ob.inform(inform_msg, category="Investigation Request From %s" % name, append=False) return caller.msg("Invalid switch.") return class CmdAdminInvestigations(ArxPlayerCommand): """ @gminvestigations Usage: @gminvest @gminvest/view <ID #> @gminvest/target <ID #>=<Clue #> @gminvest/randomtarget <ID #> @gminvest/roll <ID #>[=<roll mod>,<difficulty>] @gminvest/result <ID #>=<result string> @gminvest/cluemessage <ID #>=<message> @gminvest/setprogress <ID #>=<amount> @gminvest/search <character>=<keyword> Checks active investigations, and allows you to override their automatic results. You can /roll to see a result - base difficulty is 50 unless you override it. Specifying a result string will cause that to be returned to them in weekly maintenance, otherwise it'll process the event as normal to find a clue based on the topic. 
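
# --- Cost reference (illustrative, derived from ap_cost/start_cost above) ---
# Both CmdInvestigate costs scale down with the character's 'investigation'
# skill:
#     ap_cost    = max(0, 50 - 5 * skill)   (AP to mark an investigation active)
#     start_cost = max(0, 25 - 5 * skill)   (social resources to create one)
# e.g. skill 0 -> 50 AP / 25 resources; skill 3 -> 35 AP / 10 resources;
#      skill 5 -> 25 AP /  0 resources.
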
class CmdAdminInvestigations(ArxPlayerCommand):
    """
    @gminvestigations

    Usage:
        @gminvest
        @gminvest/view <ID #>
        @gminvest/target <ID #>=<Clue #>
        @gminvest/randomtarget <ID #>
        @gminvest/roll <ID #>[=<roll mod>,<difficulty>]
        @gminvest/result <ID #>=<result string>
        @gminvest/cluemessage <ID #>=<message>
        @gminvest/setprogress <ID #>=<amount>
        @gminvest/search <character>=<keyword>

    Checks active investigations, and allows you to override their automatic
    results. You can /roll to see a result - base difficulty is 50 unless
    you override it. Specifying a result string will cause that to be
    returned to them in weekly maintenance, otherwise it'll process the
    event as normal to find a clue based on the topic.

    /search is used to search undiscovered clues that match a keyword for a
    given character to try to find possible matches.
    """
    key = "@gminvest"
    aliases = ["@gminvestigations"]
    locks = "cmd:perm(wizards)"
    help_category = "Investigation"

    @property
    def qs(self):
        return Investigation.objects.filter(active=True, ongoing=True,
                                            character__roster__name="Active")

    def disp_active(self):
        qs = list(self.qs)
        if len(qs) <= 20:
            table = EvTable("ID", "Char", "Topic", "Targeted Clue", "Roll", border="cells", width=78)
            for ob in qs:
                roll = ob.get_roll()
                roll = "{r%s{n" % roll if roll < 1 else "{w%s{n" % roll
                target = "{rNone{n" if not ob.targeted_clue else str(ob.targeted_clue)
                character = "{c%s{n" % ob.character
                table.add_row(ob.id, character, str(ob.topic), target, roll)
        else:
            table = PrettyTable(["ID", "Char", "Topic", "Targeted Clue", "Roll"])
            for ob in qs:
                roll = ob.get_roll()
                roll = "{r%s{n" % roll if roll < 1 else "{w%s{n" % roll
                target = "{rNone{n" if not ob.targeted_clue else str(ob.targeted_clue)[:30]
                character = "{c%s{n" % ob.character
                table.add_row([ob.id, character, str(ob.topic)[:15], target, roll])
        self.caller.msg(str(table))

    def set_roll(self, ob, roll, mod=0, diff=None):
        ob.roll = roll
        ob.save()
        self.msg("Recording their new roll as: %s." % roll)
        check = ob.check_success(modifier=mod, diff=diff)
        if check:
            self.msg("They will {wsucceed{n the check to discover a clue this week.")
        else:
            self.msg("They will {rfail{n the check to discover a clue this week.")

    def func(self):
        caller = self.caller
        if not self.args:
            self.disp_active()
            return
        if "search" in self.switches:
            player = self.caller.search(self.lhs)
            if not player:
                return
            clue_query = (Q(desc__icontains=self.rhs) | Q(name__icontains=self.rhs) |
                          Q(search_tags__name__icontains=self.rhs))
            rev_query = (Q(revelations__desc__icontains=self.rhs) |
                         Q(revelations__search_tags__name__icontains=self.rhs))
            rev_query |= Q(revelations__name__icontains=self.rhs)
            undisco = (player.roster.undiscovered_clues.filter(allow_investigation=True)
                       .filter(clue_query | rev_query).distinct())
            self.msg("Clues that match: %s" % ", ".join("(ID:%s, %s)" % (ob.id, ob) for ob in undisco))
            return
        try:
            if "view" in self.switches or not self.switches:
                ob = Investigation.objects.get(id=int(self.args))
                caller.msg(ob.gm_display())
                return
            if "randomtarget" in self.switches:
                ob = Investigation.objects.get(id=int(self.args))
                ob.clue_target = None
                self.msg("%s now targets %s" % (ob, ob.targeted_clue))
                return
            if "target" in self.switches:
                ob = self.qs.get(id=int(self.lhs))
                try:
                    targ = Clue.objects.get(id=int(self.rhs))
                except Clue.DoesNotExist:
                    caller.msg("No clue by that ID.")
                    return
                if targ in ob.character.clues.all():
                    self.msg("|rThey already have that clue. Aborting.")
                    return
                ob.setup_investigation_for_clue(targ)  # will also handle saving the investigation
                caller.msg("%s set to %s." % (ob, targ))
                return
            if "roll" in self.switches:
                mod = 0
                diff = None
                ob = self.qs.get(id=int(self.lhs))
                try:
                    mod = int(self.rhslist[0])
                    diff = int(self.rhslist[1])
                except IndexError:
                    pass
                roll = ob.do_roll(mod=mod, diff=diff)
                self.set_roll(ob, roll)
                return
            if "result" in self.switches:
                ob = self.qs.get(id=int(self.lhs))
                ob.result = self.rhs
                ob.save()
                caller.msg("Result is now:\n%s" % ob.result)
                return
            if "setprogress" in self.switches:
                ob = self.qs.get(id=int(self.lhs))
                ob.progress = int(self.rhs)
                ob.save()
                self.msg("Their progress is now %s, required to complete is %s." % (
                    ob.progress, ob.completion_value))
                return
        except (TypeError, ValueError):
            caller.msg("Arguments must be numbers.")
            return
        except Investigation.DoesNotExist:
            caller.msg("No Investigation by that ID.")
            return
        caller.msg("Invalid switch.")
        return


class CmdListClues(ArxPlayerCommand):
    """
    @clues

    Usage:
        @clues
        @clues <clue #>
        @clues/share <clue #>[,<clue2 #>...]=<target>[,<target2>...]/<note>
        @clues/search <text>
        @clues/addnote <clue #>=[text to append]

    Displays the clues that your character has discovered in game, or shares
    them with others. /search returns the clues that contain the text
    specified. /addnote allows you to add more text to your discovery of the
    clue.

    When sharing clues, please roleplay a bit about them first. Don't dump
    information on people without any context. You must also write a note
    which is appended to their clue that serves as a record about the scene:
    please briefly describe the scene in which the clue was shared, or why
    they were told, or any other contextual notes about it.
    """
    key = "clues"
    locks = "cmd:all()"
    aliases = ["clue", "@zoinks", "@jinkies"]
    help_category = "Investigation"

    def get_help(self, caller, cmdset):
        """Custom helpfile that lists clue sharing costs"""
        if caller.player_ob:
            caller = caller.player_ob
        doc = self.__doc__
        doc += "\n\nYour cost of sharing clues is %s." % caller.clue_cost
        return doc

    @property
    def clue_discoveries(self):
        """Clue discovery objects for our caller"""
        try:
            return self.caller.roster.clue_discoveries.all()
        except AttributeError:
            return ClueDiscovery.objects.none()

    def func(self):
        """Executes clues command"""
        try:
            if not self.args or "search" in self.switches:
                return self.disp_clue_table()
            if "share" in self.switches:
                return self.share_clues()
            # get clue for display or sharing
            try:
                discovery = self.clue_discoveries.get(clue_id=self.lhs)
            except (ClueDiscovery.DoesNotExist, ValueError, TypeError):
                discovery = None
                if not self.switches and self.caller.check_permstring("builders"):
                    try:
                        discovery = Clue.objects.get(id=self.lhs)
                    except Clue.DoesNotExist:
                        pass
            if not discovery:
                self.msg("No clue found by that ID.")
                self.disp_clue_table()
                return
            if not self.switches:
                self.msg(discovery.display())
                return
            if "addnote" in self.switches:
                return self.add_note(discovery)
            self.msg("Invalid switch")
        except CommandError as err:
            self.msg(err)

    def share_clues(self):
        """Shares clues with others in room"""
        discoveries_to_share = []
        clue_err_msg = ""
        for arg in self.lhslist:
            try:
                discovery = self.clue_discoveries.get(clue_id=arg)
            except (ClueDiscovery.DoesNotExist, ValueError, TypeError):
                clue_err_msg += "No clue found by this ID: {w%s{n. " % arg
                continue
            if discovery.clue.allow_sharing:
                discoveries_to_share.append(discovery)
            else:
                clue_err_msg += "{w%s{n cannot be shared. " % discovery.clue
        if clue_err_msg:
            self.msg(clue_err_msg)
        if not discoveries_to_share:
            return
        if not self.rhs:
            raise CommandError("Who are you sharing with?")
        split_result = self.rhs.split("/", 1)
        try:
            rhslist, note = split_result[0], split_result[1]
        except IndexError:
            raise CommandError("You must provide a note that gives context to the clues you're sharing.")
        if len(note) < 80:
            raise CommandError("Please write a longer note that gives context to the clues you're sharing.")
        rhslist = rhslist.split(",")
        shared_names = []
        targets = []
        for arg in rhslist:
            pc = self.caller.search(arg)
            if not pc:
                return
            if not pc.char_ob.location or self.caller.char_ob.location != pc.char_ob.location:
                raise CommandError("You can only share clues with someone in the same room. Please don't "
                                   "share clues without some RP talking about them.")
            targets.append(pc)
        cost = len(targets) * len(discoveries_to_share) * self.caller.clue_cost
        if not self.caller.pay_action_points(cost):
            raise CommandError("Sharing the clue(s) with them would cost %s action points." % cost)
        for targ in targets:
            for discovery in discoveries_to_share:
                discovery.share(targ.roster, note=note)
            shared_names.append(str(targ.roster))
        msg = "You have shared the clue(s) '%s' with %s." % (
            ", ".join(str(ob.clue) for ob in discoveries_to_share), ", ".join(shared_names))
        if note:
            msg += "\nYour note: %s" % note
        self.msg(msg)

    def disp_clue_table(self):
        table = PrettyTable(["{wClue #{n", "{wSubject{n", "{wType{n"])
        discoveries = self.clue_discoveries.order_by('date')
        if "search" in self.switches:
            msg = "{wMatching Clues{n\n"
            discoveries = discoveries.filter(Q(message__icontains=self.args) |
                                             Q(clue__desc__icontains=self.args) |
                                             Q(clue__name__icontains=self.args) |
                                             Q(clue__search_tags__name__iexact=self.args)).distinct()
        else:
            msg = "{wDiscovered Clues{n\n"
        for discovery in discoveries:
            table.add_row([discovery.clue.id, discovery.name, discovery.clue.get_clue_type_display()])
        msg += str(table)
        self.msg(msg, options={'box': True})

    def add_note(self, discovery):
        if not self.rhs:
            self.msg("Must contain a note to add.")
            return
        header = "\n[%s] %s wrote: " % (datetime.now().strftime("%x %X"), self.caller.key)
        discovery.message += header + self.rhs
        discovery.save()
        self.msg(discovery.display())
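
# --- Sharing cost (illustrative, from share_clues above) ---------------------
# cost = len(targets) * len(discoveries_to_share) * caller.clue_cost
# e.g. sharing 2 clues with 3 people at a clue_cost of 10 AP each costs
# 2 * 3 * 10 = 60 action points, paid once up front via pay_action_points().
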
class CmdListRevelations(ArxPlayerCommand):
    """
    @revelations

    Usage:
        @revelations
        @revelations <ID>
        @revelations/checkmissed

    The first form of this command will list all the revelations you know.
    The second form views a specific revelation. The third form will check
    if there are any revelations you should know which were missed due to
    clues being added to revelations later.
    """
    key = "@revelations"
    locks = "cmd:all()"
    help_category = "Investigation"

    def disp_rev_table(self):
        caller = self.caller
        table = PrettyTable(["{wRevelation #{n", "{wSubject{n"])
        revs = caller.roster.revelations.all()
        msg = "{wDiscovered Revelations{n\n"
        for rev in revs:
            table.add_row([rev.id, rev.name])
        msg += str(table)
        caller.msg(msg, options={'box': True})

    def resync_revelations(self):
        character = self.caller.roster
        revelations = Revelation.objects.filter(~Q(characters=character)).distinct()
        discovered = []
        for revelation in revelations:
            if revelation.player_can_discover(character):
                discovered.append(revelation)
        date = datetime.now()
        for revelation in discovered:
            message = "You had a revelation which had been missed!"
            RevelationDiscovery.objects.create(character=character, discovery_method="Checked for Missing",
                                               message=message, investigation=None,
                                               revelation=revelation, date=date)
            self.msg("You were missing a revelation: %s" % str(revelation))

    def func(self):
        if "checkmissed" in self.switches:
            self.msg("Checking for missed revelations...")
            self.resync_revelations()
            self.msg("Done!")
            return
        if not self.args:
            self.disp_rev_table()
            return
        try:
            rev = self.caller.roster.revelation_discoveries.get(revelation_id=self.args)
        except (ValueError, TypeError, RevelationDiscovery.DoesNotExist):
            rev = None
            if self.caller.check_permstring("builders"):
                try:
                    rev = Revelation.objects.get(id=self.args)
                except Revelation.DoesNotExist:
                    pass
        if not rev:
            self.msg("No revelation by that number.")
            self.disp_rev_table()
            return
        self.msg(rev.display())
        # rev may be a RevelationDiscovery or, for builders, a raw Revelation;
        # the original always read rev.revelation, which breaks for the latter
        revelation = getattr(rev, "revelation", rev)
        clues = self.caller.roster.clues.filter(revelations=revelation)
        self.msg("Related Clues: %s" % "; ".join(str(clue) for clue in clues))


class CmdTheories(ArxPlayerCommand):
    """
    @theories

    Usage:
        @theories
        @theories/mine
        @theories <theory ID #>
        @theories/share <theory ID #>=<player>[,<player2>,...]
        @theories/create <topic>=<description>
        @theories/addclue <theory ID #>=<clue ID #>
        @theories/rmclue <theory ID #>=<clue ID #>
        @theories/addrelatedtheory <your theory ID #>=<other's theory ID #>
        @theories/forget <theory ID #>
        @theories/editdesc <theory ID #>=<desc>
        @theories/edittopic <theory ID #>=<topic>
        @theories/shareall <theory ID #>=<player>
        @theories/readall <theory ID #>
        @theories/addeditor <theory ID #>=<player>
        @theories/rmeditor <theory ID #>=<player>

    Allows you to create and share theories your character comes up with,
    and associate them with clues and other theories. You may only create
    associations for theories that you created. /shareall allows you to also
    share any clue you know that is related to the theory you specify.
    """
    key = "@theories"
    locks = "cmd:all()"
    help_category = "Investigation"

    def display_theories(self):
        table = EvTable("{wID #{n", "{wTopic{n")
        if "mine" in self.switches:
            qs = self.caller.editable_theories.all().order_by('id')
        else:
            qs = self.caller.known_theories.all().order_by('id')
        for theory in qs:
            table.add_row(theory.id, theory.topic)
        self.msg(table)

    def view_theory(self):
        theories = self.caller.known_theories.all()
        try:
            theory = theories.get(id=self.args)
        except (Theory.DoesNotExist, ValueError, TypeError):
            self.msg("No theory by that ID.")
            return
        self.msg(theory.display())
        self.msg("{wRelated Theories{n: %s\n" % ", ".join(
            str(ob.id) for ob in theory.related_theories.filter(id__in=theories)))
        disp_clues = theory.related_clues.filter(id__in=self.caller.roster.clues.all())
        self.msg("{wRelated Clues:{n %s" % ", ".join(ob.name for ob in disp_clues))
        if "readall" in self.switches:
            for clue in disp_clues:
                clue_display = "{wName{n: %s\n\n%s\n" % (clue.name, clue.desc)
                self.msg(clue_display)

    def func(self):
        if not self.args:
            self.display_theories()
            return
        if not self.switches or "view" in self.switches or "readall" in self.switches:
            self.view_theory()
            return
        if "search" in self.switches:
            matches = self.caller.known_theories.filter(Q(topic__icontains=self.args) |
                                                        Q(desc__icontains=self.args))
            self.msg("Matches: %s" % ", ".join("%s (#%s)" % (ob, ob.id) for ob in matches))
            return
        if "create" in self.switches:
            theory = self.caller.created_theories.create(topic=self.lhs, desc=self.rhs)
            theory.add_editor(self.caller)
            self.msg("You have created a new theory.")
            return
        if "share" in self.switches or "shareall" in self.switches:
            try:
                theory = self.caller.known_theories.get(id=self.lhs)
            except (Theory.DoesNotExist, ValueError):
                self.msg("No theory found by that ID.")
                return
            targs = []
            for arg in self.rhslist:
                targ = self.caller.search(arg)
                if not targ:
                    continue
                targs.append(targ)
            if not targs:
                return
            clue_discoveries = self.caller.roster.clue_discoveries.filter(
                clue__id__in=theory.related_clues.all())
            per_targ_cost = self.caller.clue_cost
            for targ in targs:
                if "shareall" in self.switches:
                    cost = len(targs) * len(clue_discoveries) * per_targ_cost
                    if cost > self.caller.roster.action_points:
                        self.msg("That would cost %s action points." % cost)
                        return
                    try:
                        if targ.db.char_ob.location != self.caller.db.char_ob.location:
                            self.msg("You must be in the same room.")
                            continue
                    except AttributeError:
                        self.msg("One of you does not have a character object.")
                        continue
                    for clue in clue_discoveries:
                        if not clue.clue.allow_sharing:
                            self.msg("%s cannot be shared. Skipping." % clue.clue)
                            continue
                        clue.share(targ.roster)
                        self.msg("Shared clue %s with %s" % (clue.name, targ))
                    self.caller.pay_action_points(cost)
                if theory in targ.known_theories.all():
                    self.msg("They already know that theory.")
                    continue
                theory.share_with(targ)
                self.msg("Theory %s added to %s." % (self.lhs, targ))
                targ.inform("%s has shared a theory with you." % self.caller, category="Theories")
            return
        if "delete" in self.switches or "forget" in self.switches:
            try:
                theory = self.caller.known_theories.get(id=self.lhs)
            except (Theory.DoesNotExist, ValueError):
                self.msg("No theory by that ID.")
                return
            theory.forget_by(self.caller)
            self.msg("Theory forgotten.")
            if not theory.known_by.all():
                # if no one knows about it now, delete it outright
                theory.delete()
            return
        if "addeditor" in self.switches or "rmeditor" in self.switches:
            try:
                theory = self.caller.editable_theories.get(id=self.lhs)
            except (Theory.DoesNotExist, ValueError):
                self.msg("No theory by that ID.")
                return
            player = self.caller.search(self.rhs)
            if not player:
                return
            if not theory.known_by.filter(id=player.id).exists():
                self.msg("They do not know the theory yet.")
                return
            if "addeditor" in self.switches:
                theory.add_editor(player)
                self.msg("%s can now edit the theory." % player)
                return
            if "rmeditor" in self.switches:
                if player == theory.creator:
                    self.msg("%s is the theory's original author, and cannot be removed." % player)
                else:
                    theory.remove_editor(player)
                    self.msg("%s cannot edit the theory." % player)
                return
        try:
            theory = self.caller.editable_theories.get(id=self.lhs)
        except (Theory.DoesNotExist, ValueError):
            self.msg("You cannot edit a theory by that number.")
            return
        if "editdesc" in self.switches:
            theory.desc = self.rhs
            theory.save()
            self.msg("New desc is: %s" % theory.desc)
            for player in theory.known_by.all():
                if player == self.caller:
                    continue
                player.inform("%s has been edited." % theory, category="Theories")
            return
        if "edittopic" in self.switches:
            theory.topic = self.rhs
            theory.save()
            self.msg("New topic is: %s" % theory.topic)
            return
        if "addrelatedtheory" in self.switches or "rmrelatedtheory" in self.switches:
            try:
                other_theory = self.caller.known_theories.get(id=self.rhs)
            except (Theory.DoesNotExist, ValueError):
                self.msg("You do not know a theory by that id.")
                return
            if "addrelatedtheory" in self.switches:
                theory.related_theories.add(other_theory)
                self.msg("Theory added.")
            else:
                theory.related_theories.remove(other_theory)
                self.msg("Theory removed.")
            return
        if "addclue" in self.switches or "rmclue" in self.switches:
            try:
                clue = self.caller.roster.clues.get(id=self.rhs)
            except (Clue.DoesNotExist, ValueError, TypeError, AttributeError):
                self.msg("No clue by that ID.")
                return
            if "addclue" in self.switches:
                theory.related_clues.add(clue)
                self.msg("Added clue %s to theory." % clue.name)
            else:
                theory.related_clues.remove(clue)
                self.msg("Removed clue %s from theory." % clue.name)
            return
        self.msg("Invalid switch.")


class ListPlotsMixin(object):
    """Mixin for commands that use plots"""

    @property
    def gm_plots(self):
        """Plots our caller is gming"""
        return self.caller.Dominion.plots_we_can_gm

    @property
    def gm_revelations(self):
        """Revelations our caller has written"""
        return self.caller.roster.revelations_written.all()

    def list_gm_plots(self):
        """Lists plots we're gming, and clues and revelations we've created"""
        plots = self.gm_plots
        clues = self.caller.roster.clues_written.all()
        revelations = self.caller.roster.revelations_written.all()

        def format_list(some_iter):
            """Helper function for formatting"""
            return ["%s (#%s)" % (ob, ob.id) for ob in some_iter]

        msg = "{wPlots GMd:{n %s\n" % list_to_string(format_list(plots))
        msg += "{wClues Written:{n %s\n" % list_to_string(format_list(clues))
        msg += "{wRevelations Written:{n %s\n" % list_to_string(format_list(revelations))
        return msg

    def get_revelation(self):
        """Gets a revelation by ID"""
        try:
            if self.args.isdigit():
                revelation = self.gm_revelations.get(id=self.args)
            else:
                revelation = self.gm_revelations.get(name__iexact=self.args)
            return revelation
        except (Revelation.DoesNotExist, ValueError, TypeError):
            raise CommandError("No Revelation by that name or number.\n" + self.list_gm_plots())


class PRPLorecommand(ListPlotsMixin, FormCommandMixin, ArxPlayerCommand):
    """Base class for commands that make lore for PRPs"""
""" key = "prpclue" help_category = "PRP" locks = "cmd: all()" form_class = ClueCreateForm form_attribute = "clue_creation_form" form_initial_kwargs = (('allow_sharing', True), ('allow_investigation', True), ('red_herring', False)) def func(self): try: if not self.args and not self.switches: self.msg(self.list_gm_plots()) self.display_form() return if "abandon" in self.switches: self.caller.attributes.remove(self.form_attribute) self.msg("Abandoned.") return if "create" in self.switches: return self.create_form() if "listclues" in self.switches: revelation = self.get_revelation() if not revelation: return self.msg("Clues: %s" % ", ".join("%s (#%s)" % (clue, clue.id) for clue in revelation.clues.all())) return if "sendclue" in self.switches: try: clue = Clue.objects.filter(revelations__plots__in=self.gm_plots).distinct().get(id=self.lhs) except (TypeError, ValueError, Clue.DoesNotExist): self.msg("No clue found by that ID.") return targ = self.caller.search(self.rhs) if not targ: return if targ.Dominion not in clue.event.participants.all(): self.msg("Target is not among the participants of that event.") return targ.roster.discover_clue(clue) self.msg("You have sent them a clue.") targ.inform("A new clue has been sent to you about event %s. Use @clues to view it." % clue.event, category="Clue Discovery") return form = self.caller.attributes.get(self.form_attribute) if not form: self.msg("Use /create to start a new form.") return if "finish" in self.switches: return self.submit_form() if "name" in self.switches: form['name'] = self.args if "desc" in self.switches: form['desc'] = self.args if "revelation" in self.switches: revelation = self.get_revelation() if not revelation: return form['revelation'] = revelation.id if "rating" in self.switches: form['rating'] = self.args if "tags" in self.switches: form['tag_names'] = self.args if "fake" in self.switches: form['red_herring'] = not form.get('red_herring') if "noinvestigate" in self.switches: form['allow_investigation'] = not form.get('allow_investigation', True) if "noshare" in self.switches: form['allow_sharing'] = not form.get('allow_sharing', True) self.display_form() except CommandError as err: self.msg(err) class CmdPRPRevelation(PRPLorecommand): """ Creates a revelation for a PRP you are GMing for Usage: +prprevelation +prprevelation/create +prprevelation/name <name> +prprevelation/desc <description> +prprevelation/rating <total value of clues required for discovery> +prprevelation/tags <tag 1>,<tag 2>,etc +prprevelation/plot <plot ID>[=<notes about relationship to plot>] +prprevelation/finish +prprevelation/abandon Allows a GM for a PRP to create lore for PRPs they're running. A Revelation is a summation of significant game lore, while a Clue's a small part of it: either a specific perspective of someone, providing more context/detail on some aspect of it, etc. For example, if you ran a PRP on a haunted castle, the revelation might be 'The Haunted Castle of Foobar'. The Revelation's desc would be a synopsis of the narrative of the entire plot. Clues would be about the history of House Foobar, the structure of the castle, events that caused it to become haunted, etc. Tags are keywords/phrases used specifically for searching/indexing topics in the database. Please use them liberally on anything significant in the revelation to help staff out. For example, you would add a tag for Foobar in the above example, and if the House was destroyed by 'The Bloodcurse', you would add that as a tag as well. 
""" key = "prprevelation" help_category = "PRP" locks = "cmd: all()" form_class = RevelationCreateForm form_attribute = "revelation_creation_form" form_initial_kwargs = (('red_herring', False),) def func(self): """Executes command""" try: if not self.args and not self.switches: self.msg(self.list_gm_plots()) self.display_form() return if "abandon" in self.switches: self.caller.attributes.remove(self.form_attribute) self.msg("Abandoned.") return if "create" in self.switches: return self.create_form() form = self.caller.attributes.get(self.form_attribute) if not form: self.msg("Use /create to start a new form.") return if "finish" in self.switches: return self.submit_form() if "name" in self.switches: form['name'] = self.args if "desc" in self.switches: form['desc'] = self.args if "plot" in self.switches: try: if self.lhs.isdigit(): plot = self.gm_plots.get(id=self.lhs) else: plot = self.gm_plots.get(name__iexact=self.lhs) except Plot.DoesNotExist: raise CommandError("No plot by that name or number.") form['plot'] = plot.id form['plot_gm_notes'] = self.rhs if "tags" in self.switches: form['tag_names'] = self.args if "fake" in self.switches: form['red_herring'] = not form.get('red_herring') if "rating" in self.switches: form['required_clue_value'] = self.args self.display_form() except CommandError as err: self.msg(err)
import packaging_scripts.pacman as pacman
import re
import unittest

from hypothesis import given
from hypothesis.strategies import iterables, text
from pathlib import Path
from types import GeneratorType
from unittest.mock import MagicMock, patch

# ========== Constants ==========
TESTING_MODULE = "packaging_scripts.pacman"

# ========== Unit Tests ==========
# The test cases below are unfinished stubs kept for reference.
#
# class TestParseConfigFile(unittest.TestCase):
#     def setUp(self):
#         self.pat
#
#     def tearDown(self):
#
#
# class TestListConfiguredRepos(unittest.TestCase):
#     def setUp(self):
#
#     def test_removes_one_element(self):
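

# Hedged example (not from the original author): a minimal property-based test
# using the hypothesis imports above. The property checked is deliberately
# trivial; real tests would exercise packaging_scripts.pacman, whose API is
# not shown here.
class TestHypothesisSmoke(unittest.TestCase):
    @given(text())
    def test_string_roundtrip_is_identity(self, s):
        # Joining the characters of a string reproduces the string.
        self.assertEqual(s, "".join(list(s)))


if __name__ == "__main__":
    unittest.main()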
# -*- coding: utf-8 -*- """ Script for adding categories. These categories are intended for use when working with front-end tasks. This script is still WIP. How to run: python generate_test_categories.py <db_username> <db_password> """ import sys import ckan.model as model from sqlalchemy import create_engine def _generate_image_urls(): base_url = u'https://www.betaavoindata.fi/data/uploads/group' return [ u'{}/2019-09-14-125225.580409Ikonit-AvoinData-Regions.svg'.format(base_url), u'{}/2019-07-01-102212.846599energia.svg'.format(base_url), u'{}/2019-09-14-125054.290257Ikonit-AvoinData-Government.svg'.format(base_url), u'{}/2019-07-01-102343.243986Kansainvalistyminen.svg'.format(base_url), u'{}/2019-09-14-124019.900929Kulttuuri-liikunta-ulkoilu-ja-matkailu.svg'.format(base_url), u'{}/2019-09-14-124758.525007kulttuurijavapaaaika.svg'.format(base_url), u'{}/2019-09-14-123833.880147Ikonit--Kuvitukset--Juna.svg'.format(base_url), u'{}/2019-07-01-102740.5558642018-07-13-104602.335155Ikonit-AvoinData-Agriculture.svg'.format(base_url), u'{}/2019-09-14-124219.193737Liikennejamatkailu.svg'.format(base_url), u'{}/2019-07-01-103050.1219632018-03-19-090141.983568hallintojapaatoksenteko.svg'.format(base_url), u'{}/2019-09-14-124612.287327Asuminen-ja-muttaminen.svg'.format(base_url), u'{}/2018-03-22-162901.594613talous.svg'.format(base_url), u'{}/2018-03-22-162934.287485terveysjasosiaalipalvelut.svg'.format(base_url), u'{}/2019-09-14-124655.983665Yritystoiminnan-aloittaminen.svg'.format(base_url), u'{}/2019-09-14-123926.932875Parisuhde-ja-perhe.svg'.format(base_url), u'{}/2018-03-22-163027.730610ymparisto.svg'.format(base_url), ] def _generate_category_data(): image_urls = _generate_image_urls() categories = [] for i in range(len(image_urls)): categories.append( { 'name': u'cat{}'.format(i+1), 'title': u'Category{}'.format(i+1), 'image_url': image_urls[i] } ) return categories def _create_group_models(): data = _generate_category_data() return [ model.Group( name=x.get('name'), title=x.get('title'), image_url=x.get('image_url'), ) for x in data ] def create_categories(db_username, db_password): engine = create_engine('postgresql://{}:{}@localhost/ckan_default'.format(db_username, db_password)) session = model.meta.Session session.bind = engine groups = _create_group_models() session.bulk_save_objects(groups) session.commit() if __name__ == '__main__': db_username = sys.argv[1] db_password = sys.argv[2] create_categories(db_username, db_password) print('Done...')
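

# Hedged, self-contained sketch (not part of the original script): the same
# session.bulk_save_objects() pattern as create_categories(), but against an
# in-memory SQLite database so it can run without a CKAN install.
def _demo_bulk_save_sqlite():
    """Assumes SQLAlchemy 1.4+ for sqlalchemy.orm.declarative_base."""
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class Category(Base):
        __tablename__ = 'category'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    # bulk_save_objects skips most unit-of-work bookkeeping, which is why the
    # script above prefers it for inserting many rows at once.
    session.bulk_save_objects([Category(name='cat{}'.format(i)) for i in range(16)])
    session.commit()
    assert session.query(Category).count() == 16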
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

DESCRIPTION = "A SlugField for MongoEngine."
try:
    LONG_DESCRIPTION = open('README.md').read()
except IOError:
    # Fall back to the short description if README.md is missing;
    # a bare `except:` here would also swallow unrelated errors.
    LONG_DESCRIPTION = DESCRIPTION

setup(name='mongoengine-slugfield',
      version='0.0.1',
      packages=find_packages(),
      author='Malthe Jørgensen',
      author_email='malthe.jorgensen@gmail.com',
      url='https://github.com/peergradeio/mongoengine-slugfield',
      license='Public Domain',
      include_package_data=True,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      platforms=['any'],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: Public Domain',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Topic :: Database',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      install_requires=['mongoengine', 'blinker', 'awesome-slugify'],
      test_suite='tests',
      )
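
# Hedged usage note (not from the original package): with this setup.py, the
# standard setuptools workflow applies, for example:
#
#   pip install .           # install the package and its dependencies
#   python setup.py test    # run the 'tests' suite declared via test_suite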
import json import requests from apmserver import ElasticTest, ServerBaseTest, integration_test @integration_test class LoggingIntegrationTest(ElasticTest): config_overrides = { "logging_json": "true", } def test_log_valid_event(self): with open(self.get_transaction_payload_path()) as f: r = requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) assert r.status_code == 202, r.status_code intake_request_logs = list(self.logged_requests()) assert len(intake_request_logs) == 1, "multiple requests found" req = intake_request_logs[0] self.assertDictContainsSubset({ "level": "info", "message": "request accepted", "response_code": 202, }, req) def test_log_invalid_event(self): with open(self.get_payload_path("invalid-event.ndjson")) as f: r = requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) assert r.status_code == 400, r.status_code intake_request_logs = list(self.logged_requests()) assert len(intake_request_logs) == 1, "multiple requests found" req = intake_request_logs[0] self.assertDictContainsSubset({ "level": "error", "message": "data validation error", "response_code": 400, }, req) error = req.get("error") assert error.startswith("failed to validate transaction: error validating JSON:"), json.dumps(req) @integration_test class LoggingIntegrationEventSizeTest(ElasticTest): config_overrides = { "logging_json": "true", "max_event_size": "100", } def test_log_event_size_exceeded(self): with open(self.get_transaction_payload_path()) as f: r = requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) assert r.status_code == 400, r.status_code intake_request_logs = list(self.logged_requests()) assert len(intake_request_logs) == 1, "multiple requests found" req = intake_request_logs[0] self.assertDictContainsSubset({ "level": "error", "message": "request body too large", "response_code": 400, }, req) error = req.get("error") assert error.startswith("event exceeded the permitted size."), json.dumps(req) @integration_test class LoggingIntegrationTraceCorrelationTest(ElasticTest): config_overrides = { "logging_json": "true", "instrumentation_enabled": "true", } def test_trace_ids(self): with open(self.get_transaction_payload_path()) as f: r = requests.post(self.intake_url, data=f, headers={'content-type': 'application/x-ndjson'}) assert r.status_code == 202, r.status_code intake_request_logs = list(self.logged_requests()) assert len(intake_request_logs) == 1, "multiple requests found" req = intake_request_logs[0] self.assertIn("trace.id", req) self.assertIn("transaction.id", req) self.assertEqual(req["transaction.id"], req["request_id"]) class LoggingToEnvContainer(ServerBaseTest): def start_args(self): return {"extra_args": ["-environment", "container"]} def test_startup(self): # we only need to check that the server can start up self.wait_until_started() class LoggingToEnvSystemd(ServerBaseTest): def start_args(self): return {"extra_args": ["-environment", "systemd"]} def test_startup(self): # we only need to check that the server can start up self.wait_until_started() class LoggingToEnvMacOS(ServerBaseTest): def start_args(self): return {"extra_args": ["-environment", "macos_service"]} def test_startup(self): # we only need to check that the server can start up self.wait_until_started() class LoggingToEnvWindows(ServerBaseTest): def start_args(self): return {"extra_args": ["-environment", "windows_service"]} def test_startup(self): # we only need to check that the server can start up 
self.wait_until_started()
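

# Hedged aside (not part of the apm-server suite): assertDictContainsSubset,
# used throughout the tests above, was deprecated in Python 3.2 and removed
# in 3.12. A minimal drop-in helper with the same intent:
def assert_dict_contains_subset(subset, actual):
    """Assert that every key/value pair in `subset` appears in `actual`."""
    missing = {k: v for k, v in subset.items() if actual.get(k) != v}
    assert not missing, "missing or mismatched keys: %r" % missing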
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_tests.tests.scenario import manager_congress

CONF = config.CONF


class TestCeilometerDriver(manager_congress.ScenarioPolicyBase):

    @classmethod
    def skip_checks(cls):
        super(TestCeilometerDriver, cls).skip_checks()
        if not getattr(CONF.service_available, 'ceilometer', False):
            # In a classmethod, cls is already the class, so cls.__name__ is
            # correct; cls.__class__.__name__ would name the metaclass.
            msg = ("%s skipped as ceilometer is not available"
                   % cls.__name__)
            raise cls.skipException(msg)

    def setUp(self):
        # setUp runs once per test instance, so the conventional first
        # argument is self rather than cls.
        super(TestCeilometerDriver, self).setUp()
        self.telemetry_client = self.admin_manager.telemetry_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'ceilometer')

    @decorators.attr(type='smoke')
    def test_ceilometer_meters_table(self):
        meter_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'meters')['columns'])
        meter_id_col = next(i for i, c in enumerate(meter_schema)
                            if c['name'] == 'meter_id')

        def _check_data_table_ceilometer_meters():
            # Fetch data from ceilometer each time, because this test may
            # start before ceilometer has all the users.
            meters = self.telemetry_client.list_meters()
            meter_map = {}
            for meter in meters:
                meter_map[meter['meter_id']] = meter

            results = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'meters'))
            for row in results['results']:
                try:
                    meter_row = meter_map[row['data'][meter_id_col]]
                except KeyError:
                    return False
                for index in range(len(meter_schema)):
                    if (str(row['data'][index]) !=
                            str(meter_row[meter_schema[index]['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_check_data_table_ceilometer_meters,
                duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")

    @decorators.attr(type='smoke')
    def test_update_no_error(self):
        if not test_utils.call_until_true(
                func=lambda: self.check_datasource_no_error('ceilometer'),
                duration=30, sleep_for=5):
            raise exceptions.TimeoutException('Datasource could not poll '
                                              'without error.')
from time import time
from typing import Optional

from scale.network.interface import Interface

# Node is considered to be active if it was seen in the last 5 seconds
NETWORK_ALIVE_TIMEOUT = 5

# Loopback/unspecified addresses and interfaces that should never be recorded
IGNORED_IPS = {"", "127.0.0.1", "::1", "0.0.0.0"}


class Node:
    def __init__(self, hostname: str) -> None:
        self.hostname = hostname
        # These start unset, so their annotations are Optional rather than
        # the bare types the values eventually take.
        self.public_key: Optional[str] = None
        self.interfaces: list[Interface] = []
        self.last_seen: Optional[float] = None
        self.is_local: bool = False

    def add_interface(self, interface: Interface) -> None:
        if interface.ip not in IGNORED_IPS and interface.name != "lo":
            self.interfaces.append(interface)

    def set_public_key(self, public_key: str) -> None:
        self.public_key = public_key

    def is_alive(self) -> bool:
        if self.is_local:
            return True
        if self.last_seen is None:
            return False
        return (time() - self.last_seen) < NETWORK_ALIVE_TIMEOUT

    def is_dead(self) -> bool:
        return not self.is_alive()

    def to_dict(self) -> dict:
        return {
            "hostname": self.hostname,
            "public_key": self.public_key,
            "interfaces": [interface.to_dict() for interface in self.interfaces],
        }

    @staticmethod
    def from_dict(d: dict) -> "Node":
        node = Node(d['hostname'])
        node.set_public_key(d['public_key'])
        for interface_dict in d['interfaces']:
            node.add_interface(Interface.from_dict(interface_dict))
        return node
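

# Hedged usage sketch (not part of the module): exercising the liveness
# window. The _FakeInterface stub below is hypothetical; the real Interface
# class lives in scale.network.interface and also provides from_dict().
if __name__ == "__main__":
    class _FakeInterface:
        def __init__(self, name: str, ip: str) -> None:
            self.name, self.ip = name, ip

        def to_dict(self) -> dict:
            return {"name": self.name, "ip": self.ip}

    node = Node("host-a")
    node.add_interface(_FakeInterface("eth0", "192.0.2.10"))  # kept
    node.add_interface(_FakeInterface("lo", "127.0.0.1"))     # filtered out
    node.last_seen = time()
    assert node.is_alive()   # seen just now, inside the 5 second window
    node.last_seen = time() - 10
    assert node.is_dead()    # older than NETWORK_ALIVE_TIMEOUT
    print(node.to_dict())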
#!/usr/bin/env python # encoding: utf-8 ################################################################################ # # RMG - Reaction Mechanism Generator # # Copyright (c) 2009-2011 by the RMG Team (rmg_dev@mit.edu) # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ################################################################################ """ This module contains methods for generation of resonance structures of molecules. The main function to generate all relevant resonance structures for a given Molecule object is ``generateResonanceStructures``. It calls the necessary functions for generating each type of resonance structure. Currently supported resonance types: - All species: - ``generateAdjacentResonanceStructures``: single radical shift with double or triple bond - ``generateLonePairRadicalResonanceStructures``: single radical shift with lone pair - ``generateN5dd_N5tsResonanceStructures``: shift between nitrogen with two double bonds and single + triple bond - Aromatic species only: - ``generateAromaticResonanceStructures``: fully delocalized structure, where all aromatic rings have benzene bonds - ``generateKekuleStructure``: generate a single Kekule structure for an aromatic compound (single/double bond form) - ``generateOppositeKekuleStructure``: for monocyclic aromatic species, rotate the double bond assignment - ``generateClarStructures``: generate all structures with the maximum number of pi-sextet assignments """ import cython import rmgpy.molecule.generator as generator import rmgpy.molecule.parser as parser from .graph import Vertex, Edge, Graph, getVertexConnectivityValue from .molecule import Atom, Bond, Molecule from .atomtype import AtomTypeError import rmgpy.molecule.pathfinder as pathfinder def populateResonanceAlgorithms(features=None): """ Generate list of resonance structure algorithms relevant to the current molecule. Takes a dictionary of features generated by analyzeMolecule(). Returns a list of resonance algorithms. 
""" cython.declare(methodList=list) methodList = [] if features is None: methodList = [ generateAdjacentResonanceStructures, generateLonePairRadicalResonanceStructures, generateN5dd_N5tsResonanceStructures, generateAromaticResonanceStructures, generateKekuleStructure, generateOppositeKekuleStructure, generateClarStructures, ] else: if features['isAromatic']: methodList.append(generateAromaticResonanceStructures) methodList.append(generateKekuleStructure) if features['isPolycyclicAromatic']: methodList.append(generateClarStructures) else: methodList.append(generateOppositeKekuleStructure) if features['isRadical'] and not features['isArylRadical']: methodList.append(generateAdjacentResonanceStructures) if features['hasNitrogen']: methodList.append(generateN5dd_N5tsResonanceStructures) if features['hasLonePairs']: methodList.append(generateLonePairRadicalResonanceStructures) return methodList def analyzeMolecule(mol): """ Identify key features of molecule important for resonance structure generation. Returns a dictionary of features. """ cython.declare(features=dict) features = {'isRadical': mol.isRadical(), 'isCyclic': mol.isCyclic(), 'isAromatic': False, 'isPolycyclicAromatic': False, 'isArylRadical': False, 'hasNitrogen': False, 'hasOxygen': False, 'hasLonePairs': False, } if features['isCyclic']: ASSSR = mol.getAromaticSSSR()[0] if len(ASSSR) > 0: features['isAromatic'] = True if len(ASSSR) > 1: features['isPolycyclicAromatic'] = True if features['isRadical'] and features['isAromatic']: features['isArylRadical'] = mol.isArylRadical(ASSSR) for atom in mol.vertices: if atom.isNitrogen(): features['hasNitrogen'] = True if atom.isOxygen(): features['hasOxygen'] = True if atom.lonePairs > 0: features['hasLonePairs'] = True return features def generateResonanceStructures(mol): """ Generate and return all of the resonance structures for the input molecule. Most of the complexity of this method goes into handling aromatic species, particularly to generate an accurate set of resonance structures that is consistent regardless of the input structure. The following considerations are made: 1. False positives from RDKit aromaticity detection can occur if a molecule has exocyclic double bonds 2. False negatives from RDKit aromaticity detection can occur if a radical is delocalized into an aromatic ring 3. sp2 hybridized radicals in the plane of an aromatic ring do not participate in hyperconjugation 4. Non-aromatic resonance structures of PAHs are not important resonance contributors (assumption) Aromatic species are broken into the following categories for resonance treatment: - Radical polycyclic aromatic species: Kekule structures are generated in order to generate adjacent resonance structures. The resulting structures are then used for Clar structure generation. After all three steps, any non-aromatic structures are removed, under the assumption that they are not important resonance contributors. - Radical monocyclic aromatic species: Kekule structures are generated along with adjacent resonance structures. All are kept regardless of aromaticity because the radical is more likely to delocalize into the ring. 
- Stable polycyclic aromatic species: Clar structures are generated - Stable monocyclic aromatic species: Kekule structures are generated """ cython.declare(molList=list, newMolList=list, features=dict, methodList=list) molList = [mol] # Analyze molecule features = analyzeMolecule(mol) # Use generateAromaticResonanceStructures to check for false positives and negatives if features['isAromatic'] or (features['isCyclic'] and features['isRadical'] and not features['isArylRadical']): newMolList = generateAromaticResonanceStructures(mol, features) if len(newMolList) == 0: # Encountered false positive, ie. the molecule is not actually aromatic features['isAromatic'] = False features['isPolycyclicAromatic'] = False else: newMolList = [] if len(newMolList) > 0: if features['isRadical'] and not features['isArylRadical']: if features['isPolycyclicAromatic']: _generateResonanceStructures(newMolList, [generateKekuleStructure]) _generateResonanceStructures(newMolList, [generateAdjacentResonanceStructures]) _generateResonanceStructures(newMolList, [generateClarStructures]) # Remove non-aromatic structures under the assumption that they aren't important resonance contributors newMolList = [m for m in newMolList if m.isAromatic()] else: _generateResonanceStructures(newMolList, [generateKekuleStructure, generateOppositeKekuleStructure]) _generateResonanceStructures(newMolList, [generateAdjacentResonanceStructures]) elif features['isPolycyclicAromatic']: _generateResonanceStructures(newMolList, [generateClarStructures]) else: # The molecule is an aryl radical or stable mono-ring aromatic # In this case, generate the kekulized form _generateResonanceStructures(newMolList, [generateKekuleStructure, generateOppositeKekuleStructure]) # Check for isomorphism against the original molecule for newMol in newMolList: if mol.isIsomorphic(newMol): # There will be at most one isomorphic molecule, since the new molecules have # already been checked against each other, so we can break after removing it newMolList.remove(newMol) break # Add the newly generated structures to the original list # This is not optimal, but is a temporary measure to ensure compatability until other issues are fixed molList.extend(newMolList) else: methodList = populateResonanceAlgorithms(features) _generateResonanceStructures(molList, methodList) return molList def _generateResonanceStructures(molList, methodList, copy=False): """ Iteratively generate all resonance structures for a list of starting molecules using the specified methods. Args: molList starting list of molecules methodList list of resonance structure algorithms copy if False, append new resonance structures to input list (default) if True, make a new list with all of the resonance structures """ cython.declare(index=cython.int, molecule=Molecule, newMolList=list, newMol=Molecule, mol=Molecule) if copy: # Make a copy of the list so we don't modify the input list molList = molList[:] # Iterate over resonance isomers index = 0 while index < len(molList): molecule = molList[index] newMolList = [] for method in methodList: newMolList.extend(method(molecule)) for newMol in newMolList: # Append to isomer list if unique for mol in molList: if mol.isIsomorphic(newMol): break else: molList.append(newMol) # Move to next resonance isomer index += 1 return molList def generateAdjacentResonanceStructures(mol): """ Generate all of the resonance structures formed by one allyl radical shift. Biradicals on a single atom are not supported. 
""" cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule) cython.declare(atom=Atom, atom1=Atom, atom2=Atom, atom3=Atom, bond12=Bond, bond23=Bond) cython.declare(v1=Vertex, v2=Vertex) isomers = [] # Radicals if mol.isRadical(): # Iterate over radicals in structure for atom in mol.vertices: paths = pathfinder.findAllDelocalizationPaths(atom) for atom1, atom2, atom3, bond12, bond23 in paths: # Adjust to (potentially) new resonance isomer atom1.decrementRadical() atom3.incrementRadical() bond12.incrementOrder() bond23.decrementOrder() # Make a copy of isomer isomer = mol.copy(deep=True) # Also copy the connectivity values, since they are the same # for all resonance forms for index in range(len(mol.vertices)): v1 = mol.vertices[index] v2 = isomer.vertices[index] v2.connectivity1 = v1.connectivity1 v2.connectivity2 = v1.connectivity2 v2.connectivity3 = v1.connectivity3 v2.sortingLabel = v1.sortingLabel # Restore current isomer atom1.incrementRadical() atom3.decrementRadical() bond12.decrementOrder() bond23.incrementOrder() # Append to isomer list if unique isomer.updateAtomTypes(logSpecies=False) isomers.append(isomer) return isomers def generateLonePairRadicalResonanceStructures(mol): """ Generate all of the resonance structures formed by lone electron pair - radical shifts. """ cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule) cython.declare(atom=Atom, atom1=Atom, atom2=Atom) cython.declare(v1=Vertex, v2=Vertex) isomers = [] # Radicals if mol.isRadical(): # Iterate over radicals in structure for atom in mol.vertices: paths = pathfinder.findAllDelocalizationPathsLonePairRadical(atom) for atom1, atom2 in paths: # Adjust to (potentially) new resonance isomer atom1.decrementRadical() atom1.incrementLonePairs() atom1.updateCharge() atom2.incrementRadical() atom2.decrementLonePairs() atom2.updateCharge() # Make a copy of isomer isomer = mol.copy(deep=True) # Also copy the connectivity values, since they are the same # for all resonance forms for index in range(len(mol.vertices)): v1 = mol.vertices[index] v2 = isomer.vertices[index] v2.connectivity1 = v1.connectivity1 v2.connectivity2 = v1.connectivity2 v2.connectivity3 = v1.connectivity3 v2.sortingLabel = v1.sortingLabel # Restore current isomer atom1.incrementRadical() atom1.decrementLonePairs() atom1.updateCharge() atom2.decrementRadical() atom2.incrementLonePairs() atom2.updateCharge() # Append to isomer list if unique isomer.updateAtomTypes(logSpecies=False) isomers.append(isomer) return isomers def generateN5dd_N5tsResonanceStructures(mol): """ Generate all of the resonance structures formed by shifts between N5dd and N5ts. 
""" cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule) cython.declare(atom=Atom, atom1=Atom, atom2=Atom, atom3=Atom) cython.declare(bond12=Bond, bond13=Bond) cython.declare(v1=Vertex, v2=Vertex) isomers = [] # Iterate over nitrogen atoms in structure for atom in mol.vertices: paths = pathfinder.findAllDelocalizationPathsN5dd_N5ts(atom) for atom1, atom2, atom3, bond12, bond13, direction in paths: # from N5dd to N5ts if direction == 1: # Adjust to (potentially) new resonance isomer bond12.decrementOrder() bond13.incrementOrder() atom2.incrementLonePairs() atom3.decrementLonePairs() atom1.updateCharge() atom2.updateCharge() atom3.updateCharge() # Make a copy of isomer isomer = mol.copy(deep=True) # Also copy the connectivity values, since they are the same # for all resonance forms for index in range(len(mol.vertices)): v1 = mol.vertices[index] v2 = isomer.vertices[index] v2.connectivity1 = v1.connectivity1 v2.connectivity2 = v1.connectivity2 v2.connectivity3 = v1.connectivity3 v2.sortingLabel = v1.sortingLabel # Restore current isomer bond12.incrementOrder() bond13.decrementOrder() atom2.decrementLonePairs() atom3.incrementLonePairs() atom1.updateCharge() atom2.updateCharge() atom3.updateCharge() # Append to isomer list if unique isomer.updateAtomTypes(logSpecies=False) isomers.append(isomer) # from N5ts to N5dd if direction == 2: # Adjust to (potentially) new resonance isomer bond12.decrementOrder() bond13.incrementOrder() atom2.incrementLonePairs() atom3.decrementLonePairs() atom1.updateCharge() atom2.updateCharge() atom3.updateCharge() # Make a copy of isomer isomer = mol.copy(deep=True) # Also copy the connectivity values, since they are the same # for all resonance forms for index in range(len(mol.vertices)): v1 = mol.vertices[index] v2 = isomer.vertices[index] v2.connectivity1 = v1.connectivity1 v2.connectivity2 = v1.connectivity2 v2.connectivity3 = v1.connectivity3 v2.sortingLabel = v1.sortingLabel # Restore current isomer bond12.incrementOrder() bond13.decrementOrder() atom2.decrementLonePairs() atom3.incrementLonePairs() atom1.updateCharge() atom2.updateCharge() atom3.updateCharge() # Append to isomer list if unique isomer.updateAtomTypes(logSpecies=False) isomers.append(isomer) return isomers def generateAromaticResonanceStructures(mol, features=None): """ Generate the aromatic form of the molecule. For radicals, generates the form with the most aromatic rings. Returns result as a list. In most cases, only one structure will be returned. In certain cases where multiple forms have the same number of aromatic rings, multiple structures will be returned. If there's an error (eg. in RDKit) it just returns an empty list. """ cython.declare(molecule=Molecule, SSSR=list, rings=list, aromaticBonds=list, kekuleList=list, maxNum=cython.int, molList=list, newMolList=list, ring=list, bond=Bond) if features is None: features = analyzeMolecule(mol) if not features['isCyclic']: return [] molecule = mol.copy(deep=True) # First get all rings in the molecule SSSR = molecule.getSmallestSetOfSmallestRings() rings = [ring0 for ring0 in SSSR if len(ring0) == 6] # Then determine which ones are aromatic aromaticBonds = molecule.getAromaticSSSR(SSSR)[1] # If the species is a radical and the number of aromatic rings is less than the number of total rings, # then there is a chance that the radical can be shifted to a location that increases the number of aromatic rings. 
if (features['isRadical'] and not features['isArylRadical']) and (len(aromaticBonds) < len(rings)): if molecule.isAromatic(): kekuleList = generateKekuleStructure(molecule) else: kekuleList = [molecule] _generateResonanceStructures(kekuleList, [generateAdjacentResonanceStructures]) maxNum = 0 molList = [] # Iterate through the adjacent resonance structures and keep the structures with the most aromatic rings for mol0 in kekuleList: aromaticBonds = mol0.getAromaticSSSR()[1] if len(aromaticBonds) > maxNum: maxNum = len(aromaticBonds) molList = [(mol0, aromaticBonds)] elif len(aromaticBonds) == maxNum: molList.append((mol0, aromaticBonds)) else: # Otherwise, it is not possible to increase the number of aromatic rings by moving electrons, # so go ahead with the inputted form of the molecule molList = [(molecule, aromaticBonds)] newMolList = [] # Generate the aromatic resonance structure(s) for mol0, aromaticBonds in molList: if not aromaticBonds: continue for ring in aromaticBonds: for bond in ring: bond.order = 1.5 try: mol0.updateAtomTypes(logSpecies=False) except AtomTypeError: continue for mol1 in newMolList: if mol1.isIsomorphic(mol0): break else: newMolList.append(mol0) return newMolList def generateKekuleStructure(mol): """ Generate a kekulized (single-double bond) form of the molecule. The specific arrangement of double bonds is non-deterministic, and depends on RDKit. Returns a single Kekule structure as an element of a list of length 1. If there's an error (eg. in RDKit) then it just returns an empty list. """ cython.declare(atom=Atom) for atom in mol.atoms: if atom.atomType.label == 'Cb' or atom.atomType.label == 'Cbf': break else: return [] try: rdkitmol = generator.toRDKitMol(mol) # This perceives aromaticity isomer = parser.fromRDKitMol(Molecule(), rdkitmol) # This step Kekulizes the molecule except ValueError: return [] isomer.updateAtomTypes(logSpecies=False) return [isomer] def generateOppositeKekuleStructure(mol): """ Generate the Kekule structure with opposite single/double bond arrangement for single ring aromatics. Returns a single Kekule structure as an element of a list of length 1. """ # This won't work with the aromatic form of the molecule if mol.isAromatic(): return [] molecule = mol.copy(deep=True) aromaticBonds = molecule.getAromaticSSSR()[1] # We can only do this for single ring aromatics for now if len(aromaticBonds) != 1: return [] numS = 0 numD = 0 for bond in aromaticBonds[0]: if bond.isSingle(): numS += 1 bond.order = 2 elif bond.isDouble(): numD += 1 bond.order = 1 else: # Something is wrong: there is a bond that is not single or double return [] if numS != 3 or numD != 3: return [] try: molecule.updateAtomTypes() except AtomTypeError: return [] else: return [molecule] def generateIsomorphicResonanceStructures(mol): """ Select the resonance isomer that is isomorphic to the parameter isomer, with the lowest unpaired electrons descriptor. We generate over all resonance isomers (non-isomorphic as well as isomorphic) and retain isomorphic isomers. WIP: do not generate aromatic resonance isomers. """ cython.declare(isomorphic_isomers=list,\ isomers=list, ) cython.declare(isomer=Molecule,\ newIsomer=Molecule,\ isom=Molecule ) cython.declare(index=int) isomorphic_isomers = [mol]# resonance isomers that are isomorphic to the parameter isomer. 
isomers = [mol] # Iterate over resonance isomers index = 0 while index < len(isomers): isomer = isomers[index] newIsomers = [] for algo in populateResonanceAlgorithms(): newIsomers.extend(algo(isomer)) for newIsomer in newIsomers: # Append to isomer list if unique for isom in isomers: if isom.copy(deep=True).isIsomorphic(newIsomer.copy(deep=True)): isomorphic_isomers.append(newIsomer) break else: isomers.append(newIsomer) # Move to next resonance isomer index += 1 return isomorphic_isomers def generateClarStructures(mol): """ Generate Clar structures for a given molecule. Returns a list of :class:`Molecule` objects corresponding to the Clar structures. """ cython.declare(output=list, molList=list, newmol=Molecule, asssr=list, bonds=list, solution=list, y=list, x=list, index=cython.int, bond=Bond, ring=list) if not mol.isCyclic(): return [] output = _clarOptimization(mol) molList = [] for newmol, asssr, bonds, solution in output: # The solution includes a part corresponding to rings, y, and a part corresponding to bonds, x, using # nomenclature from the paper. In y, 1 means the ring as a sextet, 0 means it does not. # In x, 1 corresponds to a double bond, 0 either means a single bond or the bond is part of a sextet. y = solution[0:len(asssr)] x = solution[len(asssr):] # Apply results to molecule - double bond locations first for index, bond in enumerate(bonds): if x[index] == 0: bond.order = 1 # single elif x[index] == 1: bond.order = 2 # double else: raise ValueError('Unaccepted bond value {0} obtained from optimization.'.format(x[index])) # Then apply locations of aromatic sextets by converting to benzene bonds for index, ring in enumerate(asssr): if y[index] == 1: _clarTransformation(newmol, ring) try: newmol.updateAtomTypes() except AtomTypeError: pass else: molList.append(newmol) return molList def _clarOptimization(mol, constraints=None, maxNum=None): """ Implements linear programming algorithm for finding Clar structures. This algorithm maximizes the number of Clar sextets within the constraints of molecular geometry and atom valency. Returns a list of valid Clar solutions in the form of a tuple, with the following entries: [0] Molecule object [1] List of aromatic rings [2] List of bonds [3] Optimization solution The optimization solution is a list of boolean values with sextet assignments followed by double bond assignments, with indices corresponding to the list of aromatic rings and list of bonds, respectively. Method adapted from: Hansen, P.; Zheng, M. The Clar Number of a Benzenoid Hydrocarbon and Linear Programming. J. Math. Chem. 1994, 15 (1), 93–107. 
""" cython.declare(molecule=Molecule, asssr=list, exo=list, l=cython.int, m=cython.int, n=cython.int, a=list, objective=list, status=cython.int, solution=list, innerSolutions=list) from lpsolve55 import lpsolve import signal # Save the current signal handler sig = signal.getsignal(signal.SIGINT) # Make a copy of the molecule so we don't destroy the original molecule = mol.copy(deep=True) asssr = molecule.getAromaticSSSR()[0] if not asssr: return [] # Get list of atoms that are in rings atoms = set() for ring in asssr: atoms.update(ring) atoms = list(atoms) # Get list of bonds involving the ring atoms, ignoring bonds to hydrogen bonds = set() for atom in atoms: bonds.update([atom.bonds[key] for key in atom.bonds.keys() if key.isNonHydrogen()]) bonds = list(bonds) # Identify exocyclic bonds, and save their bond orders exo = [] for bond in bonds: if bond.atom1 not in atoms or bond.atom2 not in atoms: if bond.isDouble(): exo.append(1) else: exo.append(0) else: exo.append(None) # Dimensions l = len(asssr) m = len(atoms) n = l + len(bonds) # Connectivity matrix which indicates which rings and bonds each atom is in # Part of equality constraint Ax=b a = [] for atom in atoms: inRing = [1 if atom in ring else 0 for ring in asssr] inBond = [1 if atom in [bond.atom1, bond.atom2] else 0 for bond in bonds] a.append(inRing + inBond) # Objective vector for optimization: sextets have a weight of 1, double bonds have a weight of 0 objective = [1] * l + [0] * len(bonds) # Solve LP problem using lpsolve lp = lpsolve('make_lp', m, n) # initialize lp with constraint matrix with m rows and n columns lpsolve('set_verbose', lp, 2) # reduce messages from lpsolve lpsolve('set_obj_fn', lp, objective) # set objective function lpsolve('set_maxim', lp) # set solver to maximize objective lpsolve('set_mat', lp, a) # set left hand side to constraint matrix lpsolve('set_rh_vec', lp, [1] * m) # set right hand side to 1 for all constraints lpsolve('set_constr_type', lp, ['='] * m) # set all constraints as equality constraints lpsolve('set_binary', lp, [True] * n) # set all variables to be binary # Constrain values of exocyclic bonds, since we don't want to modify them for i in range(l, n): if exo[i - l] is not None: # NOTE: lpsolve indexes from 1, so the variable we're changing should be i + 1 lpsolve('set_bounds', lp, i + 1, exo[i - l], exo[i - l]) # Add constraints to problem if provided if constraints is not None: for constraint in constraints: lpsolve('add_constraint', lp, constraint[0], '<=', constraint[1]) status = lpsolve('solve', lp) objVal, solution = lpsolve('get_solution', lp)[0:2] lpsolve('delete_lp', lp) # Delete the LP problem to clear up memory # Reset signal handling since lpsolve changed it signal.signal(signal.SIGINT, sig) # Check that optimization was successful if status != 0: raise ILPSolutionError('Optimization could not find a valid solution.') # Check that we the result contains at least one aromatic sextet if objVal == 0: return [] # Check that the solution contains the maximum number of sextets possible if maxNum is None: maxNum = objVal # This is the first solution, so the result should be an upper limit elif objVal < maxNum: raise ILPSolutionError('Optimization obtained a sub-optimal solution.') if any([x != 1 and x != 0 for x in solution]): raise ILPSolutionError('Optimization obtained a non-integer solution.') # Generate constraints based on the solution obtained y = solution[0:l] new_a = y + [0] * len(bonds) new_b = sum(y) - 1 if constraints is not None: constraints.append((new_a, new_b)) else: 
constraints = [(new_a, new_b)] # Run optimization with additional constraints try: innerSolutions = _clarOptimization(mol, constraints=constraints, maxNum=maxNum) except ILPSolutionError: innerSolutions = [] return innerSolutions + [(molecule, asssr, bonds, solution)] def _clarTransformation(mol, aromaticRing): """ Performs Clar transformation for given ring in a molecule, ie. conversion to aromatic sextet. Args: mol a :class:`Molecule` object aromaticRing a list of :class:`Atom` objects corresponding to an aromatic ring in mol This function directly modifies the input molecule and does not return anything. """ cython.declare(bondList=list, i=cython.int, atom1=Atom, atom2=Atom, bond=Bond) bondList = [] for i, atom1 in enumerate(aromaticRing): for atom2 in aromaticRing[i + 1:]: if mol.hasBond(atom1, atom2): bondList.append(mol.getBond(atom1, atom2)) for bond in bondList: bond.order = 1.5 class ILPSolutionError(Exception): """ An exception to be raised when solving an integer linear programming problem if a solution could not be found or the solution is not valid. Can pass a string to indicate the reason that the solution is invalid. """ pass
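

################################################################################
# Hedged usage sketch (not part of the module): generating resonance structures
# for the allyl radical. This assumes the camelCase RMG-Py API contemporary
# with this module (Molecule().fromSMILES / toSMILES); treat it as
# illustrative rather than a supported entry point.
if __name__ == '__main__':
    from rmgpy.molecule import Molecule

    allyl = Molecule().fromSMILES('[CH2]C=C')
    structures = generateResonanceStructures(allyl)
    # The allyl radical delocalizes over both terminal carbons, so we expect
    # the input structure plus one radical-shifted resonance form.
    for structure in structures:
        print(structure.toSMILES())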
# Copyright (C) 2010 Apple Inc. All rights reserved. # Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for changelog.py.""" import changelog import unittest2 as unittest class ChangeLogCheckerTest(unittest.TestCase): """Tests ChangeLogChecker class.""" def assert_no_error(self, lines_to_check, changelog_data): def handle_style_error(line_number, category, confidence, message): self.fail('Unexpected error: %d %s %d %s for\n%s' % (line_number, category, confidence, message, changelog_data)) self.lines_to_check = set(lines_to_check) checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked) checker.check(changelog_data.split('\n')) def assert_error(self, expected_line_number, lines_to_check, expected_category, changelog_data): self.had_error = False def handle_style_error(line_number, category, confidence, message): self.had_error = True self.assertEqual(expected_line_number, line_number) self.assertEqual(expected_category, category) self.lines_to_check = set(lines_to_check) checker = changelog.ChangeLogChecker('ChangeLog', handle_style_error, self.mock_should_line_be_checked) checker.check(changelog_data.split('\n')) self.assertTrue(self.had_error) def mock_handle_style_error(self): pass def mock_should_line_be_checked(self, line_number): return line_number in self.lines_to_check def test_init(self): checker = changelog.ChangeLogChecker('ChangeLog', self.mock_handle_style_error, self.mock_should_line_be_checked) self.assertEqual(checker.file_path, 'ChangeLog') self.assertEqual(checker.handle_style_error, self.mock_handle_style_error) self.assertEqual(checker.should_line_be_checked, self.mock_should_line_be_checked) def test_missing_bug_number(self): self.assert_error(1, range(1, 20), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n') self.assert_error(1, range(1, 20), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' http://bugs.webkit.org/show_bug.cgi?id=\n') self.assert_error(1, range(1, 20), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' https://bugs.webkit.org/show_bug.cgi?id=\n') self.assert_error(1, range(1, 20), 
'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' http://webkit.org/b/\n') self.assert_error(1, range(1, 20), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug' '\n' ' http://trac.webkit.org/changeset/12345\n') self.assert_error(2, range(2, 5), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' Example bug\n' ' https://bugs.webkit.org/show_bug.cgi\n' '\n' '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' Another change\n') self.assert_error(2, range(2, 6), 'changelog/bugnumber', '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' Example bug\n' ' More text about bug.\n' '\n' '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' No bug in this change.\n') def test_file_descriptions(self): self.assert_error(5, range(1, 20), 'changelog/filechangedescriptionwhitespace', '2011-01-01 Dmitry Lomov <dslomov@google.com>\n' ' ExampleBug\n' ' http://bugs.webkit.org/show_bug.cgi?id=12345\n' '\n' ' * Source/Tools/random-script.py:Fixed') self.assert_error(6, range(1, 20), 'changelog/filechangedescriptionwhitespace', '2011-01-01 Dmitry Lomov <dslomov@google.com>\n' ' ExampleBug\n' ' http://bugs.webkit.org/show_bug.cgi?id=12345\n' '\n' ' * Source/Tools/another-file: Done\n' ' * Source/Tools/random-script.py:Fixed\n' ' * Source/Tools/one-morefile:\n') def test_no_new_tests(self): self.assert_error(5, range(1, 20), 'changelog/nonewtests', '2011-01-01 Dmitry Lomov <dslomov@google.com>\n' ' ExampleBug\n' ' http://bugs.webkit.org/show_bug.cgi?id=12345\n' '\n' ' No new tests. (OOPS!)\n' ' * Source/Tools/random-script.py: Fixed') def test_no_error(self): self.assert_no_error([], '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example ChangeLog entry out of range\n' ' http://example.com/\n') self.assert_no_error([], '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' http://bugs.webkit.org/show_bug.cgi?id=12345\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' http://bugs.webkit.org/show_bug.cgi?id=12345\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' https://bugs.webkit.org/show_bug.cgi?id=12345\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Example bug\n' ' http://webkit.org/b/12345\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Unreview build fix for r12345.\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Fix build after a bad change.\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' '\n' ' Fix example port build.\n') self.assert_no_error(range(2, 6), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' Example bug\n' ' https://bugs.webkit.org/show_bug.cgi?id=12345\n' '\n' '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' No bug here!\n') self.assert_no_error(range(1, 20), '2011-01-01 Patrick Gansterer <paroga@paroga.com>\n' ' Example bug\n' ' https://bugs.webkit.org/show_bug.cgi?id=12345\n' ' * Source/WebKit/foo.cpp: \n' ' * Source/WebKit/bar.cpp:\n' ' * Source/WebKit/foobar.cpp: Description\n')
""" This file defines minimal Tree/Node class for the PartGraph Shapes dataset for part tree usage """ import os import sys import json import torch import numpy as np from torch.utils import data from collections import namedtuple import utils import kornia import torch.nn.functional as F import copy from utils import one_hot # store a part hierarchy of graphs for a shape class Tree(object): # global object category information part_name2id = dict() part_id2name = dict() part_name2cids = dict() part_non_leaf_sem_names = [] num_sem = None root_sem = None leaf_geos = None cate_id = None @ staticmethod def load_category_info(cat): with open(os.path.join('../stats/part_semantics/', cat+'.txt'), 'r') as fin: for l in fin.readlines(): x, y, _ = l.rstrip().split() x = int(x) Tree.part_name2id[y] = x Tree.part_id2name[x] = y Tree.part_name2cids[y] = [] if '/' in y: Tree.part_name2cids['/'.join(y.split('/')[:-1])].append(x) Tree.num_sem = len(Tree.part_name2id) + 1 print(Tree.num_sem) for k in Tree.part_name2cids: Tree.part_name2cids[k] = np.array(Tree.part_name2cids[k], dtype=np.int32) if len(Tree.part_name2cids[k]) > 0: Tree.part_non_leaf_sem_names.append(k) Tree.root_sem = Tree.part_id2name[1] # store a part node in the tree class Node(object): def __init__(self, device=None, part_id=None, label=None, full_label=None, group_id=None, group_ins_id=None, is_leaf = False, box = None, children = None, edges = None, geo=None, geo_feat=None, dggeo = None, faces = None): self.device = device # device that this node lives self.part_id = part_id # part_id in result_after_merging.json of PartNet self.group_id = group_id # group_id is 0, 1, 2, ...; it will be the same for equivalent subtree nodes self.group_ins_id = group_ins_id# group_ins_id is 0, 1, 2, ... within each equivalent class self.label = label # node semantic label at the current level self.full_label = full_label # node semantic label from root (separated by slash) self.children = [] if children is None else children # initialize to be empty (no children) self.geo_id = None # the index of the part pc geo array self.geo = geo # 1 x 1000 x 3 point cloud self.geo_feat = geo_feat # 1 x 100 geometry feature self.faces = faces # facenum x 3 face index self.dggeo = dggeo # 1 x pointnum x 9 deformation geo feature self.is_leaf = is_leaf self.box = box self.edges = [] if edges is None else edges # all of its children relationships; # each entry is a tuple <part_a, part_b, type, params, dist> """ Here defines the edges format: part_a, part_b: Values are the order in self.children (e.g. 0, 1, 2, 3, ...). This is an directional edge for A->B. If an edge is commutative, you may need to manually specify a B->A edge. For example, an ADJ edge is only shown A->B, there is no edge B->A in the json file. type: Four types considered in StructureNet: ADJ, ROT_SYM, TRANS_SYM, REF_SYM. params: There is no params field for ADJ edge; For ROT_SYM edge, 0-2 pivot point, 3-5 axis unit direction, 6 radian rotation angle; For TRANS_SYM edge, 0-2 translation vector; For REF_SYM edge, 0-2 the middle point of the segment that connects the two box centers, 3-5 unit normal direction of the reflection plane. dist: For ADJ edge, it's the closest distance between two parts; For SYM edge, it's the chamfer distance after matching part B to part A. 
""" def get_semantic_id(self): return Tree.part_name2id[self.full_label] def get_semantic_one_hot(self): out = np.zeros((1, Tree.num_sem), dtype=np.float32) out[0, Tree.part_name2id[self.full_label]] = 1 return torch.tensor(out, dtype=torch.float32).to(device=self.device) def get_group_ins_one_hot(self, max_part_per_parent): out = np.zeros((1, max_part_per_parent), dtype=np.float32) out[0, self.group_ins_id] = 1 return torch.tensor(out, dtype=torch.float32).to(device=self.device) def get_group_ins_id(self): return self.group_ins_id def to(self, device): if self.box is not None and not isinstance(self.box, list): self.box = self.box.to(device) for edge in self.edges: if 'params' in edge: edge['params'].to(device) if self.geo is not None: self.geo = self.geo.to(device) if self.dggeo is not None: self.dggeo = self.dggeo.to(device) for child_node in self.children: child_node.to(device) return self def set_from_box_quat(self, box_quat): box_quat = box_quat.squeeze() center = box_quat[:3] size = box_quat[3:6] rotmat = kornia.quaternion_to_rotation_matrix(box_quat[[7, 8, 9, 6]]) box = torch.cat([center, size, rotmat[:, 0].view(-1), rotmat[:, 1].view(-1)]) self.box = box.view(1, -1).cuda() def get_box_quat(self): box = self.box return box.unsqueeze(1).to(device=self.box.device) def get_box_quat1(self): box = self.box # print(box) center = box[:, :3] size = box[:, 3:6] xdir = box[:, 6:9] xdir = F.normalize(xdir, p=2, dim=1) ydir = box[:, 9:] ydir = F.normalize(ydir, p=2, dim=1) zdir = torch.cross(xdir, ydir, dim = 1) zdir = F.normalize(zdir, p=2, dim=1) rotmat = torch.cat([xdir.unsqueeze(1), ydir.unsqueeze(1), zdir.unsqueeze(1)], dim = 1).transpose(2,1).repeat(2,1,1) # print(rotmat.shape) q1 = kornia.rotation_matrix_to_quaternion(rotmat, eps = 1e-6) quat = q1[:box.size(0), [3, 0, 1, 2]] # print(quat.shape) box_quat = torch.cat([center, size, quat], dim = 1) # self.set_from_box_quat(box_quat) return box_quat.unsqueeze(1).to(device=self.box.device) def _to_str(self, level, pid): out_str = ' |'*(level-1) + ' ├'*(level > 0) + str(pid) + ' ' + self.label + \ (' [LEAF %d] ' % self.geo_id if len(self.children) == 0 else ' ') + \ '{part_id: %d, group_id: %d [%d], subtree_geo_ids: %s}\n' % \ (self.part_id, self.group_id, self.group_ins_id, str(self.subtree_geo_ids)) for idx, child in enumerate(self.children): out_str += child._to_str(level+1, idx) return out_str def __str__(self): return self._to_str(0, 0) def depth_first_traversal(self): nodes = [] stack = [self] while len(stack) > 0: node = stack.pop() nodes.append(node) stack.extend(reversed(node.children)) return nodes def get_leaf_ids(self): leaf_ids = [] if len(self.children) == 0: leaf_ids.append(self.part_id) else: for cnode in self.children: leaf_ids += cnode.get_leaf_ids() return leaf_ids def mark_geo_id(self, d): if self.part_id in d: self.geo_id = d[self.part_id] for cnode in self.children: cnode.mark_geo_id(d) def compute_subtree_geo_ids(self): if len(self.children) == 0: self.subtree_geo_ids = [self.geo_id] else: self.subtree_geo_ids = [] for cnode in self.children: self.subtree_geo_ids += cnode.compute_subtree_geo_ids() return self.subtree_geo_ids def get_subtree_edge_count(self): cnt = 0 if self.children is not None: for cnode in self.children: cnt += cnode.get_subtree_edge_count() if self.edges is not None: cnt += len(self.edges) return cnt def boxes(self, per_node=False, leafs_only=False): nodes = list(reversed(self.depth_first_traversal())) node_boxesets = [] boxes_stack = [] for node in nodes: node_boxes = [] for i in 
range(len(node.children)):
                    node_boxes = boxes_stack.pop() + node_boxes
                if node.box is not None and (not leafs_only or node.is_leaf):
                    node_boxes.append(node.box)
                if per_node:
                    node_boxesets.append(node_boxes)
                boxes_stack.append(node_boxes)

            assert len(boxes_stack) == 1

            if per_node:
                return node_boxesets, list(nodes)
            else:
                boxes = boxes_stack[0]
                return boxes

        def graph(self, leafs_only=False):
            part_boxes = []
            part_geos = []
            edges = []
            part_ids = []
            part_sems = []
            nodes = list(reversed(self.depth_first_traversal()))
            box_index_offset = 0
            for node in nodes:
                child_count = 0
                box_idx = {}
                for i, child in enumerate(node.children):
                    if leafs_only and not child.is_leaf:
                        continue
                    part_boxes.append(child.box)
                    part_geos.append(child.geo)
                    part_ids.append(child.part_id)
                    part_sems.append(child.full_label)
                    box_idx[i] = child_count + box_index_offset
                    child_count += 1
                for edge in node.edges:
                    if leafs_only and not (
                            node.children[edge['part_a']].is_leaf
                            and node.children[edge['part_b']].is_leaf):
                        continue
                    edges.append(edge.copy())
                    edges[-1]['part_a'] = box_idx[edges[-1]['part_a']]
                    edges[-1]['part_b'] = box_idx[edges[-1]['part_b']]
                box_index_offset += child_count
            return part_boxes, part_geos, edges, part_ids, part_sems

        def edge_tensors(self, edge_types, device, type_onehot=True):
            num_edges = len(self.edges)
            # get directed edge indices in both directions as tensor
            edge_indices = torch.tensor(
                [[e['part_a'], e['part_b']] for e in self.edges]
                + [[e['part_b'], e['part_a']] for e in self.edges],
                device=device, dtype=torch.long).view(1, num_edges * 2, 2)
            # get edge type as tensor
            edge_type = torch.tensor(
                [edge_types.index(edge['type']) for edge in self.edges],
                device=device, dtype=torch.long)
            if type_onehot:
                edge_type = one_hot(inp=edge_type, label_count=len(edge_types)).transpose(
                    0, 1).view(1, num_edges, len(edge_types)).to(dtype=torch.float32)
            else:
                edge_type = edge_type.view(1, num_edges)
            # add edges in the other direction (symmetric adjacency)
            edge_type = torch.cat([edge_type, edge_type], dim=1)
            return edge_type, edge_indices

        def free(self):
            for node in self.depth_first_traversal():
                node.geo = []
                node.dggeo = []
                node.geo_feat = []
                node.box = []

    # functions for class Tree
    def __init__(self, root):
        self.root = root

    def to(self, device):
        self.root = self.root.to(device)
        return self

    def graph(self, leafs_only=False):
        return self.root.graph(leafs_only=leafs_only)

    @staticmethod
    def load_template(fn, device):
        with open(fn, 'r') as f:
            root_json = json.load(f)

        # create a virtual parent node of the root node and add it to the stack
        StackElement = namedtuple('StackElement', ['node_json', 'parent', 'parent_child_idx'])
        stack = [StackElement(node_json=root_json, parent=None, parent_child_idx=None)]
        root = None

        # traverse the tree, converting each node json to a Node instance
        while len(stack) > 0:
            stack_elm = stack.pop()

            parent = stack_elm.parent
            parent_child_idx = stack_elm.parent_child_idx
            node_json = stack_elm.node_json
            if node_json['group_ins_id'] > 9:
                # debug check for unexpectedly large group instance ids
                print(fn)

            node = Tree.Node(
                device=device,
                part_id=node_json['id'],
                group_id=node_json['group_id'],
                group_ins_id=node_json['group_ins_id'],
                label=node_json['label'],
                is_leaf=('children' not in node_json),
                geo=[], dggeo=[], box=[])

            if 'children' in node_json:
                for ci, child in enumerate(node_json['children']):
                    stack.append(StackElement(node_json=node_json['children'][ci],
                                              parent=node, parent_child_idx=ci))

            if 'edges' in node_json:
                for edge in node_json['edges']:
                    if 'params' in edge:
                        edge['params'] = torch.from_numpy(
                            np.array(edge['params'])).to(dtype=torch.float32)
                    node.edges.append(edge)

            if parent is None:
                root = node
                root.full_label = root.label
            else:
                if len(parent.children) <= parent_child_idx:
                    parent.children.extend([None] * (parent_child_idx + 1 - len(parent.children)))
                parent.children[parent_child_idx] = node
                node.full_label = parent.full_label + '/' + node.label

        return root


# extend torch.data.Dataset class for PartNet
class PartGraphShapesDataset(data.Dataset):

    def __init__(self, data_dir, pg_dir, device, batch_size, mode='sample_by_template'):
        self.data_dir = data_dir
        self.pg_dir = pg_dir
        self.device = device
        self.batch_size = batch_size
        self.mode = mode
        # self.errid = []

        self.pg_shapes = []
        self.sample_by_shape_pgids = []
        with open(os.path.join(pg_dir, 'info.txt'), 'r') as fin:
            for i, l in enumerate(fin.readlines()):
                cur_pg_shapes = l.rstrip().split()
                self.pg_shapes.append(cur_pg_shapes)
                self.sample_by_shape_pgids += [i] * len(cur_pg_shapes)

        self.pg_templates = []
        self.pg_leaf_ids = []
        self.leaf_mappings = []
        for i in range(len(self.pg_shapes)):
            cur_pg_dir = os.path.join(pg_dir, 'pt-%d' % i)
            t = Tree.load_template(os.path.join(cur_pg_dir, 'template.json'), device)
            self.pg_templates.append(t)
            leaf_ids = t.get_leaf_ids()
            t.leaf_cnt = len(leaf_ids)
            self.pg_leaf_ids.append(leaf_ids)
            t.mark_geo_id({y: x for x, y in enumerate(self.pg_leaf_ids[i])})
            t.compute_subtree_geo_ids()
            self.leaf_mappings.append([])
            for anno_id in self.pg_shapes[i]:
                with open(os.path.join(cur_pg_dir, anno_id + '.txt'), 'r') as fin:
                    tmp_dict = dict()
                    for l in fin.readlines():
                        x, y = l.rstrip().split()
                        tmp_dict[int(x)] = int(y)
                    tmp_dict[0] = 0
                    cur_leaf_mapping = [tmp_dict[x] for x in range(len(tmp_dict))]
                    # cur_leaf_mapping = [tmp_dict[x] for x in self.pg_leaf_ids[i]]
                    cur_leaf_mapping = np.array(cur_leaf_mapping, dtype=np.int32)
                    self.leaf_mappings[i].append(cur_leaf_mapping)
            self.pg_leaf_ids[i] = np.array(self.pg_leaf_ids[i], dtype=np.int32)

        print('[PartGraphShapesDataset %d %s %d %d] %s %s' % (
            batch_size, mode, len(self.pg_shapes),
            len(self.sample_by_shape_pgids), data_dir, pg_dir))

    def __len__(self):
        if self.mode == 'sample_by_template':
            return len(self.pg_shapes)
        elif self.mode == 'sample_by_shape':
            return len(self.sample_by_shape_pgids)
        else:
            raise ValueError('ERROR: unknown mode %s!' % self.mode)

    def get_pg_shapes(self, index):
        return self.pg_shapes[index]

    def get_pg_template(self, index):
        return self.pg_templates[index]

    def get_pg_leaf_ids(self, index):
        return self.pg_leaf_ids[index]

    def get_pg_real_pcs(self, index, num_shape):
        ids = np.random.choice(len(self.pg_shapes[index]), num_shape, replace=True)
        part_pcs = np.zeros((num_shape, len(self.pg_leaf_ids[index]), 1000, 3), dtype=np.float32)
        names = []
        for i, idx in enumerate(ids):
            geo_fn = os.path.join(self.data_dir, self.pg_shapes[index][idx] + '.npz')
            geo_data = np.load(geo_fn)['parts']
            part_pcs[i] = geo_data[self.leaf_mappings[index][idx]]
            names.append(self.pg_shapes[index][idx])
        out = torch.from_numpy(part_pcs)
        return (names, out)

    def get_pg_real_pc(self, index, j):
        j = j % len(self.pg_shapes[index])
        geo_fn = os.path.join(self.data_dir, self.pg_shapes[index][j] + '.npz')
        geo_data = np.load(geo_fn)['parts']
        part_pcs = geo_data[self.leaf_mappings[index][j]]
        out = torch.from_numpy(part_pcs)
        return self.pg_shapes[index][j], out

    def __getitem__(self, index):
        if self.mode == 'sample_by_shape':
            index = self.sample_by_shape_pgids[index]
        ids = np.random.choice(len(self.pg_shapes[index]), self.batch_size, replace=True)
        pt_template = self.get_pg_template(index)
        # pt_template.free()
        pt_template = copy.deepcopy(pt_template)
        # part_pcs = np.zeros((self.batch_size, len(self.pg_leaf_ids[index]), 1000, 3), dtype=np.float32)
        for i, idx in enumerate(ids):
            geo_fn = os.path.join(self.data_dir, self.pg_shapes[index][idx] + '.npz')
            geo_data = np.load(geo_fn)
            # print(index)
            # print(geo_data['partsV'].shape)
            # print(geo_fn)
            # print(self.leaf_mappings[index][idx])
            # print(idx)
            # print(ids)
            pt_template = self.load_object_batch(
                pt_template, geo_data, self.leaf_mappings[index][idx],
                self.device, cat=(i == (len(ids) - 1)))
        return (index, pt_template) + (self.pg_shapes[index][ids[0]],)

    @staticmethod
    def load_object_batch(Ps_template, geo_data, leaf_mappings, device, cat=False):
        StackElement = namedtuple('StackElement', ['node', 'parent_json', 'parent_child_idx'])

        # traverse the tree, converting child nodes of each node to json
        geo_id = -1
        Ps_template_batch = Ps_template
        stack = [StackElement(node=Ps_template_batch, parent_json=None, parent_child_idx=None)]
        while len(stack) > 0:
            stack_elm = stack.pop()
            parent_json = stack_elm.parent_json
            parent_child_idx = stack_elm.parent_child_idx
            node = stack_elm.node
            if len(node.children) == 0 or node.is_leaf:
                # print(len(node.geo))
                node.geo.append(torch.tensor(
                    geo_data['partsV'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 3))
                LOGR = torch.tensor(
                    geo_data['LOGR'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 3)
                S = torch.tensor(
                    geo_data['S'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 6)
                node.dggeo.append(torch.cat((LOGR, S), 2))
                node.faces = torch.tensor(geo_data['F'], dtype=torch.int32)
                node.box.append(torch.from_numpy(
                    np.array(geo_data['box_quat'][leaf_mappings[node.part_id]])).to(
                        dtype=torch.float32).view(1, -1))
                if cat:
                    node.geo = torch.cat(node.geo, dim=0)      # .to(device=device)
                    node.dggeo = torch.cat(node.dggeo, dim=0)  # .to(device=device)
                    node.box = torch.cat(node.box, dim=0)      # .to(device=device)
                    # node.geo_id = 0
                # break
            else:
                node.geo.append(torch.tensor(
                    geo_data['partsV'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 3))
                LOGR = torch.tensor(
                    geo_data['LOGR'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 3)
                S = torch.tensor(
                    geo_data['S'][leaf_mappings[node.part_id]],
                    dtype=torch.float32).view(1, -1, 6)
                node.dggeo.append(torch.cat((LOGR, S), 2))
                node.box.append(torch.from_numpy(
                    np.array(geo_data['box_quat'][leaf_mappings[node.part_id]])).to(
                        dtype=torch.float32).view(1, -1))
                if cat:
                    node.geo = torch.cat(node.geo, dim=0)      # .to(device=device)
                    node.dggeo = torch.cat(node.dggeo, dim=0)  # .to(device=device)
                    node.box = torch.cat(node.box, dim=0)      # .to(device=device)
                for child in node.children:
                    # node_json['children'].append(None)
                    stack.append(StackElement(node=child, parent_json=None, parent_child_idx=None))
        return Ps_template_batch

    @staticmethod
    def load_object(fn, Tree):
        with open(fn, 'r') as f:
            root_json = json.load(f)

        # create a virtual parent node of the root node and add it to the stack
        StackElement = namedtuple('StackElement', ['node_json', 'parent', 'parent_child_idx'])
        stack = [StackElement(node_json=root_json, parent=None, parent_child_idx=None)]
        root = None
        leaf_geos_box = []
        leaf_geos_dg = []
        leaf_geos_pts = []
        geo_id = -1
        geo_box_id = -1

        # traverse the tree, converting each node json to a Node instance
        while len(stack) > 0:
            stack_elm = stack.pop()
            parent = stack_elm.parent
            parent_child_idx = stack_elm.parent_child_idx
            node_json = stack_elm.node_json

            node = Tree.Node(
                part_id=node_json['id'],
                is_leaf=('children' not in node_json),
                label=node_json['label'])
            # print(node_json['id'])

            if 'geo' in node_json.keys():
                node.geo = torch.tensor(
                    np.array(node_json['geo']), dtype=torch.float32).view(1, -1, 3)

            if 'box' in node_json:
                node.box = torch.from_numpy(np.array(node_json['box'])).to(dtype=torch.float32)

            if 'children' in node_json:
                for ci, child in enumerate(node_json['children']):
                    stack.append(StackElement(node_json=node_json['children'][ci],
                                              parent=node, parent_child_idx=ci))

            if 'edges' in node_json:
                for edge in node_json['edges']:
                    if 'params' in edge:
                        edge['params'] = torch.from_numpy(
                            np.array(edge['params'])).to(dtype=torch.float32)
                    node.edges.append(edge)

            if parent is None:
                root = node
                root.full_label = root.label
            else:
                if len(parent.children) <= parent_child_idx:
                    parent.children.extend([None] * (parent_child_idx + 1 - len(parent.children)))
                parent.children[parent_child_idx] = node
                node.full_label = parent.full_label + '/' + node.label

        obj = Tree(root=root)
        return obj

    @staticmethod
    def save_object(obj_root, fn):
        # create a virtual parent node of the root node and add it to the stack
        StackElement = namedtuple('StackElement', ['node', 'parent_json', 'parent_child_idx'])
        stack = [StackElement(node=obj_root, parent_json=None, parent_child_idx=None)]
        obj_json = None

        # traverse the tree, converting child nodes of each node to json
        while len(stack) > 0:
            stack_elm = stack.pop()
            parent_json = stack_elm.parent_json
            parent_child_idx = stack_elm.parent_child_idx
            node = stack_elm.node

            node_json = {
                'id': node.part_id,
                'label': f'{node.label if node.label is not None else ""}',
            }
            if node.geo is not None:
                node_json['geo'] = node.geo.cpu().numpy().reshape(-1).tolist()
            if node.box is not None:
                node_json['box'] = node.box.cpu().numpy().reshape(-1).tolist()
            if len(node.children) > 0:
                node_json['children'] = []
                for child in node.children:
                    node_json['children'].append(None)
                    stack.append(StackElement(node=child, parent_json=node_json,
                                              parent_child_idx=len(node_json['children']) - 1))
            if len(node.edges) > 0:
                node_json['edges'] = []
                for edge in node.edges:
                    node_json['edges'].append(edge)
                    if 'params' in edge:
                        node_json['edges'][-1]['params'] = \
                            node_json['edges'][-1]['params'].cpu().numpy().reshape(-1).tolist()
            if parent_json is None:
                obj_json = node_json
            else:
                parent_json['children'][parent_child_idx] = node_json

        with open(fn, 'w') as f:
            json.dump(obj_json, f)
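# A minimal stand-in for the one_hot() helper that edge_tensors() above relies
# on; it is imported from elsewhere in this codebase, so this is only an
# illustrative sketch.  The (label_count, N) output shape is inferred from the
# .transpose(0, 1) call in edge_tensors(), not taken from the actual helper.
import torch

def one_hot(inp, label_count):
    # inp: LongTensor of shape (N,); returns a (label_count, N) 0/1 tensor,
    # which edge_tensors() then transposes to (N, label_count).
    out = torch.zeros(label_count, inp.numel(), dtype=torch.uint8, device=inp.device)
    out[inp.view(-1), torch.arange(inp.numel(), device=inp.device)] = 1
    return out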
# GPU performance tests extracted from the py-videocore6 Python library.
# Tests for Raspberry Pi 4 benchmarking and device identification.
# TREASURE PROJECT 2021

import time
from time import clock_gettime, CLOCK_MONOTONIC
from time import monotonic
import fcntl
import socket
import struct
import numpy as np
from videocore6.v3d import *
from videocore6 import pack_unpack
from videocore6.driver import Driver
from videocore6.assembler import qpu
from bench_helper import BenchHelper
import sys
import os
import random
import hashlib


def getsec():
    return clock_gettime(CLOCK_MONOTONIC)


@qpu
def load_params(asm, thread, regs):
    if thread == 1:
        bxor(r0, r0, r0, sig=ldunifrf(rf0))
    elif thread == 8:  # 8 threads (1 thread / qpu)
        tidx(r0, sig=ldunifrf(rf0))
        shr(r0, r0, 2)
        mov(r1, 0b1111)
    elif thread == 16:  # 16 threads (2 threads / qpu)
        tidx(r0, sig=ldunifrf(rf0))
        shr(r0, r0, 1).mov(r1, 1)
        shl(r1, r1, 5)
        sub(r1, r1, 1)
    else:
        assert thread in [1, 8, 16]
    band(r3, r0, r1, sig=ldunifrf(rf1))
    shl(r0, rf1, 2)
    umul24(r0, r0, r3)
    eidx(r1).add(r0, r0, rf0)
    shl(r1, r1, 2)
    shl(r3, 4, 4).add(r0, r0, r1)

    n = len(regs)

    mov(tmua, r0, sig=thrsw).add(r0, r0, r3)
    nop()
    nop()
    nop(sig=ldtmu(r1))
    for i in range(n):
        if i % 16 == 0:
            mov(r5rep, r1)
            mov(regs[i], r5)
        elif i % 16 == 15 and i != n - 1:
            mov(tmua, r0, sig=thrsw).add(r0, r0, r3)
            rotate(r5rep, r1, -(i % 16))
            mov(regs[i], r5)
            nop(sig=ldtmu(r1))
        else:
            rotate(r5rep, r1, -(i % 16))
            mov(regs[i], r5)


@qpu
def qpu_sgemm_rnn_naive(asm, thread):

    params = [
        'P', 'Q', 'R',
        'A_base', 'A_stride',
        'B_base', 'B_stride',
        'C_base', 'C_stride',
        'alpha', 'beta',
    ]
    values = [
        'A_cur', 'B_cur', 'C_cur',
        'i', 'j', 'k',
    ]
    g = globals()
    for i, reg in enumerate(params + values):
        g['reg_' + reg] = g['rf' + str(i + 32)]

    load_params(asm, thread, [g['reg_' + reg] for reg in params])

    add(r0, reg_P, 15)
    shr(r0, r0, 4)
    shl(r0, r0, 4)
    add(r1, reg_R, 15)
    shr(r1, r1, 4)
    shl(r1, r1, 6)
    umul24(r3, r0, reg_A_stride)
    add(reg_A_base, reg_A_base, r3)
    add(reg_B_base, reg_B_base, r1)
    umul24(r3, r0, reg_C_stride)
    add(reg_C_base, reg_C_base, r3)
    add(reg_C_base, reg_C_base, r1)

    for i in range(16):
        mov(rf[i], 0.0).mov(rf[i + 16], 0.0)

    # i = (p + 15) / 16
    add(r0, reg_P, 15)
    shr(reg_i, r0, 4)
    with loop as li:

        # j = (r + 15) / 16
        add(r0, reg_R, 15)
        shr(reg_j, r0, 4)
        with loop as lj:

            shl(r0, reg_i, 4)
            umul24(r3, r0, reg_C_stride)
            shl(r1, reg_j, 6)
            sub(reg_C_cur, reg_C_base, r3)
            sub(reg_C_cur, reg_C_cur, r1)

            umul24(r3, r0, reg_A_stride)
            sub(reg_A_cur, reg_A_base, r3)
            sub(reg_B_cur, reg_B_base, r1)

            mov(reg_k, reg_Q)
            with loop as lk:

                eidx(r0)
                umul24(r1, r0, reg_A_stride)
                add(r1, r1, reg_A_cur).add(reg_A_cur, reg_A_cur, 4)
                mov(tmua, r1, sig=thrsw)
                shl(r1, r0, 2)
                add(r1, r1, reg_B_cur).add(reg_B_cur, reg_B_cur, reg_B_stride)
                mov(tmua, r1, sig=thrsw)
                nop(sig=ldtmu(r0))
                mov(r5rep, r0)
                nop(sig=ldtmu(r4))
                nop().fmul(r3, r5, r4)
                for i in range(1, 16):
                    rotate(r5rep, r0, -i)
                    fadd(rf[i - 1], rf[i - 1], r3).fmul(r3, r5, r4)
                fadd(rf15, rf15, r3)

                sub(reg_k, reg_k, 1, cond='pushz')
                lk.b(cond='anyna')
                nop()  # delay slot
                nop()  # delay slot
                nop()  # delay slot

            eidx(r0)
            shl(r0, r0, 2)
            add(r1, reg_C_cur, r0)
            mov(tmua, r1, sig=thrsw).add(r1, r1, reg_C_stride)
            fmul(rf[0], rf[0], reg_alpha)
            for i in range(1, 16):
                mov(tmua, r1, sig=thrsw).add(r1, r1, reg_C_stride)
                fmul(rf[i], rf[i], reg_alpha, sig=ldtmu(rf[i + 15]))
            mov(r0, reg_beta).fmul(r3, rf[16], reg_beta, sig=ldtmu(rf[31]))
            for i in range(16):
                fadd(rf[i], rf[i], r3).fmul(r3, rf[i + 17], r0)

            eidx(r0)
            shl(r0, r0, 2)
            add(r1, reg_C_cur, r0)
            for i in range(16):
                mov(tmud, rf[i])
                mov(tmua, r1).add(r1, r1, reg_C_stride)
                mov(rf[i], 0.0).mov(rf[i + 16], 0.0)
            tmuwt()

            sub(reg_j, reg_j, 1, cond='pushz')
            lj.b(cond='anyna')
            nop()  # delay slot
            nop()  # delay slot
            nop()  # delay slot

        sub(reg_i, reg_i, 1, cond='pushz')
        li.b(cond='anyna')
        nop()  # delay slot
        nop()  # delay slot
        nop()  # delay slot

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def sgemm_rnn_naive():
    thread = 8

    P = 1024
    Q = 1024
    R = 1024

    assert P % (16 * 2) == 0
    assert R % (16 * 4) == 0

    with Driver() as drv:

        code = drv.program(lambda asm: qpu_sgemm_rnn_naive(asm, thread))

        A = drv.alloc((P, Q), dtype='float32')
        B = drv.alloc((Q, R), dtype='float32')
        C = drv.alloc((P, R), dtype='float32')

        np.random.seed(0)
        alpha = np.random.randn()
        beta = np.random.randn()
        A_ref = np.random.randn(*A.shape).astype(A.dtype)
        B_ref = np.random.randn(*B.shape).astype(B.dtype)
        C_ref = np.random.randn(*C.shape).astype(C.dtype)

        A[:] = A_ref
        B[:] = B_ref
        C[:] = C_ref

        start = time.perf_counter_ns()
        C_ref[:] = alpha * A_ref.dot(B_ref) + beta * C_ref
        time_ref = time.perf_counter_ns() - start

        def block_2x4_params(i, j):
            tile_P = P // 2
            tile_R = R // 4
            return [
                tile_P, Q, tile_R,
                A.addresses()[tile_P * i, 0], A.strides[0],
                B.addresses()[0, tile_R * j], B.strides[0],
                C.addresses()[tile_P * i, tile_R * j], C.strides[0],
                *pack_unpack('f', 'I', [alpha, beta]),
            ]

        unif_params = drv.alloc((thread, len(block_2x4_params(0, 0))), dtype='uint32')
        for th in range(thread):
            unif_params[th] = block_2x4_params(th // 4, th % 4)

        unif = drv.alloc(2, dtype='uint32')
        unif[0] = unif_params.addresses()[0, 0]
        unif[1] = unif_params.shape[1]

        start = time.perf_counter_ns()
        drv.execute(code, unif.addresses()[0], thread=thread)
        time_gpu = time.perf_counter_ns() - start

        np.set_printoptions(threshold=np.inf)

        def Gflops(sec):
            return (2 * P * Q * R + 3 * P * R) / sec * 1e-9

        return [time_ref, time_gpu]  # Gflops(time_ref), time_gpu, Gflops(time_gpu)]


def sleep(duration):
    # busy-wait sleep with nanosecond resolution
    duration = duration * 1000000000
    now = time.perf_counter_ns()
    end = now + duration
    while now < end:
        now = time.perf_counter_ns()


def get_QPU_freq(seg):
    with RegisterMapping() as regmap:
        with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
            time.sleep(seg)
            result = pctr.result()
            return result[0] * 1e-6


def cpu_random():
    with RegisterMapping() as regmap:
        with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
            a = random.random()
            result = pctr.result()
            return result[0]


def cpu_true_random(n):
    with RegisterMapping() as regmap:
        with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
            a = os.urandom(n)
            result = pctr.result()
            return result[0]


def cpu_hash():
    with RegisterMapping() as regmap:
        with PerformanceCounter(regmap, [CORE_PCTR_CYCLE_COUNT]) as pctr:
            h = int(hashlib.sha256("test string".encode('utf-8')).hexdigest(), 16) % 10**8
            result = pctr.result()
            return result[0]


@qpu
def qpu_summation(asm, *, num_qpus, unroll_shift, code_offset,
                  align_cond=lambda pos: pos % 512 == 170):

    g = globals()
    for i, v in enumerate(['length', 'src', 'dst', 'qpu_num', 'stride', 'sum']):
        g[f'reg_{v}'] = rf[i]

    nop(sig=ldunifrf(reg_length))
    nop(sig=ldunifrf(reg_src))
    nop(sig=ldunifrf(reg_dst))

    if num_qpus == 1:
        num_qpus_shift = 0
        mov(reg_qpu_num, 0)
    elif num_qpus == 8:
        num_qpus_shift = 3
        tidx(r0)
        shr(r0, r0, 2)
        band(reg_qpu_num, r0, 0b1111)
    else:
        raise Exception('num_qpus must be 1 or 8')

    # addr += 4 * (thread_num + 16 * qpu_num)
    shl(r0, reg_qpu_num, 4)
    eidx(r1)
    add(r0, r0, r1)
    shl(r0, r0, 2)
    add(reg_src, reg_src, r0).add(reg_dst, reg_dst, r0)

    # stride = 4 * 16 * num_qpus
    mov(reg_stride, 1)
    shl(reg_stride, reg_stride, 6 + num_qpus_shift)

    # The QPU performs shifts and rotates modulo 32, so it actually supports
    # shift amounts [0, 31] only with small immediates.
    num_shifts = [*range(16), *range(-16, 0)]

    # length /= 16 * 8 * num_qpus * unroll
    shr(reg_length, reg_length, num_shifts[7 + num_qpus_shift + unroll_shift])

    # This single thread switch and two instructions just before the loop are
    # really important for TMU read to achieve a better performance.
    # This also enables TMU read requests without the thread switch signal, and
    # the eight-depth TMU read request queue.
    nop(sig=thrsw)
    nop()
    bxor(reg_sum, 1, 1).mov(r1, 1)

    while not align_cond(code_offset + len(asm)):
        nop()

    with loop as l:

        unroll = 1 << unroll_shift

        for i in range(7):
            mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)

        mov(tmua, reg_src).sub(reg_length, reg_length, r1, cond='pushz')
        add(reg_src, reg_src, reg_stride, sig=ldtmu(r0))

        for j in range(unroll - 1):
            for i in range(8):
                mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
                add(reg_sum, reg_sum, r0, sig=ldtmu(r0))

        for i in range(5):
            add(reg_sum, reg_sum, r0, sig=ldtmu(r0))

        l.b(cond='na0')
        add(reg_sum, reg_sum, r0, sig=ldtmu(r0))  # delay slot
        add(reg_sum, reg_sum, r0, sig=ldtmu(r0))  # delay slot
        add(reg_sum, reg_sum, r0)                 # delay slot

    mov(tmud, reg_sum)
    mov(tmua, reg_dst)

    # This synchronization is needed between the last TMU operation and the
    # program end with the thread switch just before the loop above.
    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def summation(*, length, num_qpus=8, unroll_shift=5):
    assert length > 0
    assert length % (16 * 8 * num_qpus * (1 << unroll_shift)) == 0

    with Driver(data_area_size=(length + 1024) * 4) as drv:

        code = drv.program(qpu_summation, num_qpus=num_qpus,
                           unroll_shift=unroll_shift,
                           code_offset=drv.code_pos // 8)

        X = drv.alloc(length, dtype='uint32')
        Y = drv.alloc(16 * num_qpus, dtype='uint32')

        X[:] = np.arange(length, dtype=X.dtype)
        Y.fill(0)
        assert sum(Y) == 0

        unif = drv.alloc(3, dtype='uint32')
        unif[0] = length
        unif[1] = X.addresses()[0]
        unif[2] = Y.addresses()[0]

        start = time.perf_counter_ns()
        drv.execute(code, unif.addresses()[0], thread=num_qpus)
        end = time.perf_counter_ns()

        assert sum(Y) % 2**32 == (length - 1) * length // 2 % 2**32

        return [end - start]  # , length * 4 / (end - start) * 1e-6]


@qpu
def qpu_scopy(asm, *, num_qpus, unroll_shift, code_offset,
              align_cond=lambda pos: pos % 512 == 259):

    g = globals()
    for i, v in enumerate(['length', 'src', 'dst', 'qpu_num', 'stride']):
        g[f'reg_{v}'] = rf[i]

    nop(sig=ldunifrf(reg_length))
    nop(sig=ldunifrf(reg_src))
    nop(sig=ldunifrf(reg_dst))

    if num_qpus == 1:
        num_qpus_shift = 0
        mov(reg_qpu_num, 0)
    elif num_qpus == 8:
        num_qpus_shift = 3
        tidx(r0)
        shr(r0, r0, 2)
        band(reg_qpu_num, r0, 0b1111)
    else:
        raise Exception('num_qpus must be 1 or 8')

    # addr += 4 * (thread_num + 16 * qpu_num)
    shl(r0, reg_qpu_num, 4)
    eidx(r1)
    add(r0, r0, r1)
    shl(r0, r0, 2)
    add(reg_src, reg_src, r0).add(reg_dst, reg_dst, r0)

    # stride = 4 * 16 * num_qpus
    mov(reg_stride, 1)
    shl(reg_stride, reg_stride, 6 + num_qpus_shift)

    # length /= 16 * 8 * num_qpus * unroll
    shr(reg_length, reg_length, 7 + num_qpus_shift + unroll_shift)

    # This single thread switch and two nops just before the loop are really
    # important for TMU read to achieve a better performance.
    # This also enables TMU read requests without the thread switch signal, and
    # the eight-depth TMU read request queue.
    nop(sig=thrsw)
    nop()
    nop()

    while not align_cond(code_offset + len(asm)):
        nop()

    with loop as l:

        unroll = 1 << unroll_shift

        for i in range(8):
            mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)

        for j in range(unroll - 1):
            for i in range(8):
                nop(sig=ldtmu(r0))
                mov(tmua, reg_src).add(reg_src, reg_src, reg_stride)
                mov(tmud, r0)
                mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)

        for i in range(6):
            nop(sig=ldtmu(r0))
            mov(tmud, r0)
            mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)

        nop(sig=ldtmu(r0))
        mov(tmud, r0).sub(reg_length, reg_length, 1, cond='pushz')
        mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)

        l.b(cond='na0')
        nop(sig=ldtmu(r0))                                    # delay slot
        mov(tmud, r0)                                         # delay slot
        mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)  # delay slot

    # This synchronization is needed between the last TMU operation and the
    # program end with the thread switch just before the loop above.
    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def scopy(*, length, num_qpus=8, unroll_shift=0):
    assert length > 0
    assert length % (16 * 8 * num_qpus * (1 << unroll_shift)) == 0

    with Driver(data_area_size=(length * 2 + 1024) * 4) as drv:

        code = drv.program(qpu_scopy, num_qpus=num_qpus,
                           unroll_shift=unroll_shift,
                           code_offset=drv.code_pos // 8)

        X = drv.alloc(length, dtype='float32')
        Y = drv.alloc(length, dtype='float32')

        X[:] = np.arange(*X.shape, dtype=X.dtype)
        Y[:] = -X

        assert not np.array_equal(X, Y)

        unif = drv.alloc(3, dtype='uint32')
        unif[0] = length
        unif[1] = X.addresses()[0]
        unif[2] = Y.addresses()[0]

        start = time.perf_counter_ns()
        drv.execute(code, unif.addresses()[0], thread=num_qpus)
        end = time.perf_counter_ns()

        assert np.array_equal(X, Y)

        return [end - start]  # , length * 4 / (end - start) * 1e-6]


@qpu
def qpu_memset(asm, *, num_qpus, unroll_shift, code_offset,
               align_cond=lambda pos: pos % 512 == 0):

    g = globals()
    for i, v in enumerate(['dst', 'fill', 'length', 'qpu_num', 'stride']):
        g[f'reg_{v}'] = rf[i]

    nop(sig=ldunifrf(reg_dst))
    nop(sig=ldunifrf(reg_fill))
    nop(sig=ldunifrf(reg_length))

    if num_qpus == 1:
        num_qpus_shift = 0
        mov(reg_qpu_num, 0)
    elif num_qpus == 8:
        num_qpus_shift = 3
        tidx(r0)
        shr(r0, r0, 2)
        band(reg_qpu_num, r0, 0b1111)
    else:
        raise Exception('num_qpus must be 1 or 8')

    # addr += 4 * (thread_num + 16 * qpu_num)
    shl(r0, reg_qpu_num, 4)
    eidx(r1)
    add(r0, r0, r1)
    shl(r0, r0, 2)
    add(reg_dst, reg_dst, r0)

    # stride = 4 * 16 * num_qpus
    # r0 = 1
    mov(r0, 1)
    shl(reg_stride, r0, 6 + num_qpus_shift)

    # length /= 16 * num_qpus * unroll
    shr(reg_length, reg_length, 4 + num_qpus_shift + unroll_shift)

    unroll = 1 << unroll_shift

    if unroll == 1:
        sub(reg_length, reg_length, r0, cond='pushz')
        while not align_cond(code_offset + len(asm)):
            nop()
        with loop as l:
            l.b(cond='na0')
            mov(tmud, reg_fill)                                       # delay slot
            mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)      # delay slot
            sub(reg_length, reg_length, r0, cond='pushz')             # delay slot
    else:
        while not align_cond(code_offset + len(asm)):
            nop()
        with loop as l:
            for i in range(unroll - 2):
                mov(tmud, reg_fill)
                mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)
            mov(tmud, reg_fill).sub(reg_length, reg_length, r0, cond='pushz')
            l.b(cond='na0')
            mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)      # delay slot
            mov(tmud, reg_fill)                                       # delay slot
            mov(tmua, reg_dst).add(reg_dst, reg_dst, reg_stride)      # delay slot

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def memset(*, fill, length, num_qpus=8, unroll_shift=1):
    assert length > 0
    assert length % (16 * num_qpus * (1 << unroll_shift)) == 0

    with Driver(data_area_size=(length + 1024) * 4) as drv:

        code = drv.program(qpu_memset, num_qpus=num_qpus,
                           unroll_shift=unroll_shift,
                           code_offset=drv.code_pos // 8)

        X = drv.alloc(length, dtype='uint32')
        X.fill(~fill)

        assert not np.array_equiv(X, fill)

        unif = drv.alloc(3, dtype='uint32')
        unif[0] = X.addresses()[0]
        unif[1] = fill
        unif[2] = length

        start = monotonic()
        drv.execute(code, unif.addresses()[0], thread=num_qpus)
        end = monotonic()

        assert np.array_equiv(X, fill)

        return [end - start]  # , length * 4 / (end - start) * 1e-6]


@qpu
def qpu_clock(asm):
    nop(sig=ldunif)
    nop(sig=ldunifrf(rf0))

    with loop as l:
        sub(r5, r5, 1, cond='pushn')
        l.b(cond='anyna')
        nop()
        nop()
        nop()

    mov(tmud, 1)
    mov(tmua, rf0)
    tmuwt()

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def test_clock():
    bench = BenchHelper('./libbench_helper.so')

    with Driver() as drv:
        f = pow(2, 25)
        code = drv.program(qpu_clock)
        unif = drv.alloc(2, dtype='uint32')
        done = drv.alloc(1, dtype='uint32')

        done[:] = 0
        unif[0] = f
        unif[1] = done.addresses()[0]

        with drv.compute_shader_dispatcher() as csd:
            start = time.perf_counter_ns()
            csd.dispatch(code, unif.addresses()[0])
            bench.wait_address(done)
            end = time.perf_counter_ns()

        return [f * 5 / (end - start) / 1000 / 1000 * 4]  # end - start] #, f * 5 / (end - start) / 1000 / 1000 * 4]


@qpu
def qpu_write_N(asm, N):
    eidx(r0, sig=ldunif)
    nop(sig=ldunifrf(rf0))
    shl(r0, r0, 2)

    mov(tmud, N)
    add(tmua, r5, r0)
    tmuwt()

    mov(tmud, 1)
    mov(tmua, rf0)
    tmuwt()

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def test_multiple_dispatch_delay():
    bench = BenchHelper('./libbench_helper.so')

    with Driver() as drv:
        data = drv.alloc((5, 16), dtype='uint32')
        code = [drv.program(lambda asm: qpu_write_N(asm, i)) for i in range(data.shape[0])]
        unif = drv.alloc((data.shape[0], 2), dtype='uint32')
        done = drv.alloc(1, dtype='uint32')

        data[:] = 0
        unif[:, 0] = data.addresses()[:, 0]
        unif[:, 1] = done.addresses()[0]

        ref_start = time.perf_counter_ns()
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                csd.dispatch(code[i], unif.addresses()[i, 0])
        ref_end = time.perf_counter_ns()

        assert (data == np.arange(data.shape[0]).reshape(data.shape[0], 1)).all()

        data[:] = 0
        naive_results = np.zeros(data.shape[0], dtype='float32')
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                done[:] = 0
                start = time.perf_counter_ns()
                csd.dispatch(code[i], unif.addresses()[i, 0])
                bench.wait_address(done)
                end = time.perf_counter_ns()
                naive_results[i] = end - start

        assert (data == np.arange(data.shape[0]).reshape(data.shape[0], 1)).all()

        sleep_results = np.zeros(data.shape[0], dtype='float32')
        with drv.compute_shader_dispatcher() as csd:
            for i in range(data.shape[0]):
                done[:] = 0
                time.sleep(1)
                start = time.perf_counter_ns()
                csd.dispatch(code[i], unif.addresses()[i, 0])
                bench.wait_address(done)
                end = time.perf_counter_ns()
                sleep_results[i] = end - start

        assert (data == np.arange(data.shape[0]).reshape(data.shape[0], 1)).all()

        return [ref_end - ref_start, np.sum(naive_results), np.sum(sleep_results)]


@qpu
def qpu_tmu_load_1_slot_1_qpu(asm, nops):
    nop(sig=ldunifrf(rf0))  # X.shape[1]
    nop(sig=ldunifrf(rf1))  # X
    nop(sig=ldunifrf(rf2))  # X.stride[1]
    nop(sig=ldunifrf(rf3))  # X.stride[0]
    nop(sig=ldunifrf(rf4))  # Y
    nop(sig=ldunifrf(rf5))  # done

    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111, cond='pushz')
    b(R.done, cond='allna')
    nop()  # delay slot
    nop()  # delay slot
    nop()  # delay slot

    eidx(r0)
    shl(r0, r0, 2)
    add(rf4, rf4, r0)
    eidx(r0)
    umul24(r0, r0, rf3)
    add(rf1, rf1, r0)

    mov(r2, 0.0)
    with loop as l:
        mov(tmua, rf1).add(rf1, rf1, rf2)
        for i in range(nops):
            nop()
        nop(sig=ldtmu(r3))
        sub(rf0, rf0, 1, cond='pushz')
        l.b(cond='anyna')
        fadd(r2, r2, r3)  # delay slot
        nop()             # delay slot
        nop()             # delay slot

    mov(tmud, r2)
    mov(tmua, rf4)
    tmuwt()

    mov(tmud, 1)
    mov(tmua, rf5)
    tmuwt()

    L.done
    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def test_tmu_load_1_slot_1_qpu():
    bench = BenchHelper('./libbench_helper.so')
    res = []
    for trans in [False, True]:
        with Driver() as drv:

            loop = 2**15

            X = drv.alloc((16, loop) if trans else (loop, 16), dtype='float32')
            Y = drv.alloc(16, dtype='float32')
            unif = drv.alloc(6, dtype='uint32')
            done = drv.alloc(1, dtype='uint32')

            unif[0] = loop
            unif[1] = X.addresses()[0, 0]
            unif[2] = X.strides[int(trans)]
            unif[3] = X.strides[1 - int(trans)]
            unif[4] = Y.addresses()[0]
            unif[5] = done.addresses()[0]

            results = np.zeros((1, 10), dtype='float32')

            # fig = plt.figure()
            # ax = fig.add_subplot(1, 1, 1)
            # ax.set_title(f'TMU load latency (1 slot, 1 qpu, stride=({unif[2]},{unif[3]}))')
            # ax.set_xlabel('# of nop (between request and load signal)')
            # ax.set_ylabel('sec')

            for nops in range(results.shape[0]):
                code = drv.program(lambda asm: qpu_tmu_load_1_slot_1_qpu(asm, nops))
                for i in range(results.shape[1]):
                    with drv.compute_shader_dispatcher() as csd:
                        X[:] = np.random.randn(*X.shape) / X.shape[int(trans)]
                        Y[:] = 0.0
                        done[:] = 0
                        start = time.perf_counter_ns()
                        csd.dispatch(code, unif.addresses()[0], thread=8)
                        bench.wait_address(done)
                        end = time.perf_counter_ns()
                        results[nops, i] = end - start
                        assert np.allclose(Y, np.sum(X, axis=int(trans)), atol=1e-4)
                # ax.scatter(np.zeros(results.shape[1]) + nops, results[nops], s=1, c='blue')
                # print('{:4}/{}\t{:.9f}'.format(nops, results.shape[0], np.sum(results[nops]) / results.shape[1]))
                res.append(np.sum(results[nops]) / results.shape[1])
    return res
    # ax.set_ylim(auto=True)
    # ax.set_xlim(0, results.shape[0])
    # fig.savefig(f'benchmarks/tmu_load_1_slot_1_qpu_{unif[2]}_{unif[3]}.png')


@qpu
def qpu_tmu_load_2_slot_1_qpu(asm, nops):
    nop(sig=ldunifrf(rf0))  # X.shape[1]
    nop(sig=ldunifrf(rf1))  # X
    nop(sig=ldunifrf(rf2))  # X.stride[1]
    nop(sig=ldunifrf(rf3))  # X.stride[0]
    nop(sig=ldunifrf(rf4))  # Y
    nop(sig=ldunifrf(rf5))  # done

    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b0011, cond='pushz')
    b(R.skip_bench, cond='allna')
    nop()  # delay slot
    nop()  # delay slot
    nop()  # delay slot

    eidx(r0)
    shl(r0, r0, 2)
    add(rf4, rf4, r0)
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111)
    shl(r1, 4, 4)
    umul24(r0, r0, r1)
    add(rf4, rf4, r0)

    eidx(r0)
    umul24(r0, r0, rf3)
    add(rf1, rf1, r0)
    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111)
    shl(r1, rf0, 6)
    umul24(r0, r0, r1)
    add(rf1, rf1, r0)

    mov(r2, 0.0)
    with loop as l:
        mov(tmua, rf1).add(rf1, rf1, rf2)
        for i in range(nops):
            nop()
        nop(sig=ldtmu(r3))
        sub(rf0, rf0, 1, cond='pushz')
        l.b(cond='anyna')
        fadd(r2, r2, r3)  # delay slot
        nop()             # delay slot
        nop()             # delay slot

    mov(tmud, r2)
    mov(tmua, rf4)
    tmuwt()

    L.skip_bench
    barrierid(syncb, sig=thrsw)
    nop()
    nop()

    tidx(r0)
    shr(r0, r0, 2)
    band(r0, r0, 0b1111, cond='pushz')
    b(R.skip_done, cond='allna')
    nop()  # delay slot
    nop()  # delay slot
    nop()  # delay slot
    mov(tmud, 1)
    mov(tmua, rf5)
    tmuwt()
    L.skip_done

    nop(sig=thrsw)
    nop(sig=thrsw)
    nop()
    nop()
    nop(sig=thrsw)
    nop()
    nop()
    nop()


def test_tmu_load_2_slot_1_qpu():
    bench = BenchHelper('./libbench_helper.so')
    res = []
    for trans, min_nops, max_nops in [(False, 0, 1), (True, 0, 1)]:
        with Driver() as drv:

            loop = 2**13

            X = drv.alloc((8, 16, loop) if trans else (8, loop, 16), dtype='float32')
            Y = drv.alloc((8, 16), dtype='float32')
            unif = drv.alloc(6, dtype='uint32')
            done = drv.alloc(1, dtype='uint32')

            unif[0] = loop
            unif[1] = X.addresses()[0, 0, 0]
            unif[2] = X.strides[1 + int(trans)]
            unif[3] = X.strides[2 - int(trans)]
            unif[4] = Y.addresses()[0, 0]
            unif[5] = done.addresses()[0]

            results = np.zeros((max_nops, 10), dtype='float32')

            # fig = plt.figure()
            # ax = fig.add_subplot(1, 1, 1)
            # ax.set_title(f'TMU load latency (2 slot, 1 qpu, stride=({unif[2]},{unif[3]}))')
            # ax.set_xlabel('# of nop (between request and load signal)')
            # ax.set_ylabel('sec')
            # print()

            for nops in range(min_nops, results.shape[0]):
                code = drv.program(lambda asm: qpu_tmu_load_2_slot_1_qpu(asm, nops))
                for i in range(results.shape[1]):
                    with drv.compute_shader_dispatcher() as csd:
                        X[:] = np.random.randn(*X.shape) / X.shape[1 + int(trans)]
                        Y[:] = 0.0
                        done[:] = 0
                        start = time.perf_counter_ns()
                        csd.dispatch(code, unif.addresses()[0], thread=8)
                        bench.wait_address(done)
                        end = time.perf_counter_ns()
                        results[nops, i] = end - start
                        assert np.allclose(Y[0::4], np.sum(X[0::4], axis=1 + int(trans)), atol=1e-4)
                        assert (Y[1:4] == 0).all()
                        assert (Y[5:8] == 0).all()
                # ax.scatter(np.zeros(results.shape[1]) + nops, results[nops], s=1, c='blue')
                # print('{:4}/{}\t{:.9f}'.format(nops, results.shape[0], np.sum(results[nops]) / results.shape[1]))
                res.append(np.sum(results[nops]) / results.shape[1])
            # ax.set_ylim(auto=True)
            # ax.set_xlim(min_nops, max_nops)
            # fig.savefig(f'benchmarks/tmu_load_2_slot_1_qpu_{unif[2]}_{unif[3]}.png')
    return res


def getHwAddr(ifname):
    # read the MAC address of the given interface via SIOCGIFHWADDR
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', bytes(ifname, 'utf-8')[:15]))
    return ':'.join('%02x' % b for b in info[18:24])


# for x in range(0, 10):
def main():
    # for n in range(0, 100):
    s = int(sys.argv[1])  # sampling window in seconds for get_QPU_freq below
    r = int(sys.argv[2])
    # f = sys.argv[2]
    mac = getHwAddr('eth0')
    results = []
    # results.append(c)
    # results.append(f)
    results.append(os.popen("vcgencmd measure_temp | cut -d = -f 2 | cut -d \"'\" -f 1").read()[:-1])
    results.append(get_QPU_freq(s))
    # for i in test_clock():
    #     results.append(i)
    # for i in test_clock():
    #     results.append(i)
    results.append(cpu_hash())
    # results.append(os.popen("vcgencmd measure_clock core").read()[:-1])
    results.append(cpu_random())
    results.append(cpu_true_random(r))
    for i in sgemm_rnn_naive():
        results.append(i)
    """results.append(get_QPU_freq(1))
    results.append(get_QPU_freq(2))
    results.append(get_QPU_freq(5))
    results.append(get_QPU_freq(7))
    results.append(get_QPU_freq(8))
    results.append(get_QPU_freq(10))
    results.append(get_QPU_freq(60))"""
    """results.append(cpu_true_random(1000))
    results.append(cpu_true_random(1000000))
    results.append(cpu_true_random(100000000))
    for i in test_clock():
        results.append(i)
    for i in test_clock():
        results.append(i)
    for i in sgemm_rnn_naive():
        results.append(i)
    for i in summation(length=32 * 1024 * 1024):
        results.append(i)
    for i in scopy(length=16 * 1024 * 1024):
        results.append(i)
    for i in test_multiple_dispatch_delay():
        results.append(i)"""
    # for i in test_tmu_load_1_slot_1_qpu():
    #     results.append(i)
    # for i in test_tmu_load_2_slot_1_qpu():
    #     results.append(i)
    results.append(mac)
    print(*results, sep=',')
    # print(memset(fill=0x5a5a5a5a, length=16 * 1024 * 1024))


if __name__ == "__main__":
    main()
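# The commented-out Gflops() helper above treats its argument as seconds,
# while sgemm_rnn_naive() returns nanosecond timings.  An explicit conversion
# for the default P = Q = R = 1024 problem size might look like this
# (illustrative sketch, not part of the benchmark's output path):
def gflops_from_ns(ns, P=1024, Q=1024, R=1024):
    # 2*P*Q*R flops for the matrix product plus 3*P*R for the alpha/beta update
    return (2 * P * Q * R + 3 * P * R) / (ns * 1e-9) * 1e-9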
import praw  # imported by the original script but unused in this labelling pass

if __name__ == "__main__":
    with open('posts.txt', 'r', encoding='utf-8') as posts:
        lines = posts.readlines()
    # Markers that suggest a line mentions a climbing route grade.
    route_syntax = ['5.', ' V', '(V', ' v', '(v']
    for line in lines:
        line = line.rstrip('\n')
        is_route = False
        for syntax in route_syntax:
            index = line.find(syntax)
            if index != -1:
                if route_syntax.index(syntax) > 0:
                    # 'V'/'v' markers only count as a grade when followed by a digit
                    if index + 2 < len(line) and line[index + 2:index + 3].isdigit():
                        is_route = True
                        print('__label__1 ' + line)
                else:
                    # a '5.' prefix (YDS grade) is enough on its own
                    is_route = True
                    print('__label__1 ' + line)
                break
        if not is_route:
            print('__label__2 ' + line)
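# Worked example of the labelling rule above: a '5.' match is labelled as a
# route immediately, while 'V'/'v' markers also require a digit two characters
# later.  For a hypothetical posts.txt containing
#     Sent my first 5.12a yesterday!
#     Anyone tried the new gym downtown?
# the script prints the fastText-style lines
#     __label__1 Sent my first 5.12a yesterday!
#     __label__2 Anyone tried the new gym downtown?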
# Copyright Least Authority Enterprises.
# See LICENSE for details.

"""
Hypothesis strategies useful for testing ``pykube``.
"""

from string import ascii_lowercase, digits

from hypothesis.strategies import (
    none, builds, fixed_dictionaries, lists, sampled_from, one_of, text,
    dictionaries, tuples, integers, booleans,
)

from .. import v1_5_model as default_model

# Without some attempt to cap the size of collection strategies (lists,
# dictionaries), the slowness health check fails intermittently.  Here are
# some sizes for collections with no other opinion on the matter.
#
# If you write a strategy that involves a collection and there are no official
# upper limits on the number of items in that collection, you should almost
# certainly impose these limits to make sure your strategy runs quickly
# enough.
_QUICK_AVERAGE_SIZE = 3
_QUICK_MAX_SIZE = 10


def joins(sep, elements):
    """
    Join unicode strings built by another strategy.

    :param unicode sep: The separator to join with.

    :param elements: A strategy which builds a sequence of unicode strings to
        join.

    :return: A strategy for building the joined strings.
    """
    return builds(
        lambda values: sep.join(values),
        elements,
    )
join = joins


def dns_labels():
    # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/identifiers.md
    # https://kubernetes.io/docs/user-guide/identifiers/#names
    # https://www.ietf.org/rfc/rfc1035.txt
    letter = ascii_lowercase
    letter_digit = letter + digits
    if isinstance(letter, bytes):
        # string.ascii_lowercase and string.digits are bytes on Python 2.
        letter = letter.decode("ascii")
        letter_digit = letter_digit.decode("ascii")
    letter_digit_hyphen = letter_digit + u"-"
    variations = [
        # Could be just one character long
        (sampled_from(letter),),
        # Or longer
        (sampled_from(letter),
         text(
             letter_digit_hyphen,
             min_size=0,
             max_size=61,
             average_size=_QUICK_AVERAGE_SIZE,
         ),
         sampled_from(letter_digit),
         ),
    ]
    return one_of(list(
        joins(u"", tuples(*alphabet))
        for alphabet in variations
    ))
# XXX wrong
object_name = object_names = dns_labels


def image_names():
    """
    Build Docker image names.

    Only generate images that appear to be hosted on localhost to avoid ever
    actually pulling an image from anywhere on the network.
    """
    return dns_labels().map(lambda label: u"127.0.0.1/" + label)


def dns_subdomains():
    # XXX wrong
    return joins(
        u".",
        lists(
            dns_labels(),
            min_size=1,
            max_size=_QUICK_MAX_SIZE,
            average_size=_QUICK_AVERAGE_SIZE,
        ),
    )


def label_prefixes():
    return dns_subdomains()


def label_names():
    # https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
    return dns_labels()


def label_values():
    # https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
    return label_names()


def labels():
    return dictionaries(
        keys=one_of(
            join(u"/", tuples(label_prefixes(), label_names())),
            label_names(),
        ),
        values=label_values(),
        average_size=_QUICK_MAX_SIZE,
        max_size=_QUICK_MAX_SIZE,
    )


def object_metadatas(model=default_model):
    """
    Build ``v1.ObjectMeta`` without a namespace.
    """
    return builds(
        model.v1.ObjectMeta.create,
        fixed_dictionaries({
            u"name": object_name(),
            u"uid": none(),
            u"labels": one_of(
                none(),
                labels(),
            ),
        }),
    )


def namespaced_object_metadatas(model=default_model):
    """
    Build ``v1.ObjectMeta`` with a namespace.
    """
    return builds(
        lambda obj_metadata, namespace: obj_metadata.set(
            u"namespace", namespace,
        ),
        obj_metadata=object_metadatas(model),
        namespace=object_name(),
    )


def namespace_statuses(model=default_model):
    """
    Build ``Namespace.status``.
    """
    return builds(
        model.v1.NamespaceStatus,
        phase=sampled_from([u"Active", u"Terminating"]),
    )


def creatable_namespaces(model=default_model):
    r"""
    Build ``Namespace``\ s which can be created on a Kubernetes cluster.
    """
    return builds(
        model.v1.Namespace,
        metadata=object_metadatas(model),
        status=none(),
    )


def retrievable_namespaces(model=default_model):
    r"""
    Build ``Namespace``\ s which might be retrieved from a Kubernetes cluster.

    This includes additional fields that might be populated by the Kubernetes
    cluster automatically.
    """
    return builds(
        lambda ns, status: ns.set(status=status),
        creatable_namespaces(model),
        status=namespace_statuses(model),
    )


def configmap_data_keys():
    """
    Build keys for the ``data`` mapping of a ``ConfigMap``.
    """
    return builds(
        lambda labels, dot: dot + u".".join(labels),
        labels=lists(object_name(), average_size=2, min_size=1, max_size=253 // 2),
        dot=sampled_from([u"", u"."]),
    ).filter(
        lambda key: len(key) <= 253
    )


def configmap_data_values():
    """
    Build values for the ``data`` field for a ``v1.ConfigMap``.
    """
    return text()


def configmap_datas():
    """
    Build the ``data`` mapping of a ``v1.ConfigMap``.
    """
    return one_of(
        none(),
        dictionaries(
            keys=configmap_data_keys(),
            values=configmap_data_values(),
            average_size=_QUICK_AVERAGE_SIZE,
            max_size=_QUICK_MAX_SIZE,
        ),
    )


def configmaps(model=default_model):
    """
    Build ``v1.ConfigMap``.
    """
    return builds(
        model.v1.ConfigMap,
        metadata=namespaced_object_metadatas(model),
        data=configmap_datas(),
    )


def containers(model=default_model):
    """
    Build ``v1.Container``.
    """
    return builds(
        model.v1.Container,
        name=dns_labels(),
        # XXX Spec does not say image is required but it is
        image=image_names(),
    )


def podspecs(model=default_model):
    """
    Build ``v1.PodSpec``.
    """
    return builds(
        model.v1.PodSpec,
        activeDeadlineSeconds=one_of(
            none(),
            # The Swagger specification claims this is an int64.  The prose
            # documentation says it must be a positive integer.  The Golang
            # PodSpec struct (pkg/api/v1/types.go:PodSpec) declares it a field
            # of type ``*int64`` - a signed type.
            integers(min_value=0, max_value=2 ** 63 - 1),
        ),
        dnsPolicy=sampled_from([u"ClusterFirst", u"Default"]),
        hostIPC=booleans(),
        hostNetwork=booleans(),
        hostPID=booleans(),
        hostname=dns_labels(),
        # And plenty more ...
        containers=lists(
            containers(model),
            min_size=1,
            average_size=_QUICK_MAX_SIZE,
            max_size=_QUICK_MAX_SIZE,
            unique_by=lambda container: container.name,
        ),
    )


def podtemplatespecs(model=default_model):
    """
    Build ``v1.PodTemplateSpec``.
    """
    return builds(
        model.v1.PodTemplateSpec,
        # v1.ObjectMeta for a PodTemplateSpec must include some labels.
        metadata=object_metadatas(model).filter(
            lambda meta: meta.labels and len(meta.labels) > 0,
        ),
        spec=podspecs(model),
    )


def _without_activeDeadlineSeconds(template):
    # When part of a Deployment or ReplicaSet, activeDeadlineSeconds may not
    # be given a value.  https://github.com/kubernetes/kubernetes/issues/38684
    return template.transform(["spec", "activeDeadlineSeconds"], None)


def replicasetspecs(model=default_model):
    """
    Build ``v1beta1.ReplicaSetSpec``.
    """
    return builds(
        lambda template, **kw: model.v1beta1.ReplicaSetSpec(
            # Make sure the selector will match Pods from the pod template
            # spec.
            selector={u"matchLabels": template.metadata.labels},
            template=template,
            **kw
        ),
        template=podtemplatespecs(model).map(_without_activeDeadlineSeconds),
        minReadySeconds=integers(min_value=0, max_value=2 ** 31 - 1),
        # Strictly speaking, the max value is more like 2 ** 31 - 1.  However,
        # if we actually sent such a thing to Kubernetes we could probably
        # expect only undesirable consequences.
        replicas=integers(min_value=0, max_value=3),
    )


def replicasets(model=default_model):
    """
    Build ``v1beta1.ReplicaSet``.
    """
    return builds(
        model.v1beta1.ReplicaSet,
        metadata=object_metadatas(model),
        spec=replicasetspecs(model),
    )


def deploymentspecs(model=default_model):
    """
    Build ``v1beta1.DeploymentSpec``.
    """
    return builds(
        lambda template: model.v1beta1.DeploymentSpec(
            template=template,
            # The selector has to match the PodTemplateSpec.  This is an easy
            # way to accomplish that but not the only way.
            selector={u"matchLabels": template.metadata.labels},
        ),
        template=podtemplatespecs(model).map(_without_activeDeadlineSeconds),
    )


def deployments(model=default_model):
    """
    Build ``v1beta1.Deployment``.
    """
    return builds(
        lambda metadata, spec: model.v1beta1.Deployment(
            # The submitted Deployment.metadata.labels don't have to match the
            # Deployment.spec.template.metadata.labels but the server will
            # copy them up if they're missing at the top.
            metadata=metadata.set("labels", spec.template.metadata.labels),
            spec=spec,
        ),
        metadata=namespaced_object_metadatas(model),
        # XXX Spec is only required if you want to be able to create the
        # Deployment.
        spec=deploymentspecs(model),
    )


def podstatuses():
    """
    Build ``v1.PodStatus``.
    """
    return none()


def pods(model=default_model):
    """
    Build ``v1.Pod``.
    """
    return builds(
        model.v1.Pod,
        metadata=namespaced_object_metadatas(model),
        spec=podspecs(model),
        status=podstatuses(),
    )


def port_numbers(min_value=1, max_value=65535):
    """
    Build integers in the range of TCP/UDP port numbers.
    """
    return integers(min_value, max_value)


def service_ports(model=default_model):
    """
    Build ``v1.ServicePort``.
    """
    return builds(
        model.v1.ServicePort,
        port=port_numbers(),
        # The specification doesn't document name as required, but it is.
        name=dns_labels().filter(lambda name: len(name) <= 24),
    )


def service_specs(model=default_model):
    """
    Build ``v1.ServiceSpec``.
    """
    return builds(
        model.v1.ServiceSpec,
        ports=lists(
            service_ports(model),
            min_size=1,
            max_size=_QUICK_MAX_SIZE,
            average_size=_QUICK_AVERAGE_SIZE,
            unique_by=lambda port: port.name,
        )
    )


def services(model=default_model):
    """
    Build ``v1.Service``.
    """
    return builds(
        model.v1.Service,
        metadata=namespaced_object_metadatas(model),
        # Though the specification doesn't tell us, the spec is required.
        spec=service_specs(model),
    )


def _collections(cls, strategy, unique_by):
    """
    A helper for defining a strategy to build ``...List`` objects.

    :param cls: The model class to instantiate.

    :param strategy: A strategy to build elements for the collection.

    :param unique_by: A key function compatible with the ``lists`` strategy.
    """
    return builds(
        cls,
        items=one_of(
            none(),
            lists(
                strategy,
                average_size=_QUICK_AVERAGE_SIZE,
                max_size=_QUICK_MAX_SIZE,
                unique_by=unique_by,
            ),
        ),
    )


def deploymentlists(model=default_model):
    """
    Build ``v1beta1.DeploymentList``.
    """
    return _collections(
        model.v1beta1.DeploymentList,
        deployments(model),
        _unique_names_with_namespaces,
    )


def podlists(model=default_model):
    """
    Build ``v1.PodList``.
    """
    return _collections(
        model.v1.PodList,
        pods(model),
        _unique_names_with_namespaces,
    )


def replicasetlists(model=default_model):
    """
    Build ``v1beta1.ReplicaSetList``.
    """
    return _collections(
        model.v1beta1.ReplicaSetList,
        replicasets(model),
        _unique_names_with_namespaces,
    )


def configmaplists(model=default_model):
    """
    Build ``v1.ConfigMapList``.
    """
    return _collections(
        model.v1.ConfigMapList,
        configmaps(model),
        _unique_names_with_namespaces,
    )


def namespacelists(namespaces=creatable_namespaces(), model=default_model):
    """
    Build ``v1.NamespaceList``.
    """
    return _collections(
        model.v1.NamespaceList,
        namespaces,
        _unique_names,
    )


def servicelists(model=default_model):
    """
    Build ``v1.ServiceList``.
    """
    return _collections(
        model.v1.ServiceList,
        services(model),
        _unique_names_with_namespaces,
    )


def objectcollections(namespaces=creatable_namespaces(), model=default_model):
    """
    Build ``v1.ObjectCollection``.
    """
    return one_of(
        configmaplists(model),
        namespacelists(namespaces, model),
        deploymentlists(model),
        podlists(model),
        replicasetlists(model),
        servicelists(model),
    )


def _unique_names(item):
    """
    Compute the unique key for the given (namespaceless) item within a single
    collection.
    """
    return item.metadata.name


def _unique_names_with_namespaces(item):
    """
    Compute the unique key for the given (namespaced) item within a single
    collection.
    """
    return (item.metadata.name, item.metadata.namespace)


def iobjects(model=default_model):
    """
    Build any one of the ``IObject`` implementations.
    """
    return one_of(
        creatable_namespaces(model),
        retrievable_namespaces(model),
        configmaps(model),
        deployments(model),
        pods(model),
        replicasets(model),
        services(model),
        objectcollections(model=model),
    )
# -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite.
"""
from functools import partial

from nose.tools import *

import networkx as nx
from networkx.algorithms.flow.utils import *
from networkx.algorithms.flow.edmonds_karp import *
from networkx.algorithms.flow.ford_fulkerson import *
from networkx.algorithms.flow.preflow_push import *
from networkx.algorithms.flow.shortest_augmenting_path import *

flow_funcs = [edmonds_karp, ford_fulkerson, preflow_push, shortest_augmenting_path]
max_min_funcs = [nx.maximum_flow, nx.minimum_cut]
flow_value_funcs = [nx.maximum_flow_value, nx.minimum_cut_value]
interface_funcs = sum([max_min_funcs, flow_value_funcs], [])
all_funcs = sum([flow_funcs, interface_funcs], [])

msg = "Assertion failed in function: {0}"
msgi = "Assertion failed in function: {0} in interface {1}"


def compute_cutset(G, partition):
    reachable, non_reachable = partition
    cutset = set()
    for u, nbrs in ((n, G[n]) for n in reachable):
        cutset.update((u, v) for v in nbrs if v in non_reachable)
    return cutset


def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
    assert_equal(set(G), set(flowDict), msg=msg.format(flow_func.__name__))
    for u in G:
        assert_equal(set(G[u]), set(flowDict[u]),
                     msg=msg.format(flow_func.__name__))
    excess = dict((u, 0) for u in flowDict)
    for u in flowDict:
        for v, flow in flowDict[u].items():
            if capacity in G[u][v]:
                ok_(flow <= G[u][v][capacity])
            ok_(flow >= 0, msg=msg.format(flow_func.__name__))
            excess[u] -= flow
            excess[v] += flow
    for u, exc in excess.items():
        if u == s:
            assert_equal(exc, -solnValue, msg=msg.format(flow_func.__name__))
        elif u == t:
            assert_equal(exc, solnValue, msg=msg.format(flow_func.__name__))
        else:
            assert_equal(exc, 0, msg=msg.format(flow_func.__name__))


def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
    assert_true(all(n in G for n in partition[0]),
                msg=msg.format(flow_func.__name__))
    assert_true(all(n in G for n in partition[1]),
                msg=msg.format(flow_func.__name__))
    cutset = compute_cutset(G, partition)
    assert_true(all(G.has_edge(u, v) for (u, v) in cutset),
                msg=msg.format(flow_func.__name__))
    assert_equal(solnValue, sum(G[u][v][capacity] for (u, v) in cutset),
                 msg=msg.format(flow_func.__name__))
    H = G.copy()
    H.remove_edges_from(cutset)
    if not G.is_directed():
        assert_false(nx.is_connected(H), msg=msg.format(flow_func.__name__))
    else:
        assert_false(nx.is_strongly_connected(H),
                     msg=msg.format(flow_func.__name__))


def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity='capacity'):
    for flow_func in flow_funcs:
        R = flow_func(G, s, t, capacity)
        # Test both legacy and new implementations.
        legacy = R.graph.get('algorithm') == "ford_fulkerson_legacy"
        flow_value = R.graph['flow_value']
        if legacy:
            flow_dict = R.graph['flow_dict']
        else:
            flow_dict = build_flow_dict(G, R)
        assert_equal(flow_value, solnValue, msg=msg.format(flow_func.__name__))
        if legacy:
            assert_equal(flow_dict, solnFlows, msg=msg.format(flow_func.__name__))
        else:
            validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func)
        # Minimum cut
        if legacy:
            cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
                                                  flow_func=nx.ford_fulkerson)
        else:
            cut_value, partition = nx.minimum_cut(G, s, t, capacity=capacity,
                                                  flow_func=flow_func)
        validate_cuts(G, s, t, solnValue, partition, capacity, flow_func)


class TestMaxflowMinCutCommon:

    def test_graph1(self):
        # Trivial undirected graph
        G = nx.Graph()
        G.add_edge(1, 2, capacity=1.0)

        solnFlows = {1: {2: 1.0}, 2: {1: 1.0}}

        compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0)

    def test_graph2(self):
        # A more complex undirected graph
        # adapted from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
        G = nx.Graph()
        G.add_edge('x', 'a', capacity=3.0)
        G.add_edge('x', 'b', capacity=1.0)
        G.add_edge('a', 'c', capacity=3.0)
        G.add_edge('b', 'c', capacity=5.0)
        G.add_edge('b', 'd', capacity=4.0)
        G.add_edge('d', 'e', capacity=2.0)
        G.add_edge('c', 'y', capacity=2.0)
        G.add_edge('e', 'y', capacity=3.0)

        H = {'x': {'a': 3, 'b': 1},
             'a': {'c': 3, 'x': 3},
             'b': {'c': 1, 'd': 2, 'x': 1},
             'c': {'a': 3, 'b': 1, 'y': 2},
             'd': {'b': 2, 'e': 2},
             'e': {'d': 2, 'y': 2},
             'y': {'c': 2, 'e': 2}}

        compare_flows_and_cuts(G, 'x', 'y', H, 4.0)

    def test_digraph1(self):
        # The classic directed graph example
        G = nx.DiGraph()
        G.add_edge('a', 'b', capacity=1000.0)
        G.add_edge('a', 'c', capacity=1000.0)
        G.add_edge('b', 'c', capacity=1.0)
        G.add_edge('b', 'd', capacity=1000.0)
        G.add_edge('c', 'd', capacity=1000.0)

        H = {'a': {'b': 1000.0, 'c': 1000.0},
             'b': {'c': 0, 'd': 1000.0},
             'c': {'d': 1000.0},
             'd': {}}

        compare_flows_and_cuts(G, 'a', 'd', H, 2000.0)

    def test_digraph2(self):
        # An example in which some edges end up with zero flow.
        G = nx.DiGraph()
        G.add_edge('s', 'b', capacity=2)
        G.add_edge('s', 'c', capacity=1)
        G.add_edge('c', 'd', capacity=1)
        G.add_edge('d', 'a', capacity=1)
        G.add_edge('b', 'a', capacity=2)
        G.add_edge('a', 't', capacity=2)

        H = {'s': {'b': 2, 'c': 0},
             'c': {'d': 0},
             'd': {'a': 0},
             'b': {'a': 2},
             'a': {'t': 2},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 2)

    def test_digraph3(self):
        # A directed graph example from Cormen et al.
        G = nx.DiGraph()
        G.add_edge('s', 'v1', capacity=16.0)
        G.add_edge('s', 'v2', capacity=13.0)
        G.add_edge('v1', 'v2', capacity=10.0)
        G.add_edge('v2', 'v1', capacity=4.0)
        G.add_edge('v1', 'v3', capacity=12.0)
        G.add_edge('v3', 'v2', capacity=9.0)
        G.add_edge('v2', 'v4', capacity=14.0)
        G.add_edge('v4', 'v3', capacity=7.0)
        G.add_edge('v3', 't', capacity=20.0)
        G.add_edge('v4', 't', capacity=4.0)

        H = {'s': {'v1': 12.0, 'v2': 11.0},
             'v2': {'v1': 0, 'v4': 11.0},
             'v1': {'v2': 0, 'v3': 12.0},
             'v3': {'v2': 0, 't': 19.0},
             'v4': {'v3': 7.0, 't': 4.0},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 23.0)

    def test_digraph4(self):
        # A more complex directed graph
        # from www.topcoder.com/tc?module=Statc&d1=tutorials&d2=maxFlow
        G = nx.DiGraph()
        G.add_edge('x', 'a', capacity=3.0)
        G.add_edge('x', 'b', capacity=1.0)
        G.add_edge('a', 'c', capacity=3.0)
        G.add_edge('b', 'c', capacity=5.0)
        G.add_edge('b', 'd', capacity=4.0)
        G.add_edge('d', 'e', capacity=2.0)
        G.add_edge('c', 'y', capacity=2.0)
        G.add_edge('e', 'y', capacity=3.0)

        H = {'x': {'a': 2.0, 'b': 1.0},
             'a': {'c': 2.0},
             'b': {'c': 0, 'd': 1.0},
             'c': {'y': 2.0},
             'd': {'e': 1.0},
             'e': {'y': 1.0},
             'y': {}}

        compare_flows_and_cuts(G, 'x', 'y', H, 3.0)

    def test_optional_capacity(self):
        # Test optional capacity parameter.
        G = nx.DiGraph()
        G.add_edge('x', 'a', spam=3.0)
        G.add_edge('x', 'b', spam=1.0)
        G.add_edge('a', 'c', spam=3.0)
        G.add_edge('b', 'c', spam=5.0)
        G.add_edge('b', 'd', spam=4.0)
        G.add_edge('d', 'e', spam=2.0)
        G.add_edge('c', 'y', spam=2.0)
        G.add_edge('e', 'y', spam=3.0)

        solnFlows = {'x': {'a': 2.0, 'b': 1.0},
                     'a': {'c': 2.0},
                     'b': {'c': 0, 'd': 1.0},
                     'c': {'y': 2.0},
                     'd': {'e': 1.0},
                     'e': {'y': 1.0},
                     'y': {}}
        solnValue = 3.0
        s = 'x'
        t = 'y'

        compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity='spam')

    def test_digraph_infcap_edges(self):
        # DiGraph with infinite capacity edges
        G = nx.DiGraph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity=30)
        G.add_edge('a', 'c', capacity=25)
        G.add_edge('b', 'c', capacity=12)
        G.add_edge('a', 't', capacity=60)
        G.add_edge('c', 't')

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 't': 60},
             'b': {'c': 12},
             'c': {'t': 37},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

        # DiGraph with infinite capacity digon
        G = nx.DiGraph()
        G.add_edge('s', 'a', capacity=85)
        G.add_edge('s', 'b', capacity=30)
        G.add_edge('a', 'c')
        G.add_edge('c', 'a')
        G.add_edge('b', 'c', capacity=12)
        G.add_edge('a', 't', capacity=60)
        G.add_edge('c', 't', capacity=37)

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 't': 60},
             'c': {'a': 0, 't': 37},
             'b': {'c': 12},
             't': {}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

    def test_digraph_infcap_path(self):
        # Graph with infinite capacity (s, t)-path
        G = nx.DiGraph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity=30)
        G.add_edge('a', 'c')
        G.add_edge('b', 'c', capacity=12)
        G.add_edge('a', 't', capacity=60)
        G.add_edge('c', 't')

        for flow_func in all_funcs:
            assert_raises(nx.NetworkXUnbounded,
                          flow_func, G, 's', 't')

    def test_graph_infcap_edges(self):
        # Undirected graph with infinite capacity edges
        G = nx.Graph()
        G.add_edge('s', 'a')
        G.add_edge('s', 'b', capacity=30)
        G.add_edge('a', 'c', capacity=25)
        G.add_edge('b', 'c', capacity=12)
        G.add_edge('a', 't', capacity=60)
        G.add_edge('c', 't')

        H = {'s': {'a': 85, 'b': 12},
             'a': {'c': 25, 's': 85, 't': 60},
             'b': {'c': 12, 's': 12},
             'c': {'a': 25, 'b': 12, 't': 37},
             't': {'a': 60, 'c': 37}}

        compare_flows_and_cuts(G, 's', 't', H, 97)

    def test_digraph5(self):
        # From ticket #429 by mfrasca.
        # (Renamed from a second test_digraph4, which would have shadowed the
        # method of the same name above.)
        G = nx.DiGraph()
        G.add_edge('s', 'a', capacity=2)
        G.add_edge('s', 'b', capacity=2)
        G.add_edge('a', 'b', capacity=5)
        G.add_edge('a', 't', capacity=1)
        G.add_edge('b', 'a', capacity=1)
        G.add_edge('b', 't', capacity=3)

        flowSoln = {'a': {'b': 1, 't': 1},
                    'b': {'a': 0, 't': 3},
                    's': {'a': 2, 'b': 2},
                    't': {}}

        compare_flows_and_cuts(G, 's', 't', flowSoln, 4)

    def test_disconnected(self):
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight='capacity')
        G.remove_node(1)
        assert_equal(nx.maximum_flow_value(G, 0, 3), 0)
        flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}}
        compare_flows_and_cuts(G, 0, 3, flowSoln, 0)

    def test_source_target_not_in_graph(self):
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight='capacity')
        G.remove_node(0)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 3)
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight='capacity')
        G.remove_node(3)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 3)

    def test_source_target_coincide(self):
        G = nx.Graph()
        G.add_node(0)
        for flow_func in all_funcs:
            assert_raises(nx.NetworkXError, flow_func, G, 0, 0)


class TestMaxFlowMinCutInterface:

    def setup(self):
        G = nx.DiGraph()
        G.add_edge('x', 'a', capacity=3.0)
        G.add_edge('x', 'b', capacity=1.0)
        G.add_edge('a', 'c', capacity=3.0)
        G.add_edge('b', 'c', capacity=5.0)
        G.add_edge('b', 'd', capacity=4.0)
        G.add_edge('d', 'e', capacity=2.0)
        G.add_edge('c', 'y', capacity=2.0)
        G.add_edge('e', 'y', capacity=3.0)
        self.G = G
        H = nx.DiGraph()
        H.add_edge(0, 1, capacity=1.0)
        H.add_edge(1, 2, capacity=1.0)
        self.H = H

    def test_flow_func_not_callable(self):
        elements = ['this_should_be_callable', 10, set([1, 2, 3])]
        G = nx.Graph()
        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight='capacity')
        for flow_func in interface_funcs:
            for element in elements:
                assert_raises(nx.NetworkXError,
                              flow_func, G, 0, 1, flow_func=element)

    def test_flow_func_parameters(self):
        G = self.G
        fv = 3.0
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                result = interface_func(G, 'x', 'y', flow_func=flow_func)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert_equal(fv, result,
                             msg=msgi.format(flow_func.__name__,
                                             interface_func.__name__))

    def test_minimum_cut_no_cutoff(self):
        G = self.G
        for flow_func in flow_funcs:
            assert_raises(nx.NetworkXError, nx.minimum_cut, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)
            assert_raises(nx.NetworkXError, nx.minimum_cut_value, G, 'x', 'y',
                          flow_func=flow_func, cutoff=1.0)

    def test_kwargs(self):
        G = self.H
        fv = 1.0
        to_test = (
            (nx.shortest_augmenting_path, dict(two_phase=True)),
            (nx.preflow_push, dict(global_relabel_freq=5)),
        )
        for interface_func in interface_funcs:
            for flow_func, kwargs in to_test:
                result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
                if interface_func in max_min_funcs:
                    result = result[0]
                assert_equal(fv, result,
                             msg=msgi.format(flow_func.__name__,
                                             interface_func.__name__))

    def test_kwargs_default_flow_func(self):
        G = self.H
        for interface_func in interface_funcs:
            assert_raises(nx.NetworkXError, interface_func,
                          G, 0, 1, global_relabel_freq=2)

    def test_reusing_residual(self):
        G = self.G
        fv = 3.0
        s, t = 'x', 'y'
        R = build_residual_network(G, 'capacity')
        for interface_func in interface_funcs:
            for flow_func in flow_funcs:
                for i in range(3):
                    result = interface_func(G, 'x', 'y', flow_func=flow_func,
                                            residual=R)
                    if interface_func in max_min_funcs:
                        result = result[0]
                    assert_equal(fv, result,
                                 msg=msgi.format(flow_func.__name__,
                                                 interface_func.__name__))


# Tests specific to one algorithm
def test_preflow_push_global_relabel_freq():
    G = nx.DiGraph()
    G.add_edge(1, 2, capacity=1)
    R = nx.preflow_push(G, 1, 2, global_relabel_freq=None)
    assert_equal(R.graph['flow_value'], 1)
    assert_raises(nx.NetworkXError, preflow_push, G, 1, 2,
                  global_relabel_freq=-1)


def test_shortest_augmenting_path_two_phase():
    k = 5
    p = 1000
    G = nx.DiGraph()
    for i in range(k):
        G.add_edge('s', (i, 0), capacity=1)
        G.add_path(((i, j) for j in range(p)), capacity=1)
        G.add_edge((i, p - 1), 't', capacity=1)
    R = shortest_augmenting_path(G, 's', 't', two_phase=True)
    assert_equal(R.graph['flow_value'], k)
    R = shortest_augmenting_path(G, 's', 't', two_phase=False)
    assert_equal(R.graph['flow_value'], k)


class TestCutoff:

    def test_cutoff(self):
        k = 5
        p = 1000
        G = nx.DiGraph()
        for i in range(k):
            G.add_edge('s', (i, 0), capacity=2)
            G.add_path(((i, j) for j in range(p)), capacity=2)
            G.add_edge((i, p - 1), 't', capacity=2)
        R = shortest_augmenting_path(G, 's', 't', two_phase=True, cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)
        R = shortest_augmenting_path(G, 's', 't', two_phase=False, cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)
        R = edmonds_karp(G, 's', 't', cutoff=k)
        ok_(k <= R.graph['flow_value'] <= 2 * k)

    def test_complete_graph_cutoff(self):
        G = nx.complete_graph(5)
        nx.set_edge_attributes(G, 'capacity',
                               dict(((u, v), 1) for u, v in G.edges()))
        for flow_func in [shortest_augmenting_path, edmonds_karp]:
            for cutoff in [3, 2, 1]:
                result = nx.maximum_flow_value(G, 0, 4, flow_func=flow_func,
                                               cutoff=cutoff)
                assert_equal(cutoff, result,
                             msg="cutoff error in {0}".format(flow_func.__name__))
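# Minimal direct use of the interface functions exercised by this suite, as a
# standalone illustration:
import networkx as nx

G = nx.DiGraph()
G.add_edge('s', 't', capacity=3.0)
flow_value, flow_dict = nx.maximum_flow(G, 's', 't')
assert flow_value == 3.0 and flow_dict['s']['t'] == 3.0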
from typing import List


class SortingAlgorithm:
    def __init__(self, data: List[List[str]]):
        """
        Example data structure:

        Name | Ability | Gender
        ---|---|---
        John Lim | High | M
        """
        self.data = data
        self.high = []
        self.middle = []
        self.low = []
        for row in data:
            ability = row[1].lower()
            if ability in ("high", "h"):
                self.high.append(row[0])
            elif ability in ("middle", "m"):
                self.middle.append(row[0])
            elif ability in ("low", "l"):
                self.low.append(row[0])

        self.final_list = []
        # Order the three ability bands from largest to smallest.
        abilities_sorted = sorted([self.high, self.middle, self.low],
                                  key=len, reverse=True)
        print("Sorted abilities:", abilities_sorted)
        self.biggest_ability = abilities_sorted[0]
        self.middle_ability = abilities_sorted[1]
        self.smallest_ability = abilities_sorted[2]
        print("Self smallest ability:", self.smallest_ability)
        print("Self middle ability:", self.middle_ability)
        print("Self biggest ability:", self.biggest_ability)

    def sort_ability(self):
        # Rebuild the list so repeated calls do not accumulate groups.
        self.final_list = []
        print("Final list (start):", self.final_list)
        self.final_list.append(self.high)
        self.final_list.append(self.middle)
        self.final_list.append(self.low)
        print("Final list (end):", self.final_list)
        return self.final_list

    def __sort_values(self, ability_group: List[str], groups: int) -> List[int]:
        # Split len(ability_group) into `groups` counts differing by at most 1.
        result = [len(ability_group) // groups for _ in range(groups)]
        for v in range(len(ability_group) % groups):
            result[v] += 1
        return result

    def sort_mixed(self, groups: int):
        # Work on copies so that popping below does not mutate the class
        # attributes (a plain assignment would only alias them, breaking
        # any second call to this method).
        smallest_ability = list(self.smallest_ability)
        middle_ability = list(self.middle_ability)
        biggest_ability = list(self.biggest_ability)
        print("Smallest ability:", smallest_ability)
        print("Middle ability:", middle_ability)
        print("Biggest ability:", biggest_ability)

        # Split each band into per-group counts; the largest counts of the
        # smallest band pair with the smallest counts of the other bands.
        smallest_ability_grp = self.__sort_values(smallest_ability, groups)
        smallest_ability_grp.sort(reverse=True)
        middle_ability_grp = self.__sort_values(middle_ability, groups)
        middle_ability_grp.sort()
        biggest_ability_grp = self.__sort_values(biggest_ability, groups)
        biggest_ability_grp.sort()

        mixed_ability = []
        self.final_list = []
        for i in range(groups):
            mixed_ability.append([smallest_ability_grp[i],
                                  middle_ability_grp[i],
                                  biggest_ability_grp[i]])
            grouper = []
            for _ in range(mixed_ability[i][0]):
                grouper.append(smallest_ability.pop(0))
            for _ in range(mixed_ability[i][1]):
                grouper.append(middle_ability.pop(0))
            for _ in range(mixed_ability[i][2]):
                grouper.append(biggest_ability.pop(0))
            self.final_list.append(grouper)
        return self.final_list
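# --- Usage sketch (not part of the original module; the sample rows are
# made up, following the [name, ability, gender] layout documented above) ---
if __name__ == "__main__":
    sample = [
        ["John Lim", "High", "M"],
        ["Mary Tan", "Middle", "F"],
        ["Alex Koh", "Low", "M"],
        ["Siti Nur", "High", "F"],
    ]
    sorter = SortingAlgorithm(sample)
    print(sorter.sort_ability())  # one group per ability band
    print(sorter.sort_mixed(2))   # two balanced, mixed-ability groups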
"""Helpers for AIOGitHubAPI.""" from __future__ import annotations from typing import Optional import aiohttp from .const import HttpMethod, Repository, RepositoryType from .legacy.helpers import ( async_call_api as legacy_async_call_api, short_message, short_sha, ) from .objects.base import AIOGitHubAPIResponse def repository_full_name(repository: RepositoryType) -> str: """Return the repository name.""" if isinstance(repository, str): return repository if isinstance(repository, Repository): return repository.full_name return f"{repository['owner']}/{repository['repo']}" async def async_call_api( session: aiohttp.ClientSession, method: HttpMethod, url: str, headers: dict, params: Optional[dict] = None, data: dict or str or None = None, jsondata: bool = True, returnjson: bool = True, ) -> AIOGitHubAPIResponse: """Deprecated: Execute the API call.""" return await legacy_async_call_api( session, method, url, headers, params, data, jsondata, returnjson )
"""Sqlite database wrapper.""" from hashlib import sha256 from pathlib import Path import typing as t from . import generator from .batch import group_by_file_extension from .initializer import DotSlipbox from .processor import process_batch class Slipbox: """Slipbox main functions.""" def __init__(self, dot: DotSlipbox): self.conn = dot.database() self.dot = dot self.config = dot.config @property def basedir(self) -> Path: """Return base directory regardless of current working directory.""" return self.dot.parent def close(self) -> None: """Close database connection.""" self.conn.close() def __enter__(self) -> "Slipbox": return self def __exit__(self, exc_type, exc_value, traceback) -> None: # type: ignore self.close() def find_new_notes(self, paths: t.Iterable[Path]) -> t.Iterable[Path]: """Yield files that are not yet in the database.""" sql = "SELECT filename FROM Files" in_db = set(r[0] for r in self.conn.execute(sql)) for path in paths: filename = str(path.relative_to(self.basedir)) if filename not in in_db: yield path def find_notes(self) -> t.Iterable[Path]: """Find notes in root.""" root = self.basedir patterns = self.dot.patterns for path in root.rglob('*'): if path.is_file() and any(path.match(pat) for pat in patterns): yield path def purge(self) -> t.Iterable[Path]: """Purge outdated/missing files and sections from the database. Also returns all notes found. """ digests = { p: sha256(p.read_bytes()).hexdigest() for p in self.find_notes() } outdated = [] cur = self.conn.cursor() cur.execute("PRAGMA foreign_keys=ON") for filename, _hash in cur.execute("SELECT filename, hash FROM Files"): path = self.basedir/filename if digests.get(path) != _hash: outdated.append(filename) cur.executemany("DELETE FROM Files WHERE filename IN (?)", ((filename,) for filename in outdated)) self.conn.commit() return digests.keys() def process(self, paths: t.Iterable[Path]) -> None: """Process input files.""" inputs = list(paths) for batch in group_by_file_extension(inputs): process_batch(self.conn, batch, self.config, self.basedir) def compile(self) -> None: """Compile processed HTML into final output.""" pandoc = Path(self.config.get("slipbox", "pandoc_path")) options = self.config.get("slipbox", "document_options") output_directory = self.basedir/self.config.get("slipbox", "output_directory") title = self.config.get("slipbox", "title") generator.main(self.conn, pandoc, options, output_directory, title) def run(self) -> None: """Run all steps needed to compile output.""" notes = self.find_new_notes(self.purge()) self.process(notes) self.compile()
import arpy  # see https://github.com/viraptor/arpy/pull/5
from gzip import GzipFile
from lzma import LZMAFile
from tarfile import TarFile


class Deb:
    def __init__(self, filename):
        super().__init__()
        self.filename = filename

    def _readControl(self):
        ar = arpy.Archive(self.filename)
        ar.read_all_headers()
        if b'control.tar.xz' in ar.archived_files:
            # NOTE: this requires https://github.com/viraptor/arpy/pull/5
            tar = LZMAFile(filename=ar.archived_files[b'control.tar.xz'])
        elif b'control.tar.gz' in ar.archived_files:
            tar = GzipFile(fileobj=ar.archived_files[b'control.tar.gz'])
        else:
            raise ValueError('Unable to find control file')
        raw = TarFile(fileobj=tar)
        control = raw.extractfile('./control').read()
        raw.close()
        tar.close()
        ar.close()
        return control

    def getControlFields(self):
        order = []
        results = {}
        results['Description'] = ''
        control = self._readControl()
        doDescription = False
        for line in control.splitlines():
            line = line.decode()
            if doDescription:
                results['Description'] += '\n'
                results['Description'] += str(line)
            else:
                pos = line.find(':')
                key = line[0:pos]
                value = line[pos + 1:]
                key = key.strip()
                order.append(key)
                results[key] = value.strip()
                if key == 'Description':
                    doDescription = True
        return (order, results)
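# --- Usage sketch (illustrative; the .deb filename below is made up) ---
# deb = Deb('example_1.0-1_amd64.deb')
# order, fields = deb.getControlFields()
# print(fields.get('Package'), fields.get('Version'))
# print(fields.get('Description'))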
import unittest

from pastry.exceptions import HttpError


def raise_error():
    raise HttpError('message', 'statuscode')


class PastryClientTestCase(unittest.TestCase):
    def test_init(self):
        self.assertRaises(HttpError, raise_error)

    def test_str(self):
        error = HttpError('message', 'statuscode')
        self.assertEqual(str(error), '(statuscode) message')
import config
import dbutils
import log

log = log.Log()

FIELDS = ['username', 'age', 'tel', 'email']
FILENAME = "51reboot.ini"

cfg = config.CONFIG()
msg, ok = cfg.readconfig(FILENAME, 'db')
if ok:
    db = dbutils.DB(msg['host'], msg['user'], msg['password'], int(msg['port']))


class User(object):
    def add(self, name, age, tel, email):
        res, ok = db.select("select username from users where username = '{}';".format(name))
        if not ok:
            fields_string = ','.join(FIELDS)
            values_string = "'{}', {}, '{}', '{}'".format(name, age, tel, email)
            sql = '''INSERT INTO users({}) VALUES({});'''.format(fields_string, values_string)
            db.insert(sql)
        else:
            print("username: {} already exists.".format(name))

    def delete(self, name):
        res, ok = db.select("select username from users where username = '{}';".format(name))
        if ok:
            sql = "delete from users where username = '{}';".format(name)
            msg, ok = db.delete(sql)
            log.opLog().info(msg)
        else:
            print("username: {} not found.".format(name))

    def update(self, name, field, value):
        res, ok = db.select("select username from users where username = '{}';".format(name))
        if ok:
            sql = '''update users set {} = '{}' where username = '{}';'''.format(field, value, name)
            db.update(sql)
        else:
            print('username: {} not found.'.format(name))

    def find(self, name):
        data, ok = db.select("select * from users where username = '{}';".format(name))
        if ok:
            return data
        print('username: {} not found.'.format(name))

    def list(self):
        data, ok = db.select("select * from users;")
        return data

    def display(self, page, pagesize=5):
        # Page numbers start at 1; pagesize defaults to 5.
        start = (int(page) - 1) * int(pagesize)
        end = start + int(pagesize)
        res, ok = db.select("select * from users;")
        if ok:
            return [list(i) for i in res][start:end]
"""Settings for live deployed environments: stating, qa, production, etc.""" from .base import * # noqa DEBUG = False SECRET_KEY = os.environ['SECRET_KEY'] ALLOWED_HOSTS = os.environ['ALLOWED_HOSTS'].split(';') STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static') MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media') # Security settings SSL_ENABLED = not os.environ.get('SSL_DISABLED', False) SECURE_SSL_REDIRECT = SSL_ENABLED SECURE_HSTS_SECONDS = 60 * 60 * 24 * 365 if SSL_ENABLED else 0 SECURE_BROWSER_XSS_FILTER = True SECURE_CONTENT_TYPE_NOSNIFF = True SESSION_COOKIE_SECURE = SSL_ENABLED SESSION_COOKIE_HTTPONLY = True CSRF_COOKIE_SECURE = SSL_ENABLED CSRF_COOKIE_HTTPONLY = True X_FRAME_OPTIONS = 'DENY'
#!/usr/bin/env python3
'''Computing exponentials using recursion'''

__project__ = "Calculate exponents using recursion"
__author__ = "michael ketiku"


def compute_exponent(a, n):
    '''Compute a raised to the power n (n is assumed to be a
    non-negative integer).'''
    if n == 0:
        # a**0 == 1; the original base case returned a for n <= 1,
        # which gave the wrong answer for n == 0.
        return 1
    return a * compute_exponent(a, n - 1)


res = compute_exponent(10, 2)
print(res)
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2017-06-25 17:50 from __future__ import unicode_literals from decimal import Decimal import django.core.validators from django.db import migrations, models import django.db.models.deletion import djangosige.apps.cadastro.models.empresa class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Banco', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('banco', models.CharField(blank=True, choices=[('001', '001 - BANCO DO BRASIL S.A.'), ('003', '003 - BANCO DA AMAZONIA S.A.'), ('004', '004 - BANCO DO NORDESTE DO BRASIL S.A.'), ('012', '012 - BANCO STANDARD DE INVESTIMENTOS S.A.'), ('014', '014 - NATIXIS BRASIL S.A. BANCO MÚLTIPLO'), ('019', '019 - BANCO AZTECA DO BRASIL S.A.'), ('021', '021 - BANESTES S.A. BANCO DO ESTADO DO ESPIRITO SANTO'), ('024', '024 - BANCO DE PERNAMBUCO S.A. - BANDEPE'), ('025', '025 - BANCO ALFA S.A.'), ('029', '029 - BANCO BANERJ S.A.'), ('031', '031 - BANCO BEG S.A.'), ('033', '033 - BANCO SANTANDER (BRASIL) S.A.'), ('036', '036 - BANCO BRADESCO BBI S.A.'), ('037', '037 - BANCO DO ESTADO DO PARÁ S.A.'), ('040', '040 - BANCO CARGILL S.A.'), ('041', '041 - BANCO DO ESTADO DO RIO GRANDE DO SUL S.A.'), ('044', '044 - BANCO BVA S.A.'), ('045', '045 - BANCO OPPORTUNITY S.A.'), ('047', '047 - BANCO DO ESTADO DE SERGIPE S.A.'), ('062', '062 - HIPERCARD BANCO MÚLTIPLO S.A.'), ('063', '063 - BANCO IBI S.A. - BANCO MÚLTIPLO'), ('065', '065 - BANCO LEMON S.A.'), ('066', '066 - BANCO MORGAN STANLEY S.A.'), ('069', '069 - BPN BRASIL BANCO MÚLTIPLO S.A.'), ('070', '070 - BRB - BANCO DE BRASILIA S.A.'), ('072', '072 - BANCO RURAL MAIS S.A.'), ('073', '073 - BB BANCO POPULAR DO BRASIL S.A.'), ('074', '074 - BANCO J. SAFRA S.A.'), ('075', '075 - BANCO CR2 S/A'), ('076', '076 - BANCO KDB DO BRASIL S.A.'), ('077', '077 - BANCO INTERMEDIUM S/A'), ('078', '078 - BES INVESTIMENTO DO BRASIL S.A. 
- BANCO DE INVESTIMENTO'), ('079', '079 - JBS BANCO S/A'), ('081', '081 - CONCÓRDIA BANCO S.A.'), ('082', '082 - BANCO TOPÁZIO S.A.'), ('083', '083 - BANCO DA CHINA BRASIL S.A'), ('096', '096 - BANCO BM&F DE SERVIÇOS DE LIQUIDAÇÃO E CUSTÓDIA S.A.'), ('104', '104 - CAIXA ECONOMICA FEDERAL'), ('107', '107 - BANCO BBM S/A'), ('151', '151 - BANCO NOSSA CAIXA S.A.'), ('184', '184 - BANCO ITAÚ BBA S.A.'), ('204', '204 - BANCO BRADESCO CARTÕES S.A.'), ('208', '208 - BANCO UBS PACTUAL S.A.'), ('212', '212 - BANCO MATONE S.A.'), ('213', '213 - BANCO ARBI S.A.'), ('214', '214 - BANCO DIBENS S.A.'), ('215', '215 - BANCO COMERCIAL E DE INVESTIMENTO SUDAMERIS S.A.'), ('217', '217 - BANCO JOHN DEERE S.A.'), ('218', '218 - BANCO BONSUCESSO S.A.'), ('222', '222 - BANCO CALYON BRASIL S.A.'), ('224', '224 - BANCO FIBRA S.A.'), ('225', '225 - BANCO BRASCAN S.A.'), ('229', '229 - BANCO CRUZEIRO DO SUL S.A.'), ('230', '230 - UNICARD BANCO MÚLTIPLO S.A.'), ('233', '233 - BANCO GE CAPITAL S.A.'), ('237', '237 - BANCO BRADESCO S.A.'), ('241', '241 - BANCO CLASSICO S.A.'), ('243', '243 - BANCO MÁXIMA S.A.'), ('246', '246 - BANCO ABC BRASIL S.A.'), ('248', '248 - BANCO BOAVISTA INTERATLANTICO S.A.'), ('249', '249 - BANCO INVESTCRED UNIBANCO S.A.'), ('250', '250 - BANCO SCHAHIN S.A.'), ('254', '254 - PARANÁ BANCO S.A.'), ('263', '263 - BANCO CACIQUE S.A.'), ('265', '265 - BANCO FATOR S.A.'), ( '266', '266 - BANCO CEDULA S.A.'), ('300', '300 - BANCO DE LA NACION ARGENTINA'), ('318', '318 - BANCO BMG S.A.'), ('320', '320 - BANCO INDUSTRIAL E COMERCIAL S.A.'), ('341', '341 - BANCO ITAÚ S.A.'), ('366', '366 - BANCO SOCIETE GENERALE BRASIL S.A.'), ('370', '370 - BANCO WESTLB DO BRASIL S.A.'), ('376', '376 - BANCO J.P. MORGAN S.A.'), ('389', '389 - BANCO MERCANTIL DO BRASIL S.A.'), ('394', '394 - BANCO FINASA BMC S.A.'), ('399', '399 - HSBC BANK BRASIL S.A. - BANCO MULTIPLO'), ('409', '409 - UNIBANCO-UNIAO DE BANCOS BRASILEIROS S.A.'), ('412', '412 - BANCO CAPITAL S.A.'), ('422', '422 - BANCO SAFRA S.A.'), ('453', '453 - BANCO RURAL S.A.'), ('456', '456 - BANCO DE TOKYO-MITSUBISHI UFJ BRASIL S/A'), ('464', '464 - BANCO SUMITOMO MITSUI BRASILEIRO S.A.'), ('473', '473 - BANCO CAIXA GERAL - BRASIL S.A.'), ('477', '477 - CITIBANK N.A.'), ('479', '479 - BANCO ITAUBANK S.A.'), ('487', '487 - DEUTSCHE BANK S.A. - BANCO ALEMAO'), ('488', '488 - JPMORGAN CHASE BANK, NATIONAL ASSOCIATION'), ('492', '492 - ING BANK N.V.'), ('494', '494 - BANCO DE LA REPUBLICA ORIENTAL DEL URUGUAY'), ('495', '495 - BANCO DE LA PROVINCIA DE BUENOS AIRES'), ('505', '505 - BANCO CREDIT SUISSE (BRASIL) S.A.'), ('600', '600 - BANCO LUSO BRASILEIRO S.A.'), ('604', '604 - BANCO INDUSTRIAL DO BRASIL S.A.'), ('610', '610 - BANCO VR S.A.'), ('611', '611 - BANCO PAULISTA S.A.'), ('612', '612 - BANCO GUANABARA S.A.'), ('613', '613 - BANCO PECUNIA S.A.'), ('623', '623 - BANCO PANAMERICANO S.A.'), ('626', '626 - BANCO FICSA S.A.'), ('630', '630 - BANCO INTERCAP S.A.'), ('633', '633 - BANCO RENDIMENTO S.A.'), ('634', '634 - BANCO TRIANGULO S.A.'), ('637', '637 - BANCO SOFISA S.A.'), ('638', '638 - BANCO PROSPER S.A.'), ('641', '641 - BANCO ALVORADA S.A.'), ('643', '643 - BANCO PINE S.A.'), ('652', '652 - ITAÚ UNIBANCO BANCO MÚLTIPLO S.A.'), ('653', '653 - BANCO INDUSVAL S.A.'), ('654', '654 - BANCO A.J. 
RENNER S.A.'), ('655', '655 - BANCO VOTORANTIM S.A.'), ('707', '707 - BANCO DAYCOVAL S.A.'), ('719', '719 - BANIF - BANCO INTERNACIONAL DO FUNCHAL (BRASIL), S.A.'), ('721', '721 - BANCO CREDIBEL S.A.'), ('734', '734 - BANCO GERDAU S.A'), ('735', '735 - BANCO POTTENCIAL S.A.'), ('738', '738 - BANCO MORADA S.A'), ('739', '739 - BANCO BGN S.A.'), ('740', '740 - BANCO BARCLAYS S.A.'), ('741', '741 - BANCO RIBEIRAO PRETO S.A.'), ('743', '743 - BANCO SEMEAR S.A.'), ('745', '745 - BANCO CITIBANK S.A.'), ('746', '746 - BANCO MODAL S.A.'), ('747', '747 - BANCO RABOBANK INTERNATIONAL BRASIL S.A.'), ('748', '748 - BANCO COOPERATIVO SICREDI S.A.'), ('749', '749 - BANCO SIMPLES S.A.'), ('751', '751 - DRESDNER BANK BRASIL S.A. BANCO MULTIPLO'), ('752', '752 - BANCO BNP PARIBAS BRASIL S.A.'), ('753', '753 - NBC BANK BRASIL S. A. - BANCO MÚLTIPLO'), ('756', '756 - BANCO COOPERATIVO DO BRASIL S.A. - BANCOOB'), ('757', '757 - BANCO KEB DO BRASIL S.A.')], max_length=3, null=True)), ('agencia', models.CharField(blank=True, max_length=8, null=True)), ('conta', models.CharField(blank=True, max_length=32, null=True)), ('digito', models.CharField(blank=True, max_length=8, null=True)), ], ), migrations.CreateModel( name='Categoria', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('categoria_desc', models.CharField(max_length=32)), ], ), migrations.CreateModel( name='Documento', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tipo', models.CharField(max_length=32)), ('documento', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='Email', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('email', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='Endereco', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tipo_endereco', models.CharField(blank=True, choices=[('UNI', 'Único'), ('RES', 'Residencial'), ( 'COM', 'Comercial'), ('COB', 'Cobrança'), ('ENT', 'Entrega'), ('OUT', 'Outro')], max_length=3, null=True)), ('logradouro', models.CharField( blank=True, max_length=255, null=True)), ('numero', models.CharField(blank=True, max_length=16, null=True)), ('bairro', models.CharField(blank=True, max_length=64, null=True)), ('complemento', models.CharField( blank=True, max_length=64, null=True)), ('pais', models.CharField(blank=True, default='Brasil', max_length=32, null=True)), ('cpais', models.CharField(blank=True, default='1058', max_length=5, null=True)), ('municipio', models.CharField(blank=True, max_length=64, null=True)), ('cmun', models.CharField(blank=True, max_length=9, null=True)), ('cep', models.CharField(blank=True, max_length=16, null=True)), ('uf', models.CharField(blank=True, choices=[('AC', 'AC'), ('AL', 'AL'), ('AP', 'AP'), ('AM', 'AM'), ('BA', 'BA'), ('CE', 'CE'), ('DF', 'DF'), ('ES', 'ES'), ('EX', 'EX'), ('GO', 'GO'), ('MA', 'MA'), ('MT', 'MT'), ('MS', 'MS'), ( 'MG', 'MG'), ('PA', 'PA'), ('PB', 'PB'), ('PR', 'PR'), ('PE', 'PE'), ('PI', 'PI'), ('RJ', 'RJ'), ('RN', 'RN'), ('RS', 'RS'), ('RO', 'RO'), ('RR', 'RR'), ('SC', 'SC'), ('SP', 'SP'), ('SE', 'SE'), ('TO', 'TO')], max_length=3, null=True)), ], ), migrations.CreateModel( name='Marca', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('marca_desc', models.CharField(max_length=32)), ], ), migrations.CreateModel( 
name='MinhaEmpresa', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.CreateModel( name='Pessoa', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('nome_razao_social', models.CharField(max_length=255)), ('tipo_pessoa', models.CharField(choices=[ ('PF', 'Pessoa Física'), ('PJ', 'Pessoa Jurídica')], max_length=2)), ('inscricao_municipal', models.CharField( blank=True, max_length=32, null=True)), ('informacoes_adicionais', models.CharField( blank=True, max_length=1055, null=True)), ('data_criacao', models.DateTimeField(editable=False)), ('data_edicao', models.DateTimeField()), ], ), migrations.CreateModel( name='Produto', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('codigo', models.CharField(max_length=15)), ('codigo_barras', models.CharField( blank=True, max_length=16, null=True)), ('descricao', models.CharField(max_length=255)), ('custo', models.DecimalField(decimal_places=2, default=Decimal( '0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])), ('venda', models.DecimalField(decimal_places=2, default=Decimal( '0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])), ('inf_adicionais', models.CharField( blank=True, max_length=255, null=True)), ('ncm', models.CharField(blank=True, max_length=11, null=True)), ('origem', models.CharField(choices=[('0', '0 - Nacional'), ('1', '1 - Estrangeira - Importação direta.'), ('2', '2 - Estrangeira - Adquirida no mercado interno.'), ('3', '3 - Nacional - Mercadoria ou bem com Conteúdo de Importação superior a 40% e inferior ou igual a 70%.'), ('4', '4 - Nacional - Cuja produção tenha sido feita em conformidade com os processos produtivos básicos de que tratam o Decreto-Lei nº 288/67, e as Leis nºs 8.248/91, 8.387/91, 10.176/01 e 11.484/ 07'), ( '5', '5 - Nacional - Mercadoria ou bem com Conteúdo de Importação inferior ou igual a 40% (quarenta por cento)'), ('6', '6 - Estrangeira - Importação direta, sem similar nacional, constante em lista da Resolução CAMEX nº 79/2012 e gás natural'), ('7', '7 - Estrangeira - Adquirida no mercado interno, sem similar nacional, constante em lista da Resolução CAMEX nº 79/2012 e gás natural'), ('8', '8 - Nacional - Mercadoria ou bem com Conteúdo de Importação superior a 70% (setenta por cento).')], default='0', max_length=1)), ('cest', models.CharField(blank=True, max_length=7, null=True)), ('estoque_minimo', models.DecimalField(decimal_places=2, default=Decimal( '0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])), ('estoque_atual', models.DecimalField(decimal_places=2, default=Decimal( '0.00'), max_digits=16, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])), ('controlar_estoque', models.BooleanField(default=True)), ('categoria', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cadastro.Categoria')), ], ), migrations.CreateModel( name='Site', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('site', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='Telefone', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tipo_telefone', models.CharField(blank=True, choices=[ ('FIX', 'Fixo'), ('CEL', 'Celular'), 
('FAX', 'Fax'), ('OUT', 'Outro')], max_length=8, null=True)), ('telefone', models.CharField(max_length=32)), ], ), migrations.CreateModel( name='Unidade', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sigla_unidade', models.CharField(max_length=3)), ('unidade_desc', models.CharField(max_length=16)), ], ), migrations.CreateModel( name='Veiculo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('descricao', models.CharField(max_length=255)), ('placa', models.CharField(blank=True, max_length=8, null=True)), ('uf', models.CharField(blank=True, choices=[('AC', 'AC'), ('AL', 'AL'), ('AP', 'AP'), ('AM', 'AM'), ('BA', 'BA'), ('CE', 'CE'), ('DF', 'DF'), ('ES', 'ES'), ('EX', 'EX'), ('GO', 'GO'), ('MA', 'MA'), ('MT', 'MT'), ('MS', 'MS'), ( 'MG', 'MG'), ('PA', 'PA'), ('PB', 'PB'), ('PR', 'PR'), ('PE', 'PE'), ('PI', 'PI'), ('RJ', 'RJ'), ('RN', 'RN'), ('RS', 'RS'), ('RO', 'RO'), ('RR', 'RR'), ('SC', 'SC'), ('SP', 'SP'), ('SE', 'SE'), ('TO', 'TO')], max_length=3, null=True)), ], ), migrations.CreateModel( name='Cliente', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cadastro.Pessoa')), ('limite_de_credito', models.DecimalField( blank=True, decimal_places=2, default=Decimal('0.00'), max_digits=15, null=True)), ('indicador_ie', models.CharField(choices=[('1', 'Contribuinte ICMS'), ( '2', 'Contribuinte isento de Inscrição'), ('9', 'Não Contribuinte')], default='9', max_length=1)), ('id_estrangeiro', models.CharField( blank=True, max_length=20, null=True)), ], bases=('cadastro.pessoa',), ), migrations.CreateModel( name='Empresa', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cadastro.Pessoa')), ('logo_file', models.ImageField(blank=True, default='imagens/logo.png', null=True, upload_to=djangosige.apps.cadastro.models.empresa.logo_directory_path)), ('cnae', models.CharField(blank=True, max_length=10, null=True)), ('iest', models.CharField(blank=True, max_length=32, null=True)), ], bases=('cadastro.pessoa',), ), migrations.CreateModel( name='Fornecedor', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cadastro.Pessoa')), ('ramo', models.CharField(blank=True, max_length=64, null=True)), ], bases=('cadastro.pessoa',), ), migrations.CreateModel( name='PessoaFisica', fields=[ ('pessoa_id', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='pessoa_fis_info', serialize=False, to='cadastro.Pessoa')), ('cpf', models.CharField(blank=True, max_length=32, null=True)), ('rg', models.CharField(blank=True, max_length=32, null=True)), ('nascimento', models.DateField(blank=True, null=True)), ], ), migrations.CreateModel( name='PessoaJuridica', fields=[ ('pessoa_id', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='pessoa_jur_info', serialize=False, to='cadastro.Pessoa')), ('cnpj', models.CharField(blank=True, max_length=32, null=True)), ('nome_fantasia', models.CharField( blank=True, max_length=255, null=True)), ('inscricao_estadual', models.CharField( blank=True, max_length=32, null=True)), ('responsavel', models.CharField( blank=True, max_length=32, 
null=True)), ('sit_fiscal', models.CharField(blank=True, choices=[('LR', 'Lucro Real'), ('LP', 'Lucro Presumido'), ( 'SN', 'Simples Nacional'), ('SE', 'Simples Nacional, excesso sublimite de receita bruta')], max_length=2, null=True)), ('suframa', models.CharField(blank=True, max_length=16, null=True)), ], ), migrations.CreateModel( name='Transportadora', fields=[ ('pessoa_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cadastro.Pessoa')), ], bases=('cadastro.pessoa',), ), migrations.AddField( model_name='telefone', name='pessoa_tel', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='telefone', to='cadastro.Pessoa'), ), migrations.AddField( model_name='site', name='pessoa_site', field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name='site', to='cadastro.Pessoa'), ), ]
import json
import re
from threading import Thread
from typing import List, Callable, Any, TypeVar, Generic, Union

from .constants import requestMaxRetries, cacheFolder

globalCache = {}

T = TypeVar("T")
TOptList = Union[T, List[T]]


class Cache(Generic[T]):
    def __init__(self, filename: str, writeToDisk=True):
        self.filename = re.sub(r"[/\\:*?\"<>|]", "_", filename)
        self.writeToDisk = writeToDisk

    def get(
        self,
        requests: Union[Callable[[], T], List[Callable[[], T]]],
        onDone: Callable[[bool, TOptList], None],
        onError: Callable[[Any], Any] = None,
        beforeRequest: Callable[[], Any] = None,
        processResponse: Callable[[TOptList], TOptList] = None,
        reload: bool = False
    ) -> None:
        """
        Gets the data from either the cache synchronously or loads it
        asynchronously by calling "requests". You have to check yourself in
        the calling thread when the requests have finished!

        Parameters
        ----------
        requests
            A single function or list of functions executing the requests and
            returning their responses (new thread)
        onDone
            Called when the data has been loaded either from cache (calling
            thread) or from requests (new thread). The first argument is True
            when the data was loaded from cache. The second argument is the
            result from the responses, optionally passed through
            processResponse
        onError
            Called when there was an error thrown from one of the request
            functions (new thread)
        beforeRequest
            Called before a request is made, in the calling thread
        processResponse
            Called with all responses; should return the data that should be
            cached (new thread)
        reload
            When False (default), tries to read from cache and then executes
            the requests if reading failed. When True, ignores the cache.
        """
        global globalCache
        wasNoList = False
        if not isinstance(requests, list):
            requests = [requests]
            wasNoList = True

        def load():
            responses = []
            for request in requests:
                res = None
                err = False
                for _ in range(requestMaxRetries):
                    err = False
                    try:
                        res = request()
                        break
                    except Exception as e:
                        err = e
                if err and onError:
                    onError(err)
                    return
                responses.append(res)
            if len(responses) == 1 and wasNoList:
                responses = responses[0]
            if processResponse:
                responses = processResponse(responses)
            if self.writeToDisk:
                with open(cacheFolder + self.filename + ".json", "w") as f:
                    json.dump(responses, f)
            else:
                globalCache[self.filename] = responses
            onDone(False, responses)

        if not reload:
            if self.writeToDisk:
                try:
                    with open(cacheFolder + self.filename + ".json", "r") as f:
                        onDone(True, json.load(f))
                except FileNotFoundError:
                    reload = True
            elif self.filename in globalCache:
                onDone(True, globalCache[self.filename])
            else:
                reload = True

        if reload:
            if beforeRequest:
                beforeRequest()
            t = Thread(target=load)
            t.daemon = True
            t.start()
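# --- Usage sketch (illustrative; fetch_user and its URL are made up) ---
# A minimal example of driving Cache.get with a single request callable:
#
# import requests
#
# def fetch_user():
#     return requests.get("https://example.com/api/user").json()
#
# def on_done(from_cache, data):
#     print("from cache:" if from_cache else "freshly loaded:", data)
#
# Cache("user").get(fetch_user, on_done, onError=print)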
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Routines to get synthetic photometry from Dan Kasen's Kilonova model - Gautham Narayan (gnarayan@stsci.edu), 20180325 """ import sys import os import numpy as np import astropy import astropy.constants import astropy.coordinates as c import astropy.units as u import astropy.table as at import h5py import bisect import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages cdbs = os.getenv('PYSYN_CDBS') if cdbs is None: cdbs = '~/work/synphot/' cdbs = os.path.expanduser(cdbs) os.environ['PYSYN_CDBS'] = cdbs import pysynphot as S import webbpsf as W # define some constants SPEED_OF_LIGHT = astropy.constants.c.cgs.value TELESCOPE_AREA = 25.0 * 10000 # cm^2 -- Area of the telescope has to be in centimeters because pysynphot... S.setref(area=TELESCOPE_AREA) SEC_TO_DAY = u.second.to(u.day) CM_TO_ANGSTROM = u.centimeter.to(u.angstrom) ANGSTROM_TO_CM = u.angstrom.to(u.centimeter) FNU_TO_MJY = (u.erg/(u.centimeter**2)/u.second/u.Hertz).to(u.microjansky) ANGSTROM_TO_MICRON = u.angstrom.to(u.micron) MPC_TO_CM = u.megaparsec.to(u.centimeter) DISTANCE = [50, 120, 200] TMAX = 90 class Kilonova(object): def __init__(self): """ Read Dan's Kilonova spectral model and return the base arrays """ name = 'spectrum.h5' fin = h5py.File(name,'r') # frequency in Hz nu = np.array(fin['nu'],dtype='d') # array of time in seconds times = np.array(fin['time']) # covert time to days times *= SEC_TO_DAY # specific luminosity (ergs/s/Hz) # this is a 2D array, Lnu[times][nu] Lnu_all = np.array(fin['Lnu'],dtype='d') self._times = times self._nu = nu self._Lnu_all = Lnu_all def get_model(self, phase): """ Get the flam spectrum for some specific phase """ it = bisect.bisect(self._times, phase) it -= 1 # I think Dan's array indexing is off by 1 Lnu = self._Lnu_all[it,:] # if you want thing in Flambda (ergs/s/Angstrom) lam = SPEED_OF_LIGHT/self._nu*CM_TO_ANGSTROM Llam = Lnu*self._nu**2.0/SPEED_OF_LIGHT/CM_TO_ANGSTROM return lam, Llam def get_norm_model(self, phase, distance): """ Get the flam spectrum for some specific phase and distance """ dist = c.Distance(distance*u.megaparsec) z = dist.z lam, flam = self.get_model(phase) lamz = lam*(1.+z) fnorm = flam/(4*np.pi*(distance*MPC_TO_CM)**2.) 
return lamz, fnorm def main(): # save the figures figdir = 'Figures' # just listing the wide filters nircam_bandpasses = 'F070W,F090W,F115W,F150W,F200W,F277W,F356W,F444W' miri_bandpasses = 'F560W,F770W,F1000W,F1130W,F1280W,F1500W,F1800W,F2100W,F2550W' nircam_bandpasses = nircam_bandpasses.split(',') miri_bandpasses = miri_bandpasses.split(',') # configure the instruments nircam = W.NIRCam() miri = W.MIRI() # load the bandpasses bandpasses = {} for bp in nircam_bandpasses: nircam.filter = bp bpmodel = nircam._getSynphotBandpass(nircam.filter) bandpasses[bp] = bpmodel for bp in miri_bandpasses: miri.filter = bp bpmodel = miri._getSynphotBandpass(miri.filter) bandpasses[bp] = bpmodel # we just need a couple of bandpasses for testing use_bandpasses = nircam_bandpasses + miri_bandpasses[0:3] # init the kilonova model and create some arrays to store output kn = Kilonova() # do this for a few distances for j, dmpc in enumerate(DISTANCE): time = [] rfphase = [] flux = {} if j == 0: fig = plt.figure(figsize=(8,15)) ax = fig.add_subplot(1,1,1) for i, phase in enumerate(kn._times): # get the kilonova model and spectrum for this phase and distance lam, flam = kn.get_model(phase) lamz, fnorm = kn.get_norm_model(phase, dmpc) name = 'kilonova_{:+.2f}'.format(phase) spec = S.ArraySpectrum(wave=lamz, flux=fnorm, waveunits='angstrom', fluxunits='flam', name=name) # get synthetic mags in each passband for bp in use_bandpasses: passband = bandpasses[bp] obs = S.Observation(spec, passband, force='taper') try: mag = obs.effstim('abmag') except ValueError as e: mag = np.nan thispb = flux.get(bp) if thispb is None: thispb = [mag,] else: thispb.append(mag) flux[bp] = thispb # keep a track of rest-frame phase + observer frame days (should make much difference at these distances) dist = c.Distance(dmpc*u.megaparsec) z = dist.z rfphase.append(phase) time.append(phase*(1.+z)) # write output photometry tables if i % 5 == 0: # convert flam -> fnu -> mJy (YUCK) lam_micron = lamz *ANGSTROM_TO_MICRON f_nu = (((lamz * ANGSTROM_TO_CM)**2.) 
/ SPEED_OF_LIGHT) * fnorm / ANGSTROM_TO_MICRON f_mjy = f_nu * FNU_TO_MJY table_name = 'Tables/kilonova_orig_{}Mpc_p{:+.2f}.txt'.format(dmpc, phase) this_spec = at.Table([lam_micron, f_mjy], names=['wave_micron','flux_mjy']) this_spec.sort('wave_micron') this_spec.write(table_name, format='ascii.fixed_width', delimiter=' ',overwrite='True') # plot output spectral sequence if j == 0: fplot = flam/flam.mean() ax.plot(lam*ANGSTROM_TO_MICRON, fplot+180 - i, 'k-') # finalize spectral sequence plot if j==0: ax.tick_params(axis='both', which='major', labelsize='large') ax.set_xlabel(r'Rest Wavelength ($\mu$m)', fontsize='xx-large') ax.set_ylabel(r'Relative F$_{\lambda}$ + constant', fontsize='xx-large') ax.set_xlim(0.5, 9.5) fig.tight_layout(rect=[0,0,1,0.96]) plt.savefig('{}/kilonova_spec.pdf'.format(figdir)) plt.close(fig) # dump output mag tables arrays = [rfphase, time,] + [flux[bp] for bp in use_bandpasses] names = ['rfphase', 'ofphase'] + [bp for bp in use_bandpasses] out = at.Table(arrays, names=names) out.write('Tables/kilonova_phottable_{}Mpc.txt'.format(dmpc), delimiter=' ', format='ascii.fixed_width', overwrite=True) # plot up the lightcurves npbs = len(use_bandpasses) color=iter(plt.cm.tab20(np.linspace(0,1,npbs))) with PdfPages('{}/kilonova_phot_{}Mpc.pdf'.format(figdir, dmpc)) as pdf: fig = plt.figure(figsize=(10,10)) for i, bp in enumerate(use_bandpasses): # save four passbands per page if i%4 == 0 and i > 0: fig.suptitle('Kilonova Synthetic Photometry {} Mpc'.format(dmpc), fontsize='xx-large') fig.tight_layout(rect=[0,0,1,0.93]) pdf.savefig(fig) plt.close(fig) fig = plt.figure(figsize=(10,10)) # plot up a passband ax = fig.add_subplot(2,2,i%4+1) thiscol = next(color) ax.plot(out['ofphase'], out[bp], marker='o', linestyle='-', lw=0.5, label=bp, color=thiscol) ax.tick_params(axis='both', which='major', labelsize='large') ax.set_ylabel('{} (AB mag)'.format(bp), fontsize='xx-large') ax.set_xlabel('Observer-frame Phase (Days)', fontsize='xx-large') ax.legend(loc='upper right', frameon=False) ymin, ymax = ax.get_ylim() ax.set_ylim((ymax, ymin)) # finalize lightcurve plot if i == npbs-1: fig.suptitle('Kilonova Synthetic Photometry {} Mpc'.format(dmpc), fontsize='xx-large') fig.tight_layout(rect=[0,0,1,0.93]) pdf.savefig(fig) plt.close(fig) if __name__=='__main__': sys.exit(main())
from collections import defaultdict import aoc_helper from aoc_helper import ( decode_text, extract_ints, frange, irange, iter, list, map, range, tail_call, ) def rotate(facing, up, point): new_point = list([0, 0, 0]) match facing: case "+x": new_point[0] = point[0] match up: case "+y": new_point[1] = point[1] new_point[2] = point[2] case "-y": new_point[1] = -point[1] new_point[2] = -point[2] case "+z": new_point[1] = point[2] new_point[2] = -point[1] case "-z": new_point[1] = -point[2] new_point[2] = point[1] case "-x": new_point[0] = -point[0] match up: case "+y": new_point[1] = point[1] new_point[2] = -point[2] case "-y": new_point[1] = -point[1] new_point[2] = point[2] case "+z": new_point[1] = point[2] new_point[2] = point[1] case "-z": new_point[1] = -point[2] new_point[2] = -point[1] case "+y": new_point[0] = point[1] match up: case "+x": new_point[1] = point[0] new_point[2] = -point[2] case "-x": new_point[1] = -point[0] new_point[2] = point[2] case "+z": new_point[1] = point[2] new_point[2] = point[0] case "-z": new_point[1] = -point[2] new_point[2] = -point[0] case "-y": new_point[0] = -point[1] match up: case "+x": new_point[1] = point[0] new_point[2] = point[2] case "-x": new_point[1] = -point[0] new_point[2] = -point[2] case "+z": new_point[1] = point[2] new_point[2] = -point[0] case "-z": new_point[1] = -point[2] new_point[2] = point[0] case "+z": new_point[0] = point[2] match up: case "+x": new_point[1] = point[0] new_point[2] = point[1] case "-x": new_point[1] = -point[0] new_point[2] = -point[1] case "+y": new_point[1] = point[1] new_point[2] = -point[0] case "-y": new_point[1] = -point[1] new_point[2] = point[0] case "-z": new_point[0] = -point[2] match up: case "+x": new_point[1] = point[0] new_point[2] = -point[1] case "-x": new_point[1] = -point[0] new_point[2] = point[1] case "+y": new_point[1] = point[1] new_point[2] = point[0] case "-y": new_point[1] = -point[1] new_point[2] = -point[0] return tuple(new_point) def translate(point, off): return point[0] + off[0], point[1] + off[1], point[2] + off[2] def untranslate(src, dest): return src[0] - dest[0], src[1] - dest[1], src[2] - dest[2] rotations = [ (sa + da, sb + db) for sa in "+-" for sb in "+-" for da in "xyz" for db in "xyz" if db != da ] def common_with_offset(reference, new, offset): return len(set(reference) & {translate(beacon, offset) for beacon in new}) def report_partial_match(reference, new): for reference_beacon in reference: for new_beacon in new: offset = untranslate(reference_beacon, new_beacon) if common_with_offset(reference, new, offset) >= 12: return offset def report_match(reference, new): for facing, up in rotations: transformed = {rotate(facing,up,point) for point in new} found = report_partial_match(reference, transformed) if found: return {translate(point, found) for point in transformed}, found def match_one(reports, fixed_reports, matched): for i, report in enumerate(reports): for other_report, _ in fixed_reports: found = report_match(other_report, report) if found: beacons, _ = found matched |= beacons fixed_reports.append(found) reports.pop(i) return def match(reports): matched, *to_match = reports fixed_reports = [(matched, (0,0,0))] matched = matched.copy() while to_match: print('finding match:',len(to_match),'left') match_one(to_match,fixed_reports,matched) return matched, fixed_reports def part1(path): data = [] with open(path) as input: reports = input.read().split("\n\n") data = list( set(tuple(extract_ints(line)) for line in report.splitlines()[1:]) for report in reports ) 
beacons, reports = match(data) return len(beacons)
# Program that asks for the radius of a circle, then calculates and prints its area.
radius = float(input('Enter the radius of the circle: '))
area = 3.14 * (radius ** 2)
print(f'The area of the circle is {area}')
from .builder import DATASETS
from .coco import CocoDataset


@DATASETS.register_module()
class SKUCocoDataset(CocoDataset):
    CLASSES = ('item', )
from distutils.core import setup

setup(
    name='GsFileLock',
    version='0.1.0',
    author='Evan Fosmark',
    author_email='me@evanfosmark.com',
    packages=['gsfilelock', 'gsfilelock.test'],
    url='https://github.com/JEdward7777/GsFileLock',
    license='LICENSE.txt',
    description='Google Storage File locking library',
    long_description=open('README.txt').read(),
)
import luz import torch import unittest.mock as mock class IntegerDataset(luz.Dataset): def __init__(self, n): x = torch.arange(start=0, end=n, step=1, dtype=torch.float) y = x ** 2 data = [luz.Data(x=_x, y=_y) for _x, _y in zip(x, y)] super().__init__(data) class DummyModel(luz.Model): def forward(self, x): pass class DummyLearner(luz.Learner): def model(self): pass def criterion(self): pass def optimizer(self, model): pass def fit_params(self): pass def test_cross_validation(): cv = luz.CrossValidation(num_folds=3, shuffle=False) dataset = IntegerDataset(n=15) m = DummyModel() m.fit = mock.MagicMock(return_value=0.0) m.test = mock.MagicMock(return_value=0.0) learner = DummyLearner() learner.learn = mock.MagicMock(return_value=m) cv.score(learner, dataset, "cpu") mock_calls = learner.learn.mock_calls fold_inds = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]] assert len(mock_calls) == 4 for i, (name, args, kwargs) in enumerate(mock_calls[:-1]): train_folds = kwargs["train_dataset"].datasets train_fold_inds = (x for j, x in enumerate(fold_inds) if j != i) assert len(train_folds) == 2 assert train_folds[0].indices == next(train_fold_inds) assert train_folds[1].indices == next(train_fold_inds) _, _, kwargs = learner.learn.mock_calls[-1] assert len(kwargs["train_dataset"]) == 15 cv = luz.CrossValidation(num_folds=3, shuffle=False) dataset = IntegerDataset(n=17) m = DummyModel() m.fit = mock.MagicMock(return_value=0.0) m.test = mock.MagicMock(return_value=0.0) learner = DummyLearner() learner.learn = mock.MagicMock(return_value=m) cv.score(learner, dataset, "cpu") mock_calls = learner.learn.mock_calls fold_inds = [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16]] assert len(mock_calls) == 4 for i, (name, args, kwargs) in enumerate(mock_calls[:-1]): train_folds = kwargs["train_dataset"].datasets train_fold_inds = (x for j, x in enumerate(fold_inds) if j != i) assert len(train_folds) == 2 assert train_folds[0].indices == next(train_fold_inds) assert train_folds[1].indices == next(train_fold_inds) _, _, kwargs = learner.learn.mock_calls[-1] assert len(kwargs["train_dataset"]) == 17 cv = luz.CrossValidation(num_folds=6, shuffle=False) dataset = IntegerDataset(n=17) m = DummyModel() m.fit = mock.MagicMock(return_value=0.0) m.test = mock.MagicMock(return_value=0.0) learner = DummyLearner() learner.learn = mock.MagicMock(return_value=m) cv.score(learner, dataset, "cpu") mock_calls = learner.learn.mock_calls fold_inds = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16]] assert len(mock_calls) == 7 for i, (name, args, kwargs) in enumerate(mock_calls[:-1]): train_folds = kwargs["train_dataset"].datasets train_fold_inds = (x for j, x in enumerate(fold_inds) if j != i) assert len(train_folds) == 5 assert train_folds[0].indices == next(train_fold_inds) assert train_folds[1].indices == next(train_fold_inds) assert train_folds[2].indices == next(train_fold_inds) assert train_folds[3].indices == next(train_fold_inds) assert train_folds[4].indices == next(train_fold_inds) _, _, kwargs = learner.learn.mock_calls[-1] assert len(kwargs["train_dataset"]) == 17
from nhps.distance.consensus.consensus_decoder import ConsensusDecoder
# -*- coding: utf-8 -*-
"""
wirexfers.utils
~~~~~~~~~~~~~~~

This module provides utility functions that are used within WireXfers,
but might be also useful externally.

:copyright: (c) 2012-2014 Priit Laes
:license: ISC, see LICENSE for more details.
"""
from Crypto.PublicKey import RSA


def load_key(path, password=None):
    """Import an RSA key (private or public half).

    :param string path: path to key half.
    :param password: password for private key.
    :type password: string or None
    :rtype: :py:class:`Crypto.PublicKey.RSA._RSAobj`
    """
    with open(path, 'r') as f:
        key = RSA.importKey(f.read(), password)
    if not key:
        raise RuntimeError('Invalid key file: "%s"\n' % path)
    return key


def ref_731(n):
    """Reference number calculator.

    Returns reference number calculated using the 7-3-1 algorithm used in
    Estonian banks: digits are weighted 7, 3, 1 from right to left and a
    single check digit is appended.

    :param string n: base number (client id, etc)
    :rtype: string
    """
    # range() replaces the Python 2-only xrange() used previously.
    weights = (7, 3, 1)
    total = sum(int(n[-i]) * weights[(i - 1) % 3]
                for i in range(1, len(n) + 1))
    return "%s%d" % (n, (10 - total) % 10)
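# Worked example (follows directly from the formula above):
#   ref_731("1234")
#   digits right-to-left are weighted 7, 3, 1, 7, ...:
#     4*7 + 3*3 + 2*1 + 1*7 = 46
#   check digit = (10 - 46) % 10 = 4, so the result is "12344".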
# author Serj Sintsov <ssivikt@gmail.com>, 2009, Public Domain
import sys
import time
import math

"""Output example: [######....] 75%
"""


def progress(start=0, bar_width=30):
    if start > 100:
        start = 100
    if start < 0:
        start = 0
    for percent in range(start, 101):
        marks = math.floor(bar_width * (percent / 100.0))
        spaces = math.floor(bar_width - marks)
        marks = int(marks)
        spaces = int(spaces)
        loader = '[' + ('#' * marks) + ('.' * spaces) + ']'
        yield " %s %d%%\r" % (loader, percent)
        if percent == 100:
            yield "\n"


# usage
bars = progress()
for i in bars:
    sys.stdout.write(i)
    sys.stdout.flush()
    time.sleep(0.05)
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn_anchor.py',
    '../_base_/datasets/coco_instance_multi_scale2.py',
    '../_base_/schedules/schedule_2x.py',
    '../_base_/default_runtime.py'
]
work_dir = '/data2/nsathish/results/work_dirs/mrcnn_r50_multi_scale_anchor_modified/'
from dataclasses import dataclass, asdict, field, fields

from uncertainties import ufloat

from Helper.numbers import print_unc

SI_unit_dict = {
    -2: 'c',
    -3: 'm',
    -6: '\u03BC',
    -9: 'n',
    0: ' ',
    3: 'k',
    6: 'M',
    9: 'G',
    12: 'T',
}
inv_SI_unit_dict = {k: i for i, k in SI_unit_dict.items()}
inv_SI_unit_dict.update({'u': -6})


@dataclass(eq=False)
class Unit:
    scale: str = field()
    unit: str = field()

    def __post_init__(self):
        self.e = inv_SI_unit_dict[self.scale]

    def __repr__(self):
        return self.scale + self.unit

    def __eq__(self, other):
        # Compare the base unit only; always return a bool (the original
        # implicitly returned None for unequal units).
        return self.unit == other.unit

    def __mul__(self, other):
        newscale = SI_unit_dict[self.e + other.e]
        # Join the two unit symbols with a single '*'. The original joined
        # over the concatenated string, which interleaved '*' between every
        # character.
        newunit = '*'.join([self.unit, other.unit])
        return Unit(newscale, newunit)


@dataclass(eq=False)
class Measurement:
    value: float = field()
    unc: float = field()
    unit: Unit = field()

    def __post_init__(self):
        self.ufloat = self.__ufloat__()
        self.interval = self.__interval__()

    def __ufloat__(self):
        return ufloat(self.value, self.unc) * \
            10**inv_SI_unit_dict[self.unit.scale]

    def __interval__(self):
        return [self.ufloat.n - self.ufloat.s, self.ufloat.n + self.ufloat.s]

    def __str__(self):
        val, unc, digs = print_unc(self.value, self.unc, False)
        return f'{val} +/- {unc} {self.unit}'

    def __eq__(self, other):
        # Two measurements are "equal" when their 1-sigma intervals overlap.
        # The original endpoint checks missed the case where one interval
        # fully contains the other.
        if self.unit != other.unit:
            return False
        return (self.interval[0] < other.interval[1]
                and other.interval[0] < self.interval[1])

    def __mul__(self, other):
        return Measurement(self.value * other.value,
                           (self.unc**2 + other.unc**2)**.5,
                           self.unit * other.unit)
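# --- Usage sketch (illustrative; the millimetre values are made up) ---
# a = Measurement(5.2, 0.1, Unit('m', 'm'))   # 5.2 +/- 0.1 mm
# b = Measurement(5.3, 0.2, Unit('m', 'm'))
# print(a)        # formatted via print_unc
# print(a == b)   # True: the 1-sigma intervals overlap
# print(a * b)    # combined value, unit becomes 'm*m' via Unit.__mul__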
""" Выполнить представление через множества и ленточное представления бинарного дерева, представленного на рис. 1 """ from tree_module import BinaryTree def main(): # Элемент на 1 уровне r = BinaryTree("8") # Элементы на 2 уровне r_1 = r.insert_left("4") r_2 = r.insert_right("12") # Элементы на 3 уровне r_11 = r_1.insert_left("2") r_12 = r_1.insert_right("6") r_21 = r_2.insert_left("10") r_22 = r_2.insert_right("14") # Добавление элементов на 4 уровень r_11.insert_left("1") r_11.insert_right("3") r_12.insert_left("5") r_12.insert_right("7") r_21.insert_left("9") r_21.insert_right("11") r_22.insert_left("13") r_22.insert_right("15") print("Реализованное дерево:") print(r) if __name__ == "__main__": main()
# AUTONA - UI automation server (Python 3)
# Copyright (C) 2021 Marco Alvarado
# Visit http://qaware.org


class Automator():

    def MovePointer(
            self,
            x, y,  # pixels
            smooth=True):
        return

    def ClickButton(
            self,
            button,
            toggle=False,
            down=True):
        return

    def TapKeys(
            self,
            keys,  # : array
            toggle=False,
            down=True):
        return

    def CaptureScreen(self):
        return None

    def ScreenSize(self):
        return (0, 0)
# -*- coding: utf-8 -*- from __future__ import unicode_literals, division, absolute_import from builtins import * # noqa pylint: disable=unused-import, redefined-builtin import pytest from flexget.api.app import base_message from flexget.api.plugins.trakt_lookup import ObjectsContainer as oc from flexget.utils import json @pytest.mark.online class TestTraktSeriesLookupAPI(object): config = 'tasks: {}' def test_trakt_series_lookup_no_params(self, api_client, schema_match): # Bad API call rsp = api_client.get('/trakt/series/') assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code rsp = api_client.get('/trakt/series/the x-files/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors values = { 'id': 4063, 'imdb_id': 'tt0106179', 'language': 'en', 'title': 'The X-Files', 'tmdb_id': 4087, 'tvdb_id': 77398, 'tvrage_id': 6312, 'year': 1993 } for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_year_param(self, api_client, schema_match): values = { 'id': 235, 'imdb_id': 'tt0098798', 'language': 'en', 'title': 'The Flash', 'tmdb_id': 236, 'tvdb_id': 78650, 'tvrage_id': 5781, 'year': 1990 } rsp = api_client.get('/trakt/series/the flash/?year=1990') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_trakt_slug_id_param(self, api_client, schema_match): values = { 'id': 75481, 'title': 'The Flash', 'tvdb_id': 272094, 'year': 1967 } rsp = api_client.get('/trakt/series/the flash/?trakt_slug=the-flash-1967') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_tmdb_id_param(self, api_client, schema_match): values = { 'id': 60300, 'imdb_id': 'tt3107288', 'title': 'The Flash', 'tmdb_id': 60735, 'tvdb_id': 279121, 'tvrage_id': 36939, 'year': 2014 } rsp = api_client.get('/trakt/series/the flash/?tmdb_id=60735') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_imdb_id_param(self, api_client): values = { 'id': 60300, 'imdb_id': 'tt3107288', 'title': 'The Flash', 'tmdb_id': 60735, 'tvdb_id': 279121, 'tvrage_id': 36939, 'year': 2014 } rsp = api_client.get('/trakt/series/the flash/?imdb_id=tt3107288') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_tvdb_id_param(self, api_client, schema_match): values = { 'id': 60300, 'imdb_id': 'tt3107288', 'title': 'The Flash', 'tmdb_id': 60735, 'tvdb_id': 279121, 'tvrage_id': 36939, 'year': 2014 } rsp = api_client.get('/trakt/series/the flash/?tvdb_id=279121') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = 
schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_tvrage_id_param(self, api_client, schema_match): values = { 'id': 60300, 'imdb_id': 'tt3107288', 'title': 'The Flash', 'tmdb_id': 60735, 'tvdb_id': 279121, 'tvrage_id': 36939, 'year': 2014 } rsp = api_client.get('/trakt/series/the flash/?tvrage_id=36939') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_trakt_id_param(self, api_client, schema_match): values = { 'id': 75481, 'title': 'The Flash', 'year': 1967 } rsp = api_client.get('/trakt/series/the flash/?trakt_id=75481') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors for field, value in values.items(): assert data.get(field) == value def test_trakt_series_lookup_with_actors_param(self, api_client, schema_match): rsp = api_client.get('/trakt/series/the x-files/?include_actors=true') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors assert 'actors' in data assert len(data['actors']) > 0 def test_trakt_series_lookup_with_translations_param(self, api_client, schema_match): rsp = api_client.get('/trakt/series/game of thrones/?include_translations=true') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.series_return_object, data) assert not errors assert 'translations' in data def test_trakt_series_lookup_error(self, api_client, schema_match): rsp = api_client.get('/trakt/series/sdfgsdfgsdfgsdfgsdfg/') assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(base_message, data) assert not errors @pytest.mark.online class TestTraktMovieLookupAPI(object): config = 'tasks: {}' def test_trakt_movies_lookup_no_params(self, api_client, schema_match): # Bad API call rsp = api_client.get('/trakt/movies/') assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code rsp = api_client.get('/trakt/movies/the matrix/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.movie_return_object, data) assert not errors values = { 'id': 481, 'title': 'The Matrix', 'year': 1999, 'tmdb_id': 603, 'imdb_id': 'tt0133093' } for field, value in values.items(): assert data.get(field) == value def test_trakt_movies_lookup_year_param(self, api_client, schema_match): rsp = api_client.get('/trakt/movies/the matrix/?year=2003') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.movie_return_object, data) assert not errors values = { 'id': 483, 'title': 'The Matrix Revolutions', 'year': 2003, 'tmdb_id': 605, 'imdb_id': 'tt0242653' } for field, value in values.items(): assert data.get(field) == value def test_trakt_movies_lookup_slug_param(self, api_client, schema_match): rsp = api_client.get('/trakt/movies/the 
matrix/?trakt_slug=the-matrix-reloaded-2003') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.movie_return_object, data) assert not errors values = { 'id': 482, 'title': 'The Matrix Reloaded', 'year': 2003, 'tmdb_id': 604, 'imdb_id': 'tt0234215' } for field, value in values.items(): assert data.get(field) == value def test_trakt_movies_lookup_actors_params(self, api_client, schema_match): rsp = api_client.get('/trakt/movies/the matrix/?include_actors=true') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.movie_return_object, data) assert not errors values = { 'id': 481, 'title': 'The Matrix', 'year': 1999, 'tmdb_id': 603, 'imdb_id': 'tt0133093' } for field, value in values.items(): assert data.get(field) == value assert 'actors' in data assert len(data['actors']) > 0 def test_trakt_movies_lookup_translations_params(self, api_client, schema_match): rsp = api_client.get('/trakt/movies/the matrix/?include_translations=true') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(oc.movie_return_object, data) assert not errors values = { 'id': 481, 'title': 'The Matrix', 'year': 1999, 'tmdb_id': 603, 'imdb_id': 'tt0133093' } for field, value in values.items(): assert data.get(field) == value assert 'translations' in data assert len(data['translations']) > 0 def test_trakt_movies_lookup_error(self, api_client, schema_match): rsp = api_client.get('/trakt/movies/sdfgsdfgsdfgsdfgsdfg/') assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(base_message, data) assert not errors
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from __future__ import print_function

import json
import os

import boto3
from botocore.exceptions import ClientError

# Lambda function that receives the Budget notification and subsequently
# triggers the action lambda (via Step Functions).
#
# Input parameters:
#   Notification from Budget (SNS event)
#   Environment variable: StateMachineArn
#   Environment variable: GroupName
#   Environment variable: DetachPolicyArn
#   Environment variable: AttachPolicyArn
#
# This function starts the execution of the configured Step Functions
# state machine.


def lambda_handler(event, context):
    try:
        # budgetNotification = event['Records'][0]['Sns']['Message']
        # print("Notification from Budget (via SNS): " + budgetNotification)

        sfnClient = boto3.client('stepfunctions')

        # Pass the policy ARNs and group name through to the state machine
        # as its execution input.
        inputs = {}
        inputs['AttachPolicyArn'] = os.environ['AttachPolicyArn']
        inputs['DetachPolicyArn'] = os.environ['DetachPolicyArn']
        inputs['GroupName'] = os.environ['GroupName']
        print(json.dumps(inputs))

        stateMachineArn = os.environ['StateMachineArn']
        response = sfnClient.start_execution(
            stateMachineArn=stateMachineArn,
            input=json.dumps(inputs)
        )
        print("Step Function Message ID: " + response['executionArn'])
        return 200
    except ClientError as e:
        print(e.response['Error']['Code'])
        if e.response['Error']['Code'] == 'NotFound':
            print("Incorrect Topic Arn. Could not find this topic")
            return e.response['ResponseMetadata']['HTTPStatusCode']
        else:
            print("Unexpected error: %s" % e)
            return 500
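
# A minimal local harness for the handler above: stub boto3 with
# unittest.mock so no AWS call is made. The module name `lambda_function`
# and every ARN value below are hypothetical, not part of the deployment.
#
#     import os
#     from unittest import mock
#
#     os.environ.update({
#         'StateMachineArn': 'arn:aws:states:us-east-1:123456789012:stateMachine:BudgetAction',
#         'GroupName': 'BudgetRestrictedGroup',
#         'DetachPolicyArn': 'arn:aws:iam::123456789012:policy/FullAccess',
#         'AttachPolicyArn': 'arn:aws:iam::123456789012:policy/DenyAll',
#     })
#
#     import lambda_function  # the module above, saved under this assumed name
#
#     with mock.patch.object(lambda_function, 'boto3') as fake_boto3:
#         fake_boto3.client.return_value.start_execution.return_value = {
#             'executionArn': 'arn:aws:states:us-east-1:123456789012:execution:test'
#         }
#         assert lambda_function.lambda_handler({}, None) == 200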
# Sliding-window scan: find the largest sum of a run of consecutive
# elements of b that does not exceed the budget m (assumes the usual
# competitive-programming input of positive integers).
n, m = map(int, input().split())
b = list(map(int, input().split()))

a = s = j = 0  # a: current window sum, j: window's left edge, s: best sum
for x in b:
    a += x
    # Shrink from the left until the window fits within the budget again.
    while a > m:
        a -= b[j]
        j += 1
    if a == m:  # exact hit: no larger valid sum exists, stop early
        s = a
        break
    s = max(s, a)
print(s)
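
# The window only ever moves forward: each element enters the window once
# and leaves at most once, so the scan is O(n). The same idea as a reusable
# function, with illustrative values (not from the original input):
def max_sum_at_most(b, m):
    """Largest sum of consecutive elements of b not exceeding m."""
    a = s = j = 0
    for x in b:
        a += x
        while a > m:
            a -= b[j]
            j += 1
        if a == m:
            return a
        s = max(s, a)
    return s


assert max_sum_at_most([4, 2, 5, 1], 8) == 8  # window 2 + 5 + 1
assert max_sum_at_most([4, 2, 5, 1], 7) == 7  # window 2 + 5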
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth.models import Permission

from .forms import UserCreationForm, UserChangeForm
from .models import User


# Register your models here.
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
    form = UserChangeForm
    add_form = UserCreationForm
    model = User
    fieldsets = (
        ('Personal info', {'fields': ('username', 'password', 'email')}),
        ('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser',
                                    'groups', 'user_permissions')}),
    )


admin.site.register(Permission)
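
# The two forms come from this app's .forms module, which is not shown here.
# A minimal sketch of what such forms typically look like for a custom user
# model; this is a hypothetical reconstruction, not the project's actual code:
#
#     from django.contrib.auth import forms as auth_forms
#
#     from .models import User
#
#     class UserCreationForm(auth_forms.UserCreationForm):
#         class Meta(auth_forms.UserCreationForm.Meta):
#             model = User
#             fields = ('username', 'email')
#
#     class UserChangeForm(auth_forms.UserChangeForm):
#         class Meta(auth_forms.UserChangeForm.Meta):
#             model = User
#             fields = ('username', 'email')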
import os
import sqlite3
import csv

from flask import Flask, flash, redirect, render_template, request, session, url_for
from flask_login import login_user, current_user, logout_user, login_required

from application import app, db, bcrypt
from application.models import Users
from application.forms import (RegistrationForm, LoginForm, UpdateAccountForm,
                               RequestResetForm, ResetPasswordForm, LicensePlate)
from application.handlers import error_403, error_404, error_500
from application.utils import send_reset_email, api_call, all_countries, get_the_news

# Commented-out list: no longer relevant since the government's last policy change.
# cantons = []
# with open('application/static/cantons.csv', newline='') as csv_file:
#     csv_reader = csv.reader(csv_file)
#     cantons = list(csv_reader)


@app.route("/", methods=["GET", "POST"])
@app.route("/home", methods=["GET", "POST"])
def home():
    # Use functions containing data from the API call for the select dropdown
    # on GET and for the user's query on POST.
    if request.method == "GET":
        countries = all_countries()
        cr_covid = api_call('CRI')
        return render_template('index.html', title='Home', cr_covid=cr_covid, countries=countries)
    cr_covid = api_call('cri')
    country = request.form.get('choose_country')
    country_covid = api_call(country)
    articles = get_the_news()
    return render_template('index.html', title='Home', cr_covid=cr_covid,
                           country_covid=country_covid, country=country, articles=articles)


@app.route("/register", methods=["GET", "POST"])
def register():
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user = Users(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash(f'Account {form.username.data} created! Now you can log in', 'primary')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)


@app.route("/login", methods=["GET", "POST"])
def login():
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = Users.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        else:
            flash('Invalid email and/or password', 'danger')
    return render_template('login.html', title='Login', form=form)


@app.route("/logout")
def logout():
    logout_user()
    return redirect(url_for('home'))


@app.route("/account", methods=["GET", "POST"])
@login_required
def account():
    form = UpdateAccountForm()
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('Account updated successfully!', 'primary')
        return redirect(url_for('account'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.email.data = current_user.email
    return render_template('account.html', title='Account Changes', form=form)


@app.route("/reset_password", methods=["GET", "POST"])
def reset_request():
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RequestResetForm()
    if form.validate_on_submit():
        user = Users.query.filter_by(email=form.email.data).first()
        send_reset_email(user)
        flash('An email with instructions has been sent to help you reset your password', 'primary')
        return redirect(url_for('login'))
    return render_template('reset_request.html', title='Reset Password', form=form)


@app.route("/reset_password/<token>", methods=["GET", "POST"])
def reset_token(token):
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    user = Users.verify_reset_token(token)
    if user is None:
        flash('Your token is invalid or expired', 'danger')
        return redirect(url_for('reset_request'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user.password = hashed_password
        db.session.commit()
        flash('Your password has been updated! Now you can log in', 'primary')
        return redirect(url_for('login'))
    return render_template('reset_token.html', title='Reset Password', form=form)


@app.route("/transportation", methods=["GET", "POST"])
@login_required
def transportation():
    form = LicensePlate()
    even = [0, 2, 4, 6, 8]
    odd = [1, 3, 5, 7, 9]
    if form.validate_on_submit():
        plate = int(form.digits.data)
        return render_template('transportation.html', title='Transportation',
                               plate=plate, form=form, even=even, odd=odd)
    return render_template('transportation.html', title='Transportation', form=form)


@app.route("/news")
@login_required
def news():
    news = get_the_news()
    return render_template('news.html', title='News', news=news)


@app.route("/stats", methods=["GET", "POST"])
@login_required
def stats():
    # Use functions containing data from the API call for the select dropdown
    # on GET and for the user's query on POST.
    if request.method == "GET":
        countries = all_countries()
        return render_template('stats.html', title='Stats', countries=countries)
    countries = all_countries()
    cr_covid = api_call('cri')
    country = request.form.get('choose_country')
    country_covid = api_call(country)
    return render_template('stats.html', title='Stats', cr_covid=cr_covid,
                           country_covid=country_covid, countries=countries, country=country)
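
# A minimal smoke test using Flask's built-in test client; a sketch assuming
# the application package is importable from the project root and that the
# underlying API helpers can reach the network:
if __name__ == '__main__':
    with app.test_client() as client:
        assert client.get('/home').status_code == 200
        # POST mirrors the select-dropdown field read by home() and stats().
        assert client.post('/home', data={'choose_country': 'USA'}).status_code == 200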
# -*- coding: utf-8 -*-
u"""Cliches are cliché."""
from proselint.tools import memoize, existence_check


@memoize
def check_cliches_garner(text):
    """Check the text.

    source:     Garner's Modern American Usage
    source_url: http://bit.ly/1T4alrY
    """
    err = "cliches.garner"
    msg = u"'{}' is cliché."
    cliches = [
        "a fate worse than death", "alas and alack", "at the end of the day",
        "bald-faced lie", "between a rock and a hard place",
        "between Scylla and Charybdis",
        "between the devil and the deep blue sea", "betwixt and between",
        "blissful ignorance", "blow a fuse", "bulk large",
        "but that's another story", "cast aspersions", "chase a red herring",
        "comparing apples and oranges", "compleat",
        "conspicuous by its absence", "crystal clear", "cutting edge",
        "decision-making process", "dubious distinction", "duly authorized",
        "eyes peeled", "far be it from me", "fast and loose", "fills the bill",
        "first and foremost", "for free", "get with the program",
        "gilding the lily", "have a short fuse", "he's got his hands full",
        "his own worst enemy", "his work cut out for him", "hither and yon",
        "Hobson's choice", "horns of a dilemma", "if you catch my drift",
        "in light of", "in the final analysis", "in the last analysis",
        "innocent bystander", "it's not what you know, it's who you know",
        "last but not least", "make a mockery of", "male chauvinism",
        "moment of truth", "more in sorrow than in anger",
        "more sinned against than sinning", "my better half", "nip in the bud",
        "olden days", "on the same page", "presidential timber",
        "pulled no punches", "quantum jump", "quantum leap",
        "redound to one's credit", "redound to the benefit of", "sea change",
        "shirked his duties", "six of one, half a dozen of the other",
        "stretched to the breaking point", "than you can shake a stick at",
        "the cream of the crop", "the cream rises to the top",
        "the straw that broke the camel's back", "thick as thieves",
        "thinking outside the box", "thought leaders?",
        "throw the baby out with the bathwater", "various and sundry",
        "viable alternative", "wax eloquent", "wax poetic",
        "we've got a situation here", "whet (?:the|your) appetite",
        "wool pulled over our eyes", "writ large",
    ]
    return existence_check(text, cliches, err, msg, join=True)


@memoize
def check_cliches_write_good(text):
    """Check the text.

    source:     write-good
    source_url: https://github.com/btford/write-good
    """
    err = "cliches.write_good"
    msg = u"'{}' is a cliché."
    cliches = [
        "a chip off the old block", "a clean slate", "a dark and stormy night",
        "a far cry", "a fine kettle of fish", "a loose cannon",
        "a penny saved is a penny earned", "a tough row to hoe",
        "a word to the wise", "ace in the hole", "acid test",
        "add insult to injury", "against all odds", "air your dirty laundry",
        "all fun and games", "all in a day's work", "all talk, no action",
        "all thumbs", "all your eggs in one basket",
        "all's fair in love and war", "all's well that ends well",
        "almighty dollar", "American as apple pie", "an axe to grind",
        "another day, another dollar", "armed to the teeth",
        "as luck would have it", "as old as time", "as the crow flies",
        "at loose ends", "at my wits end", "avoid like the plague",
        "babe in the woods", "back against the wall", "back in the saddle",
        "back to square one", "back to the drawing board", "bad to the bone",
        "badge of honor", "bald faced liar", "ballpark figure",
        "banging your head against a brick wall", "baptism by fire",
        "barking up the wrong tree", "bat out of hell", "be all and end all",
        "beat a dead horse", "beat around the bush", "been there, done that",
        "beggars can't be choosers", "behind the eight ball",
        "bend over backwards", "benefit of the doubt", "bent out of shape",
        "best thing since sliced bread", "bet your bottom dollar",
        "better half", "better late than never", "better mousetrap",
        "better safe than sorry", "between a rock and a hard place",
        "beyond the pale", "bide your time", "big as life", "big cheese",
        "big fish in a small pond", "big man on campus",
        "bigger they are the harder they fall", "bird in the hand",
        "bird's eye view", "birds and the bees",
        "birds of a feather flock together", "bit the hand that feeds you",
        "bite the bullet", "bite the dust",
        "bitten off more than he can chew", "black as coal", "black as pitch",
        "black as the ace of spades", "blast from the past", "bleeding heart",
        "blessing in disguise", "blind ambition", "blind as a bat",
        "blind leading the blind", "blood is thicker than water",
        "blood sweat and tears", "blow off steam", "blow your own horn",
        "blushing bride", "boils down to", "bolt from the blue",
        "bone to pick", "bored stiff", "bored to tears", "bottomless pit",
        "boys will be boys", "bright and early", "brings home the bacon",
        "broad across the beam", "broken record", "brought back to reality",
        "bull by the horns", "bull in a china shop", "burn the midnight oil",
        "burning question", "burning the candle at both ends",
        "burst your bubble", "bury the hatchet", "busy as a bee",
        "by hook or by crook", "call a spade a spade",
        "called onto the carpet", "calm before the storm", "can of worms",
        "can't cut the mustard", "can't hold a candle to",
        "case of mistaken identity", "cat got your tongue", "cat's meow",
        "caught in the crossfire", "caught red-handed", "checkered past",
        "chomping at the bit", "cleanliness is next to godliness",
        "clear as a bell", "clear as mud", "close to the vest",
        "cock and bull story", "cold shoulder", "come hell or high water",
        "cool as a cucumber", "cool, calm, and collected",
        "cost a king's ransom", "count your blessings", "crack of dawn",
        "crash course", "creature comforts",
        "cross that bridge when you come to it", "crushing blow",
        "cry like a baby", "cry me a river", "cry over spilt milk",
        "crystal clear", "curiosity killed the cat", "cut and dried",
        "cut through the red tape", "cut to the chase", "cute as a bugs ear",
        "cute as a button", "cute as a puppy", "cuts to the quick",
        "dark before the dawn", "day in, day out", "dead as a doornail",
        "devil is in the details", "dime a dozen", "divide and conquer",
        "dog and pony show", "dog days", "dog eat dog", "dog tired",
        "don't burn your bridges", "don't count your chickens",
        "don't look a gift horse in the mouth", "don't rock the boat",
        "don't step on anyone's toes", "don't take any wooden nickels",
        "down and out", "down at the heels", "down in the dumps",
        "down the hatch", "down to earth", "draw the line",
        "dressed to kill", "dressed to the nines", "drives me up the wall",
        "dull as dishwater", "dyed in the wool", "eagle eye",
        "ear to the ground", "early bird catches the worm",
        "easier said than done", "easy as pie", "eat your heart out",
        "eat your words", "eleventh hour", "even the playing field",
        "every dog has its day", "every fiber of my being",
        "everything but the kitchen sink", "eye for an eye",
        "face the music", "facts of life", "fair weather friend",
        "fall by the wayside", "fan the flames", "feast or famine",
        "feather your nest", "feathered friends", "few and far between",
        "fifteen minutes of fame", "filthy vermin", "fine kettle of fish",
        "fish out of water", "fishing for a compliment", "fit as a fiddle",
        "fit the bill", "fit to be tied", "flash in the pan",
        "flat as a pancake", "flip your lid", "flog a dead horse",
        "fly by night", "fly the coop", "follow your heart",
        "for all intents and purposes", "for the birds",
        "for what it's worth", "force of nature",
        "force to be reckoned with", "forgive and forget",
        "fox in the henhouse", "free and easy", "free as a bird",
        "fresh as a daisy", "full steam ahead", "fun in the sun",
        "garbage in, garbage out", "gentle as a lamb", "get a kick out of",
        "get a leg up", "get down and dirty", "get the lead out",
        "get to the bottom of", "get your feet wet", "gets my goat",
        "gilding the lily", "give and take", "go against the grain",
        "go at it tooth and nail", "go for broke", "go him one better",
        "go the extra mile", "go with the flow", "goes without saying",
        "good as gold", "good deed for the day",
        "good things come to those who wait", "good time was had by all",
        "good times were had by all", "greased lightning", "greek to me",
        "green thumb", "green-eyed monster", "grist for the mill",
        "growing like a weed", "hair of the dog", "hand to mouth",
        "happy as a clam", "happy as a lark", "hasn't a clue",
        "have a nice day", "have high hopes", "have the last laugh",
        "haven't got a row to hoe", "head honcho", "head over heels",
        "hear a pin drop", "heard it through the grapevine",
        "heart's content", "heavy as lead", "hem and haw", "high and dry",
        "high and mighty", "high as a kite", "hit paydirt",
        "hold your head up high", "hold your horses", "hold your own",
        "hold your tongue", "honest as the day is long",
        "horns of a dilemma", "horse of a different color",
        "hot under the collar", "hour of need", "I beg to differ",
        "icing on the cake", "if the shoe fits",
        "if the shoe were on the other foot", "in a jam", "in a jiffy",
        "in a nutshell", "in a pig's eye", "in a pinch", "in a word",
        "in hot water", "in the gutter", "in the nick of time",
        "in the thick of it", "in your dreams",
        "it ain't over till the fat lady sings", "it goes without saying",
        "it takes all kinds", "it takes one to know one",
        "it's a small world", "it's only a matter of time", "ivory tower",
        "Jack of all trades", "jockey for position", "jog your memory",
        "joined at the hip", "judge a book by its cover",
        "jump down your throat", "jump in with both feet",
        "jump on the bandwagon", "jump the gun", "jump to conclusions",
        "just a hop, skip, and a jump", "just the ticket",
        "justice is blind", "keep a stiff upper lip", "keep an eye on",
        "keep it simple, stupid", "keep the home fires burning",
        "keep up with the Joneses", "keep your chin up",
        "keep your fingers crossed", "kick the bucket",
        "kick up your heels", "kick your feet up", "kid in a candy store",
        "kill two birds with one stone", "kiss of death",
        "knock it out of the park", "knock on wood", "knock your socks off",
        "know him from Adam", "know the ropes", "know the score",
        "knuckle down", "knuckle sandwich", "knuckle under",
        "labor of love", "ladder of success", "land on your feet",
        "lap of luxury", "last but not least", "last hurrah",
        "last-ditch effort", "law of the jungle", "law of the land",
        "lay down the law", "leaps and bounds", "let sleeping dogs lie",
        "let the cat out of the bag", "let the good times roll",
        "let your hair down", "let's talk turkey", "letter perfect",
        "lick your wounds", "lies like a rug", "life's a bitch",
        "life's a grind", "light at the end of the tunnel",
        "lighter than a feather", "lighter than air", "like clockwork",
        "like father like son", "like taking candy from a baby",
        "like there's no tomorrow", "lion's share", "live and learn",
        "live and let live", "long and short of it", "long lost love",
        "look before you leap", "look down your nose",
        "look what the cat dragged in",
        "looking a gift horse in the mouth",
        "looks like death warmed over", "loose cannon", "lose your head",
        "lose your temper", "loud as a horn", "lounge lizard",
        "loved and lost", "low man on the totem pole", "luck of the draw",
        "luck of the Irish", "make hay while the sun shines",
        "make money hand over fist", "make my day",
        "make the best of a bad situation", "make the best of it",
        "make your blood boil", "man of few words", "man's best friend",
        "mark my words", "meaningful dialogue",
        "missed the boat on that one", "moment in the sun",
        "moment of glory", "moment of truth", "money to burn",
        "more power to you", "more than one way to skin a cat",
        "movers and shakers", "moving experience", "naked as a jaybird",
        "naked truth", "neat as a pin", "needle in a haystack",
        "needless to say", "neither here nor there", "never look back",
        "never say never", "nip and tuck", "nip it in the bud",
        "no guts, no glory", "no love lost", "no pain, no gain",
        "no skin off my back", "no stone unturned",
        "no time like the present", "no use crying over spilled milk",
        "nose to the grindstone", "not a hope in hell",
        "not a minute's peace", "not in my backyard",
        "not playing with a full deck", "not the end of the world",
        "not written in stone", "nothing to sneeze at",
        "nothing ventured nothing gained", "now we're cooking",
        "off the top of my head", "off the wagon", "off the wall",
        "old hat", "older and wiser", "older than dirt",
        "older than Methuselah", "on a roll", "on cloud nine",
        "on pins and needles", "on the bandwagon", "on the money",
        "on the nose", "on the rocks", "on the spot",
        "on the tip of my tongue", "on the wagon", "on thin ice",
        "once bitten, twice shy",
        "one bad apple doesn't spoil the bushel", "one born every minute",
        "one brick short", "one foot in the grave", "one in a million",
        "one red cent", "only game in town", "open a can of worms",
        "open and shut case", "open the flood gates",
        "opportunity doesn't knock twice", "out of pocket",
        "out of sight, out of mind",
        "out of the frying pan into the fire", "out of the woods",
        "out on a limb", "over a barrel", "over the hump",
        "pain and suffering", "pain in the", "panic button",
        "par for the course", "part and parcel", "party pooper",
        "pass the buck", "patience is a virtue", "pay through the nose",
        "penny pincher", "perfect storm", "pig in a poke", "pile it on",
        "pillar of the community", "pin your hopes on",
        "pitter patter of little feet", "plain as day",
        "plain as the nose on your face", "play by the rules",
        "play your cards right", "playing the field", "playing with fire",
        "pleased as punch", "plenty of fish in the sea",
        "point with pride", "poor as a church mouse",
        "pot calling the kettle black", "pretty as a picture",
        "pull a fast one", "pull your punches", "pulling your leg",
        "pure as the driven snow", "put it in a nutshell",
        "put one over on you", "put the cart before the horse",
        "put the pedal to the metal", "put your best foot forward",
        "put your foot down", "quick as a bunny", "quick as a lick",
        "quick as a wink", "quick as lightning", "quiet as a dormouse",
        "rags to riches", "raining buckets", "raining cats and dogs",
        "rank and file", "rat race", "reap what you sow", "red as a beet",
        "red herring", "reinvent the wheel", "rich and famous",
        "rings a bell", "ripe old age", "ripped me off", "rise and shine",
        "road to hell is paved with good intentions",
        "rob Peter to pay Paul", "roll over in the grave",
        "rub the wrong way", "ruled the roost", "running in circles",
        "sad but true", "sadder but wiser", "salt of the earth",
        "scared stiff", "scared to death", "sealed with a kiss",
        "second to none", "see eye to eye", "seen the light",
        "seize the day", "set the record straight",
        "set the world on fire", "set your teeth on edge",
        "sharp as a tack", "shoot for the moon", "shoot the breeze",
        "shot in the dark", "shoulder to the wheel", "sick as a dog",
        "sigh of relief", "signed, sealed, and delivered", "sink or swim",
        "six of one, half a dozen of another", "skating on thin ice",
        "slept like a log", "slinging mud", "slippery as an eel",
        "slow as molasses", "smart as a whip",
        "smooth as a baby's bottom", "sneaking suspicion",
        "snug as a bug in a rug", "sow wild oats",
        "spare the rod, spoil the child", "speak of the devil",
        "spilled the beans", "spinning your wheels", "spitting image of",
        "spoke with relish", "spread like wildfire", "spring to life",
        "squeaky wheel gets the grease",
        "stands out like a sore thumb", "start from scratch",
        "stick in the mud", "still waters run deep", "stitch in time",
        "stop and smell the roses", "straight as an arrow",
        "straw that broke the camel's back", "strong as an ox",
        "stubborn as a mule", "stuff that dreams are made of",
        "stuffed shirt", "sweating blood", "sweating bullets",
        "take a load off", "take one for the team", "take the bait",
        "take the bull by the horns", "take the plunge",
        "takes one to know one", "takes two to tango",
        "the more the merrier", "the real deal", "the real McCoy",
        "the red carpet treatment", "the same old story",
        "there is no accounting for taste", "thick as a brick",
        "thick as thieves", "thin as a rail", "think outside of the box",
        "third time's the charm", "this day and age",
        "this hurts me worse than it hurts you", "this point in time",
        "three sheets to the wind", "through thick and thin",
        "throw in the towel", "tie one on", "tighter than a drum",
        "time and time again", "time is of the essence",
        "tip of the iceberg", "tired but happy", "to coin a phrase",
        "to each his own", "to make a long story short",
        "to the best of my knowledge", "toe the line", "tongue in cheek",
        "too good to be true", "too hot to handle",
        "too numerous to mention", "touch with a ten foot pole",
        "tough as nails", "trial and error", "trials and tribulations",
        "tried and true", "trip down memory lane", "twist of fate",
        "two cents worth", "two peas in a pod", "ugly as sin",
        "under the counter", "under the gun", "under the same roof",
        "under the weather", "until the cows come home",
        "unvarnished truth", "up the creek", "uphill battle",
        "upper crust", "upset the applecart", "vain attempt",
        "vain effort", "vanquish the enemy", "vested interest",
        "waiting for the other shoe to drop", "wakeup call",
        "warm welcome", "watch your p's and q's", "watch your tongue",
        "watching the clock", "water under the bridge",
        "weather the storm", "weed them out", "week of Sundays",
        "went belly up", "wet behind the ears",
        "what goes around comes around",
        "what you see is what you get", "when it rains, it pours",
        "when push comes to shove", "when the cat's away",
        "when the going gets tough, the tough get going",
        "white as a sheet", "whole ball of wax", "whole hog",
        "whole nine yards", "wild goose chase",
        "will wonders never cease?", "wisdom of the ages",
        "wise as an owl", "wolf at the door", "words fail me",
        "work like a dog", "world weary", "worst nightmare",
        "worth its weight in gold", "wrong side of the bed",
        "yanking your chain", "yappy as a dog", "years young",
        "you are what you eat", "you can run but you can't hide",
        "you only live once", "you're the boss", "young and foolish",
        "young and vibrant",
    ]
    return existence_check(text, cliches, err, msg, join=True)


@memoize
def check_cliches_gnu_diction(text):
    """Check the text.

    source:     GNU diction
    source_url: https://directory.fsf.org/wiki/Diction
    """
    err = "cliches.gnu_diction"
    msg = u"'{}' is a cliché."
    cliches = [
        "a matter of concern", "all things being equal", "as a last resort",
        "attached hereto", "by no means", "conspicuous by its absence",
        "easier said than done", "enclosed herewith", "if and when",
        "in reference to", "in short supply", "in the foreseeable future",
        "in the long run", "in the matter of", "it stands to reason",
        "many and diverse", "on the right track", "par for the course",
        "please feel free to", "pursuant to your request",
        "regarding the matter of", "slowly but surely",
        "this will acknowledge", "we are pleased to advice",
        "we regret to inform you", "we wish to state",
        "you are hereby advised that",
    ]
    return existence_check(text, cliches, err, msg, join=True, ignore_case=True)