import sys, os
import pandas as pd
from FlokAlgorithmLocal import FlokDataFrame, FlokAlgorithmLocal
import cv2
import json
import pyarrow
import time
class Batch_ImageWrite(FlokAlgorithmLocal):
def run(self, inputDataSets, params):
        # `path` is passed in from the front end: an absolute path to an existing folder into which the images are written.
path=params['path']
image_dict = inputDataSets.get(0)
if "hdfs://" not in path:
if not os.path.exists(path):
                raise Exception("The output folder does not exist")
            # Write every image in the dict to the folder; each file is named "current time + original file name".
for image_name,image in image_dict.items():
# file_name=os.path.join(path,time.strftime("image_%Y-%m-%d_%H:%M:%S_")+str(i))
new_name=time.strftime('%Y_%m_%d_%H_%M_%S_')+image_name
file_name=os.path.join(path,new_name)
cv2.imwrite(file_name,image)
else:
            # path looks like hdfs://<host>:<port>/<dir>; split out host, port and directory
            ip = path.split(':')[1][2:]
            tmp = path.split(':')[2]
            port = tmp[:tmp.index('/')]
            file_path = tmp[tmp.index('/'):]
hdfs = pyarrow.hdfs.connect(host=ip, port=int(port))
if not hdfs.exists(file_path):
                raise Exception("The output folder does not exist")
            # Write the files to a local temporary folder first
folder_name = file_path.split('/')[-1]
            local_tmp_path = "/tmp/flok-tmp/" + folder_name
            os.makedirs(local_tmp_path, exist_ok=True)  # create parent dirs as needed; tolerate an existing folder
for image_name,image in image_dict.items():
new_name=time.strftime('%Y_%m_%d_%H_%M_%S_')+image_name
file_name=os.path.join(local_tmp_path,new_name)
cv2.imwrite(file_name,image)
            # Copy the local folder to HDFS. The '/*' suffix is required; without it the whole folder is copied into path as a subfolder.
cmd = "hadoop fs -cp file://%s %s" % (local_tmp_path+'/*',path)
os.system(cmd)
            # Remove the local temporary files
cmd = "rm -r " + local_tmp_path
os.system(cmd)
result = FlokDataFrame()
return result
if __name__ == "__main__":
all_info = json.loads(sys.argv[1])
# all_info = {
# "input": ["/tmp/flok/abcd"],
# "inputFormat": ["jpg"],
# "inputLocation":["local_fs"],
# "output": [""],
# "outputFormat": [""],
# "outputLocation": [""],
# "parameters": {"path": "hdfs://172.16.244.5:9000/图片输出",}
# }
params = all_info["parameters"]
inputPaths = all_info["input"]
inputTypes = all_info["inputFormat"]
inputLocation = all_info["inputLocation"]
outputPaths = all_info["output"]
outputTypes = all_info["outputFormat"]
outputLocation = all_info["outputLocation"]
algorithm = Batch_ImageWrite()
dataSet = algorithm.read(inputPaths,inputTypes,inputLocation,outputPaths,outputTypes)
result = algorithm.run(dataSet, params)
algorithm.write(outputPaths,result,outputTypes,outputLocation)
|
"""Print 'Hello world' to the terminal.
RST uses single backticks or back-quotes for various
things including interpreted text roles and references.
Here `example is missing a closing backtick.
That is considered to be an error, and should fail::
$ flake8 --select RST RST215/backticks.py
RST215/backticks.py:7:1: RST215 Inline interpreted text or phrase reference start-string without end-string.
"""
print("Hello world")
|
#!/usr/bin/env python
"""
Performs 1D scans over all of the systematics in a pipeline (or multiple
pipelines) and saves the output. This is to check their likelihood spaces.
"""
from __future__ import absolute_import
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import OrderedDict
from os.path import expanduser, expandvars, isfile, join
from pisa.analysis.analysis import Analysis
from pisa.core.distribution_maker import DistributionMaker
from pisa.utils.fileio import from_file, to_file, mkdir
from pisa.utils.log import logging, set_verbosity
__author__ = 'S. Wren, T. Ehrhardt, J. Lanfranchi'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
def parse_args():
"""Parse command line arguments and return as a dict.
Returns
-------
kwargs
"""
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--template-settings',
metavar='CONFIGFILE', required=True, action='append',
help='''Settings for generating template distributions; repeat
this option to define multiple pipelines.'''
)
parser.add_argument(
'--steps', type=int, required=True,
help='''Provide a number of steps to scan the likelihood space over.'''
)
parser.add_argument(
'--hypo-param-selections', type=str, nargs='+', required=False,
help='''Selection of params to use in order to generate the
hypothesised Asimov distributions.'''
)
parser.add_argument(
'--outdir', required=True,
metavar='DIR', type=str,
help='Directory into which to store results.'
)
parser.add_argument(
'--minimizer-settings', type=str,
metavar='JSONFILE', required=True,
help='''Settings related to the minimizer used in the LLR analysis.'''
)
parser.add_argument(
'--metric', type=str,
choices=['llh', 'chi2', 'conv_llh', 'mod_chi2'], required=True,
        help='''Metric to use when comparing the hypothesis to the data distribution at each scan point.'''
)
parser.add_argument(
'--debug-mode', type=int, choices=[0, 1, 2], required=False, default=1,
help='''How much information to keep in the output file. 0 for only
essentials for a physics analysis, 1 for more minimizer history, 2 for
whatever can be recorded.'''
)
parser.add_argument(
'-v', action='count', default=None,
help='set verbosity level'
)
kwargs = vars(parser.parse_args())
set_verbosity(kwargs.pop('v'))
return kwargs
def scan_allsyst(template_settings, steps, hypo_param_selections, outdir,
minimizer_settings, metric, debug_mode):
"""Scan (separately) all systematics (i.e., non-fixed params).
Parameters
----------
template_settings
steps
hypo_param_selections
outdir
minimizer_settings
metric
debug_mode
Returns
-------
    results : dict
Keys are param names, values are the scan results
"""
outdir = expanduser(expandvars(outdir))
mkdir(outdir, warn=False)
hypo_maker = DistributionMaker(template_settings)
hypo_maker.select_params(hypo_param_selections)
data_dist = hypo_maker.get_outputs(return_sum=True)
minimizer_settings = from_file(minimizer_settings)
analysis = Analysis()
results = OrderedDict() # pylint: disable=redefined-outer-name
for param in hypo_maker.params:
if param.is_fixed:
continue
logging.info('Scanning %s', param.name)
nominal_value = param.value
outfile = join(
outdir,
'{:s}_{:d}_steps_{:s}_scan.json'.format(param.name, steps,
metric)
)
if isfile(outfile):
raise IOError('`outfile` "{}" exists, not overwriting.'
.format(outfile))
results[param.name] = analysis.scan(
data_dist=data_dist,
hypo_maker=hypo_maker,
hypo_param_selections=hypo_param_selections,
metric=metric,
param_names=param.name,
steps=steps,
only_points=None,
outer=True,
profile=False,
minimizer_settings=minimizer_settings,
outfile=outfile,
debug_mode=debug_mode
)
to_file(results[param.name], outfile)
param.value = nominal_value
logging.info('Done scanning param "%s"', param.name)
logging.info('Done.')
return results
def main():
"""Run scan_allsyst with arguments from command line"""
return scan_allsyst(**parse_args())
if __name__ == '__main__':
results = main() # pylint: disable=invalid-name
|
from operator import itemgetter
from rdkit import Chem
"""
create new RDKIT residue
mi = Chem.AtomPDBResidueInfo()
mi.SetResidueName('MOL')
mi.SetResidueNumber(1)
mi.SetOccupancy(0.0)
mi.SetTempFactor(0.0)
source: https://sourceforge.net/p/rdkit/mailman/message/36404394/
"""
from collections import namedtuple
PDBAtomInfo = namedtuple('PDBAtomInfo', "name resName resNum chain")
mini_periodic_table = {
1:'H', 2:'He',
3:'Li', 4:'Be', 5:'B', 6:'C', 7:'N', 8:'O', 9:'F', 10:'Ne',
11:'Na', 12:'Mg', 13:'Al', 14:'Si', 15:'P', 16:'S', 17:'Cl', 18:'Ar',
19:'K', 20:'Ca', 21:'Sc', 22:'Ti', 23:'V', 24:'Cr', 25:'Mn', 26:'Fe', 27:'Co', 28:'Ni', 29:'Cu', 30:'Zn',
31:'Ga', 32:'Ge', 33:'As', 34:'Se', 35:'Br', 36:'Kr',
37:'Rb', 38:'Sr', 39:'Y', 40:'Zr', 41:'Nb', 42:'Mo', 43:'Tc', 44:'Ru', 45:'Rh', 46:'Pd', 47:'Ag', 48:'Cd',
49:'In', 50:'Sn', 51:'Sb', 52:'Te', 53:'I', 54:'Xe',
55:'Cs', 56:'Ba',
57:'La', 58:'Ce', 59:'Pr', 60:'Nd', 61:'Pm', 62:'Sm', 63:'Eu', 64:'Gd', 65:'Tb', 66:'Dy', 67:'Ho', 68:'Er', 69:'Tm', 70:'Yb',
71:'Lu', 72:'Hf', 73:'Ta', 74:'W', 75:'Re', 76:'Os', 77:'Ir', 78:'Pt', 79:'Au', 80:'Hg',
81:'Tl', 82:'Pb', 83:'Bi', 84:'Po', 85:'At', 86:'Rn',
87:'Fr', 88:'Ra'
}
def getPdbInfoNoNull(atom):
"""extract information for populating an ATOM/HETATM line
in the PDB"""
# res = atom.GetResidue()
minfo = atom.GetMonomerInfo()
if minfo is None:
atomic_number = atom.GetAtomicNum()
if atomic_number == 0:
name = '%-2s' % '*'
else:
name = '%-2s' % mini_periodic_table[atomic_number]
chain = ' '
resNum = 1
resName = 'UNL'
else:
name = minfo.GetName()
chain = minfo.GetChainId()
resNum = minfo.GetResidueNumber()
resName = minfo.GetResidueName()
return PDBAtomInfo(name=name, resName=resName, resNum=resNum, chain=chain)
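# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates the residue-info recipe from the note near the top of this file:
# attach a Chem.AtomPDBResidueInfo to every atom of a small molecule and read
# it back with getPdbInfoNoNull. The SMILES string is an arbitrary example.
def _example_pdb_info():
    mol = Chem.MolFromSmiles('CCO')
    for atom in mol.GetAtoms():
        mi = Chem.AtomPDBResidueInfo()
        mi.SetResidueName('MOL')
        mi.SetResidueNumber(1)
        mi.SetOccupancy(0.0)
        mi.SetTempFactor(0.0)
        atom.SetMonomerInfo(mi)
    # each entry is a PDBAtomInfo namedtuple (name, resName, resNum, chain)
    return [getPdbInfoNoNull(atom) for atom in mol.GetAtoms()]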
class Mol2MolSupplier():
""" RDKit Mol2 molecule supplier.
    Parameters
        filename: path of the (multi-molecule) Mol2 file
        sanitize: perform RDKit sanitization of each Mol2 molecule
        removeHs: remove explicit hydrogens
        cleanupSubstructures: perform RDKit Mol2 substructure cleanup"""
def __init__(self, filename, sanitize=True, removeHs=False, cleanupSubstructures=True):
self.fp = open(filename, 'r')
self._opts = {'sanitize':sanitize,
'removeHs':removeHs,
'cleanupSubstructures':cleanupSubstructures }
self.buff = []
def __iter__(self):
return self
def __next__(self):
""" iterator step """
while True:
line = self.fp.readline()
# empty line
if not line:
if len(self.buff):
# buffer full, returning last molecule
mol=Chem.MolFromMol2Block("".join(self.buff), **self._opts)
self.buff = []
return mol
# buffer empty, stopping the iteration
self.fp.close()
raise StopIteration
if '@<TRIPOS>MOLECULE' in line:
# first molecule parsed
if len(self.buff)==0:
self.buff.append(line)
else:
# found the next molecule, breaking to return the complete one
break
else:
# adding another line in the current molecule
self.buff.append(line)
# found a complete molecule, returning it
mol=Chem.MolFromMol2Block("".join(self.buff), **self._opts)
self.buff = [line]
return mol
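# --- Hedged usage sketch (not part of the original module) -------------------
# Iterate a multi-molecule Mol2 file with the supplier above; 'ligands.mol2'
# is a placeholder path. Records that RDKit fails to parse come back as None.
def _example_mol2_iteration(path='ligands.mol2'):
    names = []
    for mol in Mol2MolSupplier(path, sanitize=True):
        if mol is None:
            continue
        names.append(mol.GetProp('_Name') if mol.HasProp('_Name') else '')
    return names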
class HJKRingDetection(object):
"""Implementation of the Hanser-Jauffret-Kaufmann exhaustive ring detection
algorithm:
ref:
Th. Hanser, Ph. Jauffret, and G. Kaufmann
J. Chem. Inf. Comput. Sci. 1996, 36, 1146-1152
"""
def __init__(self, mgraph):
self.mgraph = {key: [x for x in values] for (key, values) in mgraph.items()}
self.rings = []
self._iterations = 0
def scan(self):
"""run the full protocol for exhaustive ring detection"""
self.prune()
self.build_pgraph()
self.vertices = self._get_sorted_vertices()
while self.vertices:
self._remove_vertex(self.vertices[0])
output_rings = []
for ring in self.rings:
output_rings.append(tuple(ring[:-1]))
return output_rings
def _get_sorted_vertices(self):
"""function to return the vertices to be removed, sorted by increasing
connectivity order (see paper)"""
vertices = ((k, len(v)) for k, v in self.mgraph.items())
return [x[0] for x in sorted(vertices, key=itemgetter(1))]
def prune(self):
"""iteratively prune graph until there are no nodes with only one
connection"""
while True:
prune = []
for node, neighbors in self.mgraph.items():
if len(neighbors) == 1:
prune.append((node, neighbors))
if len(prune) == 0:
break
for node, neighbors in prune:
self.mgraph.pop(node)
for n in neighbors:
self.mgraph[n].remove(node)
def build_pgraph(self, prune=True):
"""convert the M-graph (molecular graph) into the P-graph (path/bond graph)"""
self.pgraph = []
for node, neigh in self.mgraph.items():
for n in neigh:
# use sets for unique id
edge = set((node, n))
if not edge in self.pgraph:
self.pgraph.append(edge)
# re-convert the edges to lists because order matters in cycle detection
self.pgraph = [list(x) for x in self.pgraph]
def _remove_vertex(self, vertex):
"""remove a vertex and join all edges connected by that vertex (this is
the REMOVE function from the paper)
"""
visited = {}
remove = []
pool = []
for path in self.pgraph:
if self._has_vertex(vertex, path):
pool.append(path)
for i, path1 in enumerate(pool):
for j, path2 in enumerate(pool):
if i == j:
continue
self._iterations += 1
pair_id = tuple(set((i, j)))
if pair_id in visited:
continue
visited[pair_id] = None
common = list(set(path1) & set(path2))
common_count = len(common)
                # check if the two paths have only this vertex in common
                # (or two vertices, if together they close a cycle)
if not 1 <= common_count <= 2:
continue
# generate the joint path
joint_path = self._concatenate_path(path1, path2, vertex)
is_ring = joint_path[0] == joint_path[-1]
                # if the paths share exactly two vertices but do not close a ring, skip
if (common_count == 2) and not is_ring:
continue
# store the ring...
if is_ring:
self._add_ring(joint_path)
# ...or the common path
elif not joint_path in self.pgraph:
self.pgraph.append(joint_path)
# remove used paths
for p in pool:
self.pgraph.remove(p)
# remove the used vertex
self.vertices.remove(vertex)
def _add_ring(self, ring):
"""add newly found rings to the list (if not already there)"""
r = set(ring)
for candidate in self.rings:
if r == set(candidate):
return
self.rings.append(ring)
def _has_vertex(self, vertex, edge):
"""check if the vertex is part of this edge, and if true, return the
sorted edge so that the vertex is the first in the list"""
if edge[0] == vertex:
return edge
if edge[-1] == vertex:
return edge[::-1]
return None
def _concatenate_path(self, path1, path2, v):
"""concatenate two paths sharing a common vertex
a-b, c-b => a-b-c : idx1=1, idx2=1
b-a, c-b => a-b-c : idx1=0, idx2=1
a-b, b-c => a-b-c : idx1=1, idx2=0
b-a, b-c => a-b-c : idx1=0, idx2=0
"""
if not path1[-1] == v:
path1.reverse()
if not path2[0] == v:
path2.reverse()
return path1 + path2[1:]
def _edge_in_pgraph(self, edge):
"""check if edge is already in pgraph"""
e = set(edge)
for p in self.pgraph:
if e == set(p) and len(p) == len(edge):
return True
return False
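# --- Hedged usage sketch (not part of the original module) -------------------
# Run the exhaustive ring detection on a hand-built molecular graph
# (cyclohexane as an adjacency dict: vertex -> list of neighbours).
def _example_ring_detection():
    cyclohexane = {0: [1, 5], 1: [0, 2], 2: [1, 3],
                   3: [2, 4], 4: [3, 5], 5: [4, 0]}
    rings = HJKRingDetection(cyclohexane).scan()
    return rings  # expected: a single 6-membered ring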
|
from os import name
import pandas as pd
import numpy as np
from score_table.table import Team, ScoreTable, HistoricScoreTable
from score_table.create import score_season
from team_value import set_value
if __name__ == '__main__':
df = pd.read_csv('../data/england-transformed.csv')
    epl_df = pd.read_csv('../data/epl-2020.csv')  # English Premier League 2020 match dataframe
score_season_2018 = score_season(df, 2018)
score_season_2019 = score_season(df, 2019)
    def get_table_position(team, score_table):
        # teams absent from that season's table (e.g. newly promoted sides)
        # are placed one position below last, i.e. 21
        if not score_table.team_exists(team):
            return 21
position = score_table.get_position(team)
return position
# Home and visitor position in the last season
home_last_table_position = []
visitor_last_table_position = []
# Home and visitor position in the penultimate season
home_penultimate_table_position = []
visitor_penultimate_table_position = []
for index, row in epl_df.iterrows():
home, visitor = row.home, row.visitor
home_last_table_position.append(get_table_position(home, score_season_2019))
visitor_last_table_position.append(get_table_position(visitor, score_season_2019))
home_penultimate_table_position.append(get_table_position(home, score_season_2018))
visitor_penultimate_table_position.append(get_table_position(visitor, score_season_2018))
epl_df['home_last_table_position'] = home_last_table_position
epl_df['visitor_last_table_position'] = visitor_last_table_position
epl_df['home_penultimate_table_position'] = home_penultimate_table_position
epl_df['visitor_penultimate_table_position'] = visitor_penultimate_table_position
print(epl_df.head())
print(epl_df.tail())
epl_df.to_csv('../data/england-transformed-2020.csv', index=False)
wp = rp = '../data/england-transformed-2020.csv'
set_value(wp, rp)
|
"""
A scaled down version of the Brunel model useful for testing (see OMV files: .test.*)
"""
from brunel08 import runBrunelNetwork
from pyNN.utility import get_script_args
simulator_name = get_script_args(1)[0]
simtime = 1000
order = 100
eta = 2.0 # rel rate of external input
g = 5.0
runBrunelNetwork(g=g, eta=eta, simtime=simtime, order=order, save=True, simulator_name=simulator_name, N_rec=500)
|
"""
.. module:: COptimizerPGD
:synopsis: Optimizer using Projected Gradient Descent
.. moduleauthor:: Battista Biggio <battista.biggio@unica.it>
.. moduleauthor:: Ambra Demontis <ambra.demontis@unica.it>
"""
from secml.array import CArray
from secml.optim.optimizers import COptimizer
class COptimizerPGD(COptimizer):
"""Solves the following problem:
min f(x)
s.t. d(x,x0) <= dmax
x_lb <= x <= x_ub
f(x) is the objective function (either linear or nonlinear),
d(x,x0) <= dmax is a distance constraint in feature space (l1 or l2),
and x_lb <= x <= x_ub is a box constraint on x.
The solution algorithm is based on the classic gradient descent algorithm.
Attributes
----------
class_type : 'pgd'
"""
__class_type = 'pgd'
def __init__(self, fun,
constr=None,
bounds=None,
eta=1e-3,
eps=1e-4,
max_iter=200):
COptimizer.__init__(self, fun=fun,
constr=constr, bounds=bounds)
# Read/write attributes
self.eta = eta # gradient step size
self.max_iter = max_iter # maximum number of iterations
self.eps = eps # tolerance value for stop criterion
###########################################################################
# READ/WRITE ATTRIBUTES
###########################################################################
@property
def eta(self):
"""Return gradient descent step"""
return self._eta
@eta.setter
def eta(self, value):
"""Set gradient descent step"""
self._eta = float(value)
@property
    def max_iter(self):
        """Return the maximum number of gradient descent iterations"""
return self._max_iter
@max_iter.setter
    def max_iter(self, value):
        """Set the maximum number of gradient descent iterations"""
self._max_iter = int(value)
@property
def eps(self):
"""Return tolerance value for stop criterion"""
return self._eps
@eps.setter
def eps(self, value):
"""Set tolerance value for stop criterion"""
self._eps = float(value)
#############################################
# METHODS
#############################################
    def _return_best_solution(self, i):
        """Return the best solution among those found so far.
Parameters
----------
i : int
Index of the current iteration.
Returns
-------
x_opt : CArray
Best point found so far.
"""
f_seq = self.f_seq[:i]
best_sol_idx = f_seq.argmin()
self._x_seq = self.x_seq[:best_sol_idx + 1, :]
self._f_seq = self.f_seq[:best_sol_idx + 1]
self._x_opt = self._x_seq[-1, :]
return self._x_opt
def minimize(self, x_init, args=(), **kwargs):
"""Interface to minimizers.
Implements:
min fun(x)
s.t. constraint
Parameters
----------
x_init : CArray
The initial input point.
args : tuple, optional
Extra arguments passed to the objective function and its gradient.
Returns
-------
f_seq : CArray
Array containing values of f during optimization.
x_seq : CArray
Array containing values of x during optimization.
"""
if len(kwargs) != 0:
raise ValueError(
"{:} does not accept additional parameters.".format(
self.__class__.__name__))
# reset fun and grad eval counts for both fun and f (by default fun==f)
self._f.reset_eval()
self._fun.reset_eval()
x = x_init.deepcopy()
if self.constr is not None and self.constr.is_violated(x):
x = self.constr.projection(x)
if self.bounds is not None and self.bounds.is_violated(x):
x = self.bounds.projection(x)
self._x_seq = CArray.zeros((self._max_iter, x.size))
self._f_seq = CArray.zeros(self._max_iter)
i = 0
for i in range(self._max_iter):
self._x_seq[i, :] = x
self._f_seq[i] = self._fun.fun(x, *args)
if i > 0 and abs(self.f_seq[i - 1] - self.f_seq[i]) < self.eps:
self.logger.debug("Flat region, exiting... {:} {:}".format(
self._f_seq[i], self._f_seq[i - 1]))
return self._return_best_solution(i)
if i > 6 and self.f_seq[-3:].mean() < self.f_seq[-6:-3].mean():
self.logger.debug(
"Decreasing function, exiting... {:} {:}".format(
self.f_seq[-3:].mean(), self.f_seq[-6:-3].mean()))
return self._return_best_solution(i)
grad = self._fun.gradient(x, *args)
# debugging information
            self.logger.debug(
                'Iter.: ' + str(i) + ', x: ' + str(x) + ', f(x): ' +
                str(self._f_seq[i]) + ', |g(x)|_2: ' + str(grad.norm()))
# make a step into the deepest descent direction
x -= self.eta * grad
# project x onto the feasible domain
if self.constr is not None and self.constr.is_violated(x):
x = self.constr.projection(x)
if self.bounds is not None and self.bounds.is_violated(x):
x = self.bounds.projection(x)
return self._return_best_solution(i)
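# --- Hedged reference sketch (not part of secml) ------------------------------
# A plain-NumPy version of the update this optimizer performs: fixed-step
# gradient descent followed by projection onto a box constraint, stopping when
# the objective stops changing by more than `eps`. Function names are
# illustrative, not part of the library.
def _pgd_box_sketch(fun, grad_fn, x0, lb, ub, eta=1e-3, eps=1e-4, max_iter=200):
    import numpy as np
    x = np.clip(np.asarray(x0, dtype=float), lb, ub)  # start from a feasible point
    f_prev = fun(x)
    for _ in range(max_iter):
        x = np.clip(x - eta * grad_fn(x), lb, ub)  # gradient step + box projection
        f_cur = fun(x)
        if abs(f_prev - f_cur) < eps:  # flat region: stop
            break
        f_prev = f_cur
    return x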
|
"""
Created on 16 Nov 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
https://raspberrypi.stackexchange.com/questions/2086/how-do-i-get-the-serial-number
"""
import os
import re
import socket
from pathlib import Path
from subprocess import check_output, call, Popen, PIPE, DEVNULL
from scs_core.estate.git_pull import GitPull
from scs_core.sys.disk_usage import DiskUsage
from scs_core.sys.disk_volume import DiskVolume
from scs_core.sys.ipv4_address import IPv4Address
from scs_core.sys.network import Networks
from scs_core.sys.node import IoTNode
from scs_core.sys.persistence_manager import FilesystemPersistenceManager
from scs_core.sys.uptime_datum import UptimeDatum
from scs_host.sys.host_status import HostStatus
# --------------------------------------------------------------------------------------------------------------------
class Host(IoTNode, FilesystemPersistenceManager):
"""
    Broadcom BCM2837 64-bit ARMv8 quad-core processor
"""
OS_ENV_PATH = 'SCS_ROOT_PATH'
I2C_EEPROM = 3
I2C_APPLICATION = 1
DFE_EEPROM_ADDR = 0x50
DFE_UID_ADDR = 0x58
# ----------------------------------------------------------------------------------------------------------------
# devices...
__OPC_SPI_BUS = 0 # based on spidev
__OPC_SPI_DEVICE = 0 # based on spidev
__NDIR_SPI_BUS = 0 # based on spidev
__NDIR_SPI_DEVICE = 1 # based on spidev
__NDIR_USB_DEVICE = "/dev/ttyUSB0" # hard-coded path
    __GPS_SERIAL_DEVICE = 0             # hard-coded device index (should be 0 on full-size Pis)
# ----------------------------------------------------------------------------------------------------------------
# time marker...
__TIME_SYNCHRONIZED = "/run/systemd/timesync/synchronized"
# ----------------------------------------------------------------------------------------------------------------
# directories and files...
__DEFAULT_HOME_DIR = "/home/pi" # hard-coded abs path
__LOCK_DIR = "/run/lock/southcoastscience" # hard-coded abs path
__TMP_DIR = "/tmp/southcoastscience" # hard-coded abs path
__SCS_DIR = "SCS" # hard-coded rel path
__COMMAND_DIR = "cmd" # hard-coded rel path
__LATEST_UPDATE = "latest_update.txt" # hard-coded rel path
__DFE_EEP_IMAGE = "dfe_cape.eep" # hard-coded rel path
# ----------------------------------------------------------------------------------------------------------------
# host acting as DHCP server...
    __SERVER_IPV4_ADDRESS = '172.22.15.1'               # hard-coded IPv4 address
# ----------------------------------------------------------------------------------------------------------------
@staticmethod
def serial_number():
cpuinfo = os.popen("cat /proc/cpuinfo").readlines()
line = cpuinfo[-1]
match = re.match(r'Serial\s*:\s*([0-9A-Fa-f]+)', line)
if match is None:
return None
fields = match.groups()
serial = fields[0]
return serial
@staticmethod
def enable_eeprom_access():
call(['sudo', 'dtoverlay', 'i2c-gpio', 'i2c_gpio_sda=0', 'i2c_gpio_scl=1'])
@staticmethod
def shutdown():
call(['systemctl', 'poweroff', '-i'])
@classmethod
def software_update_report(cls):
git_pull = GitPull.load(cls)
return None if git_pull is None else str(git_pull.pulled_on.datetime.date())
# ----------------------------------------------------------------------------------------------------------------
# network identity...
@classmethod
def name(cls):
return socket.gethostname()
@classmethod
def server_ipv4_address(cls):
return IPv4Address.construct(cls.__SERVER_IPV4_ADDRESS)
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def gps_device(cls):
return cls.__GPS_SERIAL_DEVICE
@classmethod
def ndir_usb_device(cls):
return cls.__NDIR_USB_DEVICE # we might have to search for it instead
@classmethod
def psu_device(cls):
raise NotImplementedError()
# ----------------------------------------------------------------------------------------------------------------
# status...
@classmethod
def status(cls):
message = str(os.popen("vcgencmd measure_temp").readline())
message = message.replace("temp=", "").replace("'C\n", "")
temp = float(message)
return HostStatus(temp)
# ----------------------------------------------------------------------------------------------------------------
# networks and modem...
@classmethod
def networks(cls):
p = Popen(['nmcli', 'd'], stdout=PIPE, stderr=DEVNULL)
stdout, _ = p.communicate(timeout=10)
if p.returncode != 0:
return None
return Networks.construct_from_nmcli(stdout.decode().splitlines())
@classmethod
def modem(cls):
return None
@classmethod
def modem_conn(cls):
return None
@classmethod
def sim(cls):
return None
# ----------------------------------------------------------------------------------------------------------------
# SPI...
@classmethod
def ndir_spi_bus(cls):
return cls.__NDIR_SPI_BUS
@classmethod
def ndir_spi_device(cls):
return cls.__NDIR_SPI_DEVICE
@classmethod
def opc_spi_bus(cls):
return cls.__OPC_SPI_BUS
@classmethod
def opc_spi_device(cls):
return cls.__OPC_SPI_DEVICE
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def disk_volume(cls, mounted_on):
process = Popen(['df'], stdout=PIPE)
out, _ = process.communicate()
rows = out.decode().splitlines()[1:]
for row in rows:
volume = DiskVolume.construct_from_df_row(row)
if volume.mounted_on == mounted_on:
return volume
return None
@classmethod
def disk_usage(cls, path):
try:
st = os.statvfs(path)
except OSError:
return None
return DiskUsage.construct_from_statvfs(path, st)
# ----------------------------------------------------------------------------------------------------------------
# time...
@classmethod
def time_is_synchronized(cls):
return Path(cls.__TIME_SYNCHRONIZED).exists()
@classmethod
def uptime(cls, now=None):
raw = check_output('uptime')
report = raw.decode()
return UptimeDatum.construct_from_report(now, report)
# ----------------------------------------------------------------------------------------------------------------
# tmp directories...
@classmethod
def lock_dir(cls):
return cls.__LOCK_DIR
@classmethod
def tmp_dir(cls):
return cls.__TMP_DIR
# ----------------------------------------------------------------------------------------------------------------
# filesystem paths...
@classmethod
def home_path(cls):
return os.environ[cls.OS_ENV_PATH] if cls.OS_ENV_PATH in os.environ else cls.__DEFAULT_HOME_DIR
@classmethod
def scs_path(cls):
return os.path.join(cls.home_path(), cls.__SCS_DIR)
@classmethod
def command_path(cls):
return os.path.join(cls.scs_path(), cls.__COMMAND_DIR)
@classmethod
def eep_image(cls):
return os.path.join(cls.scs_path(), cls.__DFE_EEP_IMAGE)
|
class UserAuthenticationInfo(object):
    password = None
    def get_data(self):
        return self
    def set_data(self, data):
        self.password = data["password"]
        return self
    def get_pass(self):
        return self.password
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyautocad.types import APoint, Vector
class ALine(object):
    """3D line that works with APoint and supports drawing in `AutoCAD`
Usage::
>>> l1 = ALine([10, 10], [20, 20])
Aline(APoint(10.00, 10.00, 0.00), APoint(20.00, 20.00, 0.00))
"""
def __init__(self, start_point, end_point):
if isinstance(start_point, APoint):
self.start = start_point
else:
self.start = APoint(*start_point)
if isinstance(end_point, APoint):
self.end = end_point
else:
self.end = APoint(*end_point)
@property
def length(self):
"""The length of 3D line"""
return self.start.distance_to(self.end)
@property
def middle(self):
"""The middle point of 3D line"""
return APoint((self.start.x + self.end.x) / 2, (self.start.y + self.end.y) / 2, (self.start.z + self.end.z) / 2)
@staticmethod
    def create_from_vector(v, pnt):
        """Create a line starting at `pnt` and extending along the unit vector of `v`.
        """
if v is None or not isinstance(v, Vector) or pnt is None:
return None
v = v.normalized()
# TODO: Change into APoint + Vector
return ALine(pnt, pnt + v)
def __str__(self):
return 'Aline(%s, %s)' % (self.start, self.end)
def __eq__(self, v):
return isinstance(v, ALine) and self.start == v.start and self.end == v.end
class ACircle(object):
"""2D Circle
"""
def __init__(self, pnt_center, radius):
"""Circle initial func
:param pnt_center: Center point of circle :class: `APoint`
:param radius: Radius of circle :class: `float`
"""
if pnt_center is None or radius <= 0:
            raise ValueError('Center point is None or radius is not positive')
self.center = pnt_center
self.radius = radius
def diameter(self):
return self.radius * 2
def __str__(self):
return 'ACircle(Center=%s, radius=%.2f)' % (self.center, self.radius)
class APolyline(object):
"""Polyline
"""
def __init__(self, pnts=None):
"""Polyline initial func
:param pnts: Point list(item need to be APoint or coordinate tuple) :class: `list` or `tuple`
"""
self.points = [] # type: list[APoint]
        if pnts is not None and isinstance(pnts, (list, tuple)):
for pnt in pnts:
self.append(pnt)
def append(self, pnt_or_list):
"""Add node into polyline, node need to be `APoint` or (x, y)/(x, y, z) `OCS` coordinate
(z coordinate will be ignored)
:param pnt_or_list: Node :class: `APoint`, `list` or `tuple`
"""
if pnt_or_list is None:
return
        if isinstance(pnt_or_list, APoint):
            self.points.append(pnt_or_list)
        elif (isinstance(pnt_or_list, (list, tuple))
              and len(pnt_or_list) >= 2):
            # build the node from x and y only; any z coordinate is ignored
            self.points.append(APoint(*pnt_or_list[:2]))
def __str__(self):
rs = ""
for pnt in self.points:
rs += ',(%.2f, %.2f, %.2f)' % (pnt.x, pnt.y, pnt.z)
return 'APolyline(%s)' % rs[1:]
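# --- Hedged usage sketch (not part of the original module) -------------------
# Exercises the helper classes above with plain coordinates; APoint comes from
# pyautocad.types as imported at the top of this file, and ALine.length relies
# on its distance_to() method.
def _example_geometry():
    line = ALine([0, 0], [3, 4])
    circle = ACircle(APoint(1, 1), 2.5)
    poly = APolyline([(0, 0), (1, 0), (1, 1)])
    return line.length, line.middle, circle.diameter(), str(poly)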
|
# -*- coding: utf-8 -*-
from django_webtest import DjangoTestApp, WebTestMixin
import pytest
from testapp.articles.factories import AuthorFactory, ArticleFactory, TeamFactory
@pytest.fixture(scope='function')
def app(request):
wtm = WebTestMixin()
wtm._patch_settings()
wtm._disable_csrf_checks()
request.addfinalizer(wtm._unpatch_settings)
return DjangoTestApp()
@pytest.fixture(scope='function')
def data(request):
teams = [
TeamFactory()
for x in range(0, 2)
]
authors = [
AuthorFactory(team=team)
for team in teams
for x in range(0, 5)
]
articles = [
ArticleFactory(author=author)
for author in authors
for x in range(0, 10)
]
return {
'teams': teams,
'authors': authors,
'articles': articles,
}
|
"""
Copyright (c) 2015 Frank Lamar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
# This script creates a simple shopping list
shopping_list = []
print("What should we pick up at the store?")
print("Enter 'Done' to stop adding items.")
while True:
new_item = input("> ")
    if new_item.lower() == 'done':
        break
    shopping_list.append(new_item)
    print("Added! The list has {} items.".format(len(shopping_list)))
print("Here is your shopping list:")
for item in shopping_list:
print(item)
|
import sys
import math
import random
class motion:
def __init__(self, call):
self.call = call
def forward_step(self, step):
data = [0x10, 0x01, 0x00, 0x00]
mm = 100
data[2] = mm//256
data[3] = mm % 256
for i in range(0, step):
self.call.blewrite(data)
self.call.blewait(0x88)
def forward(self, step):
data = [0x10, 0x01, 0x00, 0x00]
mm = None
if isinstance(step, (int, float)):
mm = round(step)
mm = mm*10
elif isinstance(step, str):
if(step == "step_1"):
self.forward_step(1)
return
elif(step == "step_2"):
self.forward_step(2)
return
elif(step == "step_3"):
self.forward_step(3)
return
elif(step == "step_4"):
self.forward_step(4)
return
elif(step == "step_5"):
self.forward_step(5)
return
elif(step == "step_6"):
self.forward_step(6)
return
elif(step == "random_step"):
mm = random.randint(1, 6)
self.forward_step(mm)
return
elif(step == "random"):
mm = random.randint(1, 6)
mm = mm*100
        if mm is None:
            mm = 100  # default distance, matching backward()'s fallback
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait(0x88)
def backward_step(self, step):
data = [0x10, 0x02, 0x00, 0x00]
mm = 100
data[2] = mm//256
data[3] = mm % 256
for i in range(0, step):
self.call.blewrite(data)
self.call.blewait(0x88)
def backward(self, step):
data = [0x10, 0x02, 0x00, 0x00]
mm = None
if isinstance(step, int):
mm = step
mm = mm*10
elif isinstance(step, float):
mm = round(step)
mm = mm*10
elif isinstance(step, str):
if(step == "step_1"):
self.backward_step(1)
return
elif(step == "step_2"):
self.backward_step(2)
return
elif(step == "step_3"):
self.backward_step(3)
return
elif(step == "step_4"):
self.backward_step(4)
return
elif(step == "step_5"):
self.backward_step(5)
return
elif(step == "step_6"):
self.backward_step(6)
return
elif(step == "random_step"):
mm = random.randint(1, 6)
self.backward_step(mm)
return
elif(step == "random"):
mm = random.randint(1, 6)
mm = mm*100
else:
mm = 100
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait(0x88)
def turn_left(self, angle):
data = [0x10, 0x03, 0x00, 0x00]
mm = None
if isinstance(angle, int):
mm = angle
elif isinstance(angle, float):
mm = round(angle)
elif isinstance(angle, str):
if(angle == "random"):
mm = random.randint(30, 180)
elif(angle == "30degree"):
mm = 30
elif(angle == "36degree"):
mm = 36
elif(angle == "45degree"):
mm = 45
elif(angle == "60degree"):
mm = 60
elif(angle == "72degree"):
mm = 72
elif(angle == "90degree"):
mm = 90
elif(angle == "108degree"):
mm = 108
elif(angle == "120degree"):
mm = 120
elif(angle == "135degree"):
mm = 135
elif(angle == "145degree"):
mm = 145
elif(angle == "150degree"):
mm = 150
elif(angle == "180degree"):
mm = 180
else:
mm = 30
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait()
def turn_right(self, angle: int):
data = [0x10, 0x04, 0x00, 0x00]
mm = None
if isinstance(angle, int):
mm = angle
elif isinstance(angle, float):
mm = round(angle)
elif isinstance(angle, str):
if(angle == "random"):
mm = random.randint(30, 180)
elif(angle == "30degree"):
mm = 30
elif(angle == "36degree"):
mm = 36
elif(angle == "45degree"):
mm = 45
elif(angle == "60degree"):
mm = 60
elif(angle == "72degree"):
mm = 72
elif(angle == "90degree"):
mm = 90
elif(angle == "108degree"):
mm = 108
elif(angle == "120degree"):
mm = 120
elif(angle == "135degree"):
mm = 135
elif(angle == "145degree"):
mm = 145
elif(angle == "150degree"):
mm = 150
elif(angle == "180degree"):
mm = 180
else:
mm = 30
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait()
def move_position(self, position):
# print(str(position))
if(float(position) > 9999):
position = 9999
if(float(position) < -9999):
position = -9999
position = position*10
position = round(position)
data = [0x10, 0x01, 0x00, 0x00]
mm = None
if(position > 0):
data[1] = 0x01
if(position > 1000):
position = 1000
mm = position
else:
data[1] = 0x02
if(position < -1000):
position = -1000
mm = 0-position
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait()
def move_angle(self, angle):
if(float(angle) > 9999):
angle = 9999
if(float(angle) < -9999):
angle = -9999
angle = round(angle)
data = [0x10, 0x03, 0x00, 0x00]
mm = None
if(angle > 0):
data[1] = 0x04
if(angle > 360):
angle = 360
mm = angle
else:
data[1] = 0x03
if(angle < -360):
angle = -360
mm = 0-angle
data[2] = mm//256
data[3] = mm % 256
self.call.blewrite(data)
self.call.blewait()
def move_speed(self, left_speed, right_speed):
left_s = 0
right_s = 0
data = [0x11, 0x03, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00]
if isinstance(left_speed, int):
left_s = 144*left_speed/9.70
elif isinstance(left_speed, float):
left_s = 144*left_speed/9.70
elif isinstance(left_speed, str):
if(left_speed == "gear_1"):
left_s = 70
elif(left_speed == "gear_2"):
left_s = 105
elif(left_speed == "gear_3"):
left_s = 140
elif(left_speed == "gear_4"):
left_s = 175
elif(left_speed == "gear_5"):
left_s = 210
elif(left_speed == "gear_6"):
left_s = 245
elif(left_speed == "inf"):
left_s = 245
elif(left_speed == "gear_stop"):
left_s = 0
elif(left_speed == "gear_random"):
a = random.randint(0, 5)
left_s = 70+35*a
elif(left_speed == "backgear_1"):
left_s = -70
elif(left_speed == "backgear_2"):
left_s = -105
elif(left_speed == "backgear_3"):
left_s = -140
elif(left_speed == "backgear_4"):
left_s = -175
elif(left_speed == "backgear_5"):
left_s = -175
elif(left_speed == "backgear_6"):
left_s = -210
elif(left_speed == "-inf"):
left_s = -210
elif(left_speed == "backgear_stop"):
left_s = 0
elif(left_speed == "backgear_random"):
a = random.randint(0, 5)
left_s = -70-35*a
if isinstance(right_speed, int):
right_s = 144*right_speed/9.70
elif isinstance(right_speed, float):
right_s = 144*right_speed/9.70
elif isinstance(right_speed, str):
if(right_speed == "gear_1"):
right_s = 70
elif(right_speed == "gear_2"):
right_s = 105
elif(right_speed == "gear_3"):
right_s = 140
elif(right_speed == "gear_4"):
right_s = 175
elif(right_speed == "gear_5"):
right_s = 210
elif(right_speed == "gear_6"):
right_s = 245
elif(right_speed == "inf"):
right_s = 245
elif(right_speed == "gear_stop"):
right_s = 0
elif(right_speed == "gear_random"):
a = random.randint(0, 5)
right_s = 70+35*a
elif(right_speed == "backgear_1"):
right_s = -70
elif(right_speed == "backgear_2"):
right_s = -105
elif(right_speed == "backgear_3"):
right_s = -140
elif(right_speed == "backgear_4"):
right_s = -175
elif(right_speed == "backgear_5"):
right_s = -175
elif(right_speed == "backgear_6"):
right_s = -210
elif(right_speed == "-inf"):
right_s = -210
elif(right_speed == "backgear_stop"):
right_s = 0
elif(right_speed == "backgear_random"):
a = random.randint(0, 5)
right_s = -70-35*a
if(left_s > 0):
left_s = left_s
data[2] = 0x01
else:
left_s = 0-left_s
data[2] = 0x02
if(right_s > 0):
right_s = right_s
data[5] = 0x01
else:
right_s = 0-right_s
data[5] = 0x02
left_s = round(left_s)
right_s = round(right_s)
data[3] = left_s//256
data[4] = left_s % 256
data[6] = right_s//256
data[7] = right_s % 256
self.call.blewrite(data)
def move_right_speed(self, right_speed):
right_s = 0
data = [0x11, 0x01, 0x01, 0x00, 0x00]
if isinstance(right_speed, int):
right_s = 144*right_speed/9.70
elif isinstance(right_speed, float):
right_s = 144*right_speed/9.70
elif isinstance(right_speed, str):
if(right_speed == "gear_1"):
right_s = 70
elif(right_speed == "gear_2"):
right_s = 105
elif(right_speed == "gear_3"):
right_s = 140
elif(right_speed == "gear_4"):
right_s = 175
elif(right_speed == "gear_5"):
right_s = 210
elif(right_speed == "gear_6"):
right_s = 245
elif(right_speed == "inf"):
right_s = 245
elif(right_speed == "gear_stop"):
right_s = 0
elif(right_speed == "gear_random"):
a = random.randint(0, 5)
right_s = 70+35*a
elif(right_speed == "backgear_1"):
right_s = -70
elif(right_speed == "backgear_2"):
right_s = -105
elif(right_speed == "backgear_3"):
right_s = -140
elif(right_speed == "backgear_4"):
right_s = -175
elif(right_speed == "backgear_5"):
right_s = -175
elif(right_speed == "backgear_6"):
right_s = -210
elif(right_speed == "-inf"):
right_s = -210
elif(right_speed == "backgear_stop"):
right_s = 0
elif(right_speed == "backgear_random"):
a = random.randint(0, 5)
right_s = -70-35*a
if(right_s > 0):
right_s = right_s
data[2] = 0x01
else:
right_s = 0-right_s
data[2] = 0x02
right_s = round(right_s)
data[3] = right_s//256
data[4] = right_s % 256
self.call.blewrite(data)
def move_left_speed(self, left_speed):
left_s = 0
data = [0x11, 0x02, 0x01, 0x00, 0x00]
if isinstance(left_speed, int):
left_s = 144*left_speed/9.70
elif isinstance(left_speed, float):
left_s = 144*left_speed/9.70
elif isinstance(left_speed, str):
if(left_speed == "gear_1"):
left_s = 70
elif(left_speed == "gear_2"):
left_s = 105
elif(left_speed == "gear_3"):
left_s = 140
elif(left_speed == "gear_4"):
left_s = 175
elif(left_speed == "gear_5"):
left_s = 210
elif(left_speed == "gear_6"):
left_s = 245
elif(left_speed == "inf"):
left_s = 245
elif(left_speed == "gear_stop"):
left_s = 0
elif(left_speed == "gear_random"):
a = random.randint(0, 5)
left_s = 70+35*a
elif(left_speed == "backgear_1"):
left_s = -70
elif(left_speed == "backgear_2"):
left_s = -105
elif(left_speed == "backgear_3"):
left_s = -140
elif(left_speed == "backgear_4"):
left_s = -175
elif(left_speed == "backgear_5"):
left_s = -175
elif(left_speed == "backgear_6"):
left_s = -210
elif(left_speed == "-inf"):
left_s = -210
elif(left_speed == "backgear_stop"):
left_s = 0
elif(left_speed == "backgear_random"):
a = random.randint(0, 5)
left_s = -70-35*a
if(left_s > 0):
left_s = left_s
data[2] = 0x01
else:
left_s = 0-left_s
data[2] = 0x02
left_s = round(left_s)
data[3] = left_s//256
data[4] = left_s % 256
self.call.blewrite(data)
def stop(self, wheel):
data = None
if(wheel == "left"):
data = [0x11, 0x01, 0x01, 0x00, 0x00]
elif(wheel == "right"):
data = [0x11, 0x02, 0x01, 0x00, 0x00]
elif(wheel == "all"):
data = [0x11, 0x03, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00]
else:
data = [0x11, 0x03, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00]
self.call.blewrite(data)
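# --- Hedged usage sketch (not part of the original module) -------------------
# The motion class only needs an object exposing blewrite()/blewait(); a tiny
# stub that records packets makes the byte layout visible without hardware.
# forward(5) scales the argument by 10 (5 -> 50) and packs it big-endian into
# bytes 2-3 of the command packet.
class _RecordingBLE:
    def __init__(self):
        self.packets = []
    def blewrite(self, data):
        self.packets.append(list(data))
    def blewait(self, code=None):
        pass
def _example_motion():
    ble = _RecordingBLE()
    bot = motion(ble)
    bot.forward(5)              # -> [0x10, 0x01, 0, 50]
    bot.turn_left("90degree")   # -> [0x10, 0x03, 0, 90]
    return ble.packets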
|
import os
from setuptools import setup, find_packages
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name="clinto",
version="0.4.0",
packages=find_packages(),
scripts=[],
install_requires=["six"],
include_package_data=True,
description="Clinto",
url="http://www.github.com/wooey/clinto",
author="Chris Mitchell",
author_email="chris.mit7@gmail.com",
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
)
|
import os
import pandas as pd
# path to MIMIC-III as split into episodes by mimic3-benchmarks
patients_dir = 'data/mimic'
class Patient:
def __init__(self, directory, id):
self.directory = directory
self.id = id
def get_stays(self):
return pd.read_csv(os.path.join(self.directory, 'stays.csv'))
class Episode:
def __init__(self, patient, number):
self.patient = patient
self.number = number
def get_info(self):
return pd.read_csv(os.path.join(self.patient.directory,
'episode' + self.number + '.csv'))
def get_timeseries(self):
return pd.read_csv(os.path.join(self.patient.directory,
'episode' + self.number + '_timeseries.csv'))
def get_noteconcepts(self):
try:
return pd.read_csv(os.path.join(self.patient.directory,
'episode' + self.number + '_noteconcepts.csv'))
except FileNotFoundError:
return pd.DataFrame()
def get_concepts(self, timedelta=None):
nc_df = self.get_noteconcepts()
if not nc_df.empty:
admit_time = pd.to_datetime(self.get_stay()['ADMITTIME'])
for _, row in nc_df.iterrows():
concepts = row['CONCEPTS']
chartdate = pd.to_datetime(row['CHARTDATE'])
if pd.isna(concepts):
continue
if timedelta and chartdate - admit_time > timedelta:
continue
concept_set = set()
for concept in filter(lambda c: c[0] != '!', concepts.split(' ')):
concept_set.add(concept)
yield concept_set
def get_stay(self):
stays_df = self.patient.get_stays()
info_df = self.get_info()
stay_df = stays_df.loc[stays_df['ICUSTAY_ID'] == info_df.iloc[0]['Icustay']]
assert(len(stay_df) == 1)
return stay_df.iloc[0]
def patients(partition=None):
if partition:
pdir = os.path.join(patients_dir, partition)
patdirs = [os.path.join(pdir, p) for p in
filter(str.isdigit, os.listdir(pdir))]
else:
train_dir = os.path.join(patients_dir, 'train')
patdirs = [os.path.join(train_dir, p) for p in
filter(str.isdigit, os.listdir(train_dir))]
test_dir = os.path.join(patients_dir, 'test')
patdirs += [os.path.join(test_dir, p) for p in
filter(str.isdigit, os.listdir(test_dir))]
for patdir in patdirs:
yield Patient(patdir, os.path.basename(patdir))
def episodes(partition=None):
for pat in patients(partition):
ts_files = list(filter(lambda x: x.endswith('_timeseries.csv'),
os.listdir(pat.directory)))
        # episode number is the character(s) right after 'episode' in
        # 'episode<N>_timeseries.csv' (handles one- and two-digit numbers)
        eps = \
            [ts[7] + (ts[8] if ts[8].isdigit() else '') for ts in ts_files]
for ep in eps:
yield Episode(pat, ep)
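# --- Hedged usage sketch (not part of the original module) -------------------
# Walk the first few training episodes and collect note concepts recorded
# within 48 hours of admission; assumes the mimic3-benchmarks directory layout
# under `patients_dir`.
def _example_episode_concepts(limit=3):
    collected = []
    for i, ep in enumerate(episodes('train')):
        if i >= limit:
            break
        concept_sets = list(ep.get_concepts(timedelta=pd.Timedelta(hours=48)))
        collected.append((ep.patient.id, ep.number, concept_sets))
    return collected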
|
from maze import Maze
from game_controller import GameController
def test_constructor():
g = GameController(600, 400)
m = Maze(600, 400, 150, 450,
100, 300, g)
assert m.LEFT_VERT == 150
assert m.RIGHT_VERT == 450
assert m.TOP_HORIZ == 100
assert m.BOTTOM_HORIZ == 300
assert m.WIDTH == 600
assert m.HEIGHT == 400
assert m.gc is g
assert m.dots.dots_left() == ((m.dots.WIDTH//m.dots.SPACING + 1) * 2 +
(m.dots.HEIGHT//m.dots.SPACING + 1) * 2)
def test_eat_dots():
g = GameController(600, 400)
m = Maze(600, 400, 150, 450,
100, 300, g)
assert m.eat_dots(0, m.TOP_HORIZ)[0][0].x == m.dots.SPACING
assert m.dots.top_row[-1].x == m.dots.WIDTH - m.dots.SPACING
assert len(m.eat_dots(m.LEFT_VERT - m.dots.EAT_DIST, m.BOTTOM_HORIZ)[1])\
== m.dots.WIDTH//m.dots.SPACING
assert len(m.dots.left_col) == m.dots.HEIGHT//m.dots.SPACING
|
# Copyright (c) OpenMMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import re
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
import mmcv
from ..fileio import FileClient
from ..fileio import load as load_file
from ..parallel import is_module_wrapper
from ..utils import mkdir_or_exist
from .dist_utils import get_dist_info
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
class CheckpointLoader:
"""A general checkpoint loader to manage all schemes."""
_schemes = {}
@classmethod
def _register_scheme(cls, prefixes, loader, force=False):
if isinstance(prefixes, str):
prefixes = [prefixes]
else:
assert isinstance(prefixes, (list, tuple))
for prefix in prefixes:
if (prefix not in cls._schemes) or force:
cls._schemes[prefix] = loader
else:
raise KeyError(
f'{prefix} is already registered as a loader backend, '
'add "force=True" if you want to override it')
# sort, longer prefixes take priority
cls._schemes = OrderedDict(
sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))
@classmethod
def register_scheme(cls, prefixes, loader=None, force=False):
"""Register a loader to CheckpointLoader.
This method can be used as a normal class method or a decorator.
Args:
prefixes (str or list[str] or tuple[str]):
The prefix of the registered loader.
loader (function, optional): The loader function to be registered.
When this method is used as a decorator, loader is None.
Defaults to None.
force (bool, optional): Whether to override the loader
if the prefix has already been registered. Defaults to False.
"""
if loader is not None:
cls._register_scheme(prefixes, loader, force=force)
return
def _register(loader_cls):
cls._register_scheme(prefixes, loader_cls, force=force)
return loader_cls
return _register
@classmethod
def _get_checkpoint_loader(cls, path):
"""Finds a loader that supports the given path. Falls back to the local
loader if no other loader is found.
Args:
path (str): checkpoint path
Returns:
loader (function): checkpoint loader
"""
for p in cls._schemes:
if path.startswith(p):
return cls._schemes[p]
@classmethod
def load_checkpoint(cls, filename, map_location=None, logger=None):
"""load checkpoint through URL scheme path.
Args:
filename (str): checkpoint file name with given prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
logger (:mod:`logging.Logger`, optional): The logger for message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint_loader = cls._get_checkpoint_loader(filename)
class_name = checkpoint_loader.__name__
mmcv.print_log(
f'load checkpoint from {class_name[10:]} path: {filename}', logger)
return checkpoint_loader(filename, map_location)
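# --- Hedged usage sketch (not part of mmcv) -----------------------------------
# register_scheme can be used either as a decorator or as a plain call; the
# 'demo://' prefix below is purely illustrative. Nothing here runs at import
# time.
def _register_demo_scheme():
    @CheckpointLoader.register_scheme(prefixes='demo://')
    def load_from_demo(filename, map_location=None):
        # strip the illustrative prefix and defer to torch.load
        return torch.load(filename[len('demo://'):], map_location=map_location)
    # equivalent explicit form; force=True allows re-registration
    CheckpointLoader.register_scheme('demo://', load_from_demo, force=True)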
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
"""load checkpoint by local file path.
Args:
filename (str): local checkpoint file path
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function downloads the checkpoint only at local rank 0.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
model_dir (string, optional): directory in which to save the object,
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(
filename, model_dir=model_dir, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In distributed
    setting, this function downloads the checkpoint at all ranks, each to its
    own temporary directory.
Args:
filename (str): checkpoint file path with pavi prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
assert filename.startswith('pavi://'), \
f'Expected filename startswith `pavi://`, but get {filename}'
model_path = filename[7:]
try:
from pavi import modelcloud
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """load checkpoint through the file path prefixed with s3. In distributed
    setting, this function downloads the checkpoint at all ranks, each to its
    own temporary directory.
Args:
filename (str): checkpoint file path with s3 prefix
map_location (str, optional): Same as :func:`torch.load`.
backend (str, optional): The storage backend type. Options are 'ceph',
'petrel'. Default: 'petrel'.
.. warning::
:class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
allowed_backends = ['ceph', 'petrel']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if backend == 'ceph':
warnings.warn(
'CephBackend will be deprecated, please use PetrelBackend instead')
# CephClient and PetrelBackend have the same prefix 's3://' and the latter
# will be chosen as default. If PetrelBackend can not be instantiated
# successfully, the CephClient will be chosen.
try:
file_client = FileClient(backend=backend)
except ImportError:
allowed_backends.remove(backend)
file_client = FileClient(backend=allowed_backends[0])
with io.BytesIO(file_client.get(filename)) as buffer:
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
"""load checkpoint through the file path prefixed with modelzoo or
torchvision.
Args:
filename (str): checkpoint file path with modelzoo or
torchvision prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_torchvision_models()
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_name = filename[11:]
else:
model_name = filename[14:]
return load_from_http(model_urls[model_name], map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
"""load checkpoint through the file path prefixed with open-mmlab or
openmmlab.
Args:
filename (str): checkpoint file path with open-mmlab or
openmmlab prefix
map_location (str, optional): Same as :func:`torch.load`.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_external_models()
prefix_str = 'open-mmlab://'
if filename.startswith(prefix_str):
model_name = filename[13:]
else:
model_name = filename[12:]
prefix_str = 'openmmlab://'
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
f'of {prefix_str}{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_from_http(model_url, map_location=map_location)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
"""load checkpoint through the file path prefixed with mmcls.
Args:
filename (str): checkpoint file path with mmcls prefix
map_location (str, optional): Same as :func:`torch.load`.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_from_http(
model_urls[model_name], map_location=map_location)
checkpoint = _process_mmcls_checkpoint(checkpoint)
return checkpoint
def _load_checkpoint(filename, map_location=None, logger=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str, optional): Same as :func:`torch.load`.
Default: None.
logger (:mod:`logging.Logger`, optional): The logger for error message.
Default: None
Returns:
dict or OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
return CheckpointLoader.load_checkpoint(filename, map_location, logger)
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
"""Load partial pretrained model with specific prefix.
Args:
prefix (str): The prefix of sub-module.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location=map_location)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if not prefix.endswith('.'):
prefix += '.'
prefix_len = len(prefix)
state_dict = {
k[prefix_len:]: v
for k, v in state_dict.items() if k.startswith(prefix)
}
assert state_dict, f'{prefix} is not in the pretrained model'
return state_dict
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
revise_keys=[(r'^module\.', '')]):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
revise_keys (list): A list of customized keywords to modify the
state_dict in checkpoint. Each item is a (pattern, replacement)
pair of the regular expression operations. Default: strip
the prefix 'module.' by [(r'^module\\.', '')].
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location, logger)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
state_dict = OrderedDict(
{re.sub(p, r, k): v
for k, v in state_dict.items()})
# Keep metadata in state_dict
state_dict._metadata = metadata
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
# Keep metadata in state_dict
state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
prefix (str): The prefix for parameters and buffers used in this
module.
"""
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model,
filename,
optimizer=None,
meta=None,
file_client_args=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
file_client_args (dict, optional): Arguments to instantiate a
FileClient. See :class:`mmcv.fileio.FileClient` for details.
Default: None.
`New in version 1.3.16.`
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
if file_client_args is not None:
raise ValueError(
                'file_client_args should be "None" if filename starts with '
                f'"pavi://", but got {file_client_args}')
try:
from pavi import modelcloud
from pavi import exception
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except exception.NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
file_client = FileClient.infer_client(file_client_args, filename)
with io.BytesIO() as f:
torch.save(checkpoint, f)
file_client.put(f.getvalue(), filename)
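# Round-trip sketch (not part of the original module): save a tiny model with
# save_checkpoint, load it back with load_checkpoint, and pull a sub-module's
# weights out with _load_checkpoint_with_prefix. The 'backbone'/'head' names and
# the temporary path are illustrative only.
if __name__ == '__main__':
    import tempfile
    import torch.nn as nn
    class _TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = nn.Linear(4, 8)
            self.head = nn.Linear(8, 2)
        def forward(self, x):
            return self.head(self.backbone(x))
    with tempfile.TemporaryDirectory() as _tmp_dir:
        _ckpt_path = osp.join(_tmp_dir, 'tiny.pth')
        save_checkpoint(_TinyModel(), _ckpt_path, meta={'note': 'demo'})
        # full load back into a freshly initialised model
        load_checkpoint(_TinyModel(), _ckpt_path, map_location='cpu')
        # partial load: only the keys that start with 'backbone.' (prefix stripped)
        _backbone_state = _load_checkpoint_with_prefix('backbone', _ckpt_path, map_location='cpu')
        assert set(_backbone_state) == {'weight', 'bias'}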
|
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from brbanks2ynab.config.initialize import init_config
from brbanks2ynab.sync.sync import sync
def _default_config_path():
return os.path.join(os.getcwd(), 'brbanks2ynab.json')
def main():
logging.basicConfig()
logger = logging.getLogger('brbanks2ynab')
parser = ArgumentParser(description='Importador de transações de bancos brasileiros para o YNAB')
parser.add_argument('--debug', action='store_true')
subparsers = parser.add_subparsers(dest='cmd')
sync_parser = subparsers.add_parser('sync')
sync_parser.add_argument('--config', default=_default_config_path())
configure_parser = subparsers.add_parser('configure')
result = parser.parse_args()
if result.debug:
logger.setLevel(logging.DEBUG)
if result.cmd == 'configure':
init_config()
elif result.cmd == 'sync':
path = Path(result.config)
sync(path)
if __name__ == '__main__':
main()
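# Example invocations (sketch; assumes the package exposes a `brbanks2ynab`
# console script — adjust the command if it is installed differently):
#   brbanks2ynab configure                      # interactively build brbanks2ynab.json
#   brbanks2ynab --debug sync --config ./brbanks2ynab.json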
|
import requests
from bs4 import BeautifulSoup as bsoup
import db_checker as dbchk
def get_response(url,site_tag):
aggr_urls=[]
agent = {"User-Agent":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
site_req_url = requests.get(url, headers=agent)
site_req_html= bsoup(site_req_url.content,'html.parser')
if site_tag=="N7P":
links=site_req_html.find_all('a', href=True, text=True,rel=True)
for lk in links:
for i in lk['rel']:
if i=='bookmark':
aggr_urls.append(lk.string)
aggr_urls.append(lk['href'])
if site_tag=="W10":
for i in site_req_html.select('article[class*="mspuloop"]'):
lk=i.find_all('a',href=True,title=True)[0]
aggr_urls.append(lk['title'])
aggr_urls.append(lk['href'])
aggr_urls.reverse()
return(aggr_urls)
if __name__ == "__main__":
main_url=[("https://nokiapoweruser.com/tag/nokia-7-plus/","N7P"),
("https://mspoweruser.com/tag/windows-10/","W10")]
for i in main_url:
rslt=get_response(i[0],i[1])
dbchk.db_chk(rslt)
|
#################################################################
# MET v2 Metadate Explorer Tool
#
# This Software is Open Source. See License: https://github.com/TERENA/met/blob/master/LICENSE.md
# Copyright (c) 2012, TERENA All rights reserved.
#
# This Software is based on MET v1 developed for TERENA by Yaco Sistemas, http://www.yaco.es/
# MET v2 was developed for TERENA by Tamim Ziai, DAASI International GmbH, http://www.daasi.de
# Current version of MET has been revised for performance improvements by Andrea Biancini,
# Consortium GARR, http://www.garr.it
#########################################################################################
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='met',
version=version,
description="Metadata Explorer Tool",
long_description="""Terena - Metadata Explorer Tool""",
classifiers=[
        'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Terena',
author_email='',
url='https://github.com/TERENA/met',
license='BSD License',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Django>=1.7',
'MySQL-python',
'lxml>=2.3.5',
'PIL',
'requests>=1.0.0',
'djangosaml2>=0.9.0',
'django-pagination==1.0.7',
'django-chartit',
'python-memcached==1.48',
'simplejson',
'django-mysqlpool',
'django-silk',
'pyff'
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
from jsog3 import jsog
import unittest
class TestJSOG(unittest.TestCase):
def test_encode_reference(self):
inner = { "foo": "bar" }
outer = { "inner1": inner, "inner2": inner }
encoded = jsog.encode(outer)
        # Python 3.7+ guarantees dict insertion order, so inner1 is always
        # encoded first; on older releases the contents of inner1 and inner2
        # might be swapped. Do we care?
self.assertEqual(encoded, {
"inner1": {
"@id": "1",
"foo": "bar",
},
'inner2': { '@ref': '1' }
})
def test_decode_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":{"@ref":"2"}}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2']
self.assertTrue(inner1 is inner2)
def test_encode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
self.assertEqual(encoded, {
'@id': '1',
'me': { '@ref': '1' },
'list': [ { '@ref': '1' } ],
})
def test_decode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
back = jsog.decode(encoded)
self.assertFalse('@id' in back)
self.assertTrue(back['me'] is back)
self.assertTrue(back['list'][0] is back)
def test_encode_null(self):
encoded = jsog.encode(None)
self.assertEqual(encoded, None)
def test_decode_null(self):
decoded = jsog.decode(None)
self.assertEqual(decoded, None)
def test_decode_plain_json(self):
json = { "foo": "bar" }
decoded = jsog.decode(json)
self.assertEqual(json, decoded)
def test_decode_list_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":[{"@ref":"2"}]}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2'][0]
self.assertTrue(inner1 is inner2)
def test_decode_missing_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@ref": "1" }, "bar": { "@ref": "1" } }
jsog.decode(json)
def test_decode_duplicate_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@id": "1" }, "bar": { "@id": "1" } }
jsog.decode(json)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import logging
import datetime
from pythonjsonlogger import jsonlogger
class CustomJsonFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
dt_now = datetime.datetime.now()
super().add_fields(log_record, record, message_dict)
log_record['level'] = record.levelname
log_record['cursor'] = f'{record.pathname}#L{record.lineno}'
log_record['timestamp'] = dt_now.strftime('%Y/%m/%d %H:%M:%S')
def init_logger():
handler = logging.StreamHandler()
formatter = CustomJsonFormatter()
handler.setFormatter(formatter)
root = logging.getLogger()
    # remove any handlers installed by aion
root.handlers.clear()
root.addHandler(handler)
root.setLevel(logging.DEBUG)
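# Usage sketch (not part of the original module): after init_logger(), every log
# record is emitted as a single JSON object with "level", "cursor" and "timestamp"
# fields added by CustomJsonFormatter.
if __name__ == '__main__':
    init_logger()
    logging.getLogger(__name__).info('hello from the json logger')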
|
prompt= "\nPor favor escriba su edad: "
prompt+= "\nEscriba '-1' cuando haya terminado"
while True:
edad= int(input(prompt))
if edad == -1:
break
elif edad < 3 and edad!=0 :
print("Su entrada es gratuita")
elif edad >= 3 and edad <12:
print("Son $10 por favor")
elif edad >=12:
print("Son $15 por favor")
elif edad == 0:
print("Edad no valida, intente de nuevo")
|
import math
import torch.nn as nn
import torch
import numpy as np
eps = float(np.finfo(np.float32).eps)
class PredictionLoss(nn.Module):
def __init__(self, batch_size, seq_len):
super(PredictionLoss, self).__init__()
self.batch_size = batch_size
self.seq_len = seq_len
@staticmethod
def gaussian_2d(x1, x2, mu1, mu2, s1, s2, rho):
norm1 = x1 - mu1
norm2 = x2 - mu2
sigma1sigma2 = s1 * s2
z = (norm1 / s1) ** 2 + (norm2 / s2) ** 2 - 2 * rho * norm1 * norm2 / sigma1sigma2
numerator = torch.exp(-z / (2 * (1 - rho ** 2)))
denominator = 2 * math.pi * sigma1sigma2 * torch.sqrt(1 - rho ** 2)
gaussian = numerator / denominator
return gaussian
def forward(self, output, target):
eos, pi, mu1, mu2, sigma1, sigma2, rho = output
x_1, x_2, x_eos = torch.chunk(target.view(-1, 3).contiguous(), chunks=3, dim=1)
gaussian = self.gaussian_2d(x_1, x_2, mu1, mu2, sigma1, sigma2, rho)
loss_gaussian = -torch.log(torch.sum(pi * gaussian, dim=1, keepdim=True) + eps)
loss_bernoulli = -torch.log(eos * x_eos + (1 - eos) * (1 - x_eos))
loss = torch.sum(loss_gaussian + loss_bernoulli)
return loss / (self.batch_size * self.seq_len)
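# Shape-check sketch (not part of the original module): drive the loss with random
# tensors shaped the way forward() above expects a mixture-density output to look.
# batch_size, seq_len and the number of mixture components are arbitrary here.
if __name__ == '__main__':
    batch_size, seq_len, n_mix = 2, 5, 3
    n = batch_size * seq_len
    criterion = PredictionLoss(batch_size, seq_len)
    eos = torch.rand(n, 1).clamp(0.01, 0.99)               # end-of-stroke probability
    pi = torch.softmax(torch.randn(n, n_mix), dim=1)       # mixture weights sum to 1
    mu1, mu2 = torch.randn(n, n_mix), torch.randn(n, n_mix)
    sigma1 = torch.rand(n, n_mix) + 0.5                    # keep standard deviations positive
    sigma2 = torch.rand(n, n_mix) + 0.5
    rho = torch.zeros(n, n_mix)                            # uncorrelated components
    target = torch.cat([torch.randn(n, 2), torch.randint(0, 2, (n, 1)).float()], dim=1)
    loss = criterion((eos, pi, mu1, mu2, sigma1, sigma2, rho), target)
    print(loss.item())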
|
import grpc
import grpc_testing
import pytest
import random
from pymilvus.grpc_gen import milvus_pb2, schema_pb2, common_pb2
from pymilvus import Milvus, DataType
class Fields:
class NormalizedField:
def __init__(self, **kwargs):
self.name = kwargs.get("name", None)
self.is_primary_key = kwargs.get("is_primary_key", False)
self.data_type = kwargs.get("data_type", None)
self.type_params = kwargs.get("type_params", dict())
self.autoID = kwargs.get("autoID", False)
def __eq__(self, other):
if isinstance(other, Fields.NormalizedField):
return self.name == other.name and \
self.is_primary_key == other.is_primary_key and \
self.data_type == other.data_type and \
self.type_params == other.type_params and \
self.autoID == other.autoID
return False
def __repr__(self):
dump = f"(name: {self.name}"
dump += f", id_primary_key:{self.is_primary_key}"
dump += f", data_type:{self.data_type}"
dump += f", type_params:{self.type_params}"
dump += f", autoID:{self.autoID})"
return dump
@classmethod
def equal(cls, grpc_fields, dict_fields):
n_grpc_fields = {
field.name: Fields.NormalizedField(name=field.name,
is_primary_key=field.is_primary_key,
data_type=field.data_type,
type_params={pair.key: pair.value for pair in field.type_params},
autoID=field.autoID
)
for field in grpc_fields}
n_dict_fields = {
field["name"]: Fields.NormalizedField(name=field["name"],
is_primary_key=field.get("is_primary", False),
data_type=field["type"],
type_params=field.get("params", dict()),
autoID=field.get("auto_id", False)
)
for field in dict_fields
}
return n_grpc_fields == n_dict_fields
class TestCreateCollection:
@pytest.fixture(scope="function")
def collection_name(self):
return f"test_collection_{random.randint(100000, 999999)}"
def setup(self) -> None:
self._real_time = grpc_testing.strict_real_time()
self._real_time_channel = grpc_testing.channel(
milvus_pb2.DESCRIPTOR.services_by_name.values(), self._real_time)
self._servicer = milvus_pb2.DESCRIPTOR.services_by_name['MilvusService']
self._milvus = Milvus(channel=self._real_time_channel, try_connect=False, pre_ping=False)
def teardown(self) -> None:
pass
def test_create_collection(self, collection_name):
id_field = {
"name": "my_id",
"type": DataType.INT64,
"auto_id": True,
"is_primary": True,
}
vector_field = {
"name": "embedding",
"type": DataType.FLOAT_VECTOR,
"metric_type": "L2",
"params": {"dim": "4"},
}
fields = {"fields": [id_field, vector_field]}
future = self._milvus.create_collection(collection_name=collection_name, fields=fields, _async=True)
invocation_metadata, request, rpc = self._real_time_channel.take_unary_unary(
self._servicer.methods_by_name['CreateCollection']
)
rpc.send_initial_metadata(())
rpc.terminate(common_pb2.Status(error_code=common_pb2.Success, reason="success"), (), grpc.StatusCode.OK, '')
request_schema = schema_pb2.CollectionSchema()
request_schema.ParseFromString(request.schema)
assert request.collection_name == collection_name
assert Fields.equal(request_schema.fields, fields["fields"])
return_value = future.result()
assert return_value.error_code == common_pb2.Success
assert return_value.reason == "success"
|
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
EPSILON = np.finfo(float).eps
def concrete_neuron(logit_p, testing=False, temp=1.0 / 10.0, **kwargs):
'''
Use concrete distribution to approximate binary output. Here input is logit(keep_prob).
'''
if testing:
result = logit_p.data.new().resize_as_(logit_p.data).fill_(1.)
result[logit_p.data < 0.] = 0.
return Variable(result)
# Note that p is the retain probability here
p = torch.sigmoid(logit_p)
unif_noise = Variable(logit_p.data.new().resize_as_(logit_p.data).uniform_())
approx = (
torch.log(1. - p + EPSILON)
- torch.log(p + EPSILON)
+ torch.log(unif_noise + EPSILON)
- torch.log(1. - unif_noise + EPSILON)
)
drop_prob = torch.sigmoid(approx / temp)
return (1. - drop_prob)
def concrete_dropout_neuron(dropout_p, temp=1.0 / 10.0, **kwargs):
'''
Use concrete distribution to approximate binary output. Here input is logit(dropout_prob).
'''
# Note that p is the dropout probability here
unif_noise = Variable(dropout_p.data.new().resize_as_(dropout_p.data).uniform_())
approx = (
torch.log(dropout_p + EPSILON)
- torch.log(1. - dropout_p + EPSILON)
+ torch.log(unif_noise + EPSILON)
- torch.log(1. - unif_noise + EPSILON)
)
approx_output = torch.sigmoid(approx / temp)
return 1 - approx_output
def multiclass_concrete_neuron(log_alpha, temp=0.1, **kwargs):
'''
Use concrete distribution to approximate multiclass output.
:param log_alpha: np array [N, nclass]
:return: Sample value: np array [N, nclass]
'''
    # Gumbel-softmax (concrete) relaxation over the classes
alpha = torch.exp(log_alpha)
uniform = Variable(log_alpha.data.new().resize_as_(log_alpha.data).uniform_())
gumbel = - torch.log(- torch.log(uniform + EPSILON) + EPSILON)
logit = (torch.log(alpha + EPSILON) + gumbel) / temp
    return F.softmax(logit, dim=-1)  # explicit dim: softmax over the class dimension
if __name__ == '__main__':
def test_val(p_val):
p_tensor = p_val * torch.ones(1)
logit_p = Variable(torch.log(p_tensor) - torch.log(1 - p_tensor))
arr = [concrete_neuron(logit_p).data[0] for i in range(100)]
print('retain prob:', p_val, 'Average over 100:', np.mean(arr))
# Keep probability is 0.5
test_val(0.5)
test_val(0.1)
test_val(0.9)
def test_val(p_val):
p_tensor = p_val * torch.ones(1)
logit_p = Variable(torch.log(p_tensor) - torch.log(1 - p_tensor))
arr = [concrete_dropout_neuron(logit_p).data[0] for i in range(100)]
print('dropout prob:', p_val, 'Average over 100:', np.mean(arr))
print(arr[0])
test_val(0.5)
test_val(0.1)
test_val(0.9)
log_alpha = Variable(torch.ones(1, 4))
print(multiclass_concrete_neuron(log_alpha))
log_alpha[0, :2] = 5
print(multiclass_concrete_neuron(log_alpha))
|
import logging
import os
from pathlib import Path
from typing import Optional, cast
from flask import Flask, Response, render_template, request, send_from_directory
from quiz_bot import db
from quiz_bot.admin.cloud import CloudMaker
from quiz_bot.admin.flask import get_flask_app
from quiz_bot.admin.statistics import StatisticsCollector
logger = logging.getLogger(__name__)
def quizbot_app(cloud_maker: CloudMaker, statistics_collector: StatisticsCollector) -> Flask:
admin_folder = Path(__file__).parent
template_folder = admin_folder / "templates"
static_folder = admin_folder / "files"
if not static_folder.exists():
os.makedirs(static_folder.as_posix())
flask_app = get_flask_app(template_folder.as_posix())
@flask_app.teardown_request
def remove_session(exception: Optional[Exception]) -> None:
db.current_session.remove()
@flask_app.route('/')
def index() -> str:
picture_name = cloud_maker.save_cloud(static_folder)
return render_template(
"index.html",
page_name="T-Quiz Bot Overview",
picture_name=picture_name,
statistics=statistics_collector.statistics,
)
@flask_app.route('/files/<path:file>')
def get_file(file: str) -> Response:
return cast(Response, send_from_directory(static_folder.as_posix(), file))
@flask_app.route('/left_time')
def get_left_time() -> Response:
challenge_id = request.args.get('challenge')
if challenge_id is not None:
return Response(statistics_collector.get_left_time(challenge_id))
logger.warning("Got request '/left_time' without 'challenge' parameter!")
return Response(None)
return flask_app # noqa: R504
|
import numpy as np
def plot_scatter_basic(viz, env, args):
title = args[0] if len(args) > 0 else None
Y = np.random.rand(100)
return viz.scatter(
X=np.random.rand(100, 2),
Y=(Y[Y > 0] + 1.5).astype(int),
opts=dict(
legend=['Didnt', 'Update'],
xtickmin=-50,
xtickmax=50,
xtickstep=0.5,
ytickmin=-50,
ytickmax=50,
ytickstep=0.5,
markersymbol='cross-thin-open',
title=title
),
env=env
)
def plot_scatter_update_opts(viz, env, args):
old_scatter = plot_scatter_basic(viz, env, args)
viz.update_window_opts(
win=old_scatter,
opts=dict(
legend=['Apples', 'Pears'],
xtickmin=0,
xtickmax=1,
xtickstep=0.5,
ytickmin=0,
ytickmax=1,
ytickstep=0.5,
markersymbol='cross-thin-open',
),
env=env
)
# scatter plot example with various type of updates
def plot_scatter_append(viz, env, args):
title = args[0] if len(args) > 0 else None
colors = np.random.randint(0, 255, (2, 3,))
win = viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.rand(255) + 1.5).astype(int),
opts=dict(
markersize=10,
markercolor=colors,
legend=['1', '2'],
title=title
),
env=env
)
viz.scatter(
X=np.random.rand(255),
Y=np.random.rand(255),
opts=dict(
markersize=10,
markercolor=colors[0].reshape(-1, 3),
),
name='1',
update='append',
env=env,
win=win)
viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.rand(255) + 1.5).astype(int),
opts=dict(
markersize=10,
markercolor=colors,
),
update='append',
env=env,
win=win)
# 3d scatterplot with custom labels and ranges
def plot_scatter_3d(viz, env, args):
title = args[0] if len(args) > 0 else None
Y = np.random.rand(100)
viz.scatter(
X=np.random.rand(100, 3),
Y=(Y + 1.5).astype(int),
opts=dict(
legend=['Men', 'Women'],
markersize=5,
xtickmin=0,
xtickmax=2,
xlabel='Arbitrary',
xtickvals=[0, 0.75, 1.6, 2],
ytickmin=0,
ytickmax=2,
ytickstep=0.5,
ztickmin=0,
ztickmax=1,
ztickstep=0.5,
title=title
),
env=env
)
# 2D scatterplot with custom intensities (red channel)
def plot_scatter_custom_marker(viz, env, args):
title = args[0] if len(args) > 0 else None
viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.rand(255) + 1.5).astype(int),
opts=dict(
markersize=10,
markercolor=np.random.randint(0, 255, (2, 3,)),
title=title
),
env=env
)
# 2D scatter plot with custom colors per label:
def plot_scatter_custom_colors(viz, env, args):
title = args[0] if len(args) > 0 else None
viz.scatter(
X=np.random.rand(255, 2),
Y=(np.random.randn(255) > 0) + 1,
opts=dict(
markersize=10,
markercolor=np.floor(np.random.random((2, 3)) * 255),
markerborderwidth=0,
title=title
),
env=env
)
def plot_scatter_add_trace(viz, env, args):
title = args[0] if len(args) > 0 else None
win = viz.scatter(
X=np.random.rand(255, 2),
opts=dict(
markersize=10,
markercolor=np.random.randint(0, 255, (255, 3,)),
title=title
),
env=env
)
# assert that the window exists
assert viz.win_exists(win, env=env), 'Created window marked as not existing'
# add new trace to scatter plot
viz.scatter(
X=np.random.rand(255),
Y=np.random.rand(255),
win=win,
name='new_trace',
update='new',
env=env
)
# 1D scatter plot with text labels:
def plot_scatter_text_labels_1d(viz, env, args):
title = args[0] if len(args) > 0 else None
viz.scatter(
X=np.random.rand(10, 2),
opts=dict(
textlabels=['Label %d' % (i + 1) for i in range(10)],
title=title
),
env=env
)
# 2D scatter plot with text labels:
def plot_scatter_text_labels_2d(viz, env, args):
title = args[0] if len(args) > 0 else None
viz.scatter(
X=np.random.rand(10, 2),
Y=[1] * 5 + [2] * 3 + [3] * 2,
opts=dict(
legend=['A', 'B', 'C'],
textlabels=['Label %d' % (i + 1) for i in range(10)],
title=title
),
env=env
)
|
import fractions
N = int( input() )
radius = list( map( int, input().split(' ') ) )
for i in range( 1, N ):
if( radius[0] % radius[i] == 0 ):
print( '%d/1 ' %( radius[0] // radius[i] ) )
else:
print( '%s ' %( fractions.Fraction( radius[0], radius[i] ) ) )
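# Example run (sketch): with input "3" on the first line and radii "12 3 8" on the
# second, the loop prints the ratio of the first radius to each of the others:
# 12/3 -> "4/1" and 12/8 -> "3/2".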
|
from ....extensions import ExtensionMixin
from ...flarum.core.discussions import DiscussionFromBulk
class RealtimeDiscussionMixin(DiscussionFromBulk):
@property
def canViewWhoTypes(self) -> bool:
"""
Whether or not you can view who is typing in real time.
"""
return self.attributes.get("canViewWhoTypes", False)
class RealtimeExtension(ExtensionMixin):
"""
https://extiverse.com/extension/blomstra/realtime
"""
AUTHOR = 'blomstra'
NAME = 'realtime'
@classmethod
def mixin(cls):
super().mixin(DiscussionFromBulk, RealtimeDiscussionMixin)
|
from .sql import SqlBackend
__all__ = ('MySqlBackend',)
class MySqlBackend(SqlBackend):
name = 'MySQL'
reference_quote = '`'
supports_on_duplicate_key_update = True
|
# Display Module for Shmup
# Import Modules
import pygame
import consts
from os import path
# Load Background Graphics
background_img = pygame.image.load(path.join(consts.img_dir, 'background.png')).convert()
background = pygame.transform.scale(background_img, (consts.GAME_WIDTH, consts.GAME_HEIGHT))
background_rect = background.get_rect()
def draw_background(screen):
screen.blit(background, background_rect)
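# Note (sketch): background_img.convert() above needs an initialised display, so
# pygame.init() and pygame.display.set_mode(...) must run before this module is imported.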
|
# Generated by Django 2.1.12 on 2020-02-29 17:34
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('approved', models.BooleanField(default=True)),
('category', models.CharField(choices=[('admin', 'Admin'), ('user', 'User')], default='user', max_length=30)),
('age', models.IntegerField(blank=True, null=True, verbose_name='age')),
('notes', models.TextField(blank=True)),
('description_explicit', models.TextField(blank=True)),
('description_implicit', ckeditor.fields.RichTextField(blank=True)),
('description_toolbar', models.TextField(blank=True)),
],
),
]
|
# -*- coding: utf-8 -*-
"""NEE_seq2seq.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1h9MOT9VFNffvSPgi7OBHxKQteIk0TvXf
"""
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
# -*- coding: utf-8 -*-
"""
Code referenced from https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html#sphx-glr-intermediate-seq2seq-translation-tutorial-py
"""
import pandas as pd
import torch
import torch.nn as nn
import gensim
import nltk
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import torch.optim as optim
import random
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device = {}".format(device))
MAX_LENGTH = 40
print("MAX SEQ LEN = " + str(MAX_LENGTH))
url='https://raw.githubusercontent.com/sdp009/Sarcasm_Target_NEE_S2S/main/dataset/train_raw.csv'
print("Loading dataset ...")
train_data=pd.read_csv(url)
row_index=[i for i in range(0,train_data.shape[0])]
print(train_data.head())
# print(row_index)
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence:
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, (hidden,c_n) = self.lstm(output,(hidden,hidden))
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.lstm = nn.LSTM(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, (hidden,c_n) = self.lstm(output, (hidden,hidden))
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
def preprocessor(data):
"""
Tokenizing the sentences using regular expressions and NLTK library
Input
text: list of descriptions
Output:
alphabet_tokens: list of tokens
"""
__tokenization_pattern = r'''(?x) # set flag to allow verbose regexps
\$?\d+(?:\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| (?:[A-Z]\.)+ # abbreviations, e.g. U.S.A.
| \w+(?:-\w+)* # words with optional internal hyphens
| \.\.\. # ellipsis
| [][.,;"'?():_`-] # these are separate tokens; includes ], [
'''
## call it using tokenizer.tokenize
tokenizer = nltk.tokenize.regexp.RegexpTokenizer(__tokenization_pattern)
tokens = tokenizer.tokenize(data)
tokens=[token.lower() for token in tokens if token.isalpha()]
alphabet_tokens = [token for token in tokens if token.isalpha()]
#en_stopwords = set(nltk.corpus.stopwords.words('english'))
#non_stopwords = [word for word in alphabet_tokens if not word in en_stopwords]
#stemmer = nltk.stem.snowball.SnowballStemmer("english")
#stems = [str(stemmer.stem(word)) for word in non_stopwords]
return alphabet_tokens
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def prepareData(lang1, lang2,train_data):
input_lang=Lang(lang1)
output_lang=Lang(lang2)
for index,row in train_data.iterrows():
input_lang.addSentence(row['Sarcastic Comment'])
output_lang.addSentence(row['Target'])
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(row):
input_tensor = tensorFromSentence(input_lang, row['Sarcastic Comment'])
target_tensor = tensorFromSentence(output_lang, row['Target'])
return (input_tensor, target_tensor)
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.show()
def trainIters(encoder, decoder, n_iters, print_every=500, plot_every=500, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
sample_input = train_data.iloc[random.choice(row_index)]
print("\nSample Input = {}\n\n".format(sample_input))
print("\n========================================\n")
training_pairs = [tensorsFromPair(train_data.loc[random.choice(row_index)])
for i in range(n_iters)]
criterion = nn.NLLLoss()
print('Since :: Remaining :: % Complete :: AVG_LOSS')
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor, target_tensor, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
# print("\nPlotting loss")
showPlot(plot_losses)
return plot_losses
"""TRAIN"""
train_data['Sarcastic Comment']=train_data['Sarcastic Comment'].apply(preprocessor)
train_data['Target']=train_data['Target'].apply(preprocessor)
print("Preprocessed Training data")
print(train_data.head())
input_lang,output_lang=prepareData("Sarcastic Comment","Target",train_data)
hidden_size = 256
print("\nSetting *Coder Hidden size = " + str(hidden_size))
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words).to(device)
plot_losses_pts = trainIters(encoder1, attn_decoder1, 50000)
"""FINE-TUNING SAVED MODEL"""
train_data['Sarcastic Comment']=train_data['Sarcastic Comment'].apply(preprocessor)
train_data['Target']=train_data['Target'].apply(preprocessor)
print("Preprocessed Training data")
print(train_data.head())
input_lang,output_lang=prepareData("Sarcastic Comment","Target",train_data)
encoder_from_saved = torch.load('NEE_encoder.pt')
attn_decoder_from_saved = torch.load('NEE_attn_decoder.pt')
loss_history = trainIters(encoder_from_saved, attn_decoder_from_saved, 65000)
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
fig = plt.figure()
overall_loss_history = plot_losses_pts + loss_history
plt.plot(overall_loss_history)
# For now save entire model, as we need to switch device later
torch.save(encoder_from_saved, "NEE_encoder_v2.pt")
torch.save(attn_decoder_from_saved, "NEE_attn_decoder_v2.pt")
"""EVAL"""
from difflib import SequenceMatcher
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
with torch.no_grad():
input_tensor = tensorFromSentence(input_lang, sentence)
input_length = input_tensor.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_tensor[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device) # SOS
decoder_hidden = encoder_hidden
decoded_words = []
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attentions= decoder(
decoder_input, decoder_hidden, encoder_outputs)
#decoder_attentions[di] = decoder_attentions.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, sparse_set=False):
Match_ratio = 0; entities = 0
if sparse_set:
for index,row in train_data[935:].iterrows():
print('>', ' '.join(row['Sarcastic Comment']))
print('=', ' '.join(row['Target']))
output_words, attentions = evaluate(encoder, decoder, row['Sarcastic Comment'])
output_sentence = ' '.join(output_words)
print('Prediction >> ', output_sentence)
Match_ratio += SequenceMatcher(None, row['Target'], output_sentence.split('<EOS>')[0]).ratio()
entities = entities + 1
print('Match ratio = {}\n\n'.format(Match_ratio))
else:
for index,row in train_data[801:].iterrows():
print('>', ' '.join(row['Sarcastic Comment']))
print('=', ' '.join(row['Target']))
output_words, attentions = evaluate(encoder, decoder, row['Sarcastic Comment'])
output_sentence = ' '.join(output_words)
print('Prediction >> ', output_sentence)
Match_ratio += SequenceMatcher(None, row['Target'], output_sentence.split('<EOS>')[0]).ratio()
entities = entities + 1
print('Match ratio = {}\n\n'.format(Match_ratio))
print("DIS-Similarity ratio = {}".format(Match_ratio/entities))
evaluateRandomly(encoder_from_saved,attn_decoder_from_saved)
"""TEST SAVED MODEL"""
encoder_eval = torch.load('NEE_encoder_v2.pt')
attn_decoder_eval = torch.load('NEE_attn_decoder_v2.pt')
evaluateRandomly(encoder_eval.eval(),attn_decoder_eval.eval(), sparse_set=True)
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import brewer2mpl
bmap = brewer2mpl.get_map('Set1', 'qualitative', 3)
colors = bmap.mpl_colors
plt.style.use('ggplot')
VAR_THRESHOLD = 1e-2
def plot_vs_gt_shapes(vae, shapes_dataset, save, z_inds=None):
dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)
N = len(dataset_loader.dataset) # number of data samples
K = vae.z_dim # number of latent variables
nparams = vae.q_dist.nparams
vae.eval()
# print('Computing q(z|x) distributions.')
qz_params = torch.Tensor(N, K, nparams)
n = 0
for xs in dataset_loader:
batch_size = xs.size(0)
xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
n += batch_size
qz_params = qz_params.view(3, 6, 40, 32, 32, K, nparams)
# z_j is inactive if Var_x(E[z_j|x]) < eps.
qz_means = qz_params[:, :, :, :, :, :, 0]
var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
print('Active units: ' + ','.join(map(str, active_units.tolist())))
n_active = len(active_units)
print('Number of active units: {}/{}'.format(n_active, vae.z_dim))
if z_inds is None:
z_inds = active_units
# subplots where subplot[i, j] is gt_i vs. z_j
mean_scale = qz_means.mean(2).mean(2).mean(2) # (shape, scale, latent)
mean_rotation = qz_means.mean(1).mean(2).mean(2) # (shape, rotation, latent)
mean_pos = qz_means.mean(0).mean(0).mean(0) # (pos_x, pos_y, latent)
fig = plt.figure(figsize=(3, len(z_inds))) # default is (8,6)
gs = gridspec.GridSpec(len(z_inds), 3)
gs.update(wspace=0, hspace=0) # set the spacing between axes.
vmin_pos = torch.min(mean_pos)
vmax_pos = torch.max(mean_pos)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[i * 3])
ax.imshow(mean_pos[:, :, j].numpy(), cmap=plt.get_cmap('coolwarm'), vmin=vmin_pos, vmax=vmax_pos)
ax.set_xticks([])
ax.set_yticks([])
ax.set_ylabel(r'$z_' + str(j) + r'$')
if i == len(z_inds) - 1:
ax.set_xlabel(r'pos')
vmin_scale = torch.min(mean_scale)
vmax_scale = torch.max(mean_scale)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[1 + i * 3])
ax.plot(mean_scale[0, :, j].numpy(), color=colors[2])
ax.plot(mean_scale[1, :, j].numpy(), color=colors[0])
ax.plot(mean_scale[2, :, j].numpy(), color=colors[1])
ax.set_ylim([vmin_scale, vmax_scale])
ax.set_xticks([])
ax.set_yticks([])
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
if i == len(z_inds) - 1:
ax.set_xlabel(r'scale')
vmin_rotation = torch.min(mean_rotation)
vmax_rotation = torch.max(mean_rotation)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[2 + i * 3])
ax.plot(mean_rotation[0, :, j].numpy(), color=colors[2])
ax.plot(mean_rotation[1, :, j].numpy(), color=colors[0])
ax.plot(mean_rotation[2, :, j].numpy(), color=colors[1])
ax.set_ylim([vmin_rotation, vmax_rotation])
ax.set_xticks([])
ax.set_yticks([])
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
if i == len(z_inds) - 1:
ax.set_xlabel(r'rotation')
fig.text(0.5, 0.03, 'Ground Truth', ha='center')
fig.text(0.01, 0.5, 'Learned Latent Variables ', va='center', rotation='vertical')
plt.savefig(save)
plt.close()
def plot_vs_gt_faces(vae, faces_dataset, save, z_inds=None):
dataset_loader = DataLoader(faces_dataset, batch_size=1000, num_workers=1, shuffle=False)
N = len(dataset_loader.dataset) # number of data samples
K = vae.z_dim # number of latent variables
nparams = vae.q_dist.nparams
vae.eval()
# print('Computing q(z|x) distributions.')
qz_params = torch.Tensor(N, K, nparams)
n = 0
for xs in dataset_loader:
batch_size = xs.size(0)
xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
n += batch_size
qz_params = qz_params.view(50, 21, 11, 11, K, nparams)
# z_j is inactive if Var_x(E[z_j|x]) < eps.
qz_means = qz_params[:, :, :, :, :, 0]
var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
print('Active units: ' + ','.join(map(str, active_units.tolist())))
n_active = len(active_units)
print('Number of active units: {}/{}'.format(n_active, vae.z_dim))
if z_inds is None:
z_inds = active_units
# subplots where subplot[i, j] is gt_i vs. z_j
mean_pose_az = qz_means.mean(3).mean(2).mean(0) # (pose_az, latent)
mean_pose_el = qz_means.mean(3).mean(1).mean(0) # (pose_el, latent)
mean_light_az = qz_means.mean(2).mean(1).mean(0) # (light_az, latent)
fig = plt.figure(figsize=(len(z_inds), 3)) # default is (8,6)
gs = gridspec.GridSpec(3, len(z_inds))
gs.update(wspace=0, hspace=0) # set the spacing between axes.
vmin_scale = torch.min(mean_pose_az)
vmax_scale = torch.max(mean_pose_az)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[i])
ax.plot(mean_pose_az[:, j].numpy())
ax.set_ylim([vmin_scale, vmax_scale])
ax.set_xticks([])
ax.set_yticks([])
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
if i == 0:
ax.set_ylabel(r'azimuth')
vmin_scale = torch.min(mean_pose_el)
vmax_scale = torch.max(mean_pose_el)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[len(z_inds) + i])
ax.plot(mean_pose_el[:, j].numpy())
ax.set_ylim([vmin_scale, vmax_scale])
ax.set_xticks([])
ax.set_yticks([])
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
if i == 0:
ax.set_ylabel(r'elevation')
vmin_scale = torch.min(mean_light_az)
vmax_scale = torch.max(mean_light_az)
for i, j in enumerate(z_inds):
ax = fig.add_subplot(gs[2 * len(z_inds) + i])
ax.plot(mean_light_az[:, j].numpy())
ax.set_ylim([vmin_scale, vmax_scale])
ax.set_xticks([])
ax.set_yticks([])
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
if i == 0:
ax.set_ylabel(r'lighting')
plt.suptitle('GT Factors vs. Latent Variables')
plt.savefig(save)
plt.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-checkpt', required=True)
parser.add_argument('-zs', type=str, default=None)
parser.add_argument('-gpu', type=int, default=0)
parser.add_argument('-save', type=str, default='latent_vs_gt.pdf')
parser.add_argument('-elbo_decomp', action='store_true')
args = parser.parse_args()
from elbo_decomposition import elbo_decomposition
import lib.dist as dist
import lib.flows as flows
from vae_quant import VAE, setup_data_loaders
def load_model_and_dataset(checkpt_filename):
print('Loading model and dataset.')
checkpt = torch.load(checkpt_filename, map_location=lambda storage, loc: storage)
args = checkpt['args']
state_dict = checkpt['state_dict']
# model
if not hasattr(args, 'dist') or args.dist == 'normal':
prior_dist = dist.Normal()
q_dist = dist.Normal()
elif args.dist == 'laplace':
prior_dist = dist.Laplace()
q_dist = dist.Laplace()
elif args.dist == 'flow':
prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=4)
q_dist = dist.Normal()
vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
vae.load_state_dict(state_dict, strict=False)
# dataset loader
loader = setup_data_loaders(args)
return vae, loader, args
z_inds = list(map(int, args.zs.split(','))) if args.zs is not None else None
torch.cuda.set_device(args.gpu)
vae, dataset_loader, cpargs = load_model_and_dataset(args.checkpt)
if args.elbo_decomp:
elbo_decomposition(vae, dataset_loader)
eval('plot_vs_gt_' + cpargs.dataset)(vae, dataset_loader.dataset, args.save, z_inds)
def plot_vs_gt_celeba(vae, celeba_dataset, save, z_inds=None):
# no ground truth factors of variation...
pass
|
# Generated by Django 2.2.16 on 2021-09-10 08:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='bio',
field=models.TextField(blank=True, null=True, verbose_name='Биография'),
),
]
|
import json
import socket
from .flow import *
from .util import *
class SmartBulb(object):
def __init__(self, host, port=55443, timeout=5):
'''
Create a new Bulb instance
:param str host: host name or ip address on which the device listens
        :param int port: port on which the device listens (default: 55443)
:param int timeout: socket timeout (default: 5)
'''
self._host = host
self._port = port
self._timeout = timeout
self.__cmd_id = 0
self.__socket = None
@property
def _cmd_id(self):
'''
Get next command id in sequence
:return: command id
'''
self.__cmd_id += 1
return self.__cmd_id - 1
@property
def _socket(self):
'''
Get, optionally create, the communication socket
:return: the communication socket
'''
if self.__socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.settimeout(self._timeout)
self.__socket.connect((self._host, self._port))
return self.__socket
def send_command(self, method, params=None):
'''
Request information and return the response
:param str method: control method id
:param list params: list of params for the specified method
:return: the command response
'''
command = {'id': self._cmd_id, 'method': method, 'params': params}
try:
self._socket.send((json.dumps(command) + '\r\n').encode('utf8'))
except socket.error as ex:
self.__socket.close()
self.__socket = None
raise_from(Exception('A socket error occurred when sending the command.'), ex)
# The bulb will send us updates on its state in addition to responses,
# so we want to make sure that we read until we see an actual response.
response = None
while response is None:
try:
data = self._socket.recv(4 * 1024)
except socket.error:
self.__socket.close()
self.__socket = None
response = {'error': 'Bulb closed the connection.'}
break
for line in data.split(b'\r\n'):
if not line:
continue
try:
line = json.loads(line.decode('utf8'))
except ValueError:
response = {'result': ['invalid command']}
if line.get('method') != 'props':
response = line
return response
@property
def name(self):
'''
Get the device name
:return: device name
'''
return self.send_command('get_prop', ['name'])['result']
@name.setter
def name(self, name):
'''
Set the device name
:param name: new name
'''
self.send_command('set_name', [name])
@property
def is_on(self):
'''
Get whether device is on
:return: True if device is on, False otherwise
'''
return self.send_command('get_prop', ['power'])['result'][0] == 'on'
def power_on(self):
'''
Turn the bulb on
'''
self.send_command('set_power', ['on'])
def power_off(self):
'''
Turn the bulb off
'''
self.send_command('set_power', ['off'])
def set_rgb(self, red, green, blue):
'''
Set the bulb's RGB value
:param int red: the red value to set (0-255)
:param int green: the green value to set (0-255)
:param int blue: the blue value to set (0-255)
'''
red = clamp(red, 0, 255)
green = clamp(green, 0, 255)
blue = clamp(blue, 0, 255)
self.send_command('set_rgb', [red * 65536 + green * 256 + blue])
def start_flow(self, flow):
'''
Start a flow
:param yeelight.Flow flow: the Flow instance to start
'''
if not isinstance(flow, Flow):
raise ValueError('Argument is not a Flow instance')
self.send_command('start_cf', [flow.count * len(flow.transitions), flow.action.value, flow.expression])
def stop_flow(self):
'''
Stop a flow
'''
self.send_command('stop_cf', [])
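# Usage sketch (not part of the original module): the IP address is a placeholder
# for a Yeelight bulb on the local network with "LAN Control" enabled.
if __name__ == '__main__':
    bulb = SmartBulb('192.168.1.50')
    if not bulb.is_on:
        bulb.power_on()
    bulb.set_rgb(255, 64, 0)  # warm orange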
|
names = input("Enter names seperated by commas : ").split(",")
assignments = [int(x) for x in input("Enter assignment counts separated by commas : ").split(",")]
grades = [int(y) for y in input("Enter grades separated by commas : ").split(",")]
new_grades = []
for i in range(len(assignments)):
new_grades.append(grades[i] + 2 * assignments[i])
#print(new_grades)
# message string to be used for each student
# HINT: use .format() with this string in your for loop
message = "Hi {},\n\nThis is a reminder that you have {} assignments left to \
submit before you can graduate. You're current grade is {} and can increase \
to {} if you submit all assignments before the due date.\n\n"
# write a for loop that iterates through each set of names, assignments, and grades to print each student's message
for i in range(len(assignments)) :
print(message.fomate(names[i],assignments[i],grades[i],new_grades[i])
|
#!/usr/bin/env python
import os
import sys
import setuptools
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(name='pyflowater',
version='0.5.0',
packages=[ 'pyflowater' ],
description='Python interface for Flo by Moen API',
# long_description=long_description,
url='https://github.com/rsnodgrass/pyflowater',
author='Ryan Snodgrass',
author_email='rsnodgrass@gmail.com',
license='Apache Software License',
install_requires=[ 'requests>=2.0', 'google-cloud-firestore' ],
keywords=[ 'flo', 'home automation', 'water monitoring' ],
zip_safe=True,
classifiers=[ "Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
|
from LNMarkets import User
def test_userInformation(token):
userInfo = User.userInformation(token)
assert 'show_leaderboard' in userInfo
show_leaderboard = userInfo['show_leaderboard']
assert not User.updateUser(token, leaderboard=False)['show_leaderboard']
assert User.updateUser(token, leaderboard=True)['show_leaderboard']
User.updateUser(token, leaderboard=show_leaderboard)
|
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2022.05.22
#
# detect click on circle button and print information
#
import tkinter as tk
# --- functions ---
def on_click(event):
#print('event:', event)
#print('x:', event.x)
#print('y:', event.y)
x1, y1, x2, y2 = canvas.bbox(button_id)
#print('bbox [x1, y1, x2, y2]:', x1, y1, x2, y2)
#if (x1 <= event.x <= x2) and (y1 <= event.y <= y2):
# print('clicked rectangle [x1, x2, y2, y2]:', [x1, x2, y2, y2])
center_x = (x2+x1)//2
center_y = (y2+y1)//2
r = (x2-x1)//2
temp_x = event.x - center_x
temp_y = event.y - center_y
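    # the click is inside the circular button when its squared distance from the center is at most r**2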
if temp_x**2 + temp_y**2 <= r**2:
print('clicked circle [cx, cy, r]:', [center_x, center_y, r])
# --- main ---
root = tk.Tk()
canvas = tk.Canvas(root, width=512, height=512)
canvas.pack()
canvas.bind('<Button-1>', on_click)
background = tk.PhotoImage(file='images/lenna.png')
background_id = canvas.create_image((0, 0), image=background, anchor='nw')
# ---
button_img = tk.PhotoImage(file='images/hal9000.png')
button_id = canvas.create_image((256, 125), image=button_img, anchor='center')
# ---
root.mainloop()
|
import time
import playsound
import youtubesearchpython
import pytube
import random
class SongList:
    # contains the id, duration and title of all the songs in the current playlist
    songs = []
    current_song_number: int
    playMusicThreadObject = None
def __init__(self,
path = "./Downloads/",
searchstr = "English Songs",
shuffle = False,
repeatone = False,
repeatqueue = False
):
self.path = path
self.searchObject = youtubesearchpython.VideosSearch( searchstr, limit = 1) # it will fetch a single song at a time with requested type
self.shuffle = shuffle
self.current_song_number = -1
self.repeatqueue = repeatqueue
self.repeatone = repeatone
"""
Returns a dict containing song_id, duration and title as a string that
needed to play next based on the user choices
"""
def get_next_song(self) -> dict:
if self.shuffle:
return self.songs[random.randrange(0, len(self.songs))]
if self.repeatone:
return self.songs[self.current_song_number]
        if self.repeatqueue:
            if self.current_song_number == len(self.songs) - 1:
                # wrap to -1 so the increment below restarts the queue at song 0
                self.current_song_number = -1
        self.current_song_number += 1
        return self.songs[self.current_song_number]
def playMusic(SongListObject: SongList):
number_of_attempts = 0
path = SongListObject.path
print("I am Here at Playmusic")
print(SongListObject.songs)
    while True:
        if len(SongListObject.songs) == 0:
            if number_of_attempts > 30:
                # exit if no song has been available to play for the last ~5 minutes
                print("Internet speed is too slow :-( , try again later")
                exit(1)
            print("I am Here at If")
            print(SongListObject.songs)
            print("Please wait for some time while we are downloading some songs")
            number_of_attempts += 1
            time.sleep(10)
        else:
            song = SongListObject.get_next_song()
            print("Currently Playing: ", song["title"])
            print("Duration :", song["duration"])
            playsound.playsound(path + song["title"] + ".mp4")
            number_of_attempts = 0
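
# --- Hedged usage sketch (not part of the original module) ---
# Wires SongList and playMusic() together, assuming some other component (not
# shown here) downloads tracks into `path` and appends their metadata dicts
# ({"song_id", "duration", "title"}) to SongList.songs.
if __name__ == "__main__":
    import threading

    playlist = SongList(searchstr="English Songs", repeatqueue=True)
    playlist.playMusicThreadObject = threading.Thread(
        target=playMusic, args=(playlist,), daemon=True
    )
    playlist.playMusicThreadObject.start()
    playlist.playMusicThreadObject.join()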
|
#!/usr/bin/env python3
# Copyright (c) 2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from test_framework.mininode import logger
from test_framework.test_framework import LuckyCoinOROTestFramework
from test_framework.util import force_finish_mnsync, connect_nodes
'''
feature_llmq_data_recovery.py
Tests automated recovery of DKG data and the related command line parameters:
-llmq-data-recovery
-llmq-qvvec-sync
'''
# LLMQ types available in regtest
llmq_test = 100
llmq_test_v17 = 102
llmq_type_strings = {llmq_test: 'llmq_test', llmq_test_v17: 'llmq_test_v17'}
class QuorumDataRecoveryTest(LuckyCoinOROTestFramework):
def set_test_params(self):
extra_args = [["-vbparams=dip0020:0:999999999999:10:8:6:5"] for _ in range(9)]
self.set_luckycoinoro_test_params(9, 7, fast_dip3_enforcement=True, extra_args=extra_args)
self.set_luckycoinoro_llmq_test_params(4, 3)
def restart_mn(self, mn, reindex=False, qvvec_sync=[], qdata_recovery_enabled=True):
args = self.extra_args[mn.nodeIdx] + ['-masternodeblsprivkey=%s' % mn.keyOperator,
'-llmq-data-recovery=%d' % qdata_recovery_enabled]
if reindex:
args.append('-reindex')
for llmq_sync in qvvec_sync:
args.append('-llmq-qvvec-sync=%s:%d' % (llmq_type_strings[llmq_sync[0]], llmq_sync[1]))
self.restart_node(mn.nodeIdx, args)
force_finish_mnsync(mn.node)
connect_nodes(mn.node, 0)
self.sync_blocks()
def restart_mns(self, mns=None, exclude=[], reindex=False, qvvec_sync=[], qdata_recovery_enabled=True):
for mn in self.mninfo if mns is None else mns:
if mn not in exclude:
self.restart_mn(mn, reindex, qvvec_sync, qdata_recovery_enabled)
self.wait_for_sporks_same()
def test_mns(self, quorum_type_in, quorum_hash_in, valid_mns=[], all_mns=[], test_secret=True, expect_secret=True,
recover=False, timeout=120):
for mn in all_mns:
if mn not in valid_mns:
assert not self.test_mn_quorum_data(mn, quorum_type_in, quorum_hash_in, test_secret, False)
self.wait_for_quorum_data(valid_mns, quorum_type_in, quorum_hash_in, test_secret, expect_secret, recover, timeout)
def get_mn(self, protx_hash):
for mn in self.mninfo:
if mn.proTxHash == protx_hash:
return mn
return None
def get_member_mns(self, quorum_type, quorum_hash):
members = self.nodes[0].quorum("info", quorum_type, quorum_hash)["members"]
mns = []
for member in members:
if member["valid"]:
mns.append(self.get_mn(member["proTxHash"]))
return mns
def get_subset_only_in_left(self, quorum_members_left, quorum_members_right):
quorum_members_subset = quorum_members_left.copy()
for mn in list(set(quorum_members_left) & set(quorum_members_right)):
quorum_members_subset.remove(mn)
return quorum_members_subset
def test_llmq_qvvec_sync(self, llmq_sync_entries):
self.log.info("Test with %d -llmq-qvvec-sync option(s)" % len(llmq_sync_entries))
for llmq_sync in llmq_sync_entries:
llmq_type = llmq_sync[0]
llmq_sync_mode = llmq_sync[1]
self.log.info("Validate -llmq-qvvec-sync=%s:%d" % (llmq_type_strings[llmq_type], llmq_sync_mode))
# First restart with recovery thread triggering disabled
self.restart_mns(qdata_recovery_enabled=False)
# If mode=0 i.e. "Sync always" all nodes should request the qvvec from new quorums
if llmq_sync_mode == 0:
quorum_hash = self.mine_quorum()
member_mns = self.get_member_mns(llmq_type, quorum_hash)
                # So far only the members of the quorum should have the quorum verification vector
self.test_mns(llmq_type, quorum_hash, valid_mns=member_mns, all_mns=self.mninfo,
test_secret=False, recover=False)
# Now restart with recovery enabled
self.restart_mns(qvvec_sync=llmq_sync_entries)
# All other nodes should now request the qvvec from the quorum
self.test_mns(llmq_type, quorum_hash, valid_mns=self.mninfo, test_secret=False, recover=True)
# If mode=1 i.e. "Sync only if type member" not all nodes should request the qvvec from quorum 1 and 2
elif llmq_sync_mode == 1:
# Create quorum_1 and a quorum_2 so that we have subsets (members_only_in_1, members_only_in_2) where
# each only contains nodes that are members of quorum_1 but not quorum_2 and vice versa
quorum_hash_1 = None
quorum_hash_2 = None
members_only_in_1 = []
members_only_in_2 = []
while len(members_only_in_1) == 0 or len(members_only_in_2) == 0:
quorum_hash_1 = self.mine_quorum()
quorum_hash_2 = self.mine_quorum()
member_mns_1 = self.get_member_mns(llmq_type, quorum_hash_1)
member_mns_2 = self.get_member_mns(llmq_type, quorum_hash_2)
members_only_in_1 = self.get_subset_only_in_left(member_mns_1, member_mns_2)
members_only_in_2 = self.get_subset_only_in_left(member_mns_2, member_mns_1)
# So far the nodes of quorum_1 shouldn't have the quorum verification vector of quorum_2 and vice versa
self.test_mns(llmq_type, quorum_hash_2, valid_mns=[], all_mns=members_only_in_1, expect_secret=False)
self.test_mns(llmq_type, quorum_hash_1, valid_mns=[], all_mns=members_only_in_2, expect_secret=False)
# Now restart with recovery enabled
self.restart_mns(qvvec_sync=llmq_sync_entries)
# Members which are only in quorum 2 should request the qvvec from quorum 1 from the members of quorum 1
self.test_mns(llmq_type, quorum_hash_1, valid_mns=members_only_in_2, expect_secret=False, recover=True)
# Members which are only in quorum 1 should request the qvvec from quorum 2 from the members of quorum 2
self.test_mns(llmq_type, quorum_hash_2, valid_mns=members_only_in_1, expect_secret=False, recover=True)
def run_test(self):
node = self.nodes[0]
node.spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
node.spork("SPORK_21_QUORUM_ALL_CONNECTED", 0)
self.wait_for_sporks_same()
self.activate_dip8()
logger.info("Test automated DGK data recovery")
# This two nodes will remain the only ones with valid DKG data
last_resort_test = None
last_resort_v17 = None
while True:
# Mine the quorums used for the recovery test
quorum_hash_recover = self.mine_quorum()
# Get all their member masternodes
member_mns_recover_test = self.get_member_mns(llmq_test, quorum_hash_recover)
member_mns_recover_v17 = self.get_member_mns(llmq_test_v17, quorum_hash_recover)
# All members should initially be valid
self.test_mns(llmq_test, quorum_hash_recover, valid_mns=member_mns_recover_test)
self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=member_mns_recover_v17)
try:
# As last resorts find one node which is in llmq_test but not in llmq_test_v17 and one other vice versa
last_resort_test = self.get_subset_only_in_left(member_mns_recover_test, member_mns_recover_v17)[0]
last_resort_v17 = self.get_subset_only_in_left(member_mns_recover_v17, member_mns_recover_test)[0]
break
except IndexError:
continue
assert last_resort_test != last_resort_v17
        # Reindex all other nodes to drop their DKG data; first run with recovery disabled to make sure disabling
        # works as expected
recover_members = member_mns_recover_test + member_mns_recover_v17
exclude_members = [last_resort_test, last_resort_v17]
# Reindex all masternodes but exclude the last_resort for both testing quorums
self.restart_mns(exclude=exclude_members, reindex=True, qdata_recovery_enabled=False)
# Validate all but one are invalid members now
self.test_mns(llmq_test, quorum_hash_recover, valid_mns=[last_resort_test], all_mns=member_mns_recover_test)
self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=[last_resort_v17], all_mns=member_mns_recover_v17)
# If recovery would be enabled it would trigger after the mocktime bump / mined block
self.bump_mocktime(self.quorum_data_request_expiration_timeout + 1)
node.generate(1)
time.sleep(10)
# Make sure they are still invalid
self.test_mns(llmq_test, quorum_hash_recover, valid_mns=[last_resort_test], all_mns=member_mns_recover_test)
self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=[last_resort_v17], all_mns=member_mns_recover_v17)
# Mining a block should not result in a chainlock now because the responsible quorum shouldn't have enough
# valid members.
self.wait_for_chainlocked_block(node, node.generate(1)[0], False, 5)
# Now restart with recovery enabled
self.restart_mns(mns=recover_members, exclude=exclude_members, reindex=True, qdata_recovery_enabled=True)
# Validate that all invalid members recover. Note: recover=True leads to mocktime bumps and mining while waiting
# which trigger CQuorumManger::TriggerQuorumDataRecoveryThreads()
self.test_mns(llmq_test, quorum_hash_recover, valid_mns=member_mns_recover_test, recover=True)
self.test_mns(llmq_test_v17, quorum_hash_recover, valid_mns=member_mns_recover_v17, recover=True)
# Mining a block should result in a chainlock now because the quorum should be healed
self.wait_for_chainlocked_block(node, node.getbestblockhash())
logger.info("Test -llmq-qvvec-sync command line parameter")
        # Run with one type at a time and then with both available (for regtest) types together; each call generates
        # new quorums and restarts the nodes with the corresponding parameters
self.test_llmq_qvvec_sync([(llmq_test, 0)])
self.test_llmq_qvvec_sync([(llmq_test_v17, 1)])
self.test_llmq_qvvec_sync([(llmq_test, 0), (llmq_test_v17, 1)])
logger.info("Test invalid command line parameter values")
node.stop_node()
node.wait_until_stopped()
# Test -llmq-qvvec-sync entry format
node.assert_start_raises_init_error(["-llmq-qvvec-sync="],
"Error: Invalid format in -llmq-qvvec-sync:")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=0"],
"Error: Invalid format in -llmq-qvvec-sync: 0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=0:"],
"Error: Invalid format in -llmq-qvvec-sync: 0:")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=:0"],
"Error: Invalid format in -llmq-qvvec-sync: :0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=0:0:0"],
"Error: Invalid format in -llmq-qvvec-sync: 0:0:0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=0::"],
"Error: Invalid format in -llmq-qvvec-sync: 0::")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=::0"],
"Error: Invalid format in -llmq-qvvec-sync: ::0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=:0:"],
"Error: Invalid format in -llmq-qvvec-sync: :0:")
# Test llmqType
node.assert_start_raises_init_error(["-llmq-qvvec-sync=0:0"],
"Error: Invalid llmqType in -llmq-qvvec-sync: 0:0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq-test:0"],
"Error: Invalid llmqType in -llmq-qvvec-sync: llmq-test:0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=100:0", "-llmq-qvvec-sync=0"],
"Error: Invalid llmqType in -llmq-qvvec-sync: 100:0")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq_test:0", "-llmq-qvvec-sync=llmq_test:0"],
"Error: Duplicated llmqType in -llmq-qvvec-sync: llmq_test:0")
# Test mode
node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq_test:-1"],
"Error: Invalid mode in -llmq-qvvec-sync: llmq_test:-1")
node.assert_start_raises_init_error(["-llmq-qvvec-sync=llmq_test:2"],
"Error: Invalid mode in -llmq-qvvec-sync: llmq_test:2")
if __name__ == '__main__':
QuorumDataRecoveryTest().main()
|
"""
Mask RCNN
Configurations and data loading code for disease dataset from airport.
Copyright (c) 2020 Chienping Tsung
Licensed under the MIT License (see LICENSE for details)
Written by Chienping Tsung
--------------------------------------------------
Usage:
run from the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 disease.py train --dataset=/path/to/disease/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 disease.py train --dataset=/path/to/disease/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 disease.py train --dataset=/path/to/disease/dataset --weights=imagenet
# Apply detection to an image
python3 disease.py detect --weights=/path/to/weights/file.h5 --image=<URL or path to file>
"""
import os
import sys
import json
import numpy as np
import skimage.draw
import skimage.io
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
# Default path for saving logs and checkpoints.
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
# Path to coco trained weights file.
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Disease dictionary.
# The numbers should be continuous from 1.
DISEASE_DIC = {
'crack': 1
}
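
# Hedged sanity check (not in the original file): verifies the requirement
# stated above that the class ids are continuous and start at 1.
assert sorted(DISEASE_DIC.values()) == list(range(1, len(DISEASE_DIC) + 1)), \
    "DISEASE_DIC ids must be continuous starting from 1"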
##################################################
# Configurations
##################################################
class DiseaseConfig(Config):
"""Configurations for disease dataset from CAUC.
It's designed for a specific computer.
CPU: i7-8700
RAM: 64G
GPU: GTX 1080Ti with 11G RAM
"""
    # Name used to identify the configuration.
    NAME = "disease"
    # Adjust for different GPUs.
    IMAGES_PER_GPU = 1
    # Number of classes (including background).
NUM_CLASSES = 1 + len(DISEASE_DIC)
# Number of training steps per epoch.
STEPS_PER_EPOCH = 1000
# Threshold of detection confidence.
DETECTION_MIN_CONFIDENCE = 0.7
# Extra configurations.
GPU_COUNT = 1
VALIDATION_STEPS = 50
BACKBONE = "resnet50"
RPN_ANCHOR_SCALES = (64, 128, 256, 512, 1024)
RPN_ANCHOR_RATIOS = [0.18, 0.5, 1, 2, 5.56]
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1856
TRAIN_ROIS_PER_IMAGE = 100
##################################################
# Dataset
##################################################
class DiseaseDataset(utils.Dataset):
def load_disease(self, dataset_dir, subset):
"""Load a subset of disease dataset.
dataset_dir: directory of dataset
subset: train or val subset
"""
# Add classes.
for d in DISEASE_DIC:
self.add_class("disease", DISEASE_DIC[d], d)
# Train or val dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# labelme (3.16.7) saves each annotation in the form:
# {
# "shapes": [
# {
# "label": "slab",
# "points": [
# [
# 126.4797507788162,
# 1.8691588785046729
# ],
# ... more points ...
# ],
# ... more polygon informations ...
# },
# ... more shapes ...
# ],
# "imagePath": "11635_48_17549.bmp",
# "imageHeight": 900,
# "imageWidth": 1800,
# ... more attributions ...
# }
# We mostly care about the x and y coordinates of each shape.
for i in os.listdir(dataset_dir):
# Select json file.
if not i.lower().endswith('.json'):
continue
# Load annotation from file.
annotation = json.load(open(os.path.join(dataset_dir, i)))
# Assemble x and y coordinates, and filter the required shape.
polygons = [
{
'all_points_x': [p[0] for p in shape['points']],
'all_points_y': [p[1] for p in shape['points']],
'label': shape['label'],
'shape_type': shape['shape_type']
} for shape in annotation['shapes']
if shape['label'] in DISEASE_DIC.keys() and
shape['shape_type'] in ['polygon', 'circle']
]
# Assemble image path.
image_path = os.path.join(dataset_dir, annotation['imagePath'])
# Assemble image width and height.
try:
height = annotation['imageHeight']
width = annotation['imageWidth']
except KeyError as e:
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"disease",
image_id=annotation['imagePath'],
path=image_path,
width=width, height=height,
polygons=polygons
)
def load_mask(self, image_id):
"""Generate instance masks for an image.
:returns
masks: A bool array of shape[height, width, instance_count] with one mask per instance.
class_ids: A 1D array of class IDs of the instance masks.
"""
# If not a disease dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "disease":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape.
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])], dtype=np.uint8)
class_ids = []
for i, p in enumerate(info["polygons"]):
if p['shape_type'] == 'polygon':
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
elif p['shape_type'] == 'circle':
rr, cc = skimage.draw.circle(
p['all_points_y'][0], p['all_points_x'][0],
((p['all_points_y'][0] - p['all_points_y'][1])**2 + (p['all_points_x'][0] - p['all_points_x'][1])**2)**0.5
)
else:
raise Exception("Undefined shape_type: {}".format(p['shape_type']))
mask[rr, cc, i] = 1
class_ids.append(DISEASE_DIC[p['label']])
return mask.astype(np.bool), np.array(class_ids, dtype=np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "disease":
return info["path"]
        else:
            return super(self.__class__, self).image_reference(image_id)
##################################################
# train and detect
##################################################
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = DiseaseDataset()
dataset_train.load_disease(args.dataset, "train")
dataset_train.prepare()
# Validation dataset.
dataset_val = DiseaseDataset()
dataset_val.load_disease(args.dataset, "val")
dataset_val.prepare()
# Augmentation configurations
import imgaug.augmenters as iaa
aug = iaa.Sequential(
[
iaa.Fliplr(0.5), # horizontal flips
iaa.Flipud(0.5), # vertical flips
iaa.Crop(percent=(0, 0.1)), # random crops
# Strengthen or weaken the contrast of images.
iaa.LinearContrast((0.75, 1.5)),
# Make images brighter or darker.
iaa.Multiply((0.8, 1.2)),
# Apply affine transformations to images.
iaa.Affine(
rotate=(-25, 25)
)
], random_order=True
)
# Start training here.
print("Start training.")
model.train(
dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=60,
layers='all',
augmentation=aug
)
def detect(model, image_path=None):
assert image_path
print("Running on {}.".format(image_path))
# Read the image.
image = skimage.io.imread(image_path)
# Detect objects.
r = model.detect([image], verbose=1)[0]
# Visualization and save the output.
class_names = ['background'] + list(DISEASE_DIC.keys())
visualize.display_instances(
image, r['rois'], r['masks'], r['class_ids'], class_names,
scores=r['scores'], title=image_path
)
print("Saved to splash.png.")
##################################################
# main
##################################################
if __name__ == '__main__':
import argparse
# Parse the arguments from command line.
parser = argparse.ArgumentParser(description="Detector for diseases from airport via Mask RCNN.")
parser.add_argument(
'command',
help="'train' or 'detect'", metavar="<command>"
)
parser.add_argument(
'--dataset', required=False,
help="Directory of the disease dataset.", metavar="/path/to/disease/dataset"
)
parser.add_argument(
'--weights', required=True,
help="Path to weights .h5 file or 'coco'", metavar="/path/to/weights.h5"
)
parser.add_argument(
'--logs', default=DEFAULT_LOGS_DIR, required=False,
help="Logs and checkpoints directory.", metavar="/path/to/logs"
)
parser.add_argument(
'--image', required=False,
help="Image to detect the diseases.", metavar="path or URL to image"
)
args = parser.parse_args()
# Validate the arguments.
assert args.command in ['train', 'detect']
if args.command == 'train':
assert args.dataset
print("Dataset: ", args.dataset)
elif args.command == 'detect':
assert args.image
print("Image: ", args.image)
print("Weights: ", args.weights)
print("Logs: ", args.logs)
# Configurations
if args.command == 'train':
config = DiseaseConfig()
else:
class InferenceConfig(DiseaseConfig):
            # Set the batch size to 1 since we'll be running detection
            # on one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == 'train':
model = modellib.MaskRCNN(mode='training', config=config, model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode='inference', config=config, model_dir=args.logs)
# Prepare the weights
if args.weights.lower() == 'coco':
weights_path = COCO_WEIGHTS_PATH
# Download the coco weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == 'last':
# Find the last trained weights
weights_path = model.find_last()
elif args.weights.lower() == 'imagenet':
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights: ", weights_path)
if args.weights.lower() == 'coco':
        # Exclude the last layers because the number of classes differs.
model.load_weights(weights_path, by_name=True, exclude=[
'mrcnn_class_logits',
'mrcnn_bbox_fc',
'mrcnn_bbox',
'mrcnn_mask',
'rpn_model'
])
else:
model.load_weights(weights_path, by_name=True)
# Train or detect
if args.command == 'train':
train(model)
elif args.command == 'detect':
detect(model, args.image)
|
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import actions
from components.base_component import BaseComponent
from components.inventory import Inventory
from components.ability import Ability
from vim_parser import VimCommandParser
import exceptions
import utils
if TYPE_CHECKING:
from entity import Actor, Item
class Consumable(BaseComponent):
parent: Item
def get_action(self, consumer: Actor) -> Optional[actions.Action]:
"""Try to return the action for this item."""
return actions.ItemAction(consumer, self.parent)
def activate(self, action:actions.ItemAction) -> None:
"""Invoke this item's ability.
"""
raise NotImplementedError()
def consume(self) -> None:
item = self.parent
inventory = item.parent
if isinstance(inventory,Inventory):
inventory.remove(item)
class NotConsumable(Consumable):
""" A consumable that does nothing but print a message.
Crucially, it does not remove itself from parent inventory."""
def __init__(self,message:str):
self.message = message
def consume(self) -> None:
raise exceptions.Impossible(self.message)
def activate(self,action:actions.ItemAction) -> None:
raise exceptions.Impossible(self.message)
class HealingConsumable(Consumable):
def __init__(self, amount:int,hp_buff:bool=False):
self.amount = amount
self.hp_buff = hp_buff # Whether to permanently increase hp
def activate(self, action:actions.ItemAction) -> None:
consumer = action.entity
if self.hp_buff:
consumer.fighter.max_hp += self.amount
amount_recovered = consumer.fighter.heal(self.amount)
if self.hp_buff:
self.engine.message_log.add_message(
f"You consumed the {self.parent.name}, increasing your max hp by {self.amount}"
)
self.consume()
elif amount_recovered > 0:
self.engine.message_log.add_message(
f"You consumed the {self.parent.name}, recovering {amount_recovered} hp"
)
self.consume()
else:
raise exceptions.Impossible(f"Your health is already full.")
class CommandConsumable(Consumable):
def __init__(self,command:str):
super().__init__()
self.command = command
def activate(self, action:actions.ItemAction) -> None:
"""Invoke this item's ability.
"""
consumer = action.entity
parser = VimCommandParser(consumer.engine,entity=consumer)
action = parser.colon_command(self.command)
action.requirements = [] # Make sure player can do it
# (Although I'm not sure whether requirements even get checked during
# perform(), vs at some other point?)
action.perform()
self.consume()
|
from frappe import _
def get_data():
return [
{
"label": _("Portal"),
"items": [
{
"type": "doctype",
"name": "Homepage",
"description": _("Settings for website homepage"),
},
{
"type": "doctype",
"name": "Shopping Cart Settings",
"label": _("Shopping Cart Settings"),
"description": _("Settings for online shopping cart such as shipping rules, price list etc."),
"hide_count": True
}
]
}
]
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from django.core.management.base import BaseCommand, CommandError
from corehq.apps.export.dbaccessors import get_properly_wrapped_export_instance
from corehq.apps.export.export import save_export_payload
from io import open
class Command(BaseCommand):
help = "Upload saved export"
def add_arguments(self, parser):
parser.add_argument(
'export_id',
help="Export ID of the saved export"
)
parser.add_argument(
'path',
help='Path to export archive',
)
def handle(self, export_id, **options):
path = options.pop('path')
if not os.path.isfile(path):
raise CommandError("File not found: {}".format(path))
export_instance = get_properly_wrapped_export_instance(export_id)
with open(path, 'rb') as payload:
save_export_payload(export_instance, payload)
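
# Hedged usage note (not part of the original command): Django derives the
# management command's name from this file's name, which is not shown here,
# so the invocation below is illustrative only:
#
#     ./manage.py <command_name> <export_id> /path/to/export/archive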
|
# Inside the utilidadesCeV package created in challenge 111 there is a module called dado. Create a function called leiaDinheiro() that works like the input() function, but validates the data so that only monetary values are accepted.
from utilidades import moeda
from utilidades import dado
preco = dado.moneyread('Digite preço: R$')
moeda.resumo(preco, 20, 10)
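
# --- Hedged sketch (the real utilidades.dado module is not shown here) ---
# One way moneyread()/leiaDinheiro() could validate monetary input: keep
# prompting until the text parses as a number, accepting a comma as the
# decimal separator.
def _moneyread_sketch(prompt: str) -> float:
    while True:
        raw = input(prompt).strip().replace(',', '.')
        try:
            return float(raw)
        except ValueError:
            print(f'ERROR: "{raw}" is not a valid price, try again.')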
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: boot
short_description: Set boot configuration
version_added: 1.2.0
author:
- Felix Fontein (@felixfontein)
description:
- Set the boot configuration for a dedicated server.
seealso:
- module: community.hrobot.ssh_key
description: Add, remove or update SSH key
- module: community.hrobot.ssh_key_info
description: Query information on SSH keys
extends_documentation_fragment:
- community.hrobot.robot
options:
server_number:
description:
- The server number of the server whose boot configuration to adjust.
type: int
required: true
regular_boot:
description:
- If this option is provided, all special boot configurations are removed and
the installed operating system will be booted up next (assuming it is bootable).
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: bool
choices:
- true
rescue:
description:
- If this option is provided, the rescue system will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
os:
description:
- The operating system to use for the rescue system. Possible choices can
change over time.
- Currently, C(linux), C(linuxold), C(freebsd), C(freebsdold), C(freebsdax),
C(freebsdbetaax), C(vkvm), and C(vkvmold) seem to be available.
type: str
required: true
arch:
description:
- The architecture to use for the rescue system.
- Not all architectures are available for all operating systems.
- Defaults to C(64).
type: int
choices:
- 32
- 64
authorized_keys:
description:
- One or more SSH key fingerprints to equip the rescue system with.
- Only fingerprints for SSH keys deposited in the Robot API can be used.
- You can use the M(community.hrobot.ssh_key_info) module to query the
SSH keys you can use, and the M(community.hrobot.ssh_key) module to
add or update SSH keys.
type: list
elements: str
install_linux:
description:
- If this option is provided, a Linux system install will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
dist:
description:
- The distribution to install.
type: str
required: true
arch:
description:
- The architecture to use for the install.
- Not all architectures are available for all distributions.
- Defaults to C(64).
type: int
choices:
- 32
- 64
lang:
description:
- The language to use for the operating system.
type: str
required: true
authorized_keys:
description:
- One or more SSH key fingerprints to equip the rescue system with.
- Only fingerprints for SSH keys deposited in the Robot API can be used.
- You can use the M(community.hrobot.ssh_key_info) module to query the
SSH keys you can use, and the M(community.hrobot.ssh_key) module to
add or update SSH keys.
type: list
elements: str
install_vnc:
description:
- If this option is provided, a VNC installation will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
dist:
description:
- The distribution to install.
type: str
required: true
arch:
description:
- The architecture to use for the install.
- Not all architectures are available for all distributions.
- Defaults to C(64).
type: int
choices:
- 32
- 64
lang:
description:
- The language to use for the operating system.
type: str
required: true
install_windows:
description:
- If this option is provided, a Windows installation will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
lang:
description:
- The language to use for Windows.
type: str
required: true
install_plesk:
description:
- If this option is provided, a Plesk installation will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
dist:
description:
- The distribution to install.
type: str
required: true
arch:
description:
- The architecture to use for the install.
- Not all architectures are available for all distributions.
- Defaults to C(64).
type: int
choices:
- 32
- 64
lang:
description:
- The language to use for the operating system.
type: str
required: true
hostname:
description:
- The hostname.
type: str
required: true
install_cpanel:
description:
- If this option is provided, a cPanel installation will be activated for the next boot.
- Precisely one of I(regular_boot), I(rescue), I(install_linux), I(install_vnc),
I(install_windows), I(install_plesk), and I(install_cpanel) must be provided.
type: dict
suboptions:
dist:
description:
- The distribution to install.
type: str
required: true
arch:
description:
- The architecture to use for the install.
- Not all architectures are available for all distributions.
- Defaults to C(64).
type: int
choices:
- 32
- 64
lang:
description:
- The language to use for the operating system.
type: str
required: true
hostname:
description:
- The hostname.
type: str
required: true
'''
EXAMPLES = r'''
- name: Disable all special boot configurations
community.hrobot.boot:
hetzner_user: foo
hetzner_password: bar
regular_boot: true
- name: Enable a rescue system (64bit Linux) for the next boot
  community.hrobot.boot:
hetzner_user: foo
hetzner_password: bar
rescue:
os: linux
- name: Enable a Linux install for the next boot
  community.hrobot.boot:
hetzner_user: foo
hetzner_password: bar
install_linux:
dist: CentOS 5.5 minimal
lang: en
authorized_keys:
- 56:29:99:a4:5d:ed:ac:95:c1:f5:88:82:90:5d:dd:10
- 15:28:b0:03:95:f0:77:b3:10:56:15:6b:77:22:a5:bb
'''
RETURN = r'''
configuration_type:
description:
- Describes the active boot configuration.
returned: success
type: str
choices:
- regular_boot
- rescue
- install_linux
- install_vnc
- install_windows
- install_plesk
- install_cpanel
password:
description:
- The root password for the active boot configuration, if available.
    - For non-rescue boot configurations, it is advised to change the root password
as soon as possible.
returned: success and if a boot configuration other than C(regular_boot) is active
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible_collections.community.hrobot.plugins.module_utils.robot import (
BASE_URL,
ROBOT_DEFAULT_ARGUMENT_SPEC,
fetch_url_json,
)
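
# BOOT_CONFIGURATION_DATA maps every module option to the corresponding Robot
# API boot key and, for each sub-option, the (result_key, data_key) pair used
# when comparing the existing configuration and building the activation
# request.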
BOOT_CONFIGURATION_DATA = [
('rescue', 'rescue', {
'os': ('os', 'os'),
'arch': ('arch', 'arch'),
'authorized_keys': ('authorized_key', 'authorized_key'),
}),
('install_linux', 'linux', {
'dist': ('dist', 'dist'),
'arch': ('arch', 'arch'),
'lang': ('lang', 'lang'),
'authorized_keys': ('authorized_key', 'authorized_key'),
}),
('install_vnc', 'vnc', {
'dist': ('dist', 'dist'),
'arch': ('arch', 'arch'),
'lang': ('lang', 'lang'),
}),
('install_windows', 'windows', {
'lang': ('lang', 'lang'),
}),
('install_plesk', 'plesk', {
'dist': ('dist', 'dist'),
'arch': ('arch', 'arch'),
'lang': ('lang', 'lang'),
'hostname': ('hostname', 'hostname'),
}),
('install_cpanel', 'cpanel', {
'dist': ('dist', 'dist'),
'arch': ('arch', 'arch'),
'lang': ('lang', 'lang'),
'hostname': ('hostname', 'hostname'),
}),
]
def main():
argument_spec = dict(
server_number=dict(type='int', required=True),
regular_boot=dict(type='bool', choices=[True]),
rescue=dict(type='dict', options=dict(
os=dict(type='str', required=True),
arch=dict(type='int', choices=[32, 64]),
authorized_keys=dict(type='list', elements='str', no_log=False),
)),
install_linux=dict(type='dict', options=dict(
dist=dict(type='str', required=True),
arch=dict(type='int', choices=[32, 64]),
lang=dict(type='str', required=True),
authorized_keys=dict(type='list', elements='str', no_log=False),
)),
install_vnc=dict(type='dict', options=dict(
dist=dict(type='str', required=True),
arch=dict(type='int', choices=[32, 64]),
lang=dict(type='str', required=True),
)),
install_windows=dict(type='dict', options=dict(
lang=dict(type='str', required=True),
)),
install_plesk=dict(type='dict', options=dict(
dist=dict(type='str', required=True),
arch=dict(type='int', choices=[32, 64]),
lang=dict(type='str', required=True),
hostname=dict(type='str', required=True),
)),
install_cpanel=dict(type='dict', options=dict(
dist=dict(type='str', required=True),
arch=dict(type='int', choices=[32, 64]),
lang=dict(type='str', required=True),
hostname=dict(type='str', required=True),
)),
)
argument_spec.update(ROBOT_DEFAULT_ARGUMENT_SPEC)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[('regular_boot', 'rescue', 'install_linux', 'install_vnc', 'install_windows', 'install_plesk', 'install_cpanel')],
required_one_of=[('regular_boot', 'rescue', 'install_linux', 'install_vnc', 'install_windows', 'install_plesk', 'install_cpanel')],
)
server_number = module.params['server_number']
changed = False
# Retrieve current boot config
url = "{0}/boot/{1}".format(BASE_URL, server_number)
result, error = fetch_url_json(module, url, accept_errors=['SERVER_NOT_FOUND', 'BOOT_NOT_AVAILABLE'])
if error is not None:
if error == 'SERVER_NOT_FOUND':
module.fail_json(msg='This server does not exist, or you do not have access rights for it')
if error == 'BOOT_NOT_AVAILABLE':
module.fail_json(msg='There is no boot configuration available for this server')
raise AssertionError('Unexpected error {0}'.format(error)) # pragma: no cover
# Deactivate current boot configurations that are not requested
for option_name, other_name, dummy in BOOT_CONFIGURATION_DATA:
if (result['boot'].get(other_name) or {}).get('active') and not module.params[option_name]:
changed = True
if not module.check_mode:
url = "{0}/boot/{1}/{2}".format(BASE_URL, server_number, other_name)
fetch_url_json(module, url, method='DELETE', allow_empty_result=True)
# Enable/compare boot configuration
return_values = {
'configuration_type': 'regular_boot',
'password': None,
}
for option_name, other_name, options in BOOT_CONFIGURATION_DATA:
if module.params[option_name]:
return_values['configuration_type'] = option_name
existing = result['boot'].get(other_name) or {}
return_values['password'] = existing.get('password')
data = {}
for option_key, (result_key, data_key) in options.items():
option = module.params[option_name][option_key]
if option is None or option == []:
continue
data[data_key] = option
if existing.get('active'):
# Idempotence check
needs_change = False
for option_key, (result_key, data_key) in options.items():
should = module.params[option_name][option_key]
if should is None:
continue
has = existing.get(data_key)
if isinstance(has, list):
has = sorted(has)
if not isinstance(should, list):
should = [should]
should = sorted(should)
if should != has:
needs_change = True
else:
needs_change = True
if needs_change:
changed = True
if not module.check_mode:
url = "{0}/boot/{1}/{2}".format(BASE_URL, server_number, other_name)
if existing.get('active'):
# Deactivate existing boot configuration
fetch_url_json(module, url, method='DELETE', allow_empty_result=True)
# Enable new boot configuration
headers = {"Content-type": "application/x-www-form-urlencoded"}
result, dummy = fetch_url_json(
module,
url,
data=urlencode(data),
headers=headers,
method='POST',
)
return_values['password'] = (result.get(other_name) or {}).get('password')
else:
return_values['password'] = None
module.exit_json(changed=changed, **return_values)
if __name__ == '__main__': # pragma: no cover
main() # pragma: no cover
|
from setuptools import setup, find_packages
import versioneer
from pathlib import Path
here = Path(__file__).resolve().parent
long_description = here.joinpath("README.md").read_text()
setup(
name="acondbs",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="A GraphQL server for product DB",
long_description=long_description,
long_description_content_type="text/markdown",
author="Simons Observatory",
author_email="so_software@simonsobservatory.org",
url="https://github.com/simonsobs/acondbs",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
packages=find_packages(exclude=["docs", "tests"]),
include_package_data=True,
install_requires=[
"SQLAlchemy>=1.4",
"SQLAlchemy-Utils>=0.37",
"Flask>=2.0",
"Flask-Cors>=3.0",
"Flask-GraphQL>=2.0",
"Flask-Migrate>=3.1",
"Flask-SQLAlchemy>=2.5",
"graphene-sqlalchemy>=2.3",
"graphene-sqlalchemy-filter>=1.10",
"cryptography>=3.2",
"gitpython>=3.1",
"requests>=2.24",
"a2wsgi>=1.4"
],
extras_require={
"tests": [
"pytest>=6.2",
"pytest-cov>=2.12",
'pytest-asyncio>=0.14',
"snapshottest>=0.6",
"async-asgi-testclient>=1.4.6",
]
},
)
|
""" MIT License
https://github.com/robertchase/fsm/blob/master/LICENSE
"""
# pylint: disable=not-callable
from __future__ import absolute_import
from functools import partial
from ergaleia.load_from_path import load_lines_from_path
from ergaleia.un_comment import un_comment
import fsm.actions as fsm_actions
from fsm.fsm_machine import create as create_machine
import fsm.FSM as FSM
class UnexpectedDirective(Exception):
"""Unexpected directive in fsm description file."""
def __init__(self, directive, line):
super(UnexpectedDirective, self).__init__(
"unexpected directive '{}', line={}".format(directive, line)
)
class Parser(object):
"""FSM description file parser."""
def __init__(self):
self.ctx = fsm_actions.Context()
self.fsm = create_machine(
action=partial(fsm_actions.act_action, self.ctx),
context=partial(fsm_actions.act_context, self.ctx),
default=partial(fsm_actions.act_default, self.ctx),
enter=partial(fsm_actions.act_enter, self.ctx),
event=partial(fsm_actions.act_event, self.ctx),
exception=partial(fsm_actions.act_exception, self.ctx),
exit=partial(fsm_actions.act_exit, self.ctx),
handler=partial(fsm_actions.act_handler, self.ctx),
state=partial(fsm_actions.act_state, self.ctx),
)
self.fsm.state = 'init'
def __str__(self):
states = self.states
d = 'from fsm.FSM import STATE, EVENT, FSM\n'
d += '# pylint: skip-file\n'
d += '# flake8: noqa\n'
d += '\n'.join('# ' + a for a in self.actions)
d += '\ndef create(**actions):\n'
d += '\n'.join(self._define(s) for s in states.values())
d += '\n' + '\n'.join(self._set_events(s) for s in states.values())
d += '\n return FSM([' + ','.join('S_' + s for s in states) + '])'
return d
@property
def first_state(self):
"""Return the first state defined in the fsm descriptio file."""
return self.ctx.first_state
@property
def actions(self):
"""Return a list of the action names in sorted order."""
return self.ctx.actions
@property
def states(self):
"""Return a dict of state object by name."""
return self.ctx.states
@property
def events(self):
"""Return a list of the event names."""
return self.ctx.events
@property
def handlers(self):
"""Return a dict of handler callables by name."""
return self.ctx.handlers
@property
def exception(self):
"""Return the exception handler or None.
"""
return self.ctx.exception
@exception.setter
def exception(self, value):
"""Set the exception handler."""
self.ctx.exception = value
@classmethod
def parse(cls, data):
"""Parse an fsm description file.
Arguments:
data --- list, file, filename or filepath
"""
parser = cls()
ctx = parser.ctx
for num, line in enumerate(
un_comment(
load_lines_from_path(data, 'fsm')
),
start=1):
if not line:
continue
line = line.split(' ', 1)
if len(line) == 1:
raise fsm_actions.TooFewTokens(line[0], num)
event, ctx.line = line
ctx.line_num = num
if not parser.fsm.handle(event.lower()):
raise UnexpectedDirective(event, num)
return parser
def compile(self, *args, **kwargs):
"""Bind and build and FSM from a parsed fsm.
Can safely be called multiple times on the same parser without
getting control structures mixed up.
parse() + compile() == load()
"""
handlers = self.ctx.handlers.copy()
self.bind(*args, **kwargs)
fsm = self.build(**self.ctx.handlers)
self.ctx.handlers = handlers
return fsm
@classmethod
def load(cls, path, *args, **kwargs):
"""Parse, bind and build an FSM from an fsm description file.
Arguments:
path -- list, file, filename or filepath
*args -- passed to the context, if specified in description
**kwargs -- passed to the context, if specified in description
Returns:
fsm.FSM.FSM
"""
p = cls.parse(path)
p.bind(*args, **kwargs)
return p.build(**p.ctx.handlers)
def bind(self, *args, **kwargs):
"""Bind the context to the action routines.
If a CONTEXT and HANDLER(s) are defined in the fsm description
file, the CONTEXT is initialized with *args and **kwargs,
and bound to each action routine and the exception routine as the
first argument.
"""
if self.ctx.context:
self.context = self.ctx.context(*args, **kwargs)
for n, h in self.handlers.items():
self.handlers[n] = partial(h, self.context)
if self.exception:
self.exception = partial(self.exception, self.context)
def build(self, **actions):
"""Construct an FSM from a parsed fsm description file.
Keyword arguments:
**actions -- each action routine callable
"""
states = {}
for state in self.states.values():
s = FSM.STATE(
name=state.name,
on_enter=actions[state.enter] if state.enter else None,
on_exit=actions[state.exit] if state.exit else None,
)
states[s.name] = s
for event in state.events.values():
e = FSM.EVENT(
name=event.name,
actions=[actions[n] for n in event.actions],
next_state=event.next_state,
)
s.events[e.name] = e
for state in states.values():
for event in state.events.values():
if event.next_state:
event.next_state = states[event.next_state]
fsm = FSM.FSM(states.values())
fsm.state = self.first_state
fsm.context = self.context
fsm.exception = self.exception
return fsm
@staticmethod
def _define(state):
s = " S_{0}=STATE('{0}'".format(state.name)
if state.enter:
s += ",on_enter=actions['{}']".format(state.enter)
if state.exit:
s += ",on_exit=actions['{}']".format(state.exit)
return s + ')'
@staticmethod
def _set_events(state):
s = " S_{}.set_events([".format(state.name)
for e in state.events.values():
s += "EVENT('{}',[".format(e.name)
s += ','.join("actions['{}']".format(a) for a in e.actions)
s += ']'
if e.next_state:
s += ', S_{}'.format(e.next_state)
s += "),"
s += '])'
return s
if __name__ == '__main__':
import sys
print(Parser.parse(sys.argv[1] if len(sys.argv) > 1 else sys.stdin))
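
# --- Hedged usage sketch (not part of the original module) ---
# parse()/load() consume a plain-text description whose directives map to the
# events handled above (state, event, action, enter, exit, context, handler,
# exception). The exact file layout is defined by fsm_machine, so the snippet
# below is illustrative only:
#
#     STATE init
#         EVENT start running
#             ACTION on_start
#     STATE running
#
#     fsm = Parser.load('machine.fsm', ...)   # or Parser.parse([...lines...])
#     fsm.handle('start')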
|
# FDC KM 0.1.1
#
'''
1. determine the number of variables
2. read in the full minterms (products) equal to 1
3. build the table
4. fill the table
5. GROUP
6. sort & remove duplicates
7. read off & print'''
ST = [0,1,3,2]
TK = []
TT = []; rTT=[]
def Siatka(n): #KK
if(n==8): return (4,4,4,4);
elif(n==7): return (4,4,4,2);
elif(n==6): return (4,4,4,1);
elif(n==5): return (4,4,2,1);
elif(n==4): return (4,4,1,1);
elif(n==3): return (4,2,1,1);
elif(n==2): return (2,2,1,1);
elif(n==1): return (2,1,1,1);
elif(n==0): return (1,1,1,1);
def Wypisz(TT):
for l in range(len(TT)):
for k in range(len(TT[l])):
for j in range(len(TT[l][k])):
for i in range(len(TT[l][k][j])):
print(TT[l][k][j][i][0],end=' ')
print()
print()
print()
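# Trans() reshapes the flat minterm list TK into a 4D Karnaugh table TT indexed
# [w][z][y][x]; the swaps of positions 2 and 3 on each axis give the Gray-code
# cell order (00, 01, 11, 10), so neighbouring cells differ in exactly one bit.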
def Trans(TK):
global W,X,Y,Z
TT=[]; n=0
for l in range(W):
xyz = []
for k in range(Z):
xy = []
for j in range(Y):
xx = []
for i in range(X):
xx.append([TK[n][0],TK[n][5],TK[n][6]])
n+=1
if(i==3): xx[3],xx[2]=xx[2],xx[3];
xy.append(xx)
if(j==3): xy[3],xy[2]=xy[2],xy[3];
xyz.append(xy)
if(k==3): xyz[3],xyz[2]=xyz[2],xyz[3];
TT.append(xyz)
if(l==3): TT[3],TT[2]=TT[2],TT[3];
return TT;
def Trans_Rev(TK):
global W,X,Y,Z
TT=[]; n=0
for i in range(X):
wyz = []
for j in range(Y):
wz = []
for k in range(Z):
ww = []
for l in range(W):
e = []
ww.append(e)
wz.append(ww)
wyz.append(wz)
TT.append(wyz)
'''for i in range(X):
xyz = []
for j in range(Y):
xy = []
for k in range(Z):
xx = []
for l in range(W):
xx.append([TK[n][0],TK[n][5],TK[n][6]])
n+=1
if(l==3): xx[3],xx[2]=xx[2],xx[3];
xy.append(xx)
if(k==3): xy[3],xy[2]=xy[2],xy[3];
xyz.append(xy)
if(j==3): xyz[3],xyz[2]=xyz[2],xyz[3];
TT.append(xyz)
if(i==3): TT[3],TT[2]=TT[2],TT[3];
return TT;'''
for i in range(X):
for j in range(Y):
for k in range(Z):
for l in range(W):
TT[i][j][k][l] = TK[l][k][j][i]
return TT;
def Near(kom,TK=TK,TT=TT):
global W,X,Y,Z
print(TK[kom])
tx = TK[kom][1]; ty = TK[kom][2]
tz = TK[kom][3]; tw = TK[kom][4]
print(TT[tw][tz][ty][tx-1]); print(TT[tw][tz][ty][(tx+1)%X])
print(TT[tw][tz][ty-1][tx]); print(TT[tw][tz][(ty+1)%Y][tx])
print(TT[tw][tz-1][ty][tx]); print(TT[tw][(tz+1)%Z][ty][tx])
print(TT[tw-1][tz][ty][tx]); print(TT[(tw+1)%W][tz][ty][tx])
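# Lista() scans every filled cell of the 4D map and flags which implicant
# shapes start at that cell. Glossary of the (Polish-derived) flag names used
# below, with the group size in cells:
#   pillz = 1x2 pair, kij = 1x4 stick, kafel = 2x2 tile, lufa = 4x2 barrel,
#   pol = 4x4 plane, kost = 2x2x2 cube, dzial = 4x2x2 cannon,
#   kana = 4x4x2 sandwich, m_tese = 2x2x2x2 small tesseract,
#   plazm = 4x2x2x2 plasma cannon, burg = 4x4x2x2 burger;
#   szsc, taxi and tesserakt are declared for larger groups but are not
#   evaluated in this part of the code.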
def Lista(TK,TT):
global X,Y,Z,W;
lista = []
for i in range(len(TK)):
x=TK[i][1]
y=TK[i][2]
z=TK[i][3]
w=TK[i][4]
if(TT[x][y][z][w][0]!=0):
#1D
pillz_x = 0; pillz_y = 0;
pillz_z = 0; pillz_w = 0;
kij_x = 0; kij_y = 0;
kij_z = 0; kij_w = 0;
#2D
kafel_xy= 0; kafel_xz= 0; kafel_xw= 0;
kafel_yz= 0; kafel_yw= 0;
kafel_zw= 0;
lufa_xy = 0; lufa_xw = 0; lufa_xz = 0;
lufa_yx = 0; lufa_yz = 0; lufa_yw = 0;
lufa_zx = 0; lufa_zy = 0; lufa_zw = 0;
lufa_wy = 0; lufa_wx = 0; lufa_wz = 0;
pol_xy = 0; pol_xw = 0; pol_xz = 0;
pol_yw = 0; pol_yz = 0;
pol_zw = 0;
#3D
kost_xyz = 0; kost_xzw = 0;
kost_yzw = 0; kost_xyw = 0;
dział_xyz= 0; dział_xzw= 0; dział_xyw= 0;
dział_yxz= 0; dział_yxw= 0; dział_yzw= 0;
dział_zxy= 0; dział_zyw= 0; dział_zxw= 0;
dział_wxy= 0; dział_wyz= 0; dział_wxz= 0;
kana_xyz = 0; kana_xyw = 0;
kana_xzy = 0; kana_xzw = 0;
kana_xwz = 0; kana_xwy = 0;
kana_yzx = 0; kana_yzw = 0;
kana_ywx = 0; kana_ywz = 0;
kana_zwx = 0; kana_zwy = 0;
szsc_xyz = 0; szsc_xzw = 0;
szsc_xyw = 0; szsc_yzw = 0;
#4D
plazm_x = 0; plazm_y = 0;
plazm_z = 0; plazm_w = 0;
burg_xy = 0; burg_xw = 0; burg_xz = 0;
burg_yw = 0; burg_yz = 0;
burg_zw = 0;
taxi_xyz= 0; taxi_yzw= 0;
taxi_xzw= 0; taxi_xyw= 0;
m_tese = 0; tesserakt= 0;
            # check pills and sticks x4
if(TT[(x+1)%X][y][z][w][0]!=0):
pillz_x = 1
if(TT[(x+2)%X][y][z][w][0]!=0 and
TT[(x+3)%X][y][z][w][0]!=0): kij_x=1
if(TT[x][(y+1)%Y][z][w][0]!=0):
pillz_y = 1
if(TT[x][(y+2)%Y][z][w][0]!=0 and
TT[x][(y+3)%Y][z][w][0]!=0): kij_y=1
if(TT[x][y][(z+1)%Z][w][0]!=0):
pillz_z = 1
if(TT[x][y][(z+2)%Z][w][0]!=0 and
TT[x][y][(z+3)%Z][w][0]!=0): kij_z=1
if(TT[x][y][z][(w+1)%W][0]!=0):
pillz_w = 1
if(TT[x][y][z][(w+2)%W][0]!=0 and
TT[x][y][z][(w+3)%W][0]!=0): kij_w=1
            # check tiles x6
if(pillz_x and pillz_y):
if(TT[(x+1)%X][(y+1)%Y][z][w][0]!=0):
kafel_xy = 1
if(pillz_x and pillz_z):
if(TT[(x+1)%X][y][(z+1)%Z][w][0]!=0):
kafel_xz = 1
if(pillz_x and pillz_w):
if(TT[(x+1)%X][y][z][(w+1)%W][0]!=0):
kafel_xw = 1
if(pillz_y and pillz_z):
if(TT[x][(y+1)%Y][(z+1)%Z][w][0]!=0):
kafel_yz = 1
if(pillz_y and pillz_w):
if(TT[x][(y+1)%Y][z][(w+1)%W][0]!=0):
kafel_yw = 1
if(pillz_z and pillz_w):
if(TT[x][y][(z+1)%Z][(w+1)%W][0]!=0):
kafel_zw = 1
            # check cubes x4
if(kafel_xy and kafel_yz and kafel_xz):
if(TT[(x+1)%X][(y+1)%Y][(z+1)%Z][w][0]!=0):
kost_xyz = 1
if(kafel_xy and kafel_yw and kafel_xw):
if(TT[(x+1)%X][(y+1)%Y][z][(w+1)%W][0]!=0):
kost_xyw = 1
if(kafel_xw and kafel_zw and kafel_xz):
if(TT[(x+1)%X][y][(z+1)%Z][(w+1)%W][0]!=0):
kost_xzw = 1
if(kafel_yw and kafel_yz and kafel_zw):
if(TT[x][(y+1)%Y][(z+1)%Z][(w+1)%W][0]!=0):
kost_yzw = 1
            # check the small tesseract x1
if(kost_xyz and kost_xyw and kost_xzw and kost_yzw):
if(TT[(x+1)%X][(y+1)%Y][(z+1)%Z][(w+1)%W][0]!=0):
m_tese=1
            # check barrels x3x4
if(kafel_xy and kij_x):
if(TT[(x+2)%X][(y+1)%Y][z][w][0]!=0 and
TT[(x+3)%X][(y+1)%Y][z][w][0]!=0): lufa_xy=1
if(kafel_xz and kij_x):
if(TT[(x+2)%X][y][(z+1)%Z][w][0]!=0 and
TT[(x+3)%X][y][(z+1)%Z][w][0]!=0): lufa_xz=1
if(kafel_xw and kij_x):
if(TT[(x+2)%X][y][z][(w+1)%W][0]!=0 and
TT[(x+3)%X][y][z][(w+1)%W][0]!=0): lufa_xw=1
if(kafel_xy and kij_y):
if(TT[(x+1)%X][(y+2)%Y][z][w][0]!=0 and
TT[(x+1)%X][(y+3)%Y][z][w][0]!=0): lufa_yx=1
if(kafel_yz and kij_y):
if(TT[x][(y+2)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[x][(y+3)%Y][(z+1)%Z][(w+0)%W][0]!=0): lufa_yz=1
if(kafel_yw and kij_y):
if(TT[x][(y+2)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[x][(y+3)%Y][(z+0)%Z][(w+1)%W][0]!=0): lufa_yw=1
if(kafel_xz and kij_z):
if(TT[(x+1)%X][(y+0)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+3)%Z][(w+0)%W][0]!=0): lufa_zx=1
if(kafel_yz and kij_z):
if(TT[x][(y+1)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[x][(y+1)%Y][(z+3)%Z][(w+0)%W][0]!=0): lufa_zy=1
if(kafel_zw and kij_z):
if(TT[x][(y+0)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[x][(y+0)%Y][(z+3)%Z][(w+1)%W][0]!=0): lufa_zw=1
if(kafel_xw and kij_w):
if(TT[(x+1)%X][(y+0)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+0)%Z][(w+3)%W][0]!=0): lufa_wx=1
if(kafel_yw and kij_w):
if(TT[x][(y+1)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[x][(y+1)%Y][(z+0)%Z][(w+3)%W][0]!=0): lufa_wy=1
if(kafel_zw and kij_w):
if(TT[x][(y+0)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[x][(y+0)%Y][(z+1)%Z][(w+3)%W][0]!=0): lufa_wz=1
            # check cannons x3x4
if(kost_xyz and lufa_xy and lufa_xz and
TT[(x+2)%X][(y+1)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+1)%Z][(w+0)%W][0]!=0): dział_xyz=1
if(kost_xyz and lufa_yx and lufa_yz and
TT[(x+1)%X][(y+2)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+1)%Z][(w+0)%W][0]!=0): dział_yxz=1
if(kost_xyz and lufa_zx and lufa_zy and
TT[(x+1)%X][(y+1)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+3)%Z][(w+0)%W][0]!=0): dział_zxy=1
if(kost_yzw and lufa_wy and lufa_wz and
TT[(x+0)%X][(y+1)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+1)%Y][(z+1)%Z][(w+3)%W][0]!=0): dział_wyz=1
if(kost_yzw and lufa_yw and lufa_yz and
TT[(x+0)%X][(y+2)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+1)%Z][(w+1)%W][0]!=0): dział_yzw=1
if(kost_yzw and lufa_zw and lufa_zy and
TT[(x+0)%X][(y+1)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+0)%X][(y+1)%Y][(z+3)%Z][(w+1)%W][0]!=0): dział_zyw=1
if(kost_xzw and lufa_xw and lufa_xz and
TT[(x+2)%X][(y+0)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+1)%Z][(w+1)%W][0]!=0): dział_xzw=1
if(kost_xzw and lufa_wx and lufa_wz and
TT[(x+1)%X][(y+0)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+1)%Z][(w+3)%W][0]!=0): dział_wxz=1
if(kost_xzw and lufa_zx and lufa_zw and
TT[(x+1)%X][(y+0)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+3)%Z][(w+1)%W][0]!=0): dział_zxw=1
if(kost_xyw and lufa_xy and lufa_xw and
TT[(x+2)%X][(y+1)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+0)%Z][(w+1)%W][0]!=0): dział_xyw=1
if(kost_xyw and lufa_yx and lufa_yw and
TT[(x+1)%X][(y+2)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+0)%Z][(w+1)%W][0]!=0): dział_yxw=1
if(kost_xyw and lufa_wx and lufa_wy and
TT[(x+1)%X][(y+1)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+0)%Z][(w+3)%W][0]!=0): dział_wxy=1
            # check plasma cannons x4
if(m_tese):
if(dział_xyz and dział_xzw and dział_xyw):
if(TT[(x+2)%X][(y+1)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+1)%Z][(w+1)%W][0]!=0): plazm_x=1
if(dział_yxz and dział_yzw and dział_yxw):
if(TT[(x+1)%X][(y+2)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+1)%Z][(w+1)%W][0]!=0): plazm_y=1
if(dział_zxy and dział_zxw and dział_zyw):
if(TT[(x+1)%X][(y+1)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+3)%Z][(w+1)%W][0]!=0): plazm_z=1
if(dział_wyz and dział_wxz and dział_wxy):
if(TT[(x+1)%X][(y+1)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+1)%Z][(w+3)%W][0]!=0): plazm_w=1
            # check planes (fields) x6
if(lufa_xy and lufa_yx):
if(TT[(x+2)%X][(y+2)%Y][(z+0)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+0)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+0)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+0)%Z][(w+0)%W][0]!=0): pol_xy=1
if(lufa_xz and lufa_zx):
if(TT[(x+2)%X][(y+0)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+3)%Z][(w+0)%W][0]!=0): pol_xz=1
if(lufa_xw and lufa_wx):
if(TT[(x+2)%X][(y+0)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+0)%Z][(w+3)%W][0]!=0): pol_xw=1
if(lufa_zy and lufa_yz):
if(TT[(x+0)%X][(y+2)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+3)%Z][(w+0)%W][0]!=0): pol_yz=1
if(lufa_wy and lufa_yw):
if(TT[(x+0)%X][(y+2)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+0)%Z][(w+3)%W][0]!=0): pol_yw=1
if(lufa_wz and lufa_zw):
if(TT[(x+0)%X][(y+0)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+0)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+0)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+0)%Y][(z+3)%Z][(w+3)%W][0]!=0): pol_zw=1
            # check sandwiches x2x6
if(pol_xy):
if(dział_xyz and dział_yxz and
TT[(x+2)%X][(y+2)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+1)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+1)%Z][(w+0)%W][0]!=0): kana_xyz=1
if(dział_xyw and dział_yxw and
TT[(x+2)%X][(y+2)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+0)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+0)%Z][(w+1)%W][0]!=0): kana_xyw=1
if(pol_xz):
if(dział_xyz and dział_zxy and
TT[(x+2)%X][(y+1)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+3)%Z][(w+0)%W][0]!=0): kana_xzy=1
if(dział_xzw and dział_zxw and
TT[(x+2)%X][(y+0)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+3)%Z][(w+1)%W][0]!=0): kana_xzw=1
if(pol_xw):
if(dział_xyw and dział_wxy and
TT[(x+2)%X][(y+1)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+0)%Z][(w+3)%W][0]!=0): kana_xwy=1
if(dział_xzw and dział_wxz and
TT[(x+2)%X][(y+0)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+1)%Z][(w+3)%W][0]!=0): kana_xwz=1
if(pol_yz):
if(dział_yxz and dział_zxy and
TT[(x+1)%X][(y+2)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+3)%Z][(w+0)%W][0]!=0): kana_yzx=1
if(dział_yzw and dział_zyw and
TT[(x+0)%X][(y+2)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+3)%Z][(w+1)%W][0]!=0): kana_yzw=1
if(pol_yw):
if(dział_yxw and dział_wxy and
TT[(x+1)%X][(y+2)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+0)%Z][(w+3)%W][0]!=0): kana_ywx=1
if(dział_yzw and dział_wyz and
TT[(x+0)%X][(y+2)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+1)%Z][(w+3)%W][0]!=0): kana_ywz=1
if(pol_zw):
if(dział_wxz and dział_zxw and
TT[(x+1)%X][(y+0)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+0)%Y][(z+3)%Z][(w+3)%W][0]!=0): kana_zwx=1
if(dział_wyz and dział_zyw and
TT[(x+0)%X][(y+1)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+1)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+1)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+1)%Y][(z+3)%Z][(w+3)%W][0]!=0): kana_zwy=1
            # checking burgers x6
if(kana_xyz and kana_xyw and plazm_x and plazm_y and
TT[(x+2)%X][(y+2)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+1)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+1)%Z][(w+1)%W][0]!=0): burg_xy=1
if(kana_xzy and kana_xzw and plazm_x and plazm_z and
TT[(x+2)%X][(y+1)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+3)%Z][(w+1)%W][0]!=0): burg_xz=1
if(kana_xwz and kana_xwy and plazm_x and plazm_w and
TT[(x+2)%X][(y+1)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+1)%Z][(w+3)%W][0]!=0): burg_xw=1
if(kana_yzx and kana_yzw and plazm_z and plazm_y and
TT[(x+1)%X][(y+2)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+3)%Z][(w+1)%W][0]!=0): burg_yz=1
if(kana_ywz and kana_ywx and plazm_w and plazm_y and
TT[(x+1)%X][(y+2)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+1)%Z][(w+3)%W][0]!=0): burg_yw=1
if(kana_zwx and kana_zwy and plazm_z and plazm_w and
TT[(x+1)%X][(y+1)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+1)%Y][(z+3)%Z][(w+3)%W][0]!=0): burg_zw=1
            # checking cubes x4 and pendants x4
if(kana_xyz and kana_yzx and kana_xzy and
TT[(x+2)%X][(y+2)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+2)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+2)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+3)%Z][(w+0)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+3)%Z][(w+0)%W][0]!=0): szsc_xyz=1
if(burg_xy and burg_yz and burg_xz and szsc_xyz and
TT[(x+2)%X][(y+2)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+2)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+2)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+3)%Z][(w+1)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+3)%Z][(w+1)%W][0]!=0): taxi_xyz=1
if(kana_xyw and kana_ywx and kana_xwy and
TT[(x+2)%X][(y+2)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+0)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+2)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+0)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+0)%Z][(w+3)%W][0]!=0): szsc_xyw=1
if(burg_xy and burg_yw and burg_xw and szsc_xyw and
TT[(x+2)%X][(y+2)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+1)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+2)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+2)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+2)%X][(y+3)%Y][(z+1)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+3)%Y][(z+1)%Z][(w+3)%W][0]!=0): taxi_xyw=1
if(kana_xzw and kana_zwx and kana_xwz and
TT[(x+2)%X][(y+0)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+2)%X][(y+0)%Y][(z+3)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+0)%Y][(z+3)%Z][(w+3)%W][0]!=0): szsc_xzw=1
if(burg_xz and burg_zw and burg_xw and szsc_xzw and
TT[(x+2)%X][(y+1)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+2)%X][(y+1)%Y][(z+3)%Z][(w+3)%W][0]!=0 and
TT[(x+3)%X][(y+1)%Y][(z+3)%Z][(w+3)%W][0]!=0): taxi_xzw=1
if(kana_yzw and kana_zwy and kana_ywz and
TT[(x+0)%X][(y+2)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+2)%Y][(z+3)%Z][(w+3)%W][0]!=0 and
TT[(x+0)%X][(y+3)%Y][(z+3)%Z][(w+3)%W][0]!=0): szsc_yzw=1
if(burg_yz and burg_zw and burg_yw and szsc_yzw and
TT[(x+1)%X][(y+2)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+2)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+3)%Z][(w+2)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+2)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+2)%Y][(z+3)%Z][(w+3)%W][0]!=0 and
TT[(x+1)%X][(y+3)%Y][(z+3)%Z][(w+3)%W][0]!=0): taxi_yzw=1
            # checking the big tesseract
if(taxi_xyz and taxi_yzw and taxi_xzw and taxi_xyw):
tesserakt = 1
for i in range(len(TK)):
if(TK[i][0]==0):
tesserakt=0
break;
if(tesserakt==0):
if(taxi_xyz): #taxi
grupa=grupuj(4,4,4,2,x,y,z,w)
lista.append(grupa)
else:
if(szsc_xyz): #szsc
grupa=grupuj(4,4,4,1,x,y,z,w)
lista.append(grupa)
if(taxi_xyw):
grupa=grupuj(4,4,2,4,x,y,z,w)
lista.append(grupa)
else:
if(szsc_xyw):
grupa=grupuj(4,4,1,4,x,y,z,w)
lista.append(grupa)
if(taxi_xzw):
grupa=grupuj(4,2,4,4,x,y,z,w)
lista.append(grupa)
else:
if(szsc_xzw):
grupa=grupuj(4,1,4,4,x,y,z,w)
lista.append(grupa)
if(taxi_yzw):
grupa=grupuj(2,4,4,4,x,y,z,w)
lista.append(grupa)
else:
if(szsc_yzw):
grupa=grupuj(1,4,4,4,x,y,z,w)
lista.append(grupa)
if(burg_xy): #burg
grupa=grupuj(4,4,2,2,x,y,z,w)
lista.append(grupa)
else:
if(kana_xyz): #kana
grupa=grupuj(4,4,2,1,x,y,z,w)
lista.append(grupa)
if(kana_xyw):
grupa=grupuj(4,4,1,2,x,y,z,w)
lista.append(grupa)
elif(pol_xy): #pol
grupa=grupuj(4,4,1,1,x,y,z,w)
lista.append(grupa)
if(burg_xz):
grupa=grupuj(4,2,4,2,x,y,z,w)
lista.append(grupa)
else:
if(kana_xzy):
grupa=grupuj(4,2,4,1,x,y,z,w)
lista.append(grupa)
if(kana_xzw):
grupa=grupuj(4,1,4,2,x,y,z,w)
lista.append(grupa)
elif(pol_xz):
grupa=grupuj(4,1,4,1,x,y,z,w)
lista.append(grupa)
if(burg_xw):
grupa=grupuj(4,2,2,4,x,y,z,w)
lista.append(grupa)
else:
if(kana_xwy):
grupa=grupuj(4,2,1,4,x,y,z,w)
lista.append(grupa)
if(kana_xwz):
grupa=grupuj(4,1,2,4,x,y,z,w)
lista.append(grupa)
elif(pol_xw):
grupa=grupuj(4,1,1,4,x,y,z,w)
lista.append(grupa)
if(burg_yz):
grupa=grupuj(2,4,4,2,x,y,z,w)
lista.append(grupa)
else:
if(kana_yzx):
grupa=grupuj(2,4,4,1,x,y,z,w)
lista.append(grupa)
if(kana_yzw):
grupa=grupuj(1,4,4,2,x,y,z,w)
lista.append(grupa)
elif(pol_yz):
grupa=grupuj(1,4,4,1,x,y,z,w)
lista.append(grupa)
if(burg_yw):
grupa=grupuj(2,4,2,4,x,y,z,w)
lista.append(grupa)
else:
if(kana_ywx):
grupa=grupuj(2,4,1,4,x,y,z,w)
lista.append(grupa)
if(kana_ywz):
grupa=grupuj(1,4,2,4,x,y,z,w)
lista.append(grupa)
elif(pol_yw):
grupa=grupuj(1,4,1,4,x,y,z,w)
lista.append(grupa)
if(burg_zw):
grupa=grupuj(2,2,4,4,x,y,z,w)
lista.append(grupa)
else:
if(kana_zwx):
grupa=grupuj(2,1,4,4,x,y,z,w)
lista.append(grupa)
if(kana_zwy):
grupa=grupuj(1,2,4,4,x,y,z,w)
lista.append(grupa)
elif(pol_zw):
grupa=grupuj(1,1,4,4,x,y,z,w)
lista.append(grupa)
if(plazm_x): #plazm
grupa=grupuj(4,2,2,2,x,y,z,w)
lista.append(grupa)
else:
if(dział_xyz): #dział
grupa=grupuj(4,2,2,1,x,y,z,w)
lista.append(grupa)
elif(lufa_xy): #lufa
grupa=grupuj(4,2,1,1,x,y,z,w)
lista.append(grupa)
if(dział_xyw):
grupa=grupuj(4,2,1,2,x,y,z,w)
lista.append(grupa)
elif(lufa_xw):
grupa=grupuj(4,1,1,2,x,y,z,w)
lista.append(grupa)
if(dział_xzw):
grupa=grupuj(4,1,2,2,x,y,z,w)
lista.append(grupa)
elif(lufa_xz):
grupa=grupuj(4,1,2,1,x,y,z,w)
lista.append(grupa)
elif(kij_x): #kij
grupa=grupuj(4,1,1,1,x,y,z,w)
lista.append(grupa)
else:
if(pillz_x): #pillz
grupa=grupuj(2,1,1,1,x,y,z,w)
lista.append(grupa)
else:
grupa=grupuj(1,1,1,1,x,y,z,w)
lista.append(grupa) #solo
if(plazm_y):
grupa=grupuj(2,4,2,2,x,y,z,w)
lista.append(grupa)
else:
if(dział_yxz):
grupa=grupuj(2,4,2,1,x,y,z,w)
lista.append(grupa)
elif(lufa_yx):
grupa=grupuj(2,4,1,1,x,y,z,w)
lista.append(grupa)
if(dział_yxw):
grupa=grupuj(2,4,1,2,x,y,z,w)
lista.append(grupa)
elif(lufa_yw):
grupa=grupuj(1,4,1,2,x,y,z,w)
lista.append(grupa)
if(dział_yzw):
grupa=grupuj(1,4,2,2,x,y,z,w)
lista.append(grupa)
elif(lufa_yz):
grupa=grupuj(1,4,2,1,x,y,z,w)
lista.append(grupa)
elif(kij_y):
grupa=grupuj(1,4,1,1,x,y,z,w)
lista.append(grupa)
else:
if(pillz_y):
grupa=grupuj(1,2,1,1,x,y,z,w)
lista.append(grupa)
if(plazm_z):
grupa=grupuj(2,2,4,2,x,y,z,w)
lista.append(grupa)
else:
if(dział_zxy):
grupa=grupuj(2,2,4,1,x,y,z,w)
lista.append(grupa)
elif(lufa_zx):
grupa=grupuj(2,1,4,1,x,y,z,w)
lista.append(grupa)
if(dział_zxw):
grupa=grupuj(2,1,4,2,x,y,z,w)
lista.append(grupa)
elif(lufa_zw):
grupa=grupuj(1,1,4,2,x,y,z,w)
lista.append(grupa)
if(dział_zyw):
grupa=grupuj(1,2,4,2,x,y,z,w)
lista.append(grupa)
elif(lufa_zy):
grupa=grupuj(1,2,4,1,x,y,z,w)
lista.append(grupa)
elif(kij_z):
grupa=grupuj(1,1,4,1,x,y,z,w)
lista.append(grupa)
else:
if(pillz_z):
grupa=grupuj(1,1,2,1,x,y,z,w)
lista.append(grupa)
if(plazm_w):
grupa=grupuj(2,2,2,4,x,y,z,w)
lista.append(grupa)
else:
if(dział_wxy):
grupa=grupuj(2,2,1,4,x,y,z,w)
lista.append(grupa)
elif(lufa_wx):
grupa=grupuj(2,1,1,4,x,y,z,w)
lista.append(grupa)
if(dział_wxz):
grupa=grupuj(2,1,2,4,x,y,z,w)
lista.append(grupa)
elif(lufa_wz):
grupa=grupuj(1,1,2,4,x,y,z,w)
lista.append(grupa)
if(dział_wyz):
grupa=grupuj(1,2,2,4,x,y,z,w)
lista.append(grupa)
elif(lufa_wy):
grupa=grupuj(1,2,1,4,x,y,z,w)
lista.append(grupa)
elif(kij_w):
grupa=grupuj(1,1,1,4,x,y,z,w)
lista.append(grupa)
else:
if(pillz_w):
grupa=grupuj(1,1,1,2,x,y,z,w)
lista.append(grupa)
if(m_tese): #m_tese
grupa=grupuj(2,2,2,2,x,y,z,w)
lista.append(grupa)
else:
if(kost_xyz): #kost
grupa=grupuj(2,2,2,1,x,y,z,w)
lista.append(grupa)
elif(kafel_xy): #kafel
grupa=grupuj(2,2,1,1,x,y,z,w)
lista.append(grupa)
if(kost_xyw):
grupa=grupuj(2,2,1,2,x,y,z,w)
lista.append(grupa)
elif(kafel_yw):
grupa=grupuj(1,2,1,2,x,y,z,w)
lista.append(grupa)
if(kost_xzw):
grupa=grupuj(2,1,2,2,x,y,z,w)
lista.append(grupa)
elif(kafel_xz):
grupa=grupuj(2,1,2,1,x,y,z,w)
lista.append(grupa)
if(kost_yzw):
grupa=grupuj(1,2,2,2,x,y,z,w)
lista.append(grupa)
elif(kafel_zw):
grupa=grupuj(1,1,2,2,x,y,z,w)
lista.append(grupa)
if(kafel_xw):
grupa=grupuj(2,1,1,2,x,y,z,w)
lista.append(grupa)
if(kafel_yz):
grupa=grupuj(1,2,2,1,x,y,z,w)
lista.append(grupa)
else:
#grupa=grupuj(4,4,4,4,x,y,z,w)
#lista.append(grupa)
return 1
return lista
def grupuj(x,y,z,w,px,py,pz,pw):
global X,Y,Z,W
grupa = []
for l in range(w):
for k in range(z):
for j in range(y):
for i in range(x):
grupa.append([(i+px)%X,(j+py)%Y,(k+pz)%Z,(l+pw)%W])
return grupa
def Filtruj(lista):
if(lista==1): return 1;
if not lista: return 0;
for l in range(len(lista)):
#for m in range(len(lista[l])-1):
# for p in range(m+1,len(lista[l])):
# if(lista[l][m]==lista[l][p]):
# del lista[l][p]; p-=1
plista = []
[plista.append(m) for m in lista[l] if m not in plista]
lista[l] = plista
#print(lista)
for k in range(len(lista)-1):
for j in range(len(lista)):
if(j!=k and len(lista[k])>=len(lista[j])):
for i in range(len(lista[j])):
if(lista[j][i] not in lista[k]):
break;
elif(i==len(lista[j])-1):
lista[j]=[]
else:
continue
else:
continue
while([] in lista):
lista.remove([])
return lista
def Filtr_2(lista,TK,TT):
if not lista: return 0;
jede = 0; zero = 0;
for i in range(len(TK)):
if(TK[i][0]==1):
jede=1
break;
if(TK[i][0]==0): zero=1
if(not jede):
if(zero): return 0;
else: return 2;
if(lista==1):
return 1;
else:
for j in range(len(lista)):
jede = 0
for i in range(len(lista[j])):
if(TT[lista[j][i][0]][lista[j][i][1]]
[lista[j][i][2]][lista[j][i][3]][0]==1):
jede=1
break;
if(not jede):
lista[j]=[]
while([] in lista):
lista.remove([])
return lista
def Minim(n,zbiór,TT):
if(zbiór==2): return 2;
if(zbiór==1): return 1;
if not zbiór: return 0;
lista = []
for j in range(len(zbiór)):
cr=zbiór[j][0]; x=cr[0];
y=cr[1];z=cr[2];w=cr[3];
iloc = Bin(n,TT,x,y,z,w)
for i in range(1,len(zbiór[j])):
cr=zbiór[j][i]; x=cr[0];
y=cr[1];z=cr[2];w=cr[3];
ilcc= Bin(n,TT,x,y,z,w)
for k in range(len(iloc)):
if(ilcc[k]!=iloc[k]):
ilist = list(iloc)
ilist[k] = '-'
iloc = ''.join(ilist)
lista.append(iloc)
return lista
def Bin(n,TT,x,y,z,w):
nr=TT[x][y][z][w][1]
nr='{0:08b}'.format(nr)
nr=nr[8-int(n):]
return nr
def Drukuj(n,lista):
wynik = 'f('
if(n>0):
wynik+='A'
if(n>1):
wynik+=',B'
if(n>2):
wynik+=',C'
if(n>3):
wynik+=',D'
if(n>4):
wynik+=',E'
if(n>5):
wynik+=',F'
if(n>6):
wynik+=',G'
if(n>7):
wynik+=',H'
wynik+=') = '
if(lista==2):
wynik+='X'
print(wynik); return wynik
if(lista==1):
wynik+='1'
print(wynik); return wynik
if not lista:
wynik+='0'
print(wynik); return wynik
for j in range(len(lista)):
#l = len(lista[j])
if(n>0):
if(lista[j][-1]=='1'):
wynik+='A'
elif(lista[j][-1]=='0'):
wynik+="A'"
else:
pass
if(n>1):
if(lista[j][-2]=='1'):
wynik+='B'
elif(lista[j][-2]=='0'):
wynik+="B'"
else:
pass
if(n>2):
if(lista[j][-3]=='1'):
wynik+='C'
elif(lista[j][-3]=='0'):
wynik+="C'"
else:
pass
if(n>3):
if(lista[j][-4]=='1'):
wynik+='D'
elif(lista[j][-4]=='0'):
wynik+="D'"
else:
pass
if(n>4):
if(lista[j][-5]=='1'):
wynik+='E'
elif(lista[j][-5]=='0'):
wynik+="E'"
else:
pass
if(n>5):
if(lista[j][-6]=='1'):
wynik+='F'
elif(lista[j][-6]=='0'):
wynik+="F'"
else:
pass
if(n>6):
if(lista[j][-7]=='1'):
wynik+='G'
elif(lista[j][-7]=='0'):
wynik+="G'"
else:
pass
if(n>7):
if(lista[j][-8]=='1'):
wynik+='H'
elif(lista[j][-8]=='0'):
wynik+="H'"
else:
pass
if(j<len(lista)-1): wynik+='+'
print(wynik)
return wynik
#============================================================================##
#1. picking the right grid based on the number of variables:
while True:
n=input("Podaj liczbę zmiennych funkcji (0-8):")
if(n=='q'): quit()
elif(0<=int(n)<=8): break;
X,Y,Z,W = Siatka(int(n)); n=int(n)
nn=1
for i in range(n): nn=nn*2;
#2. collecting the minterms for which the function is 1:
dane = []; print("Podaj iloczyny pełne, dla który funcja zwarca 1, "+
"gdy zakończysz, wpisz 'q'")
while True:
inp=input("> ")
if(inp=='q'):
break;
elif(int(inp)<nn):
dane.append(int(inp))
else:
pass
#2.5. collecting the minterms equal to '?'
ndane = []; print("Podaj iloczyny pełne, dla który funcja zwarca ?, "+
"gdy zakończysz, wpisz 'q'")
while True:
inp=input("> ")
if(inp=='q'):
break;
elif(int(inp)<nn):
ndane.append(int(inp))
else:
pass
#3. building TK, storing values and positions:
for l in range(W):
for k in range(Z):
for j in range(Y):
for i in range(X):
TK.append(['-',ST[i],ST[j],ST[k],ST[l],'d','b'])
                ''' 0 - value 0/1/-
                    1 - position on the x axis
                    2 - position on the y axis
                    3 - position on the z axis
                    4 - position on the w axis
                    5 - minterm id
                    6 - minterm id (bin)'''
#4. filling the table from the provided data:
for i in range(len(TK)):
if(i in dane): TK[i][0]=1
elif(i in ndane): TK[i][0]='?'
else: TK[i][0]=0
TK[i][5]=int(i)
TK[i][6]="{0:b}".format(i)
#5. transforming the table
TT=Trans(TK); print(); Wypisz(TT)
rTT=Trans_Rev(TT)
#5.5. grouping
Grupy = Lista(TK,rTT)
Grupy = Filtruj(Grupy)
Grupy = Filtr_2(Grupy,TK,rTT)
#6. reading out and printing the result
Grupy = Minim(int(n),Grupy,rTT)
wynik = Drukuj(int(n),Grupy)
|
import numpy as np
"Code by Jinming Wang, Zuhao Yang"
class DataProcessor():
def __init__ (self, data):
""" Argument: data: the data to be processed """
self.data = data
def copy(self):
return DataProcessor(self.data.copy())
def makeSegments(self, size, stride):
"""
Split a self.data into many small segments
Arguments:
size (int or 2-tuple): size of each segment, if given int then segment is a square
stride (int or 2-tuple): the step or distance between segments, if given int then vertical and horizontal steps are the same
        self.data = split segments as a list; each element is an np array
Example:
[[A B C D]
[E F G H] -> makeSegments, size=(2, 3), stride=1 ->
[I J K L]]
-> [ [[A B C] [[B C D] [[E F G] [[F G H]
[E F G]] [F G H]] [I J K]] [J K L]] ]
"""
# Convert size and stride into 2-tuples if they are not
if type(size) == int:
size = (size, size)
if type(stride) == int:
stride = (stride, stride)
# Initialize segments to correct size
if (self.data.ndim == 3):
height, width, depth = np.shape(self.data)
num_segments = ((height - size[0])/stride[0] + 1) * \
((width - size[1])/stride[1] + 1)
segments = np.zeros((int(num_segments), size[0], size[1], depth))
elif (self.data.ndim == 2):
height, width = np.shape(self.data)
num_segments = ((height - size[0])/stride[0] + 1) * \
((width - size[1])/stride[1] + 1)
segments = np.zeros((int(num_segments), size[0], size[1]))
else:
print("Dimension of given self.data must be 2 or 3")
return None
# Do splitting, iterate every row and column of self.data, get a small partition of correct size and assign this partition to segments
segment_idx = 0
for row in range(0, height - size[0] + 1, stride[0]):
for col in range(0, width - size[1] + 1, stride[1]):
if (self.data.ndim == 2):
segments[segment_idx] = self.data[row:row+size[0], col:col+size[1]]
else:
segments[segment_idx] = self.data[row:row+size[0], col:col+size[1], :]
segment_idx += 1
self.data = segments
def shuffle(self):
"""
Random shuffle all elements in a dataset
Argument:
self.data (array like): a set of all data
self.data = shuffled_self.data
"""
np.random.shuffle(self.data)
def makeBatch(self, batch_size, ignore_remainder = False):
"""
Split self.data into batches of size batch_size
Arguments:
batch_size (int): the size of each batch
            ignore_remainder (boolean): if True and the last batch cannot reach batch_size,
                drop the last batch and all data in it; if False, some random data from
                self.data are added to fill the last batch
self.data = A list of batches, where each batch contains batch_size data
"""
data_num = np.shape(self.data)[0]
batch_num = data_num//batch_size
        remainder_size = data_num % batch_size
if remainder_size != 0:
if (ignore_remainder):
self.data = np.split(self.data[0:data_num - remainder_size], batch_num)
else:
num_data_needed = batch_size - remainder_size
# take some random data from original data set and append them to the end
# NOBUG
random_indices = np.random.randint(0, data_num, num_data_needed)
appenda = np.zeros((num_data_needed, *self.data.shape[1:]))
for i, j in enumerate(random_indices):
appenda[i] = self.data[j]
self.data = np.concatenate((self.data, appenda))
self.data = np.split(self.data, batch_num + 1)
else:
self.data = np.split(self.data, batch_num)
def concatDatasets(self, list_of_dataset):
"""
Used to concatenate two data sets, after you loaded a list of different levels, you may apply makeSegments on each level, then you got something like
[[lvl1_seg1, lvl1_seg2, ...], [lvl2_seg1, lvl2_seg2, ...], ...]
This function can turn this into
[lvl1_seg1, lvl1_seg2, ..., lvl2_seg1, lvl2_seg2, ... lvln_segn]
Arguments:
list_of_dataset: a list of lists of data
self.data = A list of data
"""
# TODO: complete this implementation as described above
self.data = np.concatenate(list_of_dataset)
def swapAxes(self, axis_1, axis_2):
"""
Swap two axes of self.data, (batch_number, batch_size, row, column, depth)
"""
self.data = np.swapaxes(self.data, axis_1, axis_2)
def test():
data = np.random.randint(0, 5, (3, 3, 2))
processor = DataProcessor(data)
processor.makeSegments(2, 1)
print(np.shape(processor.data))
print("The first element in segments:\n", processor.data[0])
processor.shuffle()
print("The first element in shuffled segments:\n", processor.data[0])
processor.makeBatch(2)
print("Size of segments after make batches:", np.shape(processor.data))
print("Expected: (2,2,2,2,2), 2 batches in total, 2 data in each batch, \n" + \
"\teach data has 2 rows, 2 columns and depth of 2")
if __name__ == "__main__":
test()
|
p1 = float(input('Grade on test 1: '))
p2 = float(input('Grade on test 2: '))
m = (p1 + p2)/2
print(f'The average of P1 and P2 is {m:.1f}.')
if m < 5.0:
    print('FAILED')
elif m < 7.0:
    print('MAKE-UP EXAM')
else:
    print('PASSED!')
|
SUBTRAIN_VOCAB = 'tasks/R2R/data/sub_train_vocab.txt'
TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
|
from dataloaders import cityscapes, dada, apolloscape, kitti, bdd, merge3
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'cityscapesevent':
train_set = cityscapes.CityscapesRGBEvent(args, split='train')
val_set = cityscapes.CityscapesRGBEvent(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'dadaevent':
val_set = dada.DADARGBEvent(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = None
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'apolloscapeevent':
train_set = apolloscape.ApolloscapeRGBEvent(args, split='train')
val_set = apolloscape.ApolloscapeRGBEvent(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'kittievent':
train_set = kitti.KITTIRGBEvent(args, split='train')
val_set = kitti.KITTIRGBEvent(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'bdd':
train_set = bdd.BDD(args, split='train')
val_set = bdd.BDD(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
elif args.dataset == 'merge3':
train_set = merge3.Merge3(args, split='train')
val_set = merge3.Merge3(args, split='val')
num_class = val_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.val_batch_size, shuffle=False, **kwargs)
test_loader = None
else:
raise NotImplementedError
return train_loader, val_loader, test_loader, num_class
|
class DataPointBase(object):
def __init__(self, id, values):
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Correct triangle noise wave in ES60 data
Created on Wed May 2 10:30:23 2018
@author: Alejandro Ariza, British Antarctic Survey
"""
def correct_es60triangle(Sv):
print('TODO')
|
import bisect


class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
idx = bisect.bisect_left(arr, x)
l, r = idx, idx # not include r
while r - l < k:
if l == 0: return arr[:k]
if r == len(arr): return arr[-k:]
if x - arr[l - 1] <= arr[r] - x:
l -= 1
else:
r += 1
return arr[l:r]
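
# Hedged usage sketch (not part of the original solution): a quick check of the
# two-pointer window above on a small input; the expected output is taken from the
# classic "find k closest elements" problem statement.
if __name__ == "__main__":
    print(Solution().findClosestElements([1, 2, 3, 4, 5], 4, 3))  # expected: [1, 2, 3, 4]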
|
from pyteal import Expr, InnerTxnBuilder, Seq, Subroutine, TealType, TxnField, TxnType
@Subroutine(TealType.none)
def axfer(receiver, asset_id, amt) -> Expr:
return Seq(
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.asset_receiver: receiver,
TxnField.xfer_asset: asset_id,
TxnField.asset_amount: amt,
}
),
InnerTxnBuilder.Submit(),
)
@Subroutine(TealType.none)
def pay(receiver, amt) -> Expr:
return Seq(
InnerTxnBuilder.Begin(),
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.Payment,
TxnField.receiver: receiver,
TxnField.amount: amt,
}
),
InnerTxnBuilder.Submit(),
)
|
from .adsdatatype import AdsDatatype
class SymbolInfo:
def __init__(self, name, indexGroup, indexOffset, adsDatatype, bitOffset = 0):
self.Name = name
self.IndexGroup = indexGroup
self.IndexOffset = indexOffset
self.AdsDatatype = adsDatatype
self.BitOffset = bitOffset
Name = ''
IndexGroup = 0
IndexOffset = 0
BitOffset = 0
AdsDatatype = AdsDatatype.Custom
Value = None
def WriteTo(self, byteBuffer):
# byte shift needed, if bool!
if (self.AdsDatatype == AdsDatatype.Bool):
currentByte = AdsDatatype.UnpackFrom(AdsDatatype.UInt8, byteBuffer, self.IndexOffset)
if (self.Value):
newByte = currentByte | (1 << self.BitOffset)
else:
newByte = currentByte & ~(1 << self.BitOffset) & 0xF
AdsDatatype.PackInto(AdsDatatype.UInt8, byteBuffer, self.IndexOffset, newByte)
else:
AdsDatatype.PackInto(self.AdsDatatype, byteBuffer, self.IndexOffset, self.Value)
def ReadFrom(self, byteBuffer):
if (self.AdsDatatype == AdsDatatype.Bool):
result = AdsDatatype.UnpackFrom(AdsDatatype.UInt8, byteBuffer, self.IndexOffset)
            result = ((result & (1 << self.BitOffset)) != 0)
else:
result = AdsDatatype.UnpackFrom(self.AdsDatatype, byteBuffer, self.IndexOffset)
self.Value = result
return result
def __str__(self):
return "%s [%s] (%08x, %08x%s)" % (
self.Name,
AdsDatatype.GetName(self.AdsDatatype),
self.IndexGroup,
self.IndexOffset,
(".%s" % self.BitOffset) if self.AdsDatatype == AdsDatatype.Bool else ''
)
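
# Illustrative sketch (not part of the original class): the bit arithmetic WriteTo
# uses for Bool symbols, shown on a plain integer instead of an ADS byte buffer.
# The concrete values below are made up for demonstration.
if __name__ == "__main__":
    current_byte = 0b01000001
    bit_offset = 3
    set_byte = current_byte | (1 << bit_offset)             # set bit 3   -> 0b01001001
    cleared_byte = set_byte & ~(1 << bit_offset) & 0xFF     # clear bit 3 -> 0b01000001
    print(bin(set_byte), bin(cleared_byte))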
|
import re
import base64
def to_bin_conversion(inp, form):
formatted = inp.replace(" -2b",'')
out = []
if form == "asc":
for i in formatted:
Dec = ord(i)
BinaryValues = bin(Dec)
out.append(BinaryValues[2:].zfill(8))
return(''.join(out))
elif form == "hex":
formatted = formatted.replace(' ', '')
byteArray = re.findall(r'.{1,2}',formatted,re.DOTALL)
for i in range(len(byteArray)):
BinaryValues = bin(int(byteArray[i], 16))
out.append(BinaryValues[2:].zfill(8))
return(''.join(out))
elif form == "dec":
byteArray = formatted.split()
for i in range(len(byteArray)):
BinaryValues = bin(int(byteArray[i]))
out.append(BinaryValues[2:].zfill(8))
return(''.join(out))
elif form == "b64":
formatted = base64.b64decode(formatted).decode('utf-8')
for i in formatted:
Dec = ord(i)
BinaryValues = bin(Dec)
out.append(BinaryValues[2:].zfill(8))
return(''.join(out))
# elif form == "oct":
def to_dec_conversion(inp, form):
formatted = inp.replace(" -2d",'')
out = []
if form == "asc":
for i in formatted:
Dec = ord(i)
Dec = str(Dec)
out.append(Dec)
return(' '.join(out))
elif form == "hex":
formatted = formatted.replace(' ', '')
byteArray = re.findall(r'.{1,2}',formatted,re.DOTALL)
for i in range(len(byteArray)):
DecValues = str(int(byteArray[i], 16))
out.append(DecValues)
return(' '.join(out))
elif form == "bin":
formatted = formatted.replace(' ', '')
byteArray = re.findall(r'.{1,8}',formatted,re.DOTALL)
for i in range(len(byteArray)):
DecValues = str(int(byteArray[i], 2))
out.append(DecValues)
return(' '.join(out))
elif form == "b64":
formatted = base64.b64decode(formatted).decode('utf-8')
for i in formatted:
Dec = ord(i)
Dec = str(Dec)
out.append(Dec)
return(' '.join(out))
# elif form == "oct":
def to_hex_conversion(inp, form):
formatted = inp.replace(" -2h", "")
formatted = formatted.replace(" -20x", "")
out = []
if form == "asc":
for i in formatted:
Dec = ord(i)
HexVal = hex(Dec)
out.append(HexVal[2:])
return(''.join(out))
elif form == "dec":
byteArray = formatted.split()
for i in range(len(byteArray)):
HexVal = hex(int(byteArray[i]))
out.append(HexVal[2:])
return('20'.join(out))
elif form == "bin":
formatted = formatted.replace(' ', '')
byteArray = re.findall(r'.{1,8}',formatted,re.DOTALL)
for i in range(len(byteArray)):
HexVal = hex(int(byteArray[i], 2))
out.append(HexVal[2:])
return('20'.join(out))
elif form == "b64":
formatted = base64.b64decode(formatted).decode('utf-8')
for i in formatted:
Dec = ord(i)
HexVal = hex(Dec)
out.append(HexVal[2:])
return(''.join(out))
# elif form == "oct":
def to_ascii_conversion(inp, form):
formatted = inp.replace(" -2a",'')
formatted = formatted.replace(" -2t",'')
out = []
if form == "hex":
formatted = formatted.replace(" ","")
byteArray = re.findall(r'.{1,2}',formatted,re.DOTALL)
for i in range(len(byteArray)):
Ascii = chr(int(byteArray[i], 16))
out.append(Ascii)
return(''.join(out))
elif form == "dec":
byteArray = formatted.split()
for i in range(len(byteArray)):
Ascii = chr(int(byteArray[i]))
out.append(Ascii)
return(''.join(out))
elif form == "bin":
formatted = formatted.replace(" ","")
byteArray = re.findall(r'.{1,8}',formatted,re.DOTALL)
for i in range(len(byteArray)):
Ascii = chr(int(byteArray[i], 2))
out.append(Ascii)
return(''.join(out))
elif form == "b64":
return(base64.b64decode(formatted).decode('ascii'))
# elif form == "oct":
def to_b64_conversion(inp, form):
formatted = inp.replace(" -2b64", '')
return(base64.b64encode(bytes(formatted, 'utf-8')).decode('utf-8'))
""" //should i have this, idrk, might be useful in the future, leave it for now and add it later?
def to_oct_conversion(inp, form):
formatted = inp.replace(" -2o",'')
formatted = inp.replace(" -20", '')
out = []
if form == "asc":
for i in formatted:
Dec = ord(i)
OctVal = oct(Dec)
out.append(OctVal)
return(''.join(out))
elif form == "hex":
elif form == "dec":
elif form == "bin":
"""
#====================================================================================================================================
try:
info = input()
if "-help" in info:
print("baseflipper.py: 'string' [--format] [--outputformat] \n\n"\
" --formats:\n -h hexadecimal \n -b binary \n "\
"-a ascii text \n -d decimal \n -b64 "\
" base64 \n\n --outputformats: \n -2h "\
"hexadecimal \n -2b binary \n -2a ascii text"\
" \n -2d decimal\n -2b64 base64")
if "-a" in info:
info = info.replace(" -a",'')
if "-2b64" in info:
info = to_b64_conversion(info, "asc")
elif "-2d" in info:
info = to_dec_conversion(info, "asc")
elif "-2b" in info:
info = to_bin_conversion(info, "asc")
elif "-2h" in info:
info = to_hex_conversion(info, "asc")
elif "-2o" in info:
info = to_oct_conversion(info, "asc")
elif "-h" in info:
info = info.replace(" -h", '')
if "-2b64" in info:
info = to_b64_conversion(info, "hex")
elif "-2d" in info:
info = to_dec_conversion(info, "hex")
elif "-2a" in info:
info = to_ascii_conversion(info, "hex")
elif "-2o" in info:
info = to_oct_conversion(info, "hex")
elif "-2b" in info:
info = to_bin_conversion(info, "hex")
elif "-b64" in info:
info = info.replace(" -64",'')
if "-2h" in info:
info = to_hex_conversion(info, "b64")
elif "-2a" in info:
info = to_ascii_conversion(info, "b64")
elif "-2b" in info:
info = to_bin_conversion(info, "b64")
elif "-2d" in info:
info = to_dec_conversion(info, "b64")
elif "-2o" in info:
info = to_oct_conversion(info, "b64")
elif "-b" in info:
info = info.replace(" -b",'')
if "-2h" in info:
info = to_hex_conversion(info, "bin")
elif "-2a" in info:
info = to_ascii_conversion(info, "bin")
elif "-2d" in info:
info = to_dec_conversion(info, "bin")
elif "-2o" in info or "-20" in info:
info = to_oct_conversion(info, "bin")
elif "-2b64" in info:
info = to_b64_conversion(info, "bin")
elif "-d" in info:
info = info.replace(" -d",'')
if "-2h" in info:
info = to_hex_conversion(info, "dec")
elif "-2a" in info:
info = to_ascii_conversion(info, "dec")
elif "-2b64" in info:
info = to_b64_conversion(info, "dec")
elif "-2b" in info:
info = to_bin_conversion(info, "dec")
elif "-2o" in info:
info = to_oct_conversion(info, "dec")
else:
raise ValueError()
print(info)
'''
future addition maybe
elif "-o" in info:
info = info.replace(" -o",'')
if "-2h" in info:
info = to_hex_conversion(info, "oct")
elif "-2a" in info:
info = to_ascii_conversion(info, "oct")
elif "-2b64" in info:
info = to_b64_conversion(info, "oct")
elif "-2b" in info:
info = to_bin_conversion(info, "oct")
elif "-2d" in info:
info = to_dec_conversion(info, "oct")
'''
except:
print("baseflipper.py: invalid configuration type [-help] for more info")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2015 from http://adventofcode.com/2015/day/25
Author: James Walker
Copyright: MIT license
--- Day 25: Let It Snow ---
Merry Christmas! Santa is booting up his weather machine; looks like you
might get a white Christmas after all. The weather machine beeps! On the
console of the machine is a copy protection message asking you to enter a
code from the instruction manual. Apparently, it refuses to run unless you
give it that code.
No problem; you'll just look up the code in the-- "Ho ho ho", Santa ponders
aloud. "I can't seem to find the manual." You look up the support number for
the manufacturer and give them a call. Good thing, too - that 49th star
wasn't going to earn itself.
"Oh, that machine is quite old!", they tell you. "That model went out of
support six minutes ago, and we just finished shredding all of the manuals.
I bet we can find you the code generation algorithm, though."
After putting you on hold for twenty minutes (your call is very important to
them, it reminded you repeatedly), they finally find an engineer that
remembers how the code system works.
The codes are printed on an infinite sheet of paper, starting in the top-left
corner. The codes are filled in by diagonals: starting with the first row
with an empty first box, the codes are filled in diagonally up and to the
right. This process repeats until the infinite paper is covered. So, the
first few codes are filled in in this order:
| 1 2 3 4 5 6
---+---+---+---+---+---+---+
1 | 1 3 6 10 15 21
2 | 2 5 9 14 20
3 | 4 8 13 19
4 | 7 12 18
5 | 11 17
6 | 16
For example, the 12th code would be written to row 4, column 2; the 15th code
would be written to row 1, column 5.
The voice on the other end of the phone continues with how the codes are
actually generated. The first code is 20151125. After that, each code is
generated by taking the previous one, multiplying it by 252533, and then
keeping the remainder from dividing that value by 33554393.
So, to find the second code (which ends up in row 2, column 1), start with
the previous value, 20151125. Multiply it by 252533 to get 5088824049625.
Then, divide that by 33554393, which leaves a remainder of 31916031. That
remainder is the second code.
"Oh!", says the voice. "It looks like we missed a scrap from one of the
manuals. Let me read it to you." You write down his numbers:
| 1 2 3 4 5 6
---+---------+---------+---------+---------+---------+---------+
1 | 20151125 18749137 17289845 30943339 10071777 33511524
2 | 31916031 21629792 16929656 7726640 15514188 4041754
3 | 16080970 8057251 1601130 7981243 11661866 16474243
4 | 24592653 32451966 21345942 9380097 10600672 31527494
5 | 77061 17552253 28094349 6899651 9250759 31663883
6 | 33071741 6796745 25397450 24659492 1534922 27995004
"Now remember", the voice continues, "that's not even all of the first few
numbers; for example, you're missing the one at 7,1 that would come before
6,2. But, it should be enough to let your-- oh, it's time for lunch! Bye!"
The call disconnects.
Santa looks nervous. Your puzzle input contains the message on the machine's
console. What code do you give the machine?
Answer: 2650453
--- Day 25: Part Two ---
The machine springs to life, then falls silent again. It beeps. "Insufficient
fuel", the console reads. "Fifty stars are required before proceeding. One
star is available."
..."one star is available"? You check the fuel tank; sure enough, a lone star
sits at the bottom, awaiting its friends. Looks like you need to provide 49
yourself.
Answer: Get the other 48 stars from days 1 to 24
"""
# Standard Library Imports
import re
# Application-specific Imports
from advent_of_code.solvers import solver
class Solver(solver.AdventOfCodeSolver):
"""Advent of Code 2015 Day 25: Let It Snow
Attributes
puzzle_input (list): A list of instructions for solving the puzzle
puzzle_title (str): Name of the Advent of Code puzzle
solved_output (str): A template string for solution output
"""
def __init__(self, *args):
solver.AdventOfCodeSolver.__init__(self, *args)
self._solved_output = 'The code to give to the machine is {0}.'
@staticmethod
def _get_exponent(console_msg):
"""
Args:
console_msg (str): Input for the puzzle
Returns:
int: Exponent calculated from the puzzle input
"""
msg_pattern = r'row (?P<row>\d+), column (?P<col>\d+)'
code_grid = re.search(msg_pattern, console_msg)
if code_grid:
row = int(code_grid.group('row'))
col = int(code_grid.group('col'))
exponent = int((row + col - 2) * (row + col - 1) / 2) + col - 1
else:
exponent = -1
return exponent
@staticmethod
def _modular_exp(base, exponent, modulus, start):
"""Performs right-to-left binary method for modular exponentiation
Args:
base (int): Non-negative number for the exponent
exponent (int): Base is multiplied by itself this number of times
modulus (int): Non-negative number to apply modulus operation
start (int): Starting point to use when calculating the result
Returns:
int: Result of performing modular exponentiation with inputs
"""
result = start if start >= 1 else 0
if start and base >= 1 and exponent > -1 and modulus > 1:
base %= modulus
while exponent:
if exponent & 1:
result *= base
result %= modulus
exponent >>= 1
base *= base
base %= modulus
return result
def _solve_puzzle_parts(self):
"""Solves each part of a Advent of Code 2015 puzzle
Args: None
Returns:
tuple: Pair of solutions for the two parts of the puzzle
"""
base = 252533
exponent = self._get_exponent(console_msg=self.puzzle_input)
modulus = 33554393
start = 20151125
return (self._modular_exp(base, exponent, modulus, start), None)
def run_test_cases(self):
"""Runs a series of inputs and compares against expected outputs
Args: None
Returns: None
"""
test_input1 = 'row 2, column 1'
test_input2 = 'row 6, column 1'
test_input3 = 'row 6, column 2'
test_input4 = 'row 6, column 3'
test_input5 = 'row 6, column 4'
test_input6 = 'row 6, column 5'
test_input7 = 'row 6, column 6'
self._run_test_case(solver.TestCase(test_input1, 31916031, None))
self._run_test_case(solver.TestCase(test_input2, 33071741, None))
self._run_test_case(solver.TestCase(test_input3, 6796745, None))
self._run_test_case(solver.TestCase(test_input4, 25397450, None))
self._run_test_case(solver.TestCase(test_input5, 24659492, None))
self._run_test_case(solver.TestCase(test_input6, 1534922, None))
self._run_test_case(solver.TestCase(test_input7, 27995004, None))
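
# Illustrative sketch (not part of the original solver): a brute-force version of the
# code generation described in the module docstring, useful for cross-checking
# _modular_exp on small inputs. Names and defaults here are assumptions drawn from
# the puzzle text above.
def _naive_code(row, col, first=20151125, factor=252533, modulus=33554393):
    """Return the code at (row, col) by walking the diagonal order directly."""
    index = (row + col - 2) * (row + col - 1) // 2 + col - 1
    code = first
    for _ in range(index):
        code = (code * factor) % modulus
    return code
# Example: _naive_code(2, 1) == 31916031 and _naive_code(6, 6) == 27995004,
# matching the grid printed in the docstring.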
|
from typing import List
from typing_extensions import Literal
from pydantic import BaseModel, Field
class ModuleConnection(BaseModel):
"""Model a single module connection."""
url: str = Field(
...,
description="The url (port) value the module driver should connect to. "
"For example: socket://host:port",
)
module_type: str = Field(
..., description="What kind of module this connection emulates."
)
identifier: str = Field(..., description="Unique id for this emulator.")
class Message(BaseModel):
"""A message sent to module server clients."""
status: Literal["connected", "disconnected", "dump"] = Field(
...,
description="`dump` includes a complete list of connected emulators. "
"`connected` for new connections. `disconnected` for emulators "
"that have disconnected. ",
)
connections: List[ModuleConnection]
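
# Hedged usage sketch (not part of the original module): constructing a status
# message of the shape described by the field docs above. The url, module_type,
# and identifier values are made up for illustration.
if __name__ == "__main__":
    example = Message(
        status="connected",
        connections=[
            ModuleConnection(
                url="socket://localhost:9996",
                module_type="tempdeck",
                identifier="tempdeck-emulator-1",
            )
        ],
    )
    print(example.json())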
|
# -*- coding: utf-8 -*- <-------------- uses utf-8
import io, sys, os, urllib2, shutil, time, subprocess, platform
# Global variables
# Taobao's API is a dynamic page and does not meet our needs; switched to chinaz
# GET_ISP_TYPE_URL = 'http://ip.taobao.com/ipSearch.php'
# GET_ISP_TYPE_URL = 'http://ip.chinaz.com/'
# chinaz is acting up; this address is temporarily unavailable
GET_ISP_TYPE_URL = 'http://ip.chinaz.com/getip.aspx'
# GET_ISP_TYPE_URL = 'http://www.123cha.com/ip/'
ISP_TYPE_DIANXIN = unicode('电信', "utf-8")
IPS_TYPE_TIETONG = unicode('铁通', "utf-8")
IPS_TYPE_YIDONG = unicode('移动', "utf-8")
# File of usable IPs for China Telecom (dianxin)
GITHUB_DIANXIN_RAW_FILE = 'https://raw.githubusercontent.com/out0fmemory/GoAgent-Always-Available/master/%E7%94%B5%E4%BF%A1%E5%AE%BD%E5%B8%A6%E9%AB%98%E7%A8%B3%E5%AE%9A%E6%80%A7Ip.txt'
BACKUP_SITE_DIANXIN_RAW_FILE = 'http://yanke.info/ip/dianxin_ip.txt'
# File of usable IPs for China Tietong (tietong)
GITHUB_TIETONG_RAW_FILE = 'https://raw.githubusercontent.com/out0fmemory/GoAgent-Always-Available/master/%E9%93%81%E9%80%9A%E5%AE%BD%E5%B8%A6%E9%AB%98%E7%A8%B3%E5%AE%9A%E6%80%A7Ip.txt'
BACKUP_SITE_TIETONG_RAW_FILE = 'http://yanke.info/ip/tietong_ip.txt'
# Number of retries for network requests
NET_RETRY_CNT = 3
PROXY_PROP = 'proxy.ini'
PROXY_PROP_BACKUP = 'proxy.bak'
PROXY_PROP_TEM = 'proxy.tem'
GOOGLE_CN_TAG = '[google_cn]'
GOOGLE_HK_TAG = '[google_hk]'
HOSTS_TAG = 'hosts = '
SEPIRATOR_TAG = '.'
GOAGENT_EXE_FILE = 'goagent.exe'
# Data file fetched from the network
NET_GSCAN_FILE = 'net_gsan_ip.txt'
GET_NET_IP_LIST_SEP = 60 * 60 * 10
# Determine the ISP type
def getIpType():
try:
getIpurl = GET_ISP_TYPE_URL
fd = urllib2.urlopen(getIpurl, timeout=5)
Ipdata = fd.read()
# print Ipdata
# Ipdata = Ipdata.decode('utf-8').encode('gbk')
ispType = ISP_TYPE_DIANXIN
if IPS_TYPE_TIETONG in Ipdata:
print "The Isp is TieTong"
ispType = IPS_TYPE_TIETONG
elif IPS_TYPE_YIDONG in Ipdata:
print "The Isp is YiDong, use The TieTong Ips"
ispType = IPS_TYPE_TIETONG
elif ISP_TYPE_DIANXIN in Ipdata:
print "The Isp is DianXin"
else:
print "The Isp is Others, use The Default DianXin Ips"
fd.close()
return ispType
except Exception, e:
print "The Isp is Others, use The Default DianXin Ips"
return None
# Fetch the usable IP list from GitHub
def getAvailableGoagentIp(ispType):
try:
        # Download the IP list file from GitHub
print "down Available Ip list from Github"
url = GITHUB_DIANXIN_RAW_FILE
if ispType == IPS_TYPE_TIETONG:
url = GITHUB_TIETONG_RAW_FILE
fd = urllib2.urlopen(url, timeout=5)
content = fd.read()
print 'Now Available Ip list:' + content
fd.close()
return content
except Exception, e:
return None
def getAvailableGoagentIpWithBackupSite(ispType):
try:
        # Download the IP list file from yanke.info
print "down Available Ip list from yanke.info"
url = BACKUP_SITE_DIANXIN_RAW_FILE
if ispType == IPS_TYPE_TIETONG:
url = BACKUP_SITE_TIETONG_RAW_FILE
fd = urllib2.urlopen(url, timeout=10)
content = fd.read()
print 'Now Available Ip list:' + content
fd.close()
return content
except Exception, e:
return None
def localFileReplace(ipList):
    # Back up the config file first
shutil.copy(PROXY_PROP, PROXY_PROP_BACKUP)
    # Find and replace in the config file
isInHostCn = 0
isInHostHk = 0
inFile = open(PROXY_PROP, "r")
out = open(PROXY_PROP_TEM, "w")
line = inFile.readline()
while line:
# print line
if line.find(GOOGLE_CN_TAG) != -1:
isInHostCn = 1
elif line.find(GOOGLE_HK_TAG) != -1:
isInHostHk = 1
if isInHostCn == 1:
if HOSTS_TAG in line and SEPIRATOR_TAG in line:
print "before replace " + GOOGLE_CN_TAG + line
isInHostCn = 0
line = HOSTS_TAG + ipList + '\n'
elif isInHostHk == 1:
if HOSTS_TAG in line and SEPIRATOR_TAG in line:
print "before replace " + GOOGLE_HK_TAG + line
isInHostHk = 0
line = HOSTS_TAG + ipList + '\n'
out.write(line)
line = inFile.readline()
inFile.close()
out.flush()
out.close()
shutil.copy(PROXY_PROP_TEM, PROXY_PROP)
def modifyGscanConf(confPath, confPathTem, findIpCnt):
fp = open(confPath, 'r')
lines = fp.readlines()
    fp.close()
fp = open(confPathTem, 'w')
for line in lines:
if 'RecordLimit' in line:
line = '"RecordLimit" : ' + str(findIpCnt) + ','
fp.write(line)
fp.flush()
fp.close()
def readGscanIp():
currentPath = os.path.split(os.path.realpath(__file__))[0]
googleIpPath = os.path.join(currentPath, "google_ip.txt")
# foreground get ip, we do not get ip every time
if os.path.exists(googleIpPath) and time.time() - getFileModifyTime(googleIpPath) < 60 * 3:
print 'use local ip_file because time short'
fileIp = readIpFromFile(googleIpPath)
if fileIp != None and len(fileIp) > 0:
return fileIp
# Obtain usable IPs via gscan
def gscanIp(isNeedWait, findIpCnt):
currentPath = os.path.split(os.path.realpath(__file__))[0]
googleIpPath = os.path.join(currentPath, "google_ip.txt")
# foreground get ip, we do not get ip every time
if isNeedWait:
if os.path.exists(googleIpPath) and time.time() - getFileModifyTime(googleIpPath) < GET_NET_IP_LIST_SEP:
print 'use local ip_file because time short'
fileIp = readIpFromFile(googleIpPath)
print fileIp
if fileIp != None and len(fileIp) > 50:
return fileIp
gscanPath = os.path.join(currentPath, 'gscan')
gscanPath = os.path.join(gscanPath, 'bin')
osType = platform.system()
executeFile = 'gscan-win.exe'
if osType == 'Darwin':
executeFile = 'gscan-mac'
elif osType == 'Linux':
executeFile = 'gscan'
executePath = os.path.join(gscanPath, executeFile)
confPath = os.path.join(gscanPath, 'my.conf')
gscanConfPath = os.path.join(gscanPath, 'gscan.conf')
gscanConfPathTem = gscanConfPath + 'tem'
modifyGscanConf(gscanConfPath, gscanConfPathTem, findIpCnt)
cmd = executePath + ' -iprange ' + confPath + ' -conf ' + gscanConfPathTem
print 'process gscan to get google ip:' + cmd + ' , get ip count:' + str(findIpCnt) + ",isForeround:" + str(
isNeedWait)
# 'pwd && cd /Volumes/sd_card/gscan-master/example/local && cd gscan && ./gscan-master -iprange="./my.conf"',
process = subprocess.Popen(
cmd,
0,
None,
# subprocess.PIPE,
# subprocess.PIPE,
# subprocess.PIPE,
None,
None,
None,
None,
False,
True,
None,
None,
False,
None,
0)
if isNeedWait:
process.wait()
fp = open(googleIpPath, 'r')
content = fp.readline()
    fp.close()
return content
# Get the file modification time
def getFileModifyTime(path):
if path != None:
ft = os.stat(path)
return ft.st_mtime
return time.time()
# Get the IP list for first startup
def getFirstStartUpIp():
if os.path.exists(NET_GSCAN_FILE) and time.time() - getFileModifyTime(NET_GSCAN_FILE) < GET_NET_IP_LIST_SEP:
print 'use local ip_file because time short'
fileIp = readIpFromFile(NET_GSCAN_FILE)
if fileIp != None and len(fileIp) > 0:
return fileIp
print 'real get net ip_file'
content = getAvailableIp()
    # Save the downloaded data
if content != None:
print 'get net ip success, now save to files'
saveIpToFile(content, NET_GSCAN_FILE)
else:
print 'get net ip fail, try to use the old file!'
return readIpFromFile(NET_GSCAN_FILE)
return content
def saveIpToFile(content, path):
if content != None:
file = open(path, "w+")
file.write(content)
file.flush()
        file.close()
def readIpFromFile(path):
if path != None:
file = open(path)
content = file.readline()
        file.close()
return content
return None
# Fetch the pre-scanned IP lists uploaded to GitHub and the backup server
def getAvailableIp():
i = 0
ispType = None
while i < NET_RETRY_CNT and ispType == None:
ispType = getIpType()
i = i + 1
if ispType == None:
ispType = ISP_TYPE_DIANXIN
i = 0
ipList = None
while i < NET_RETRY_CNT and ipList == None:
ipList = getAvailableGoagentIp(ispType)
if ipList == None:
ipList = getAvailableGoagentIpWithBackupSite(ispType)
i = i + 1
if ipList == None:
print 'get available Ip list fail'
return ipList
# Main routine
def startGoagentWithIpAutoGet():
i = 0
ispType = None
while i < NET_RETRY_CNT and ispType == None:
ispType = getIpType()
i = i + 1
if ispType == None:
ispType = ISP_TYPE_DIANXIN
i = 0
ipList = None
while i < NET_RETRY_CNT and ipList == None:
ipList = getAvailableGoagentIp(ispType)
if ipList == None:
ipList = getAvailableGoagentIpWithBackupSite(ispType)
i = i + 1
if ipList == None:
print 'get available Ip list fail'
return
localFileReplace(ipList)
    # Start goagent
os.startfile(GOAGENT_EXE_FILE)
if __name__ == "__main__":
startGoagentWithIpAutoGet()
|
import asyncio
import re
translitDict = {
'eng': 'qwertyuiopasdfghjklzxcvbnm',
'ru': 'квертиуиопасдфжхжклзкцвбнм'
}
ending = {'m': '', 'j': 'а', 's': 'о'}
def translit(input_text):
"""Удаляет непонятные символы и транслитит английский текст на кириллицу (🚲)"""
output = []
input_text = re.sub('[^a-zA-ZА-Яа-яёЁ_ \-]+', '', input_text)
input_text = input_text.lower().replace('x', 'ks').replace(
'j', 'dj').replace('sh', 'ш').replace('zh', 'ж').replace('ch', 'ч')
for char in input_text:
output.append(
char.translate(
str.maketrans(translitDict.get('eng'),
translitDict.get('ru'))))
return ''.join(output)
def add_dot(txt):
"""Добавляет точку к тексту, если её там нет"""
return txt + '.' if txt[-1] != '.' else txt
def filter_symbol(string, symbol, alternative):
"""Заменяет символы, которые могут искривить ввод, пока что тут обычный реплейс"""
return string.replace(symbol, alternative)
class Queue:
"""Очередь сообщений"""
def __init__(self, max_in_queue_per_user=10):
self.queue = []
self.limits = {}
self.max_per_user = max_in_queue_per_user
def activate(self, loop=False):
"""Вызывается из нового созданого event-loop'a чтобы не конфликтовать с тележной либой, обнуляет фьючер (🏒)"""
if loop:
self.loop = loop
self.not_empty = asyncio.Future()
@asyncio.coroutine
def _trigger(self):
"""Существует для установки результата фьючера о наличии сообщения из правильного event-loop'a (🏒)"""
if len(self.queue) > 0:
self.not_empty.set_result(True)
def pull_the_trigger(self):
"""Обёртка для функции _trigger"""
asyncio.run_coroutine_threadsafe(self._trigger(), self.loop)
def build_item(self, text, ctx, responseParser=None, pOffset=0):
return (text, ctx.message.reply, ctx.chat.send_action, ctx.author.id,
ctx.chat.id, asyncio.get_event_loop(), responseParser, pOffset)
async def get_item(self):
if len(self.queue) == 0:
await self.not_empty
self.activate()
return self.queue.pop(0) if len(self.queue) > 0 else ""
def add_to(self, item, user):
if item[0] == '':
return
if user not in self.limits:
self.limits[user] = 1
else:
self.limits[user] += 1
if self.limits[user] <= self.max_per_user:
self.queue.append(item)
self.pull_the_trigger()
def cut_extra_stuff(txt):
"""Вырезает артефакты"""
extra = txt.find('\n\n\n')
return txt[0:extra] if extra != -1 else txt
async def delay(func, sec, *args):
await asyncio.sleep(sec)
await func(*args)
def add_to_user_history(msg, history):
if msg.author.id in history:
history[msg.author.id].append((msg.content, 1))
if len(history[msg.author.id]) > 16:
history[msg.author.id].pop(0)
history[msg.author.id].pop(0)
else:
history[msg.author.id] = [(msg.content, 1)]
return history[msg.author.id]
def add_to_chat_history(msg, history):
if msg.chat.id in history:
history[msg.chat.id].append(
(msg.content, msg.author.id,
translit(msg.author.first_name).capitalize()))
if len(history[msg.chat.id]) > 16:
history[msg.chat.id].pop(0)
history[msg.chat.id].pop(0)
else:
history[msg.chat.id] = [(msg.content, msg.author.id,
translit(msg.author.first_name).capitalize())]
return history[msg.chat.id]
def translator_response_parser(txt):
return txt[0:txt.find(';')]
def rage_response_parser(txt):
return txt[0:txt.find('"')]
def historic_response_parser(txt, uid, history):
resp = rage_response_parser(txt).replace('Человек:', '')
history[uid].append((resp, 0))
return resp
|
import json
import requests
from flask import request
from flask_restplus import Resource
from app.extensions import api
from app.api.utils.access_decorators import requires_role_view_all
from app.api.services.orgbook_service import OrgBookService
from werkzeug.exceptions import BadRequest, InternalServerError, NotFound, BadGateway
class SearchResource(Resource):
@api.doc(
description='Search OrgBook.',
params={'search': 'The search term to use when searching OrgBook.'})
@requires_role_view_all
def get(self):
search = request.args.get('search')
resp = OrgBookService.search(search)
if resp.status_code != requests.codes.ok:
raise BadGateway(f'OrgBook API responded with {resp.status_code}: {resp.reason}')
try:
results = json.loads(resp.text)['results']
except:
raise BadGateway('OrgBook API responded with unexpected data.')
return results
class CredentialResource(Resource):
@api.doc(description='Get information on an OrgBook credential.')
@requires_role_view_all
def get(self, credential_id):
resp = OrgBookService.get_credential(credential_id)
if resp.status_code != requests.codes.ok:
raise BadGateway(f'OrgBook API responded with {resp.status_code}: {resp.reason}')
credential = json.loads(resp.text)
return credential
|
import json

import numpy as np
import pandas as pd

outer_old = pd.DataFrame()
outer_new = pd.DataFrame()
for i in range(31):
with open("nist_data/conductivity/%s.json" % str(i+1)) as json_file:
#grab data, data headers (names), the salt name
json_full = json.load(json_file)
json_data = pd.DataFrame(json_full['data'])
json_datanames = np.array(json_full['dhead']) # make names into array to add as columns headers for df
json_data.columns = json_datanames
json_saltname = pd.DataFrame(json_full['components'])#components section contains names of DES components
#print(json_saltname['name']) #grabbing the HBD and HBA
inner_old = pd.DataFrame()
inner_new = pd.DataFrame()
#loop through the columns of the data, note that some of the
#json files are missing pressure data.
for indexer in range(len(json_data.columns)):
grab=json_data.columns[indexer]
list = json_data[grab]
my_list = [l[0] for l in list]
dfmy_list = pd.DataFrame(my_list)
dfmy_list.columns = [json_datanames[indexer][0]]
inner_new = pd.concat([dfmy_list, inner_old], axis=1)
inner_old = inner_new
#print(inner_old.columns)
#add the DES components, i.e. HBA and HBD
# they are not always listed in the same order on nist data, i.e., HBA always first. Will figure out later.
for i in range(len(json_saltname['name'])):
if 'chloride' in json_saltname['name'][i] or 'bromide' in json_saltname['name'][i]:
inner_old['HBA']=json_saltname['name'][i]
else:
inner_old['HBD']=json_saltname['name'][i]
#loop through the column names of the dataframe
for j in range(len(inner_old.columns)):
#if the words Mole fraction and a halogen are contained, values are correct and no value editing
#necessary and column is simply renamed to HBA mole fraction.
if 'Mole fraction' in inner_old.columns[j] and 'chloride' in inner_old.columns[j] or 'Mole fraction' in inner_old.columns[j] and 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#if the words Mole Ratio and a halogen are contained, dataset was mislabeled but values are correct.
#only need to rename column to HBA mole fraction.
elif 'Mole ratio' in inner_old.columns[j] and 'chloride' in inner_old.columns[j] or 'Mole ratio' in inner_old.columns[j] and 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#if the words mole ratio are present, but no halogens, the ratio of the HBD is displayed and needs
#to be changed to HBA mole fraction. First relabel the colum as HBA mole fraction.
elif 'Mole ratio' in inner_old.columns[j] and not 'chloride' in inner_old.columns[j] or 'Mole ratio' in inner_old.columns[j] and not 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#apparently the numbers are strings so change to integer. May need to do this for every other column
inner_old['HBA Mole Fraction'] = inner_old['HBA Mole Fraction'].astype(int)
#next make an empty list that will hold all the new HBA mole fractions
mole_fractions_list = []
#loop through every HBD ratio in the column
for k in range(len(inner_old['HBA Mole Fraction'])):
#Calculate the HBA mole fraction from every HBD ratio and append to the list
mole_fractions_list.append(1/(1+inner_old['HBA Mole Fraction'][k]))
#finally make the list the new mole fraction column in the dataframe
inner_old['HBA Mole Fraction'] = mole_fractions_list
#in the last case, if the word mole fraction is present but not a halogen, HBD mole fraction is displayed.
                #Follow a similar process as before
elif 'Mole fraction' in inner_old.columns[j] and not 'chloride' in inner_old.columns[j] or 'Mole fraction' in inner_old.columns[j] and not 'bromide' in inner_old.columns[j]:
inner_old = inner_old.rename(columns={inner_old.columns[j]:'HBA Mole Fraction'})
#convert to float instead since it is a decimal
inner_old['HBA Mole Fraction'] = inner_old['HBA Mole Fraction'].astype(float)
#empty list
mole_fractions_list = []
#loop through column
for k in range(len(inner_old['HBA Mole Fraction'])):
#subtract 1 from HBD mole fraction to get HBA mole fraction and append to list
mole_fractions_list.append(1 - inner_old['HBA Mole Fraction'][k])
#replace column
inner_old['HBA Mole Fraction'] = mole_fractions_list
#add to the growing dataframe
outer_new = pd.concat([inner_old, outer_old], axis = 0, ignore_index = True)
outer_old = outer_new
print(outer_old.head(50))
|
from django import forms
from .models import SuperHeroApp
# choices
RICH_POWER_CHOICES = (
('rich', 'Rich'),
('superpower', 'Superpower'),
)
# choices
SUPER_POWER_CHOICES = (
('flight', 'Flight'),
('speed', 'Speed'),
('telekenetic', 'Telekenetic'),
('healing', 'Healing'),
('invisibility', 'Invisibility'),
('time travel', 'Time Travel'),
)
# choices
GOOD_BAD_CHOICES = (
('good', 'Good'),
('kinda good', 'Kinda Good'),
('lukewarm', 'Lukewarm'),
('sorta evil', 'Sorta Evil'),
('hell hot', 'Hell Hot'),
)
# form for model
class SuperForm(forms.ModelForm):
class Meta:
model = SuperHeroApp
fields = "__all__"
# labels for form questions
labels ={
"name": "Name",
"cityorigin": "City/Origin/Planet",
"richpower": "Are you rich, or have superpowers?",
"whichPower": "If superpower, which one(s)?",
"goodEvil": "On a scale of Heaven and Hell, which are you?",
"examples": "Give us 3 examples of when you used your super hero abilities:",
}
# to show specific widget types for form questions
widgets = {
"richpower": forms.RadioSelect(choices=RICH_POWER_CHOICES),
"whichPower": forms.CheckboxSelectMultiple(choices=SUPER_POWER_CHOICES),
"goodEvil": forms.Select(choices=GOOD_BAD_CHOICES),
}
|
import sublime, sublime_plugin, webbrowser
try:
from .github import *
except ValueError:
from github import *
class GithubRepositoryCommand(GithubWindowCommand):
@with_repo
def run(self, repo):
webbrowser.open_new_tab(repo.repository_url())
|
import datetime
from pathlib import Path
from vantage6.common.globals import APPNAME
#
# INSTALLATION SETTINGS
#
PACKAGE_FOLDER = Path(__file__).parent.parent.parent
DATA_FOLDER = PACKAGE_FOLDER / APPNAME / "server" / "_data"
#
# RUNTIME SETTINGS
#
# Expiry time of JWT access tokens
JWT_ACCESS_TOKEN_EXPIRES = datetime.timedelta(hours=6)
# Expiry time of JWT tokens in a test environment
JWT_TEST_ACCESS_TOKEN_EXPIRES = datetime.timedelta(days=1)
# Which resources should be initialized. These names correspond to the
# file-names in the resource directory
RESOURCES = ['node', 'collaboration', 'organization', 'task', 'result',
'token', 'user', 'version', 'recover', 'role',
'rule', 'health']
# Super user information. This user is only created if it is not in the
# database yet at startup time.
SUPER_USER_INFO = {
"username": "root",
"password": "root"
}
# Whether the refresh tokens should expire. Note that setting this to True
# means that nodes will disconnect after some time
REFRESH_TOKENS_EXPIRE = False
|
# -*- coding: utf-8 -*-
class Item:
def __init__(self, name, sell_in, quality):
self.name = name
self.sell_in = sell_in
self.quality = quality
def __repr__(self):
return "%s, %s, %s" % (self.name, self.sell_in, self.quality)
def _base_update(item: Item, quality_inc: int):
item.sell_in -= 1
item.quality += quality_inc
item.quality = max(min(item.quality, 50), 0)
class Sulfuras:
def __init__(self, item: Item):
self.item = item
def update_quality(self):
return
class AgedBrie:
def __init__(self, item: Item):
self.item = item
def update_quality(self):
quality_inc = 1
if self.item.sell_in <= 0:
quality_inc = 2
_base_update(self.item, quality_inc)
class BackstagePasses:
def __init__(self, item: Item):
self.item = item
def update_quality(self):
quality_inc = 1
if self.item.sell_in <= 10:
quality_inc = 2
if self.item.sell_in <= 5:
quality_inc = 3
if self.item.sell_in <= 0:
quality_inc = -self.item.quality
_base_update(self.item, quality_inc)
class Conjured:
def __init__(self, item: Item):
self.item = item
def update_quality(self):
quality_inc = -2
if self.item.sell_in <= 0:
quality_inc = -4
_base_update(self.item, quality_inc)
class OtherItems:
def __init__(self, item: Item):
self.item = item
def update_quality(self):
quality_inc = -1
if self.item.sell_in <= 0:
quality_inc = -2
_base_update(self.item, quality_inc)
def create_smart_item(item: Item):
if item.name == "Sulfuras, Hand of Ragnaros":
return Sulfuras(item)
elif item.name == "Aged Brie":
return AgedBrie(item)
elif item.name == "Backstage passes to a TAFKAL80ETC concert":
return BackstagePasses(item)
elif item.name == "Conjured":
return Conjured(item)
else:
return OtherItems(item)
class GildedRose(object):
def __init__(self, items):
self.items = items
def update_quality(self):
for item in self.items:
create_smart_item(item).update_quality()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-03-10 09:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('athenatools', '0018_auto_20190310_1506'),
]
operations = [
migrations.AddField(
model_name='purchase',
name='check_freeze',
field=models.BooleanField(default=True, verbose_name=b'\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AddField(
model_name='purchase',
name='check_label',
field=models.BooleanField(default=True, verbose_name=b'\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AddField(
model_name='purchase',
name='check_odorless',
field=models.BooleanField(default=True, verbose_name=b'\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AddField(
model_name='purchase',
name='check_package',
field=models.BooleanField(default=True, verbose_name=b'\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
migrations.AddField(
model_name='purchase',
name='storage',
field=models.CharField(choices=[('\u5ba4\u6e29', '\u5ba4\u6e29'), ('\u51b7\u51bb', '\u51b7\u51bb'), ('\u51b7\u85cf', '\u51b7\u85cf')], default='\u5ba4\u6e29', max_length=255, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe8\xb4\xae\xe8\x97\x8f\xe6\x96\xb9\xe5\xbc\x8f'),
),
migrations.AlterField(
model_name='product',
name='default_check_freeze',
field=models.BooleanField(default=True, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x86\xbb\xe5\x93\x81\xe6\xb8\xa9\xe5\xba\xa6\xe2\x89\xa4-12\xe2\x84\x83\xe4\xb8\x94\xe6\x97\xa0\xe8\xbd\xaf\xe5\x8c\x96'),
),
migrations.AlterField(
model_name='product',
name='default_check_label',
field=models.BooleanField(default=True, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\xa0\x87\xe7\xad\xbe\xe6\xad\xa3\xe5\xb8\xb8'),
),
migrations.AlterField(
model_name='product',
name='default_check_odorless',
field=models.BooleanField(default=True, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe6\x97\xa0\xe5\xbc\x82\xe5\x91\xb3'),
),
migrations.AlterField(
model_name='product',
name='default_check_package',
field=models.BooleanField(default=True, verbose_name=b'\xe9\xbb\x98\xe8\xae\xa4\xe5\x8c\x85\xe8\xa3\x85\xe5\xae\x8c\xe5\xa5\xbd'),
),
]
|
# User types list passed into dropdown of same name for user selection used
# in models.py & forms.py
USER_TYPES_CHOICES = [
('', 'Select Post Category'),
('participant', 'Participant'),
('staff', 'Staff'),
('admin', 'Admin'),
]
# LMS Modules list passed into dropdown of same name for user selection used
# in models.py & forms.py
LMS_MODULES_CHOICES = [
('', 'Select Learning Stage'),
('programme_preliminaries', 'Programme Preliminaries'),
('programming_paradigms', 'Programming Paradigms'),
('html_fundamentals', 'HTML Fundamentals'),
('css_fundamentals', 'CSS Fundamentals'),
('user_centric_frontend_development', 'User Centric Frontend Development'),
('javascript_fundamentals', 'Javascript Fundamentals'),
('interactive_frontend_development', 'Interactive Frontend Development'),
('python_fundamentals', 'Python Fundamentals'),
('practical_python', 'Practical Python'),
('data_centric_development', 'Data Centric Development'),
('full_stack_frameworks with django', 'Full Stack Frameworks with Django'),
('alumni', 'Alumni'),
('staff', 'Staff'),
]
|
#
# Copyright 2014 David Novakovic
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from twisted.trial import unittest
from cyclone.testing import CycloneTestCase, Client
from cyclone.web import Application, RequestHandler, asynchronous
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
class TestHandler(RequestHandler):
def get(self):
self.write("Something")
def post(self):
self.write("Something posted")
def put(self):
self.write("Something put")
def head(self):
self.write("")
def delete(self):
self.write("")
class DeferredTestHandler(RequestHandler):
@asynchronous
def get(self):
self.write("Something...")
reactor.callLater(0.1, self.do_something)
def do_something(self):
self.write("done!")
self.finish()
class CookieTestHandler(RequestHandler):
def get(self):
self.set_secure_cookie("test_cookie", "test_value")
self.finish()
def post(self):
value = self.get_secure_cookie("test_cookie")
self.finish(value)
def mock_app_builder():
return Application([
(r'/testing/', TestHandler),
(r'/deferred_testing/', DeferredTestHandler),
(r'/cookie_testing/', CookieTestHandler),
], cookie_secret="insecure")
class TestTestCase(unittest.TestCase):
def test_create(self):
self.assertRaises(ValueError, CycloneTestCase, mock_app_builder)
case = CycloneTestCase(app_builder=mock_app_builder)
self.assertTrue(case._app)
self.assertTrue(case.client)
class TestClient(unittest.TestCase):
def setUp(self):
self.app = mock_app_builder()
self.client = Client(self.app)
def test_create_client(self):
app = mock_app_builder()
client = Client(app)
self.assertTrue(client.app)
@inlineCallbacks
def test_get_request(self):
response = yield self.client.get("/testing/")
self.assertEqual(response.content, b"Something")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_get_request_with_params(self):
response = yield self.client.get("/testing/", {"q": "query"})
self.assertEqual(response.content, b"Something")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_post_request(self):
response = yield self.client.post("/testing/")
self.assertEqual(response.content, b"Something posted")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_put_request(self):
response = yield self.client.put("/testing/")
self.assertEqual(response.content, b"Something put")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_head_request(self):
response = yield self.client.head("/testing/")
self.assertEqual(response.content, b"")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_delete_request(self):
response = yield self.client.delete("/testing/")
self.assertEqual(response.content, b"")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_get_deferred_request(self):
response = yield self.client.get("/deferred_testing/")
self.assertEqual(response.content, b"Something...done!")
self.assertTrue(len(response.headers) > 3)
@inlineCallbacks
def test_cookies(self):
response = yield self.client.get("/cookie_testing/")
self.assertEqual(
self.client.cookies.get_secure_cookie("test_cookie"),
b"test_value"
)
response = yield self.client.post("/cookie_testing/")
self.assertEqual(response.content, b"test_value")
|
from pathlib import Path
from urllib.parse import quote, unquote
from django.conf import settings
from django.core.files.storage import default_storage
from django.utils.text import slugify
def save_uploaded_file(prefix, uploaded_file):
name, ext = uploaded_file.name.rsplit(".", 1)
filename = "{}.{}".format(slugify(name), ext)
iiif_path = Path(settings.IIIF_DIR) / prefix / filename
iiif_unique_path = default_storage.save(str(iiif_path), uploaded_file)
iiif_uri = quote(iiif_unique_path.replace(settings.IIIF_DIR, "")[1:],
safe="")
return iiif_uri
def delete_uploaded_file(uploaded_filename):
iiif_path = Path(settings.IIIF_DIR) / unquote(uploaded_filename)
default_storage.delete(str(iiif_path))
def transform_iiif_image(uri, region="full", size="max", rotation=0,
quality="default"):
# http://localhost:8182/iiif/2/ghostdriver.jpg/full/500,/10/default.jpg
base_uri = uri.rsplit('/', 4)[0]
return "{base_uri}/{region}/{size}/{rotation}/{quality}.jpg".format(
base_uri=base_uri, region=region, size=size, rotation=str(rotation),
quality=quality
    )
|
from ..jsonrpc.http_taraxa import *
def dag_block_hex2int(block):
keys = ['level', 'period', 'number', 'timestamp']
for key in keys:
block[key] = hex2int(block[key])
return block
def getDagBlockByHash(hash, fullTransactions=False, **kwargs):
    r = taraxa_getDagBlockByHash(hash, fullTransactions, **kwargs)
    if 'result' in r:
        block = r['result']
        if block:
            block = dag_block_hex2int(block)
        return block
    else:
        raise Exception(r["error"])
def getDagBlockByLevel(tag, fullTransactions=False, **kwargs):
r = taraxa_getDagBlockByLevel(tag, fullTransactions, **kwargs)
if 'result' in r:
blocks = r['result']
blocks = list(map(lambda block: dag_block_hex2int(block), blocks))
return blocks
else:
raise Exception(r["error"])
def dagBlockLevel(**kwargs):
r = taraxa_dagBlockLevel(**kwargs)
level = hex2int(r['result'])
return level
def dagBlockPeriod(**kwargs):
r = taraxa_dagBlockPeriod(**kwargs)
period = hex2int(r['result'])
return period
def blockNumber(**kwargs):
r = taraxa_blockNumber(**kwargs)
number = hex2int(r['result'])
return number
|
import unittest
from emingora.pom.analyser.tools.RepeatedGAVS import RepeatedGAVS
from emingora.pom.analyser.utils.PomReaderUtil import PomReaderUtil
class CheckRepeatedGAVTest(unittest.TestCase):
def test_true(self):
# Arrange
pom = PomReaderUtil.read('resources/noRepeatedDependency_pom.xml')
# Act
result = RepeatedGAVS.get_repeated_gavs(pom)
# Assert
self.assertEqual(0, len(result))
def test_false(self):
# Arrange
pom = PomReaderUtil.read('resources/withRepeatedDependency_pom.xml')
# Act
result = RepeatedGAVS.get_repeated_gavs(pom)
# Assert
self.assertEqual(1, len(result))
self.assertEqual("org.springframework", result[0].group_id)
self.assertEqual("spring-web", result[0].artifact_id)
self.assertEqual("1", result[0].version)
self.assertEqual(None, result[0].classifier)
|
# Copyright 2021 UC Davis Plant AI and Biophysics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def _is_valid_file(file):
"""Returns whether a file is valid.
This means that it is a file and not a directory, but also that
it isn't an unnecessary dummy file like `.DS_Store` on MacOS.
"""
if os.path.isfile(file):
if not file.startswith('.git') and file not in ['.DS_Store']:
return True
return False
def get_file_list(fpath, ext = None):
"""Gets a list of files from a path."""
base_list = [f for f in os.listdir(fpath)
if _is_valid_file(os.path.join(fpath, f))]
if ext is not None:
if isinstance(ext, str):
return [f for f in base_list if f.endswith(ext)]
else:
return [f for f in base_list if
any([f.endswith(i) for i in ext])]
return base_list
def get_dir_list(filepath):
"""Get a list of directories from a path."""
return [f for f in os.listdir(filepath)
if os.path.isdir(os.path.join(filepath, f))]
def nested_dir_list(fpath):
    """Returns a nested list of directories from a path."""
    dirs = []
    for f in os.scandir(fpath):  # type: os.DirEntry
        if f.is_dir() and not os.path.basename(f.path).startswith('.'):
            # `DirEntry.path` already includes `fpath`.
            dirs.append(f.path)
            # Recurse directly into each subdirectory to collect all
            # nested directories beneath it.
            dirs.extend(nested_dir_list(f.path))
    return dirs
def nested_file_list(fpath):
"""Returns a nested list of files from a path."""
files = []
dirs = nested_dir_list(fpath)
for dir_ in dirs:
files.extend([os.path.join(dir_, i) for i in os.listdir(dir_)
if _is_valid_file(os.path.join(dir_, i))])
return files
def create_dir(dir_):
"""Creates a directory (or does nothing if it exists)."""
os.makedirs(dir_, exist_ok = True)
def recursive_dirname(dir_, level = 1):
"""Returns a recursive dirname for the number of levels provided."""
if level == 0:
return dir_
return recursive_dirname(os.path.dirname(dir_), level - 1)
def is_image_file(file):
"""Returns whether a file is an image file."""
if not isinstance(file, (str, bytes, os.PathLike)):
return False
end = os.path.splitext(file)[-1][1:]
return end.lower() in ['jpg', 'jpeg', 'png', 'bmp', 'tiff']
|
import wx
def main():
for x in dir(wx):
if x.startswith('EVT'):
print(x)
if __name__ == '__main__':
main()
|
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from wtforms.validators import Email
from wtforms.validators import Length
class ContactUs(FlaskForm):
name = StringField("Name",
validators=[DataRequired(),
Length(min=2, max=20)])
email = StringField("Email", validators=[DataRequired(), Email()])
mobile = StringField("Mobile", validators=[Length(4, 16)])
subject = StringField("Subject",
validators=[DataRequired(),
Length(5, 200)])
message = TextAreaField("Message",
validators=[DataRequired(),
Length(5, 10000)])
|
#!/usr/bin/env python
# om is oauth-mini - a simple implementation of a useful subset of OAuth.
# It's designed to be useful and reusable but not general purpose.
#
# (c) 2011 Rdio Inc
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""A simple OAuth client implementation. Do less better.
Here are the restrictions:
- only HMAC-SHA1 is supported
- only WWW-Authentiate form signatures are generated
To sign a request:
auth = om((consumer_key,consumer_secret), url, params)
# send Authorization: <auth>
# when POSTing <params> to <url>
Optional additional arguments are:
token = (oauth_token, oauth_token_secret)
method = "POST"
realm = "Realm-for-authorization-header"
timestamp = oauth timestamp - otherwise auto generated
nonce = oauth nonce - otherwise auto generated
"""
from __future__ import unicode_literals
import time, random, hmac, hashlib, binascii
try:
from urllib.parse import urlparse, parse_qsl, quote
except ImportError:
from urlparse import urlparse, parse_qsl
from urllib import quote
import sys
PY3 = (sys.version_info >= (3, 0, 0))
def om(consumer, url, post_params, token=None, method='POST', realm=None, timestamp=None, nonce=None):
"""A one-shot simple OAuth signature generator"""
# the method must be upper-case
method = method.upper()
# turn the POST params into a list of tuples if it's not already
if isinstance(post_params, list):
params = list(post_params) # copy the params list since we'll be messing with it
else:
params = list(post_params.items())
# normalize the URL
parts = urlparse(url)
scheme, netloc, path, _, query = parts[:5]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
netloc = netloc.lower()
normalized_url = '%s://%s%s' % (scheme, netloc, path)
# add query-string params (if any) to the params list
params.extend(parse_qsl(query))
# add OAuth params
params.extend([
('oauth_version', '1.0'),
('oauth_timestamp', timestamp if timestamp is not None else str(int(time.time()))),
('oauth_nonce', nonce if nonce is not None else str(random.randint(0, 1000000))),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_consumer_key', consumer[0]),
])
# the consumer secret is the first half of the HMAC-SHA1 key
hmac_key = consumer[1] + '&'
if token is not None:
# include a token in params
params.append(('oauth_token', token[0]))
# and the token secret in the HMAC-SHA1 key
hmac_key += token[1]
    # Sort lexicographically, first by key, then by value.
params.sort()
# UTF-8 and escape the key/value pairs
def escape(s):
s_encoded = s.encode('utf-8')
safe_chars_encoded = u'~'.encode('utf-8')
return quote(s_encoded, safe=safe_chars_encoded)
params = [(escape(k), escape(v)) for k,v in params]
# Combine key value pairs into a string.
normalized_params = '&'.join(['%s=%s' % (k, v) for k, v in params])
# build the signature base string
signature_base_string = (escape(method) +
'&' + escape(normalized_url) +
'&' + escape(normalized_params))
# HMAC-SHA1
hashed = hmac.new(hmac_key.encode('utf-8'),
signature_base_string.encode('utf-8'),
hashlib.sha1)
# Calculate the digest base 64.
oauth_signature = binascii.b2a_base64(hashed.digest())[:-1]
if PY3:
# binascii.b2a_base64 will return bytes in Python3
# convert it to unicode here.
oauth_signature = oauth_signature.decode('utf-8')
# Build the Authorization header
authorization_params = [('oauth_signature', oauth_signature)]
if realm is not None:
authorization_params.insert(0, ('realm', escape(realm)))
oauth_params = frozenset(('oauth_version', 'oauth_timestamp', 'oauth_nonce',
'oauth_signature_method', 'oauth_signature',
'oauth_consumer_key', 'oauth_token'))
authorization_params.extend([p for p in params if p[0] in oauth_params])
return 'OAuth ' + (', '.join(['%s="%s"'%p for p in authorization_params]))
|
from django.apps import AppConfig
import actstream.registry
default_app_config = 'kitsune.questions.QuestionsConfig'
class QuestionsConfig(AppConfig):
name = 'kitsune.questions'
def ready(self):
Question = self.get_model('Question')
actstream.registry.register(Question)
Answer = self.get_model('Answer')
actstream.registry.register(Answer)
|
#!/usr/bin/env python3
"""
Convert structural variants in a VCF to CGH (CytoSure) format
"""
import argparse
import sys
import pandas as pd
import logging
import gzip
from collections import namedtuple, defaultdict
from io import StringIO
from lxml import etree
from cyvcf2 import VCF
from constants import *
__version__ = '0.7.1'
logger = logging.getLogger(__name__)
Event = namedtuple('Event', ['chrom', 'start', 'end', 'type', 'info'])
def events(variants, CONTIG_LENGTHS):
"""Iterate over variants and yield Events"""
for variant in variants:
if len(variant.ALT) != 1:
continue
chrom = variant.CHROM
if chrom not in CONTIG_LENGTHS:
continue
start = variant.start
sv_type = variant.INFO.get('SVTYPE')
if variant.INFO.get("END"):
end = variant.INFO.get('END')
if start >= end:
tmp=int(end)
end=start
start=tmp
logger.debug('%s at %s:%s-%s (%s bp)', sv_type, chrom, start+1, end, end - start)
assert len(variant.REF) == 1
yield Event(chrom=chrom, start=start, end=end, type=sv_type, info=dict(variant.INFO))
else:
if ":" in variant.ALT[0] and ("[" in variant.ALT[0] or "]" in variant.ALT[0]):
chrom2=variant.ALT[0].split(":")[0].split("[")[-1].split("]")[-1]
else:
print ("invalid variant type {}: skipping".format(variant.ALT[0]))
if chrom2 != chrom:
logger.debug('%s at %s:%s', sv_type, chrom, start+1)
yield Event( chrom=chrom, start=start, end=None, type=sv_type, info=dict(variant.INFO) )
else:
end=int(variant.ALT[0].split(":")[1].split("[")[0].split("]")[0])
if start >= end:
tmp=int(end)
end=start
start=tmp
logger.debug('%s at %s:%s', sv_type, chrom, start+1)
yield Event( chrom=chrom, start=start, end=end, type=sv_type, info=dict(variant.INFO) )
def strip_template(path):
"""
Read in the template CGH file and strip it of everything that we don't need.
Return the lxml.etree object.
"""
tree = etree.parse(path)
# Remove all aberrations
parent = tree.xpath('/data/cgh/submission')[0]
for aberration in parent.xpath('aberration'):
parent.remove(aberration)
# Remove all except the first probe (in the order in which they occur in
# the file) on each chromosome. Chromosomes without probes are not
# clickable in the CytoSure UI.
parent = tree.xpath('/data/cgh/probes')[0]
seen = set()
for probe in parent:
chrom = probe.attrib.get('chromosome')
if not chrom or chrom in seen:
parent.remove(probe)
else:
seen.add(chrom)
# Remove all segments
parent = tree.xpath('/data/cgh/segmentation')[0]
for segment in parent:
parent.remove(segment)
return tree
def make_probe(parent, chromosome, start, end, height, text):
probe = etree.SubElement(parent, 'probe')
probe.attrib.update({
'name': text,
'chromosome': CHROM_RENAME.get(chromosome, chromosome),
'start': str(start + 1),
'stop': str(end),
'normalized': '{:.3f}'.format(-height),
'smoothed': '0.0',
'smoothed_normalized': '-0.25',
'sequence': 'AACCGGTT',
})
red = 1000
green = red * 2**height
spot = etree.SubElement(probe, 'spot')
spot.attrib.update({
'index': '1',
'row': '1',
'column': '1',
'red': str(red),
'green': '{:.3f}'.format(green),
'gSNR': '100.0',
'rSNR': '100.0',
'outlier': 'false',
})
return probe
def make_segment(parent, chromosome, start, end, height):
segment = etree.SubElement(parent, 'segment')
segment.attrib.update({
'chrId': CHROM_RENAME.get(chromosome, chromosome),
'numProbes': '100',
'start': str(start + 1),
'stop': str(end),
'average': '{:.3f}'.format(-height), # CytoSure inverts the sign
})
return segment
def make_aberration(parent, chromosome, start, end, comment=None, method='converted from VCF',
confirmation=None, n_probes=0, copy_number=99):
"""
comment -- string
method -- short string
confirmation -- string
"""
aberration = etree.SubElement(parent, 'aberration')
aberration.attrib.update(dict(
chr=CHROM_RENAME.get(chromosome, chromosome),
start=str(start + 1),
stop=str(end),
maxStart=str(start + 1),
maxStop=str(end),
copyNumber=str(copy_number),
initialClassification='Unclassified',
finalClassification='Unclassified',
inheritance='Not_tested',
numProbes=str(n_probes),
startProbe='',
stopProbe='',
maxStartProbe='',
maxStopProbe='',
# TODO fill in the following values with something sensible
automationLevel='1.0',
baseline='0.0',
mosaicism='0.0',
gain='true',
inheritanceCoverage='0.0',
logRatio='-0.4444', # mean log ratio
method=method,
p='0.003333', # p-value
sd='0.2222', # standard deviation
))
if comment:
e = etree.SubElement(aberration, 'comments')
e.text = comment
if confirmation:
e = etree.SubElement(aberration, 'confirmation')
e.text = confirmation
return aberration
def spaced_probes(start, end, probe_spacing=PROBE_SPACING):
"""
Yield nicely spaced positions along the interval (start, end).
- start and end are always included
- at least three positions are included
"""
l = end - start
n = l // probe_spacing
spacing = l / max(n, 2) # float division
i = 0
pos = start
while pos <= end:
yield pos
i += 1
pos = start + int(i * spacing)
def probe_point(center, height=2.5, width=5001, steps=15):
"""
Yield (pos, height) pairs that "draw" a triangular shape (pointing upwards)
"""
pos_step = (width - 1) // (steps - 1)
height_step = height / ((steps - 1) // 2)
for i in range(-(steps // 2), steps // 2 + 1):
yield center + i * pos_step, height - height_step * abs(i) + 0.1
def format_comment(info):
comment = ''
for k, v in sorted(info.items()):
if k in ('CSQ', 'SVTYPE'):
continue
comment += '\n{}: {}'.format(k, v)
return comment
def merge_intervals(intervals):
"""Merge overlapping intervals into a single one"""
events = [(coord[0], 'START') for coord in intervals]
events.extend((coord[1], 'STOP') for coord in intervals)
events.sort()
active = 0
start = 0
for pos, what in events:
# Note adjacent 'touching' events are merged because 'START' < 'STOP'
if what == 'START':
if active == 0:
start = pos
active += 1
else:
active -= 1
if active == 0:
yield (start, pos)
def complement_intervals(intervals, chromosome_length):
"""
>>> list(complement_intervals([(0, 1), (3, 4), (18, 20)], 20))
[(1, 3), (4, 18)]
"""
prev_end = 0
for start, end in intervals:
if prev_end != start:
yield prev_end, start
prev_end = end
if prev_end != chromosome_length:
yield prev_end, chromosome_length
def add_probes_between_events(probes, chr_intervals, CONTIG_LENGTHS):
for chrom, intervals in chr_intervals.items():
if chrom not in CONTIG_LENGTHS:
continue
intervals = merge_intervals(intervals)
for start, end in complement_intervals(intervals, CONTIG_LENGTHS[chrom]):
for pos in spaced_probes(start, end, probe_spacing=200000):
# CytoSure does not display probes at height=0.0
make_probe(probes, chrom, pos, pos + 60, 0.01, 'between events')
class CoverageRecord:
__slots__ = ('chrom', 'start', 'end', 'coverage')
def __init__(self, chrom, start, end, coverage):
self.chrom = chrom
self.start = start
self.end = end
self.coverage = coverage
def parse_coverages(path):
with open(path) as f:
for line in f:
if line.startswith('#'):
continue
content=line.split('\t')
chrom=content[0]
start=content[1]
end=content[2]
coverage=content[3]
start = int(start)
end = int(end)
coverage = float(coverage)
yield CoverageRecord(chrom, start, end, coverage)
def retrieve_snp(content,args):
snp_data=[]
snp_data.append(content[0])
snp_data.append(int(content[1]))
snp_data.append(float( content[7].split(";{}=".format(args.dp))[-1].split(";")[0] ))
return (snp_data)
def parse_cn_coverages(args):
df = pd.read_csv(args.cn, sep="\t")
for i in range(0,len(df["log2"])):
chrom = df["chromosome"][i]
start = int(df["start"][i])
end = int(df["end"][i])
coverage =float(df["log2"][i])
if "gene" in df:
if df["gene"][i] == "Antitarget":
continue
yield CoverageRecord(chrom, start, end, coverage)
def parse_snv_coverages(args):
snv_list=[]
if args.snv.endswith(".gz"):
        for line in gzip.open(args.snv, "rt"):
if line[0] == "#" or not ";{}=".format(args.dp) in line:
continue
content=line.strip().split()
snv_list.append(retrieve_snp(content,args))
elif args.snv.endswith(".vcf"):
for line in open(args.snv):
if line[0] == "#" or not ";{}=".format(args.dp) in line:
continue
content=line.strip().split()
snv_list.append(retrieve_snp(content,args))
    else:
        print ("only .vcf or gzipped vcf is allowed, exiting")
        sys.exit(1)
for snv in snv_list:
chrom = snv[0]
start = snv[1]
end = start+1
coverage = snv[2]
yield CoverageRecord(chrom, start, end, coverage)
def group_by_chromosome(records):
"""
Group records by their .chrom attribute.
Yield pairs (chromosome, list_of_records) where list_of_records
are the consecutive records sharing the same chromosome.
"""
prev_chrom = None
chromosome_records = []
for record in records:
if record.chrom != prev_chrom:
if chromosome_records:
yield prev_chrom, chromosome_records
chromosome_records = []
chromosome_records.append(record)
prev_chrom = record.chrom
if chromosome_records:
yield prev_chrom, chromosome_records
def bin_coverages(coverages, n):
"""
Reduce the number of coverage records by re-binning
each *n* coverage values into a new single bin.
The coverages are assumed to be from a single chromosome.
"""
chrom = coverages[0].chrom
for i in range(0, len(coverages), n):
records = coverages[i:i+n]
cov = sum(r.coverage for r in records) / len(records)
yield CoverageRecord(chrom, records[0].start, records[-1].end, cov)
def subtract_intervals(records, intervals):
"""
Yield only those records that fall outside of the given intervals.
"""
events = [(r.start, 'rec', r) for r in records]
events.extend((i[0], 'istart', None) for i in intervals)
events.extend((i[1], 'iend', None) for i in intervals)
events.sort()
inside = False
for pos, typ, record in events:
if typ == 'istart':
inside = True
elif typ == 'iend':
inside = False
elif not inside:
yield record
def add_coverage_probes(probes, path, args, CONTIG_LENGTHS, N_INTERVALS):
"""
probes -- <probes> element
path -- path to tab-separated file with coverages
"""
logger.info('Reading %r ...', path)
if args.coverage:
coverages = [r for r in parse_coverages(path) if r.chrom in CONTIG_LENGTHS]
elif args.cn:
coverages = [r for r in parse_cn_coverages(args) if r.chrom in CONTIG_LENGTHS]
else:
coverages = [r for r in parse_snv_coverages(args) if r.chrom in CONTIG_LENGTHS]
mean_coverage = sum(r.coverage for r in coverages) / len(coverages)
logger.info('Mean coverage is %.2f', mean_coverage)
n = 0
for chromosome, records in group_by_chromosome(coverages):
coverage_factor = 1
if args.sex == 'male' and ( chromosome == 'Y' or chromosome == 'X'):
coverage_factor = 2
n_intervals = N_INTERVALS[chromosome]
for record in subtract_intervals(bin_coverages(records,args.bins), n_intervals):
if not args.cn:
height = min(coverage_factor * record.coverage / mean_coverage - 1, MAX_HEIGHT)
if height == 0.0:
height = 0.01
else:
height=record.coverage
print(height)
make_probe(probes, record.chrom, record.start, record.end, height, 'coverage')
n += 1
logger.info('Added %s coverage probes', n)
#apply filtering
def variant_filter(variants, min_size=5000,max_frequency=0.01, frequency_tag='FRQ'):
for variant in variants:
end = variant.INFO.get('END')
if end and not variant.INFO.get('SVTYPE') == 'TRA':
if abs( int(end) - variant.start) <= min_size:
# Too short
continue
elif variant.INFO.get('SVTYPE') == 'BND':
bnd_chrom, bnd_pos = variant.ALT[0][2:-1].split(':')
bnd_pos = int(variant.ALT[0].split(':')[1].split("]")[0].split("[")[0])
bnd_chrom= variant.ALT[0].split(':')[0].split("]")[-1].split("[")[-1]
if bnd_chrom == variant.CHROM and abs(bnd_pos - variant.start) < min_size:
continue
elif variant.INFO.get('SVTYPE') == 'TRA':
bnd_pos = variant.INFO.get('END')
            bnd_chrom = variant.INFO.get('CHR2')
frequency = variant.INFO.get(frequency_tag)
if frequency is not None and frequency > max_frequency:
continue
yield variant
class BlacklistRecord:
__slots__ = ('chrom', 'start', 'end')
def __init__(self, chrom, start, end):
self.chrom = chrom
self.start = start
self.end = end
# read Blacklist
def read_blacklist(path):
with open(path) as f:
for line in f:
if line.startswith('#'):
continue
content=line.split('\t')
chrom=content[0]
start=content[1]
end=content[2]
start = int(start)
end = int(end)
yield BlacklistRecord(chrom, start, end)
def contained_by_blacklist(event, blacklist):
for br in blacklist:
if event.chrom == br.chrom:
if event.start >= br.start and event.end <= br.end:
return True
return False
#retrieve the sample id, assuming single sample vcf
def retrieve_sample_id(vcf, vcf_path):
samples = vcf.samples
if len(samples) == 1:
sample = samples[0]
else:
sample = vcf_path.split("/")[-1].split("_")[0].split(".")[0]
return(sample)
def main():
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
parser = argparse.ArgumentParser("VCF2cytosure - convert SV vcf files to cytosure")
group = parser.add_argument_group('Filtering')
group.add_argument('--size', default=1000, type=int,help='Minimum variant size. Default: %(default)s')
group.add_argument('--frequency', default=0.01, type=float,help='Maximum frequency. Default: %(default)s')
group.add_argument('--frequency_tag', default='FRQ', type=str,help='Frequency tag of the info field. Default: %(default)s')
group.add_argument('--no-filter', dest='do_filtering', action='store_false',default=True,help='Disable any filtering')
group = parser.add_argument_group('Input')
group.add_argument('--genome',required=False, default=37, help='Human genome version. Use 37 for GRCh37/hg19, 38 for GRCh38 template.')
group.add_argument('--sex',required=False, default='female', help='Sample sex male/female. Default: %(default)s')
group.add_argument('--vcf',required=True,help='VCF file')
group.add_argument('--bins',type=int,default=20,help='the number of coverage bins per probes default=20')
group.add_argument('--coverage',help='Coverage file')
group.add_argument('--cn', type=str,
help='add probes using cnvkit cn file(cannot be used together with --coverage)')
group.add_argument('--snv',type=str,help='snv vcf file, use coverage annotation to position the height of the probes(cannot be used together with --coverage)')
    group.add_argument('--dp',type=str,default="DP",help='read depth tag of the snv vcf file. This option is only used when --snv is used to set the height of the probes. The dp tag is used to retrieve the depth of coverage across the snv (default=DP)')
    group.add_argument('--maxbnd',type=int,default=10000,help='Maximum BND size, BND events exceeding this size are discarded')
group.add_argument('--out',help='output file (default = the prefix of the input vcf)')
group.add_argument('--blacklist', help='Blacklist bed format file to exclude completely contained variants.')
group.add_argument('-V','--version',action='version',version="%(prog)s "+__version__ ,
help='Print program version and exit.')
# parser.add_argument('xml', help='CytoSure design file')
args= parser.parse_args()
logger.info('vcf2cytosure %s', __version__)
if (args.coverage and args.cn) or (args.coverage and args.snv) or (args.snv and args.cn):
print ("Choose one of --coverage, --snv and --cn. They cannot be combined.")
quit()
if int(args.genome) == 38:
CGH_TEMPLATE = CGH_TEMPLATE_38
CONTIG_LENGTHS = CONTIG_LENGTHS_38
N_INTERVALS = N_INTERVALS_38
else:
CGH_TEMPLATE = CGH_TEMPLATE_37
CONTIG_LENGTHS = CONTIG_LENGTHS_37
N_INTERVALS = N_INTERVALS_37
if not args.out:
args.out=".".join(args.vcf.split(".")[0:len(args.vcf.split("."))-1])+".cgh"
parser = etree.XMLParser(remove_blank_text=True)
sex_male = "false"
promega_sex = 'Female'
if args.sex == "male":
sex_male = 'true'
promega_sex = 'Male'
vcf = VCF(args.vcf)
sample_id=retrieve_sample_id(vcf, args.vcf)
tree = etree.parse(StringIO(CGH_TEMPLATE.format(sample_id,sample_id,sample_id,sample_id,sex_male,promega_sex,sex_male)), parser)
segmentation = tree.xpath('/data/cgh/segmentation')[0]
probes = tree.xpath('/data/cgh/probes')[0]
submission = tree.xpath('/data/cgh/submission')[0]
if args.blacklist:
blacklist = [r for r in read_blacklist(args.blacklist) if r.chrom in CONTIG_LENGTHS]
chr_intervals = defaultdict(list)
if args.do_filtering:
vcf = variant_filter(vcf,min_size=args.size,max_frequency=args.frequency,frequency_tag=args.frequency_tag)
n = 0
for event in events(vcf, CONTIG_LENGTHS):
height = ABERRATION_HEIGHTS[event.type]
end = event.end
make_segment(segmentation, event.chrom, event.start, end, height)
comment = format_comment(event.info)
if "rankScore" in event.info:
rank_score = int(event.info['RankScore'].partition(':')[2])
else:
rank_score =0
#occ=0
#if args.frequency_tag in event.info:
# occ=event.info[args.frequency_tag]
occ=0
if "OCC" in event.info:
occ=event.info["OCC"]
if event.type in ("INV",'INS', 'BND',"TRA") and not event.end:
continue
#pass
elif event.type in ("INV",'INS', 'BND',"TRA") and (abs(event.start-event.end) > args.maxbnd ):
#pass
continue
elif args.blacklist:
if contained_by_blacklist(event, blacklist):
continue
make_aberration(submission, event.chrom, event.start, end, confirmation=event.type,
comment=comment, n_probes=occ, copy_number=rank_score)
chr_intervals[event.chrom].append((event.start, event.end))
# show probes at slightly different height than segments
for pos in spaced_probes(event.start, event.end - 1):
make_probe(probes, event.chrom, pos, pos + 60, height, event.type)
n += 1
if args.coverage or args.snv or args.cn:
add_coverage_probes(probes, args.coverage, args, CONTIG_LENGTHS, N_INTERVALS)
else:
add_probes_between_events(probes, chr_intervals, CONTIG_LENGTHS)
tree.write(args.out, pretty_print=True)
logger.info('Wrote %d variants to CGH', n)
if __name__ == '__main__':
main()
|
from typing import Container, Sequence, List
#==============================================================================
"""
Write a function that takes as input a set and returns its power set.
Alternatively, Given a collection of integers that might contain duplicates,
nums, return all possible subsets (the power set). - [EPI: 15.4].
"""
def power_set_list_v1(L: List[chr]) -> Container[Sequence]:
""" Computes the power set of the given set, L."""
def _power_set_list_v1(chose_from, result, chosen=[]):
"""
Args:
chosen : list of chosen elements.
chose_from: list of elements to chose from.
result : list of power set elements.
"""
if len(chose_from) == 0:
result.append(list(chosen))
else:
first = chose_from.pop(0)
chosen.append(first)
_power_set_list_v1(chose_from, result, chosen)
chosen.pop()
_power_set_list_v1(chose_from, result, chosen)
chose_from.insert(0, first)
result = []
_power_set_list_v1(L, result)
return result
# print all subsets of the characters in s
def power_set_str_v2(s: str) -> Container[Sequence]:
"""
    Note: it doesn't take the empty set into account.
"""
# print all subsets of the remaining elements, with given prefix
def _power_set_str_v2(prefix: str, s: str, result) -> None:
if len(s) > 0:
# print(prefix+s[0])
# result.append(prefix+s[0])
result.append(prefix + s[0])
_power_set_str_v2(prefix + s[0], s[1:], result)
_power_set_str_v2(prefix, s[1:], result)
res = []
_power_set_str_v2("", s, res)
return res
def power_set_str_v3(s: str) -> Container[Sequence]:
def _power_set_str_v3(prefix: str, s: str, result) -> None:
# print(prefix)
result.append(prefix)
for i in range(len(s)):
_power_set_str_v3(prefix + s[i], s[i + 1:], result)
res = []
_power_set_str_v3("", s, res)
return res
def main():
s = "RACE"
    L: List[str] = list(s)
    ps1 = power_set_list_v1(L)
    ps2 = power_set_str_v2(s)  # it doesn't take the empty set into account.
ps3 = power_set_str_v3(s)
# assert len(ps1) == len(ps2) == pow(2, len(L))
# assert len(ps1) == len(ps1)-1 == pow(2, len(L))-1
print(ps1, ps2, ps3, sep='\n')
if __name__ == '__main__':
main()
|
import time
import datetime
SITE_NAME = "IuliiNet"
DESC = "rete wireless mesh libera e decentralizzata in friuli"
AUTHOR = "lucapost"
SRC = "/home/lucapost/repo/iuliinet/iulii.net/src"
DST = "/home/lucapost/repo/iuliinet/iulii.net/dst"
SITEMAP = "/home/lucapost/repo/iuliinet/iulii.net/dst/sitemap.xml"
URL = "http://iulii.net"
PREFIX = "/"
HOME = "home"
PATH_SEPARATOR = '/'
SRC_EXT = {"markdown": "md", "textile": "textile", "plain": "txt"}
DST_EXT = "html"
HIDDEN = set(["404.textile", "splash.textile"])
menu_code = ''
PAGES = {SRC + "/index.md": ("home page", "wireless mesh network libera e decentralizzata in friuli venezia giulia"),
SRC + "/30_rete/index.md": ("rete", "esempi di configurazione dei nodi della rete"),
SRC + "/30_rete/30_servizi.md": ("rete", "elenco dei servizi disponibili nelle rete"),
SRC + "/70_contatti.md": ("contatti", "contattare via email twitter facebook googleplus irc commenti"),
SRC + "/50_links.md": ("links", "collegamenti a siti amici")}
current_time = datetime.datetime.now()
def get_page_contents(node):
"""Return page title and description from the global variable pages if a
match with current node page.src_file is found.
"""
try:
return (SITE_NAME + ' | ' + PAGES[node.page.src_pathname][0], \
PAGES[node.page.src_pathname][1])
except KeyError:
return ('%%%TITLE%%%', '')
def menu(node):
"""Generate a hierarchical menu."""
global menu_code
menu_code = '\n'
root = node
while root.parent:
root = root.parent
menu_(root, node)
return menu_code
def menu_(node, cur_node, node_prefix = PREFIX, indent = ''):
"""Auxiliary recursive function for menu generation."""
global menu_code
menu_code += indent + '<ul>\n'
for child in sorted(node.children, key=lambda n: n.page.src_pathname):
if child.page.dst_file.startswith("index.") or child.page.src_file in HIDDEN:
continue
menu_code += indent + '<li class="level-' + str(child.page.level) + '"><a '
if(child == cur_node
or (cur_node.page.dst_file.startswith("index.") and child == cur_node.parent)):
menu_code += 'class="current" '
menu_code += 'href="' + node_prefix + child.page.dst_file
if child.children:
menu_code += "/index." + DST_EXT + '">' + child.page.name + '</a>\n'
menu_(child, cur_node, node_prefix + child.page.dst_file + '/', indent + '\t')
menu_code += indent + '</li>\n'
else:
menu_code += '">' + child.page.name + '</a></li>\n'
menu_code += indent + '</ul>\n'
def header(node):
"""Build the header and return it to a string."""
(title, description) = get_page_contents(node)
return '''<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="it"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8" lang="it"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9" lang="it"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="it"> <!--<![endif]-->
<head>
<meta charset="utf-8" />
<meta name="author" content="''' + AUTHOR + '''" />
<meta name="description" content="''' + description + '''" />
<title>''' + title + '''</title>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" type="text/css" media="all" href="'''+ PREFIX +'''/assets/css/reset.css" />
<link rel="stylesheet" type="text/css" media="all" href="'''+ PREFIX +'''/assetscss/text.css" />
<link rel="stylesheet" type="text/css" media="all" href="'''+ PREFIX +'''/assetscss/960.css" />
<link rel="stylesheet" type="text/css" media="all" href="'''+ PREFIX +'''/assetscss/hashgrid.css" />
<link rel="stylesheet" type="text/css" media="all" href="'''+ PREFIX +'''/assetscss/style.css" />
<link rel="icon" type="image/png" href="'''+ PREFIX +'''img/iuliinetlogo.png">
</head>
<body id="top">
<header class="container_12 clearfix">
<div class="grid_8">
<hgroup>
<h1><a href="''' + PREFIX + '''">''' + SITE_NAME + '''</a></h1>
<h2><a href="''' + PREFIX + '''">''' + DESC + '''</a></h2>
</hgroup>
</div>
<div class="grid_4">
<a href="''' + PREFIX + '''">
<img class="iuliinetlogo" title="iuliinet logo" alt="iuliinet logo" src="'''+ PREFIX +'''img/iuliinetlogo.png">
</a>
</div>
<div class="clear"></div>
</header>
<section class="container_12 clearfix">
<div class="grid_8">
'''
def footer(node):
"""Build the footer and return it to a string."""
return '''
</div>
<div class="grid_4 column">
<div class="navigation">
<div class="path">
<b>path</b>: %%%PATH%%%
</div>
<nav>
''' + menu(node) + '''
</nav>
</div>
</div>
<div class="clear"></div>
</section>
<footer class="container_12 clearfix">
<div class="grid_12">
<a href="#top" title="back to top" class="backtotop">back to top</a>
<div class="foot">
<p>© ''' + str(current_time.year) + ''' <a href="http://iulii.net" title="iulii.net website">iulii.net</a> | <a rel="license" href="http://creativecommons.org/licenses/by-nc/3.0/">license CC by-nc</a> | edit: ''' + time.strftime("%Y%m%d %I:%M:%S %p", node.page.last_edit) + '''</p>
</div>
</div>
<div class="clear"></div>
</footer>
<!--[if lt IE 7]><p class=chromeframe>Your browser is <em>ancient!</em> <a href="http://browsehappy.com/">Upgrade to a different browser</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to experience this site.</p><![endif]-->
<script src="'''+ PREFIX +'''js/jquery.js"></script>
<script src="'''+ PREFIX +'''js/plugins.js"></script>
<script src="'''+ PREFIX +'''js/main.js"></script>
<script src="'''+ PREFIX +'''js/hashgrid.js"></script>
</body>
</html>'''
|
# status strings
strDiStatRunning = "Running ..."
strDiamondsStatusGood = "Good"
strDiamondsStatusLikelihood = "Failed/Couldn't find point with better likelihood"
strDiamondsStatusCovariance = "Failed/Covariance decomposition failed"
strDiamondsStatusAssertion = "Failed/Assertion failed"
strDiamondsStatusTooManyRuns = "Failed/Too many runs"
strDiamondsStatusPriorsChanged = "Priors changed"
#diamonds text
strDiamondsErrBetterLikelihood = "Can't find point with a better Likelihood"
strDiamondsErrCovarianceFailed = "Covariance Matrix decomposition failed"
strDiamondsErrAssertionFailed = "Assertion failed"
|
import csv
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
"""
UTF8 CSV Reader from: http://stackoverflow.com/q/904041/2100287
"""
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
yield [unicode(cell, 'utf-8') for cell in row]
|
import matplotlib.pyplot as plt
from data_analysis.vldbj_data_parsing.varying_eps_statistics import *
from paper_figures.vldbj.draw_indexing_time_size import TICK_SIZE, LEGEND_SIZE, LABEL_SIZE
import json
from paper_figures.vldbj.draw_varying_c import us_to_ms_factor, large_size_plus
def get_dict(file_path):
with open(file_path) as ifs:
return json.load(ifs)
relative_root_path = '../..'
rmax_index_dict = get_dict('{}/data_analysis/data-json/varying_parameters/varying_rmax_index.json'.format(relative_root_path))
rmax_query_dict = get_dict('{}/data_analysis/data-json/varying_parameters/varying_rmax_query.json'.format(relative_root_path))
def draw_query_index_time():
# rmax_lst = [0.01 * (i + 1) for i in xrange(0, 30, 1)]
rmax_lst = [0.01 * (i + 1) for i in range(4, 10, 1)]
exp_figure, ax_tuple = plt.subplots(1, 2, sharex=True, figsize=(16, 7))
# 1st: draw querying time
algorithm_tag_lst = [bflpmc_tag, flpmc_tag]
legend_lst = ['FBLPMC', 'FLPMC']
ax = ax_tuple[0]
lst_lst = []
for idx, algorithm in enumerate(algorithm_tag_lst):
time_lst = map(lambda rmax: rmax_query_dict[algorithm][format_str(rmax)], rmax_lst)
time_lst = list(map(lambda val: float(val) / us_to_ms_factor, time_lst))
lst_lst.append(time_lst)
shape_lst = ['D-.', 's--', 'o:', 'x-', 'v-', '^-', '<-', '>-']
color_lst = ['blue', 'orange', 'green', 'red', 'm', 'brown', 'pink', 'gray']
ax.plot(rmax_lst, time_lst, shape_lst[idx], color=color_lst[idx], markersize=22 if idx != 0 else 18,
markerfacecolor='none')
# ax.set_yscale('log')
# setup ticks for x and y axis
ax.set_ylim(0, 0.04)
ax.set_yticks([0, 0.01, 0.02, 0.03, 0.04])
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
# setup labels and grid, legend
ax.set_ylabel('Avg Query Time (ms)', fontsize=LABEL_SIZE + large_size_plus)
ax.set_xlabel('$r_{max}$', fontsize=LABEL_SIZE + large_size_plus)
ax.grid(True, alpha=0.2)
ax.legend(legend_lst, ncol=2, prop={'size': LEGEND_SIZE, "weight": "bold"}, loc=1)
# 2nd: draw the index
algorithm_tag_lst = [flp_tag]
legend_lst = ['FLP']
ax = ax_tuple[1]
lst_lst = []
for idx, algorithm in enumerate(algorithm_tag_lst):
time_lst = list(map(lambda rmax: rmax_index_dict[algorithm][format_str(rmax)], rmax_lst))
lst_lst.append(time_lst)
shape_lst = ['D-.', 'x-', '^-']
color_lst = ['blue', 'red', 'brown']
ax.plot(rmax_lst, time_lst, shape_lst[idx], color=color_lst[idx], markersize=22 if idx != 0 else 18,
markerfacecolor='none')
# ax.set_yscale('log')
# setup ticks for x and y axis
ax.set_ylim(0.03, 0.05)
ax.set_yticks([0.03, 0.035, 0.040, 0.045, 0.05])
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(TICK_SIZE + large_size_plus)
# setup labels and grid, legend
ax.set_ylabel('Indexing Time (s)', fontsize=LABEL_SIZE + large_size_plus)
ax.set_xlabel('$r_{max}$', fontsize=LABEL_SIZE + large_size_plus)
ax.grid(True, alpha=0.4)
ax.legend(legend_lst, ncol=2, prop={'size': LEGEND_SIZE, "weight": "bold"}, loc=1)
# 3rd: save the figure
exp_figure.subplots_adjust(wspace=0)
plt.tight_layout()
plt.savefig('figures/' + 'varying_rmax' + '.pdf', bbox_inches='tight', dpi=300)
plt.close()
if __name__ == '__main__':
# unit: us
algorithm_lst = [bflpmc_tag, flpmc_tag]
for algorithm in algorithm_lst:
print(algorithm, rmax_query_dict[algorithm])
index_lst = [flp_tag]
for algorithm in index_lst:
print(algorithm, rmax_index_dict[algorithm])
draw_query_index_time()
|
# -*- coding: utf-8 -*-
"""
@author: Ryan Piersma
"""
from nltk.tokenize import RegexpTokenizer
import os, fnmatch
#Some code from:
#https://stackoverflow.com/questions/15547409/how-to-get-rid-of-punctuation-using-nltk-tokenizer
#data structure for holding sequences:
# Dictionary where key = a word, value = a pair consisting of
# a word that follows it and the number of times that it has
# occurred after that word
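# Illustrative example (hypothetical input): after reading "the cat saw the dog",
# seqDict would contain {"the": [{"cat": 1}, {"dog": 1}], "cat": [{"saw": 1}], "saw": [{"the": 1}]}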
def createWordSequences(corpus):
seqDict = {}
startRead = False
tokenizer = RegexpTokenizer(r'\w+')
lastWordLastLine = "dummyword"
for line in corpus.readlines():
if line.find("FOOTNOTES") != -1:
break
if startRead:
tokenized = tokenizer.tokenize(line)
words = [w.lower() for w in tokenized]
#Handle the sequence between last word of one line and first word
#of next line
if len(words) > 0:
firstWordCurLine = words[0]
if not lastWordLastLine == "dummyword":
if lastWordLastLine in seqDict:
if not any(firstWordCurLine in d for d in seqDict[lastWordLastLine]):
seqDict[lastWordLastLine].append({firstWordCurLine : 1})
else:
wordIndex = -1
for d in seqDict[lastWordLastLine]:
if firstWordCurLine in d:
wordIndex = seqDict[lastWordLastLine].index(d)
seqDict[lastWordLastLine][wordIndex][firstWordCurLine] += 1
else:
seqDict[lastWordLastLine] = [{firstWordCurLine : 1}]
#Handle sequences that happen on a single line
for i in range(len(words) - 1):
if words[i] in seqDict:
if not any(words[i+1] in d for d in seqDict[words[i]]):
seqDict[words[i]].append({words[i+1] : 1})
else:
wordIndex = -1
for d in seqDict[words[i]]:
if words[i+1] in d:
wordIndex = seqDict[words[i]].index(d)
seqDict[words[i]][wordIndex][words[i+1]] += 1
else:
seqDict[words[i]] = [{words[i+1] : 1}]
#Store the last word on the line
if (len(words) > 0):
lastWordLastLine = words[len(words) - 1]
if line.find("START OF THE PROJECT GUTENBERG EBOOK"):
startRead = True
#print(seqDict)
return seqDict
#Source for this function: https://stackoverflow.com/questions/13299731/python-need-to-loop-through-directories-looking-for-txt-files
def findFiles (path, filter):
for root, dirs, files in os.walk(path):
for file in fnmatch.filter(files, filter):
yield os.path.join(root, file)
def convertToList(listDict, bookID):
listTuples = []
beforeWord = ""
for key,dictList in listDict.items():
beforeWord = key
        for followers in dictList:
            for afterWord, timesFound in followers.items():
                listTuples.append(tuple([beforeWord, bookID, afterWord, timesFound]))
return listTuples
def main():
tupleLists = []
csvWrite = open("sequences.csv","w+")
#put path here
directory = "C:/Users/ryanp/Desktop/Duke/Fall 2018/CS 316- Databases/Final Project/LingusticDB-database/books/books"
for filename in findFiles(directory, "[0-9]*.txt"):
print(filename)
filenamePartition = filename.rsplit("\\") #change for Linux I think
bookId = filenamePartition[-1][:-4]
f = open(filename,"r",encoding = "ISO-8859-1")
createSequences = createWordSequences(f)
addThisToTupleList = convertToList(createSequences, bookId)
        for row in addThisToTupleList:
            csvWrite.write(row[0] + "|" + row[1] + "|" + row[2] + "|" + str(row[3]) + "\n")
csvWrite.close()
return tupleLists
main()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['LabResourceArgs', 'LabResource']
@pulumi.input_type
class LabResourceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
artifacts_storage_account: Optional[pulumi.Input[str]] = None,
created_date: Optional[pulumi.Input[str]] = None,
default_storage_account: Optional[pulumi.Input[str]] = None,
default_virtual_network_id: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
lab_storage_type: Optional[pulumi.Input[Union[str, 'LabStorageType']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
storage_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a LabResource resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] artifacts_storage_account: The artifact storage account of the lab.
:param pulumi.Input[str] created_date: The creation date of the lab.
:param pulumi.Input[str] default_storage_account: The lab's default storage account.
:param pulumi.Input[str] default_virtual_network_id: The default virtual network identifier of the lab.
:param pulumi.Input[str] id: The identifier of the resource.
:param pulumi.Input[Union[str, 'LabStorageType']] lab_storage_type: The type of the lab storage.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the resource.
:param pulumi.Input[str] provisioning_state: The provisioning status of the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] storage_accounts: The storage accounts of the lab.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] type: The type of the resource.
:param pulumi.Input[str] vault_name: The name of the key vault of the lab.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if artifacts_storage_account is not None:
pulumi.set(__self__, "artifacts_storage_account", artifacts_storage_account)
if created_date is not None:
pulumi.set(__self__, "created_date", created_date)
if default_storage_account is not None:
pulumi.set(__self__, "default_storage_account", default_storage_account)
if default_virtual_network_id is not None:
pulumi.set(__self__, "default_virtual_network_id", default_virtual_network_id)
if id is not None:
pulumi.set(__self__, "id", id)
if lab_storage_type is not None:
pulumi.set(__self__, "lab_storage_type", lab_storage_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if storage_accounts is not None:
pulumi.set(__self__, "storage_accounts", storage_accounts)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if type is not None:
pulumi.set(__self__, "type", type)
if vault_name is not None:
pulumi.set(__self__, "vault_name", vault_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="artifactsStorageAccount")
def artifacts_storage_account(self) -> Optional[pulumi.Input[str]]:
"""
The artifact storage account of the lab.
"""
return pulumi.get(self, "artifacts_storage_account")
@artifacts_storage_account.setter
def artifacts_storage_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "artifacts_storage_account", value)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> Optional[pulumi.Input[str]]:
"""
The creation date of the lab.
"""
return pulumi.get(self, "created_date")
@created_date.setter
def created_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_date", value)
@property
@pulumi.getter(name="defaultStorageAccount")
def default_storage_account(self) -> Optional[pulumi.Input[str]]:
"""
The lab's default storage account.
"""
return pulumi.get(self, "default_storage_account")
@default_storage_account.setter
def default_storage_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_storage_account", value)
@property
@pulumi.getter(name="defaultVirtualNetworkId")
def default_virtual_network_id(self) -> Optional[pulumi.Input[str]]:
"""
The default virtual network identifier of the lab.
"""
return pulumi.get(self, "default_virtual_network_id")
@default_virtual_network_id.setter
def default_virtual_network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_virtual_network_id", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the resource.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="labStorageType")
def lab_storage_type(self) -> Optional[pulumi.Input[Union[str, 'LabStorageType']]]:
"""
The type of the lab storage.
"""
return pulumi.get(self, "lab_storage_type")
@lab_storage_type.setter
def lab_storage_type(self, value: Optional[pulumi.Input[Union[str, 'LabStorageType']]]):
pulumi.set(self, "lab_storage_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="storageAccounts")
def storage_accounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The storage accounts of the lab.
"""
return pulumi.get(self, "storage_accounts")
@storage_accounts.setter
def storage_accounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "storage_accounts", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the key vault of the lab.
"""
return pulumi.get(self, "vault_name")
@vault_name.setter
def vault_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vault_name", value)
class LabResource(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
artifacts_storage_account: Optional[pulumi.Input[str]] = None,
created_date: Optional[pulumi.Input[str]] = None,
default_storage_account: Optional[pulumi.Input[str]] = None,
default_virtual_network_id: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
lab_storage_type: Optional[pulumi.Input[Union[str, 'LabStorageType']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
A lab.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] artifacts_storage_account: The artifact storage account of the lab.
:param pulumi.Input[str] created_date: The creation date of the lab.
:param pulumi.Input[str] default_storage_account: The lab's default storage account.
:param pulumi.Input[str] default_virtual_network_id: The default virtual network identifier of the lab.
:param pulumi.Input[str] id: The identifier of the resource.
:param pulumi.Input[Union[str, 'LabStorageType']] lab_storage_type: The type of the lab storage.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the resource.
:param pulumi.Input[str] provisioning_state: The provisioning status of the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] storage_accounts: The storage accounts of the lab.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] type: The type of the resource.
:param pulumi.Input[str] vault_name: The name of the key vault of the lab.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LabResourceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A lab.
:param str resource_name: The name of the resource.
:param LabResourceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LabResourceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
artifacts_storage_account: Optional[pulumi.Input[str]] = None,
created_date: Optional[pulumi.Input[str]] = None,
default_storage_account: Optional[pulumi.Input[str]] = None,
default_virtual_network_id: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
lab_storage_type: Optional[pulumi.Input[Union[str, 'LabStorageType']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LabResourceArgs.__new__(LabResourceArgs)
__props__.__dict__["artifacts_storage_account"] = artifacts_storage_account
__props__.__dict__["created_date"] = created_date
__props__.__dict__["default_storage_account"] = default_storage_account
__props__.__dict__["default_virtual_network_id"] = default_virtual_network_id
__props__.__dict__["id"] = id
__props__.__dict__["lab_storage_type"] = lab_storage_type
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["storage_accounts"] = storage_accounts
__props__.__dict__["tags"] = tags
__props__.__dict__["type"] = type
__props__.__dict__["vault_name"] = vault_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:LabResource"), pulumi.Alias(type_="azure-native:devtestlab:LabResource"), pulumi.Alias(type_="azure-nextgen:devtestlab:LabResource"), pulumi.Alias(type_="azure-native:devtestlab/v20160515:LabResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:LabResource"), pulumi.Alias(type_="azure-native:devtestlab/v20180915:LabResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:LabResource")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(LabResource, __self__).__init__(
'azure-native:devtestlab/v20150521preview:LabResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'LabResource':
"""
Get an existing LabResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = LabResourceArgs.__new__(LabResourceArgs)
__props__.__dict__["artifacts_storage_account"] = None
__props__.__dict__["created_date"] = None
__props__.__dict__["default_storage_account"] = None
__props__.__dict__["default_virtual_network_id"] = None
__props__.__dict__["lab_storage_type"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["storage_accounts"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["vault_name"] = None
return LabResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="artifactsStorageAccount")
def artifacts_storage_account(self) -> pulumi.Output[Optional[str]]:
"""
The artifact storage account of the lab.
"""
return pulumi.get(self, "artifacts_storage_account")
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[Optional[str]]:
"""
The creation date of the lab.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="defaultStorageAccount")
def default_storage_account(self) -> pulumi.Output[Optional[str]]:
"""
The lab's default storage account.
"""
return pulumi.get(self, "default_storage_account")
@property
@pulumi.getter(name="defaultVirtualNetworkId")
def default_virtual_network_id(self) -> pulumi.Output[Optional[str]]:
"""
The default virtual network identifier of the lab.
"""
return pulumi.get(self, "default_virtual_network_id")
@property
@pulumi.getter(name="labStorageType")
def lab_storage_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the lab storage.
"""
return pulumi.get(self, "lab_storage_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="storageAccounts")
def storage_accounts(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The storage accounts of the lab.
"""
return pulumi.get(self, "storage_accounts")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vaultName")
def vault_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the key vault of the lab.
"""
return pulumi.get(self, "vault_name")
|
# PROJECT JULY-SEPTEMBER 2019
# SOLVING THE N-BODIES PROBLEM / FUNCTIONS
# By Enguerran VIDAL
# This file contains the many functions used throughout this project, hence it is imported in every .py file
###############################################################
# IMPORTS #
###############################################################
#-----------------MODULES
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
import matplotlib.animation
#-----------------PYTHON FILES
from __constants_conversions import*
###############################################################
# FUNCTIONS #
###############################################################
def array_max_abs(array):
''' Works only for column vectors '''
m=0
for i in range(len(array)):
x=abs(array[i])
if x>m:
m=x
return m
def mean_value_array(X):
''' only works with column vectors of shape (n,)'''
(n,)=X.shape
S=0
for i in range(n):
S=S+X[i]
X_bar=S/n
return X_bar
def array_normal(X):
return (X-min(X))/(max(X)-min(X))
#----------------------2D TRANSFORMATIONS
def points_vector_2D(pointI,pointJ):
    ''' Returns the vector between two points in 2D'''
    x=pointJ[0]-pointI[0]
    y=pointJ[1]-pointI[1]
    return np.array([x,y])
def points_distance_2D(pointI,pointJ):
    ''' Returns the distance between two points in 2D'''
    x=pointJ[0]-pointI[0]
    y=pointJ[1]-pointI[1]
    return np.sqrt(x**2+y**2)
def object_distance_2D(objectI,objectJ):
    ''' Returns the distance between two objects in 2D'''
    v=object_vector_2D(objectI,objectJ)
    return np.sqrt(v[0]**2+v[1]**2)
def object_vector_2D(objectI,objectJ):
    ''' Returns the vector between two objects in 2D'''
    return objectJ.position-objectI.position
def vector_module_2D(vector):
''' Returns a vector's module in 2D'''
return np.sqrt(vector[0]**2+vector[1]**2)
#----------------------3D TRANSFORMATIONS
def points_vector_3D(pointI,pointJ):
''' Returns the vector between two points in 3D'''
x=pointJ[0]-pointI[0]
y=pointJ[1]-pointI[1]
z=pointJ[2]-pointI[2]
return np.array([x,y,z])
def points_distance_3D(pointI,pointJ):
''' Returns the distance between two points in 3D'''
x=pointJ[0]-pointI[0]
y=pointJ[1]-pointI[1]
z=pointJ[2]-pointI[2]
return np.sqrt(x**2+y**2+z**2)
def object_distance_3D(objectI,objectJ):
''' Returns the distance between two objects in 3D'''
v=object_vector_3D(objectI,objectJ)
return np.sqrt(v[0]**2+v[1]**2+v[2]**2)
def object_vector_3D(objectI,objectJ):
''' Returns the vector between two objects in 3D'''
return objectJ.position-objectI.position
def vector_module_3D(vector):
''' Returns a vector's module in 3D'''
return np.sqrt(vector[0]**2+vector[1]**2+vector[2]**2)
#------------------FILE READING
def translate_file(file_name):
    ''' Helps translate the formatted parameters file used to solve the N-Body problem (used in the main.py code)'''
file=open(file_name,'r')
lines=file.readlines()
n=len(lines)
parameters_labels=[None,None,None,None,None,None,None,None]
parameters_values=[None,None,None,None,None,None,None,None]
for i in range(n):
lines[i]=lines[i].lower()
content=lines[i].split()
if len(content)>0:
if content[0]=='dimension':
parameters_labels[0]=content[0]
parameters_values[0]=content[2]
if content[0]=='algorithm' and content[1]=='method':
parameters_labels[1]=content[0]+' '+content[1]
parameters_values[1]=content[3]
if content[0]=='distribution' and content[1]=='type':
parameters_labels[2]=content[0]+' '+content[1]
parameters_values[2]=content[3]
if content[0]=='number' and content[1]=='of' and content[2]=='bodies':
parameters_labels[3]=content[0]+' '+content[1]+' '+content[2]
parameters_values[3]=int(content[4])
if content[0]=='theta':
parameters_labels[4]=content[0]
parameters_values[4]=float(content[2])
if content[0]=='frame' and content[1]=='length':
parameters_labels[5]=content[0]+' '+content[1]
parameters_values[5]=time_conversion(float(content[3]),content[4])
if content[0]=='plot' and content[1]=='unit':
parameters_labels[6]=content[0]+' '+content[1]
parameters_values[6]=content[3]
if content[0]=='distribution' and content[1]=='seed':
parameters_labels[7]=content[0]+' '+content[1]
parameters_values[7]=int(content[3])
return parameters_labels,parameters_values
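# Illustrative only: translate_file expects whitespace-separated lines such as
# the ones below (the separator token and the example values are assumptions,
# not taken from the project files):
#   dimension = 3
#   algorithm method = euler
#   distribution type = random
#   number of bodies = 100
#   theta = 0.5
#   frame length = 1 days
#   plot unit = au
#   distribution seed = 42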
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from paddlenlp.datasets import MapDataset
def create_dataloader(dataset,
mode='train',
batch_size=1,
batchify_fn=None,
trans_fn=None):
if trans_fn:
dataset = dataset.map(trans_fn)
    shuffle = (mode == 'train')
if mode == 'train':
batch_sampler = paddle.io.DistributedBatchSampler(dataset,
batch_size=batch_size,
shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(dataset,
batch_size=batch_size,
shuffle=shuffle)
return paddle.io.DataLoader(dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=batchify_fn,
return_list=True)
def read_text_pair(data_path, is_test=False):
"""Reads data."""
with open(data_path, 'r', encoding='utf-8') as f:
for line in f:
data = line.rstrip().split("\t")
            if not is_test:
if len(data) != 3:
continue
yield {'query1': data[0], 'query2': data[1], 'label': data[2]}
else:
if len(data) != 2:
continue
yield {'query1': data[0], 'query2': data[1]}
def convert_example(example, tokenizer, max_seq_length=512, is_test=False):
query, title = example["query1"], example["query2"]
encoded_inputs = tokenizer(text=query,
text_pair=title,
max_seq_len=max_seq_length)
input_ids = encoded_inputs["input_ids"]
token_type_ids = encoded_inputs["token_type_ids"]
if not is_test:
label = np.array([example["label"]], dtype="int64")
return input_ids, token_type_ids, label
else:
return input_ids, token_type_ids
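# A minimal usage sketch (added for illustration, not part of the original file).
# It wires read_text_pair, convert_example and create_dataloader together with an
# ERNIE tokenizer from paddlenlp; "train.tsv" and the hyper-parameters below are
# placeholder assumptions, not values taken from the original project.
if __name__ == "__main__":
    from functools import partial
    from paddlenlp.data import Pad, Stack, Tuple
    from paddlenlp.datasets import load_dataset
    from paddlenlp.transformers import ErnieTokenizer

    tokenizer = ErnieTokenizer.from_pretrained("ernie-1.0")
    # Wrap the text-pair generator above into a MapDataset.
    train_ds = load_dataset(read_text_pair, data_path="train.tsv", lazy=False)
    trans_fn = partial(convert_example, tokenizer=tokenizer, max_seq_length=128)
    # Pad both id sequences in each batch and stack the integer labels.
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # token_type_ids
        Stack(dtype="int64")                               # labels
    ): fn(samples)
    train_loader = create_dataloader(train_ds, mode="train", batch_size=32,
                                     batchify_fn=batchify_fn, trans_fn=trans_fn)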
|
"""
Advent of Code : Day 07
"""
import re
from os import path
def parse_input(filename):
""" Parse input file values """
script_dir = path.dirname(__file__)
file_path = path.join(script_dir, filename)
with open(file_path, "r") as file:
values = file.read().splitlines()
return values
# PART 1
def part1(rules):
""" Count bags that can fit your bag """
graph = dict()
parent = re.compile(r"^(\w+\s\w+)")
child = re.compile(r"(?:contain|,)\s\d+\s(\w+\s\w+)")
for rule in rules:
parent_bag = parent.match(rule).group(1)
child_bags = child.findall(rule)
graph[parent_bag] = child_bags
# this is what graph will look like
# {
# "light red": ["bright white bag", "muted yellow"],
# "bright white": ["shiny gold"] so on...
# }
my_bag = "shiny gold"
found_in = set()
def fit_my_bag(curr_bag):
for parent, child in graph.items():
if len(child) != 0:
if curr_bag in child:
found_in.add(parent)
fit_my_bag(parent)
fit_my_bag(my_bag)
count = len(found_in)
return count
# PART 2 : THIS ONE WAS QUITE A HEAD-SCRATCHER
def part2(rules):
""" Number of bags to be carried within """
graph = dict()
parent = re.compile(r"^(\w+\s\w+)")
    child = re.compile(r"(?:contain|,)\s(\d+)\s(\w+\s\w+)")  # (\d+) captures the whole count, not just its last digit
for rule in rules:
parent_bag = parent.match(rule).group(1)
child_bags = child.findall(rule)
graph[parent_bag] = child_bags
# this is what graph will look like
# {
# "light red":
# [ ('1', "bright white bag"), ('2', "muted yellow") ],
# "bright white":
# [ ('1' "shiny gold") ],
# so on...
# }
my_bag = "shiny gold"
# BEWARE - RECURSION & GRAPH BELOW
def count_bags_within(curr_bag):
count = 0
for parent_bag, children_bags in graph.items():
if curr_bag in parent_bag:
if len(children_bags) == 0:
return 1
else:
child_sum = 0
for bag in children_bags:
return_val = count_bags_within(bag[1])
if return_val != 1:
                            count += int(bag[0])  # count these child bags themselves (their contents are added below)
inside = int(bag[0]) * return_val
child_sum += inside
count += child_sum
return count
sum_of_bags = count_bags_within(my_bag)
return sum_of_bags
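# Worked example (from the AoC 2020 day 7 sample rules, shown here for clarity):
# shiny gold holds 1 dark olive and 2 vivid plum; dark olive holds 3 faded blue
# and 4 dotted black; vivid plum holds 5 faded blue and 6 dotted black.
# Bags inside shiny gold = 1 + 1*7 + 2 + 2*11 = 32, which is what part2 returns
# for that sample input.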
|
# Exercise 073:
'''Create a tuple filled with the top 20 teams of the Brazilian Football Championship table,
in order of placement. Then show:
a) The first 5 teams.
b) The last 4 teams in the standings.
c) The teams in alphabetical order.
d) The position of the Chapecoense team.'''
brasileirao = ('Atlético Mineiro - MG', 'Flamengo - RJ', 'Palmeiras - SP', 'Fortaleza - CE', 'Corinthians - SP',
'Red Bull Bragantino - SP', 'Fluminense - RJ', 'América - MG', 'Atlético - GO', 'Santos - SP',
'Ceará - CE', 'Internacional - RS', 'São Paulo - SP', 'Athletico Paranaense - PR', 'Cuiabá - MT',
'Juventude - RS', 'Grêmio - RS', 'Bahia - BA', 'Sport - PE', 'Chapecoense - SC')
print(f'The teams in the 2021 Brasileirão are: {brasileirao}')
print(f'The first 5 teams are: {brasileirao[0:5]}')
print(f'The last 4 teams are: {brasileirao[-4:]}')
print(f'The teams in alphabetical order are: {sorted(brasileirao)}')
print(f'Chapecoense - SC is in position {brasileirao.index("Chapecoense - SC") + 1}')
|
# coding: UTF-8
# !/usr/bin/python
import urllib
import urllib2
import json
import os
import time
import random
import cookielib
localpath="E:\\m\\"
url="https://douban.fm/j/v2/playlist"
textmod ={'sid':'1480150','client':'s:mainsite|y:3.0','channel':'0','app_name':'radio_website','version':'100','type':'s'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36','Cookie':'flag="ok"; bid=ETgXWca8WcA; dbcl2="2052674:XnoHHJknDCY"; ck=_M0j; _ga=GA1.2.1710167474.1590557785; _gid=GA1.2.1479044468.1590557785'}
textmod['sid']="1548236"
def downloadfm(url,headers,textmod):
param = urllib.urlencode(textmod)
req = urllib2.Request(url = '%s%s%s' % (url,'?',param), headers=headers)
res = urllib2.urlopen(req)
res = res.read()
alm = json.loads(res)
songs= alm["song"]
for song in songs:
url=song["url"]
filename=song["title"]+"."+song["file_ext"]
sid=song["sid"]
textmod['sid']=sid
try:
print sid+"-"+unicode(filename)
except:
print "ignore..."
filePath=localpath+filename
if not os.path.exists(filePath):
f = urllib2.urlopen(url)
data = f.read()
try:
with open(filePath, "wb") as code:
code.write(data)
except:
print "write file exception ignore it!"
finally:
f.close()
return True
while True:
downloadfm(url,headers,textmod)
|
#
# Simulates writing a series of images to a directory forming a tilt
# series. Used to test passive acquisition by watching a directory.
# It uses a tilt series downloaded from data.kitware.com
from . import TIFFWriter, DM3Writer
import argparse
_writer_map = {
'tiff': TIFFWriter,
'dm3': DM3Writer
}
def main():
parser = argparse.ArgumentParser(
        description='Simulates a tilt series being written to a directory.')
parser.add_argument('-p', '--path',
help='path to write files', required=True)
parser.add_argument('-d', '--delay', type=int,
help='the delay between writing images', default=1)
parser.add_argument('-t', '--type', help='the type of images to use',
choices=['tiff', 'dm3'], default='tiff')
args = parser.parse_args()
writer = _writer_map[args.type](args.path, args.delay)
writer.start()
writer.join()
if __name__ == '__main__':
main()
|
import toolbox.engine as engine
import toolbox.connection as conn
import toolbox.trataArquivo as trataArquivo
import pandas as pd
import numpy as np
from datetime import datetime
import os, shutil
def selectOMs():
log = open("logs/logs.txt", "a+")
try:
conection = conn.getConnection('Domain','user','password','database')
print('Connected!')
wb1 = pd.ExcelFile('./tratativaCEP.xlsx')
df_pedidos = pd.read_excel(wb1)
pedidos = np.asarray(df_pedidos['pedido'])
ped = []
for pedido in pedidos:
ped.append(pedido)
ped =str(ped).replace("[","").replace("]","")
query = 'SELECT Sequence as pedido,ClientDocument as cpf,Street as rua,Number as numero,Neighborhood as bairro,City as cidade,UF,PostalCode as cep,SellerName,dominio FROM oms.oms2 where CREATIONDATE > "2019-09-25 00:00:00" and Sequence IN({});'.format(ped)
select = engine.queryReturn(conection,query)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
log.write('\n ----------------------------------------------------------------------------------- \n')
        log.write('Database extraction completed successfully! {}'.format(current_time))
        print('Database extraction completed successfully! {}'.format(current_time))
log.write('\n ----------------------------------------------------------------------------------- \n')
df = pd.DataFrame(select)
df['cpf_corrigido'] = trataArquivo.trataNumber(df['cpf'])
df['uf_corrigida'] = trataArquivo.trataString(df['UF'])
df['rua_corrigida'] = trataArquivo.repRua(df['rua'])
df['numero_corrigido'] = trataArquivo.trataNumber(df['numero'])
df['bairro_corrigido'] = trataArquivo.trataString(df['bairro'])
df['cep_corrigido'] = trataArquivo.trataNumber(df['cep'])
df['cidade_corrigida'] = trataArquivo.trataString(df['cidade'])
df['Escritório de vendas'] = ''
df['bairro_api'] = '-'
df['cep_api'] = '-'
df['cep_errado'] = 'N'
df['uf_errada'] ='N'
df['tratativa_manual'] = 'N'
print(df.head())
log.write('\n ----------------------------------------------------------------------------------- \n')
        log.write('DESCRIPTION OF THE EXTRACTED FILE: {}'.format(df.head()))
        log.write('DESCRIPTION OF THE EXTRACTED FILE: {}'.format(df.describe()))
log.write('\n ----------------------------------------------------------------------------------- \n')
df = df[['pedido','cpf','rua','numero','bairro','cidade','UF','cep','SellerName','dominio','Escritório de vendas','cpf_corrigido','rua_corrigida','numero_corrigido','bairro_corrigido','cidade_corrigida','uf_corrigida','cep_corrigido','cep_api','bairro_api','cep_errado','uf_errada','tratativa_manual']]
nomeArquivo = 'baseCep.xlsx'
df.to_excel(nomeArquivo, index=False)
log.write('\n ----------------------------------------------------------------------------------- \n')
        print('Initial file generated successfully!')
        log.write('Initial file generated successfully!\n')
log.write('\n ----------------------------------------------------------------------------------- \n')
except ValueError:
        print('Error connecting to the database!!')
log.write('\n ----------------------------------------------------------------------------------- \n')
        log.write('Error connecting to the database!!\n')
log.write('\n ----------------------------------------------------------------------------------- \n')
return
# selectOMs()
|
from tulius.forum.comments import models as comments
from tulius.forum.threads import models as threads
def test_comment_model(user):
obj = comments.Comment(title='foo', body='bar')
assert str(obj) == 'foo'
obj.title = ''
assert str(obj) == 'bar'
def test_thread_model(user):
obj = threads.Thread(user=user.user, title='foo', body='bar')
assert str(obj) == 'foo'
obj.title = ''
assert str(obj) == 'bar'
|
import pandas as pd
pd.set_option('display.max_columns', 18)
data = pd.read_csv('../datasets/athlete_events.csv')
isnull = data.isnull()
print(isnull)
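# A small optional extension (not part of the original script): a per-column
# count of missing values is often easier to read than the full boolean frame.
print(data.isnull().sum())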
|