content stringlengths 5 1.05M |
|---|
import matplotlib.pyplot as pl
import numpy as np
def myplot(flag, *args):
    """Dispatch small plotting actions on the current matplotlib figure.

    flag == 0  : open a new 10x4 figure
    flag == 1  : plot the cubic whose coefficients (low -> high) are args[0]
    flag == 2  : plot the sequence args[0] against descending indices
    flag == 10 : show all pending figures
    """
    size1 = 10
    size2 = 4
    if flag == 0:
        pl.figure(figsize=(size1, size2))
    elif flag == 1 and args:
        coeffs = args[0]
        x = np.arange(11, 1, -0.1)
        # Evaluate c3*x^3 + c2*x^2 + c1*x + c0 explicitly.
        y = pow(x, 3) * coeffs[3] + pow(x, 2) * coeffs[2] + x * coeffs[1] + coeffs[0]
        pl.plot(x, y)
        pl.gca().invert_xaxis()
    elif flag == 2 and args:
        series = args[0]
        pl.plot(np.arange(len(series), 0, -1), series)
        pl.gca().invert_xaxis()
    elif flag == 10:
        pl.show()
    return
|
#!/usr/bin/env python3
#
# command from kubesicher (https://github.com/pflaeging/kubesicher)
#
# Usage:
# ./get-k8sobjects.py -h
# usage: get-k8sobjects.py [-h] [-v] [--oc] [-n NAMESPACE] [-d] [-b BASEDIR]
# [-t OBJECTTYPES] [-l LABEL]
#
# Get kubernetes objects as yaml
#
# optional arguments:
# -h, --help show this help message and exit
# -v, --verbose verbose output
# --oc use oc command instead of kubectl
# -n NAMESPACE, --namespace NAMESPACE
# export named namespace instead of current
# -d, --makedir create directory with namespace / project name for the
# exported files
# -b BASEDIR, --basedir BASEDIR
# export root directory. Default is current directory
# -t OBJECTTYPES, --objecttypes OBJECTTYPES
# objecttypes to export. Use comma as separator and no
# spaces
# -l LABEL, --label LABEL
# only export objects with this label
#
# Beta version: don't trust me ;-)
#
#
# Peter Pfläging <peter@pflaeging.net>
# License: Apache 2.0
#
import os, sys, yaml, json, re, argparse, subprocess, pprint
# defaults
# Object types exported when -t/--objecttypes is not given.
default_objecttypes = ["deployments", "services", "route", "pvc", "configmap", "secret", "roles", "crd", "sa"]
# Binary used for all cluster queries; replaced by oc when --oc is given.
command = "/usr/local/bin/kubectl"
# Set from -v/--verbose in main(); gates v_out() output.
verbose = False
# Output path prefix (basedir, optionally plus namespace dir); set in main().
destination = ""
# Optional ["-l", <selector>] arguments appended to list queries.
labeler = []
def v_out(string):
    """Print *string* only when the module-level ``verbose`` flag is set."""
    if verbose:
        print(string)
def execute(line):
    """Run *line* (an argv list) and return its captured stdout as UTF-8 text."""
    completed = subprocess.run(line, capture_output=True)
    return completed.stdout.decode("utf-8")
def exportable(object):
    """Serialize one kubectl JSON object to <destination><Kind>_<name>.yaml.

    ``object`` is the raw JSON text produced by ``kubectl get -o json``.
    Uses the module-level ``destination`` prefix for the output path.
    """
    jsonset = json.loads(object)
    # Drop read-only server metadata.  selfLink is absent on newer Kubernetes
    # versions (removed in 1.20), so remove it only if present — a plain
    # ``del`` would raise KeyError there and abort the whole export.
    jsonset["metadata"].pop("selfLink", None)
    # Make filename: <destination><Kind>_<name>.yaml
    filename = "%s%s_%s.yaml" % (destination, jsonset["kind"], jsonset["metadata"]["name"])
    # Context manager guarantees the file is closed even if yaml.dump fails.
    with open(filename, 'w') as exportfile:
        exportfile.write(yaml.dump(jsonset))
def makenamespacedir(dirName):
    """Create directory *dirName* (with parents) unless it already exists."""
    if os.path.exists(dirName):
        v_out("Namespace Directory %s already exists" % dirName)
    else:
        os.makedirs(dirName)
        v_out("Namespace Directory %s created" % dirName)
def serialize_namespace(namespace, objecttypes):
    """Export every object of each type in *objecttypes* from *namespace*.

    Relies on the module globals ``command`` (kubectl/oc binary) and
    ``labeler`` (optional ``-l`` selector arguments).
    """
    v_out("Namespace: %s" % namespace)
    for otype in objecttypes:
        v_out("\t Getting %s:" % otype)
        listing = execute([command, "get", otype, "-o", "name", "-n", namespace] + labeler)
        for name in listing.split("\n"):
            if not name:
                continue
            v_out("\t \t %s" % name)
            query = [command, "get", name, "-o", "json", "-n", namespace]
            # secrets are not exportable!
            if otype != "secret":
                # NOTE(review): --export was removed in kubectl 1.18 — confirm target version.
                query.append("--export")
            exportable(execute(query))
def main():
    """Parse CLI options, configure the module globals, and run the export."""
    global default_objecttypes
    global command
    global verbose
    global destination
    global labeler

    parser = argparse.ArgumentParser(
        description="Get kubernetes objects as yaml",
        epilog="Beta version: don't trust me ;-)"
    )
    parser.add_argument("-v", "--verbose", help="verbose output", action="store_true")
    parser.add_argument("--oc", help="use oc command instead of kubectl", action="store_true")
    parser.add_argument("-n", "--namespace", help="export named namespace instead of current", default="")
    parser.add_argument("-d", "--makedir", help="create directory with namespace / project name for the exported files", action="store_true")
    parser.add_argument("-b", "--basedir", help="export root directory. Default is current directory", default=".")
    parser.add_argument("-t", "--objecttypes", help="objecttypes to export. Use comma as separator and no spaces", default="")
    parser.add_argument("-l", "--label", help="only export objects with this label", default="")
    args = parser.parse_args()

    verbose = args.verbose
    if args.oc:
        command = "/usr/local/bin/oc"
    destination = "%s/" % args.basedir

    # Fall back to the namespace of the current kube context when none given.
    if args.namespace:
        namespace = args.namespace
    else:
        namespace = execute([command, "config", "view", "--minify", "-o=jsonpath={..namespace}"])

    if args.makedir:
        destination = "%s%s/" % (destination, namespace)
        makenamespacedir(destination)

    objects = args.objecttypes.split(",") if args.objecttypes else default_objecttypes
    labeler = ["-l", args.label] if args.label else []
    serialize_namespace(namespace, objects)
if __name__ == '__main__':
main() |
from rest_framework.viewsets import ReadOnlyModelViewSet
from .serializers import BerrySerializer
from .models import Berry
class BerryViewSet(ReadOnlyModelViewSet):
    """Read-only REST endpoint exposing every Berry record."""

    queryset = Berry.berries.all()
    serializer_class = BerrySerializer
def torsion(a, b, c, d):
    """Return the dihedral (torsion) angle a-b-c-d in degrees.

    Each of *a*, *b*, *c*, *d* is an (x, y, z) sequence.  The result is in
    [-180, 180]; degenerate (collinear) geometries yield 0.0 or -180.0.
    """
    # Local import: the original relied on math names (pi/sqrt/acos) that were
    # never imported in this file — see the commented-out hint that follows.
    from math import sqrt, acos, pi
    # sqrt=math.sqrt, acos=math.acos
    dr = 180. / pi
    # Bond vectors b->a, b->c and c->d.
    v12x, v12y, v12z = a[0] - b[0], a[1] - b[1], a[2] - b[2]
    v32x, v32y, v32z = c[0] - b[0], c[1] - b[1], c[2] - b[2]
    v43x, v43y, v43z = d[0] - c[0], d[1] - c[1], d[2] - c[2]
    # Normals of the (a,b,c) and (b,c,d) planes via cross products.
    vn13x = v12y * v32z - v12z * v32y
    vn13y = v12z * v32x - v12x * v32z
    vn13z = v12x * v32y - v12y * v32x
    vn24x = v32z * v43y - v32y * v43z
    vn24y = v32x * v43z - v32z * v43x
    vn24z = v32y * v43x - v32x * v43y
    # Dot product and squared norms of the two normals.
    v12 = vn13x * vn24x + vn13y * vn24y + vn13z * vn24z
    v11 = vn13x ** 2 + vn13y ** 2 + vn13z ** 2
    v22 = vn24x ** 2 + vn24y ** 2 + vn24z ** 2
    v1122 = v11 * v22
    if v1122 < 1e-16:
        # Degenerate geometry (collinear points): the angle is undefined;
        # report 0 or -180 depending on the sign of the dot product.
        return 0.0 if v12 > 0 else -180.0
    ang = v12 / sqrt(v1122)
    # Clamp rounding noise outside [-1, 1] before acos.
    if ang >= 1.0:
        return 0.0
    if ang <= -1.0:
        return -180.0
    ang = acos(ang) * dr
    # Sign of the angle from the orientation of the normals about the b->c axis.
    triple = vn13x * (vn24y * v32z - vn24z * v32y) + \
             vn13y * (vn24z * v32x - vn24x * v32z) + \
             vn13z * (vn24x * v32y - vn24y * v32x)
    return -ang if triple < 0.0 else ang
|
"""
Query and deal common tables.
"""
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ValidationError
from muddery.server.utils.exception import MudderyError, ERR
class ElementPropertiesMapper(object):
    """
    Object's properties.

    Accessor for the per-level element property records stored in the
    world-data database.
    """

    def __init__(self):
        self.model_name = "element_properties"
        self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
        self.objects = self.model.objects

    def get_properties(self, element_type, element_key, level):
        """
        Get object's properties.

        Args:
            element_type: (string) element's type.
            element_key: (string) element's key.
            level: (number) object's level.
        """
        return self.objects.filter(element=element_type, key=element_key, level=level)

    def get_properties_all_levels(self, element_type, element_key):
        """
        Get object's properties at every level, ordered by level.

        Args:
            element_type: (string) the element's type.
            element_key: (string) the element's key.
        """
        return self.objects.filter(element=element_type, key=element_key).order_by("level")

    def add_properties(self, element_type, element_key, level, values):
        """
        Add object's properties, updating records that already exist.

        Args:
            element_type: (string) the element's type
            element_key: (string) the element's key.
            level: (number) object's level.
            values: (dict) values to save.
        """
        for prop, value in values.items():
            matched = self.objects.filter(element=element_type, key=element_key, level=level, property=prop)
            if matched:
                # Update the existing record(s) in place.
                matched.update(value=value)
            else:
                # No record yet: create one.
                self.model(
                    element=element_type,
                    key=element_key,
                    level=level,
                    property=prop,
                    value=value,
                ).save()

    def delete_properties(self, element_type, element_key, level):
        """
        Delete object's properties.

        Args:
            element_type: (string) the element's type
            element_key: (string) the element's key.
            level: (number) object's level.
        """
        return self.objects.filter(element=element_type, key=element_key, level=level).delete()


# Module-level singleton used by callers.
ELEMENT_PROPERTIES = ElementPropertiesMapper()
|
#! /usr/bin/env python
import khmer
import numpy as np
from khmer.khmer_args import optimal_size
import os
import sys
# The following is for ease of development (so I don't need to keep re-installing the tool)
try:
from CMash import MinHash as MH
except ImportError:
try:
import MinHash as MH
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from CMash import MinHash as MH
import pandas as pd
from multiprocessing import Pool # Much faster without dummy (threading)
import multiprocessing
import threading
from itertools import *
import argparse
import screed
# Helper function that uses equations (2.1) and (2.7) that tells you where you need
# to set the threshold to ensure (with confidence 1-t) that you got all organisms
# with coverage >= c
def threshold_calc(k, c, p, confidence):
    """Containment-threshold lower bound, clamped at zero when negative."""
    bound = c * (1 - np.sqrt(-2 * np.log(1 - confidence) * (c + p) / float(c ** 2 * k)))
    return max(bound, 0)
# This will calculate the similarity indicies between one sketch and the sample NodeGraph
def compute_indicies(sketch, num_sample_kmers, true_fprate):
    """Return (intersection cardinality, containment index, Jaccard index)
    between *sketch* and the module-global ``sample_kmers`` NodeGraph."""
    num_hashes = len(sketch._kmers)
    num_training_kmers = sketch._true_num_kmers
    count = 0
    adjust = 0
    for kmer in sketch._kmers:
        if kmer == '':
            adjust += 1  # Hash wasn't full, adjust accordingly
        else:
            count += sample_kmers.get(kmer)
    # adjust by the FP rate of the bloom filter
    count -= int(np.round(true_fprate * num_hashes))
    intersection_cardinality = count
    containment_index = count / float(num_hashes - adjust)
    jaccard_index = num_training_kmers * containment_index / float(
        num_training_kmers + num_sample_kmers - num_training_kmers * containment_index)
    # The query file can hold fewer k-mers than the training file, so clamp
    # both indices into [0, 1].
    containment_index = min(max(containment_index, 0), 1)
    jaccard_index = min(max(jaccard_index, 0), 1)
    return intersection_cardinality, containment_index, jaccard_index
def unwrap_compute_indicies(arg):
    """Tuple-unpacking adapter so ``Pool.map`` can fan out to compute_indicies."""
    return compute_indicies(*arg)
def restricted_float(x):
    """argparse type: parse *x* as a float and require it to lie in [0.0, 1.0)."""
    x = float(x)
    if x < 0.0 or x >= 1.0:
        raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0)" % (x,))
    return x
# Can read in with: test=pd.read_csv(os.path.abspath('../data/results.csv'),index_col=0)
def main():
    """CLI entry point: estimate similarity indicies between a query FASTQ/A
    file and every sketch in a training/reference HDF5 database, then write
    the thresholded, sorted results to a CSV file."""
    parser = argparse.ArgumentParser(description="This script creates a CSV file of similarity indicies between the"
                                     " input file and each of the sketches in the training/reference file.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-t', '--threads', type=int, help="Number of threads to use", default=multiprocessing.cpu_count())
    parser.add_argument('-f', '--force', action="store_true", help="Force creation of new NodeGraph.")
    parser.add_argument('-fp', '--fp_rate', type=restricted_float, help="False positive rate.", default=0.0001)
    parser.add_argument('-ct', '--containment_threshold', type=restricted_float,
                        help="Only return results with containment index above this value", default=0.02)
    parser.add_argument('-c', '--confidence', type=restricted_float,
                        help="Desired probability that all results were returned with containment index above threshold [-ct]",
                        default=0.95)
    parser.add_argument('-ng', '--node_graph', help="NodeGraph/bloom filter location. Used if it exists; if not, one "
                        "will be created and put in the same directory as the specified "
                        "output CSV file.", default=None)
    parser.add_argument('-b', '--base_name', action="store_true",
                        help="Flag to indicate that only the base names (not the full path) should be saved in the output CSV file")
    parser.add_argument('-i', '--intersect_nodegraph', action="store_true",
                        help="Option to only insert query k-mers in bloom filter if they appear anywhere in the training"
                        " database. Note that the Jaccard estimates will now be "
                        "J(query intersect union_i training_i, training_i) instead of J(query, training_i), "
                        "but will use significantly less space.")
    parser.add_argument('in_file', help="Input file: FASTQ/A file (can be gzipped).")
    parser.add_argument('training_data', help="Training/reference data (HDF5 file created by MakeTrainingDatabase.py)")
    parser.add_argument('out_csv', help='Output CSV file')
    # Parse and check args
    args = parser.parse_args()
    base_name = args.base_name
    training_data = os.path.abspath(args.training_data)
    if not os.path.exists(training_data):
        raise Exception("Training/reference file %s does not exist." % training_data)
    # Let's get the k-mer sizes in the training database
    ksizes = set()
    # Import all the training data
    sketches = MH.import_multiple_from_single_hdf5(training_data)
    # Check for issues with the sketches (can also check if all the kmers make sense (i.e. no '' or non-ACTG characters))
    if sketches[0]._kmers is None:
        raise Exception("For some reason, the k-mers were not saved when the database was created. Try running MakeDNADatabase.py again.")
    num_hashes = len(sketches[0]._kmers)
    # Every sketch must have the same number of hashes and a single shared k-mer size.
    for i in range(len(sketches)):
        sketch = sketches[i]
        if sketch._kmers is None:
            raise Exception(
                "For some reason, the k-mers were not saved when the database was created. Try running MakeDNADatabase.py again.")
        if len(sketch._kmers) != num_hashes:
            raise Exception("Unequal number of hashes for sketch of %s" % sketch.input_file_name)
        ksizes.add(sketch.ksize)
        if len(ksizes) > 1:
            raise Exception("Training/reference data uses different k-mer sizes. Culprit was %s." % (sketch.input_file_name))
    # Get the appropriate k-mer size
    ksize = ksizes.pop()
    # Get number of threads to use
    num_threads = args.threads
    # Check and parse the query file
    query_file = os.path.abspath(args.in_file)
    if not os.path.exists(query_file):
        raise Exception("Query file %s does not exist." % query_file)
    # Node graph is stored in the output folder with name <InputFASTQ/A>.NodeGraph.K<k_size>
    if args.node_graph is None:  # If no node graph is specified, create one
        node_graph_out = os.path.join(os.path.dirname(os.path.abspath(args.out_csv)),
                                      os.path.basename(query_file) + ".NodeGraph.K" + str(ksize))
        if not os.path.exists(node_graph_out):  # Don't complain if the default location works
            print("Node graph not provided (via -ng). Creating one at: %s" % node_graph_out)
    elif os.path.exists(args.node_graph):  # If one is specified and it exists, use it
        node_graph_out = args.node_graph
    else:  # Otherwise, the specified one doesn't exist
        raise Exception("Provided NodeGraph %s does not exist." % args.node_graph)
    # import and check the intersect nodegraph
    if args.intersect_nodegraph is True:
        intersect_nodegraph_file = os.path.splitext(training_data)[0] + ".intersect.Nodegraph"
    else:
        intersect_nodegraph_file = None
    intersect_nodegraph = None
    if intersect_nodegraph_file is not None:
        if not os.path.exists(intersect_nodegraph_file):
            raise Exception("Intersection nodegraph does not exist. Please re-run MakeDNADatabase.py with the -i flag.")
        try:
            intersect_nodegraph = khmer.load_nodegraph(intersect_nodegraph_file)
            if intersect_nodegraph.ksize() != ksize:
                raise Exception("Given intersect nodegraph %s has K-mer size %d while the database K-mer size is %d"
                                % (intersect_nodegraph_file, intersect_nodegraph.ksize(), ksize))
        # NOTE(review): this bare except also swallows the k-mer-size Exception
        # raised just above, replacing it with the generic "could not load"
        # message — confirm that is intended.
        except:
            raise Exception("Could not load given intersect nodegraph %s" % intersect_nodegraph_file)
    results_file = os.path.abspath(args.out_csv)
    force = args.force
    fprate = args.fp_rate
    coverage_threshold = args.containment_threshold  # desired coverage cutoff
    confidence = args.confidence  # desired confidence that you got all the organisms with coverage >= desired coverage
    # Get names of training files for use as rows in returned tabular data
    training_file_names = []
    for i in range(len(sketches)):
        training_file_names.append(sketches[i].input_file_name)
    # Only form the Nodegraph if we need to
    global sample_kmers
    if not os.path.exists(node_graph_out) or force is True:
        # Estimate the query cardinality first so the bloom filter can be sized optimally.
        hll = khmer.HLLCounter(0.01, ksize)
        hll.consume_seqfile(query_file)
        full_kmer_count_estimate = hll.estimate_cardinality()
        res = optimal_size(full_kmer_count_estimate, fp_rate=fprate)
        if intersect_nodegraph is None:  # If no intersect list was given, just populate the bloom filter
            sample_kmers = khmer.Nodegraph(ksize, res.htable_size, res.num_htables)
            #sample_kmers.consume_seqfile(query_file)
            # Consume the query file with several threads sharing one read parser.
            rparser = khmer.ReadParser(query_file)
            threads = []
            for _ in range(num_threads):
                cur_thrd = threading.Thread(target=sample_kmers.consume_seqfile_with_reads_parser, args=(rparser,))
                threads.append(cur_thrd)
                cur_thrd.start()
            for thread in threads:
                thread.join()
        else:  # Otherwise, only put a k-mer in the bloom filter if it's in the intersect list
            # (WARNING: this will cause the Jaccard index to be calculated in terms of J(query\intersect hash_list, training)
            # instead of J(query, training)
            # (TODO: fix this after khmer is updated)
            #intersect_nodegraph_kmer_count = intersect_nodegraph.n_unique_kmers()  # Doesnt work due to khmer bug
            intersect_nodegraph_kmer_count = intersect_nodegraph.n_occupied()  # Not technically correct, but I need to wait until khmer is updated
            if intersect_nodegraph_kmer_count < full_kmer_count_estimate:  # At max, we have as many k-mers as in the union of the training database (But makes this always return 0)
                res = optimal_size(intersect_nodegraph_kmer_count, fp_rate=fprate)
                sample_kmers = khmer.Nodegraph(ksize, res.htable_size, res.num_htables)
            else:
                sample_kmers = khmer.Nodegraph(ksize, res.htable_size, res.num_htables)
            # Slide a window of length ksize over every read and keep only
            # k-mers present in the intersect nodegraph.
            for record in screed.open(query_file):
                seq = record.sequence
                for i in range(len(seq) - ksize + 1):
                    kmer = seq[i:i + ksize]
                    if intersect_nodegraph.get(kmer) > 0:
                        sample_kmers.add(kmer)
        # Save the sample_kmers
        sample_kmers.save(node_graph_out)
        true_fprate = khmer.calc_expected_collisions(sample_kmers, max_false_pos=0.99)
    else:
        # Reuse the existing NodeGraph, but make sure its k-mer size matches.
        sample_kmers = khmer.load_nodegraph(node_graph_out)
        node_ksize = sample_kmers.ksize()
        if node_ksize != ksize:
            raise Exception("Node graph %s has wrong k-mer size of %d (input was %d). Try --force or change -k." % (
                node_graph_out, node_ksize, ksize))
        true_fprate = khmer.calc_expected_collisions(sample_kmers, max_false_pos=0.99)
    #num_sample_kmers = sample_kmers.n_unique_kmers()  # For some reason this only works when creating a new node graph, use the following instead
    num_sample_kmers = sample_kmers.n_occupied()
    # Compute all the indicies for all the training data
    pool = Pool(processes=num_threads)
    res = pool.map(unwrap_compute_indicies, zip(sketches, repeat(num_sample_kmers), repeat(true_fprate)))
    # Gather up the results in a nice form
    intersection_cardinalities = np.zeros(len(sketches))
    containment_indexes = np.zeros(len(sketches))
    jaccard_indexes = np.zeros(len(sketches))
    for i in range(len(res)):
        (intersection_cardinality, containment_index, jaccard_index) = res[i]
        intersection_cardinalities[i] = intersection_cardinality
        containment_indexes[i] = containment_index
        jaccard_indexes[i] = jaccard_index
    d = {'intersection': intersection_cardinalities, 'containment index': containment_indexes, 'jaccard index': jaccard_indexes}
    # Use only the basenames to label the rows (if requested)
    if base_name is True:
        df = pd.DataFrame(d, map(os.path.basename, training_file_names))
    else:
        df = pd.DataFrame(d, training_file_names)
    # Only get the rows above a certain threshold
    if coverage_threshold <= 0:
        est_threshold = 0
    else:
        est_threshold = threshold_calc(num_hashes, coverage_threshold, fprate, confidence)
    filtered_results = df[df['containment index'] > est_threshold].sort_values('containment index', ascending=False)
    # Export the results
    filtered_results.to_csv(results_file, index=True, encoding='utf-8')


if __name__ == "__main__":
    main()
|
import pandas as pd
import numpy as np
import scipy.integrate as integrate
import thesis_functions.utilities
from thesis_functions.initial_conditions import initial_conditions
from thesis_functions.initialconditions import InputDataDictionary, SetInitialConditions
from thesis_functions.visualization import CreatePlotGrid, SetPlotGridData
from thesis_functions.astro import FindOrbitCenter, ComputeLibrationPoints, stop_yEquals0, stop_zEquals0
from thesis_functions.astro import ComputeNonlinearDerivs, ComputeRelmoDynamicsMatrix
from thesis_functions.astro import odeintNonlinearDerivs, odeintNonlinearDerivsWithLinearRelmoSTM, odeintNonlinearDerivsWithLinearRelmo
from thesis_functions.astro import ComputeRequiredVelocity, PropagateSatelliteAndChaser
from thesis_functions.astro import PropagateSatellite, ComputeOffsets, ConvertOffsets, ConvertOffset, BuildFrames
# Select the "Barbee" test case (row 1) from the precomputed initial conditions.
initial_condition = initial_conditions.loc["Barbee", 1]
mu = initial_condition.mu
# Initial CRTBP state; y, x_dot and z_dot start at zero for this case.
initial_state = pd.Series({
    "x": initial_condition.x,
    "y": 0.0,
    "z": initial_condition.z,
    "x_dot": 0.0,
    "y_dot": initial_condition.y_dot,
    "z_dot": 0.0})
# X1 and X2 are positions of larger and smaller bodies along X axis
libration_system = ComputeLibrationPoints(mu)
# The FindOrbitCenter function doesn't work if you only propagate a partial orbit, so just treat L1 as the center
center = libration_system.L1
# Physical constants of the Earth-Moon system.
params = pd.Series({
    "m1": 5.97219e24,       # Earth (kg)
    "m2": 7.34767309e22,    # Moon (kg)
    "G": 6.67384e-11/1e9,   # m3/(kg*s^2) >> converted to km3
    "r12": 384400.0})       # km
params.loc["M"] = params.m1 + params.m2
# This is how you convert between dimensional time (seconds) and non-dimensional time (units are seconds)
time_const = params.r12**(1.5) / (params.G * params.M)**(0.5)
period = 2 * np.pi * time_const  # Period in seconds of Moon around Earth
# Period of libration point orbit (in nondimensional time units)
# NOTE(review): this immediately overwrites the Moon-orbit period computed
# above — confirm the first assignment is intentionally unused.
period = initial_condition.t
# Create a collection of waypoints where we initially population in RIC cooredinates.
# NOTE(review): pd.Panel was removed in pandas 0.25 — this script requires a legacy pandas.
waypoints = pd.Panel(np.array([np.vstack((np.zeros(6), np.array([100, 15, 5, 1, 0.03, 0.0]), np.zeros(6)))]).transpose((0, 2, 1)),
                     items=["ric"],
                     major_axis=np.array([5.31, 5.67, 6.03, 6.64, 7.0, 7.26]) * 86400 / time_const,
                     minor_axis=list("xyz"))
# Append a waypoint at t=0 for initial state.
t = 0.0
frame = BuildFrames(initial_state.to_frame().transpose(), center).iloc[0]
# RIC -> RLP rotation is the inverse of the frame's RLP -> RIC matrix.
ric_to_rlp = np.linalg.inv(frame.loc["ric"])
waypoints.loc["ric", t] = initial_state[list("xyz")].values
waypoints.loc["rlp", t] = np.dot(waypoints.loc["ric", t], ric_to_rlp)
waypoints.loc["vnb", t] = np.dot(waypoints.loc["rlp", t], frame.loc["vnb"])
# Finally, re-sort our waypoints.
waypoints = waypoints.sort_index(1)
# Create a Panel to store waypoint frames.
# NOTE(review): waypoint_frames is never written to below — confirm it is needed.
waypoint_frames = pd.Panel()
# Prepend 0 to the list of waypoint times and create a set of
# waypoint intervals.
# NOTE(review): under Python 3 zip() is a single-use iterator, so the second
# `for start, end in waypoint_intervals` loop below would see nothing; wrap in
# list() if running on Python 3.
waypoint_intervals = zip(waypoints.major_axis[:-1], waypoints.major_axis[1:])
state = initial_state
# Pass 1: propagate through each interval to obtain the frame at each waypoint
# time, then express that waypoint in the RLP and VNB frames.
for start, end in waypoint_intervals:
    time = np.linspace(start, end, 500)
    # Build an ephem for the given timespan up to the waypoint.
    ephem = PropagateSatellite(mu, time, state)
    # Build the corresponding frames.
    frames = BuildFrames(ephem, center)
    # Select the last item in our frames collection as the waypoint frame.
    waypoint_frame = frames.iloc[-1]
    # Calculate the matrix to go from RIC to RLP
    ric_to_rlp = np.linalg.inv(waypoint_frame.loc["ric"])
    # Calculate the waypoint in the RLP frame and store it
    waypoints.loc["rlp", end] = np.dot(waypoints.loc["ric", end], ric_to_rlp)
    waypoints.loc["vnb", end] = np.dot(waypoints.loc["rlp", end], waypoint_frame.loc["vnb"])
    # Reset the state as the last entry in the ephem.
    # NOTE(review): DataFrame.irow was removed in modern pandas — .iloc[-1]
    # is the current equivalent.
    state = ephem.irow(-1)
# Create a panel the represents the ephem of each satellite.
t = 0.0
target_satellite_ephem = pd.Panel(items=["rlp"], major_axis=[t], minor_axis=["x", "y", "z", "x_dot", "y_dot", "z_dot"])
chaser_satellite_ephem = target_satellite_ephem.copy()
# Configure the initial states of each ephem.
target_satellite_ephem.loc["rlp", t] = initial_state.values
# For the follower, use the position from the initial waypoint
chaser_satellite_ephem.loc["rlp_linear", t, ["x", "y", "z"]] = waypoints["rlp"].iloc[0]
chaser_satellite_ephem.loc["rlp", t, ["x", "y", "z"]] = waypoints["rlp"].iloc[0]
# Next, simulate the two spacecraft for each waypoint interval.
for start, end in waypoint_intervals:
    time = np.linspace(start, end, 500)
    # Select out the RLP vector of the next waypoint.
    next_waypoint = waypoints["rlp"][(waypoints.major_axis > start)].iloc[0]
    chaser_satellite_state = chaser_satellite_ephem.loc["rlp", start, ["x", "y", "z"]]
    # Compute the required velocity at the current waypoint.
    required_relative_velocity = ComputeRequiredVelocity(state, chaser_satellite_state, start, next_waypoint, end, mu)
    # Calculate required velocity.
    required_velocity = required_relative_velocity - target_satellite_ephem.loc["rlp", start, ["x_dot", "y_dot", "z_dot"]]
    # Store the required velocity.
    chaser_satellite_ephem.loc["rlp", start, ["x_dot", "y_dot", "z_dot"]] = required_velocity
    # Calculate the relative state between the two spacecraft.
    relative_state = target_satellite_ephem.loc["rlp", start] - chaser_satellite_ephem.loc["rlp", start]
    # Propagate the target spacecraft using nonlinear dynamics and generate linear offset.
    target_ephem, linear_offset = PropagateSatelliteAndChaser(mu, time, target_satellite_ephem.loc["rlp", start], relative_state)
    # Propagate the chaser spacecraft using nonlinear dynamics.
    chaser_ephem = PropagateSatellite(mu, time, chaser_satellite_ephem.loc["rlp", start])
    # We need to re-index our ephems. Boo.
    target_satellite_ephem = target_satellite_ephem.reindex(major_axis=np.unique(np.concatenate((target_satellite_ephem.major_axis.values, time))))
    chaser_satellite_ephem = chaser_satellite_ephem.reindex(major_axis=np.unique(np.concatenate((chaser_satellite_ephem.major_axis.values, time))))
    # Store the ephems.
    target_satellite_ephem.loc["rlp", time] = target_ephem.values
    chaser_satellite_ephem.loc["rlp", time] = chaser_ephem.values
    chaser_satellite_ephem.loc["rlp_linear", time] = (target_ephem + linear_offset).values
|
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from ldl.data import get_episodic_loader
from model import DLDReversed
def train(model, loss_func, train_loader, epochs, silent=False, device=None):
    """Fit each per-class predictor of *model* on its own class samples.

    *train_loader* yields (x, y) pairs; the predictor matching ``y`` is
    activated and optimized to reconstruct ``x``.
    """
    for epoch in range(epochs):
        for batch_i, (x, y) in enumerate(train_loader):
            x = x.squeeze().to(device)
            y = y.to(device)
            # Route this sample to the predictor that owns its class.
            model.activate_predictor(class_=y.item())
            reconstruction = model(x)
            loss = loss_func(reconstruction, x).mean()
            optimizer = model.get_optimizer(y.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_i % 100 == 0 and not silent:
                msg = 'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'
                print(msg.format(epoch+1, batch_i, len(train_loader),
                                 batch_i/len(train_loader)*100, loss.item()))
def test(model, test_loader, silent=False, device='cpu'):
    """Classify each sample as the class whose predictor reconstructs it with
    the smallest MSE; return the resulting accuracy."""
    model.eval()
    correct = 0
    with torch.no_grad():
        for batch_i, (x, y) in enumerate(test_loader):
            x = x.squeeze().to(device)
            predictions = model.predict(x)
            # One reconstruction error per class predictor.
            mses = [(p.to(device) - x).pow(2).sum(0) / 2 for p in predictions]
            if np.argmin(mses) == y.item():
                correct += 1
    acc = correct / (batch_i+1)
    if not silent:
        print('Accuracy: {}/{} ({:.0f}%)\n'.format(correct, batch_i+1, 100. * acc))
    return acc
def preprocess_data(data):
    """Identity hook — placeholder for future feature preprocessing."""
    return data
def run_experiment(config):
    """Run ``config['trials']`` episodic few-shot runs and return the mean accuracy."""
    np.random.seed(2019)
    torch.manual_seed(2019)

    dataset = config['dataset']
    way = config['way']
    train_shot = config['train_shot']
    test_shot = config['test_shot']
    mse_loss = config['loss']
    trials = config['trials']
    epochs = config['epochs']
    silent = config['silent']
    split = config['split']
    add_rotations = config['add_rotations']
    in_alphabet = config['in_alphabet']
    x_dim = config['x_dim']
    z_dim = config['z_dim']
    optimizer = config['optimizer']
    lr = config['lr']
    initialization = config['initialization']
    gpu = config['gpu']
    device = torch.device(f"cuda:{gpu}" if torch.cuda.is_available() else "cpu")

    accs = []
    for _ in tqdm(range(trials)):
        dataloader = get_episodic_loader(dataset, way, train_shot, test_shot, x_dim,
                                         split=split,
                                         add_rotations=add_rotations,
                                         in_alphabet=in_alphabet)
        model = DLDReversed(way, in_dim=x_dim**2, z_dim=z_dim, opt=optimizer,
                            lr=lr, initialization=initialization)
        model.to(device)
        for sample in dataloader:
            # Flatten support/query episodes into (n, x_dim^2) tensors; labels
            # follow the per-class grouping produced by the loader.
            x_train = torch.tensor(preprocess_data(sample['xs'].reshape((-1, x_dim**2))))
            y_train = torch.tensor(np.asarray([i // train_shot for i in range(train_shot * way)]))
            x_test = torch.tensor(preprocess_data(sample['xq'].reshape((-1, x_dim**2))))
            y_test = torch.tensor(np.asarray([i // test_shot for i in range(test_shot * way)]))
            # Shuffle the support set before training.
            inds = np.random.permutation(x_train.shape[0])
            samples_train = list(zip(x_train[inds], y_train[inds]))
            samples_test = list(zip(x_test, y_test))
            train(model, loss_func=mse_loss, train_loader=samples_train, epochs=epochs,
                  silent=silent, device=device)
            accs.append(test(model, samples_test, silent=silent, device=device))
    return np.mean(accs)
if __name__ == "__main__":
    # NOTE(review): this `device` is unused — run_experiment derives its own
    # device from config['gpu'].
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Experiment configuration: 5-way 1-shot Omniglot, 100 trials.
    config = {
        'dataset': 'omniglot',
        'way': 5,                 # classes per episode
        'train_shot': 1,          # support samples per class
        'test_shot': 1,           # query samples per class
        'loss': nn.MSELoss(reduction='none'),
        'epochs': 3,
        'trials': 100,
        'silent': True,
        'split': 'test',
        'in_alphabet': False,
        'add_rotations': True,
        'x_dim': 28,              # input image side length
        'z_dim': 784,
        'initialization': 'orthogonal',
        'optimizer': 'adam',
        'lr': 0.001,
        'channels': 1,
        'gpu': 0
    }
    mean_accuracy = run_experiment(config)
    print("Mean accuracy: ", mean_accuracy)
|
import openpyxl
from openpyxl import load_workbook
import time

# Source workbooks: two season files (AW18/SS19) and the merged target sheet.
AW18 = load_workbook(filename = 'C:\\Users\\timde\\Desktop\\KingsofIndigo\\InformatieKoppelen\\AW18.xlsx')
SS19 = load_workbook(filename = 'C:\\Users\\timde\\Desktop\\KingsofIndigo\\InformatieKoppelen\\SS19.xlsx')
Samen = load_workbook(filename = 'C:\\Users\\timde\\Desktop\\KingsofIndigo\\InformatieKoppelen\\Samenvoegen.xlsx')

# Active worksheet of each workbook.  (The previous `X.get_sheet_names`
# assignments were dead code: they stored the bound method itself and were
# immediately overwritten by `.active`, so they have been removed.)
AW18sheet = AW18.active
SS19sheet = SS19.active
Samensheet = Samen.active

max_row = Samensheet.max_row
#print(max_row)

Loops = 0
# For every article number in AW18 column A, find the matching row in the
# merged sheet (column B) and copy model information/size into columns 56/57.
for cell in AW18sheet['A']:
    I = cell.row
    KnummerAW18 = AW18sheet.cell(row=I, column=1).value
    print(KnummerAW18)
    for cell in Samensheet['B']:
        E = cell.row
        KnummerSamen = Samensheet.cell(row=E, column=2).value
        if KnummerAW18 == KnummerSamen:
            ModelInformation = AW18sheet.cell(row=I, column=5).value
            ModelSize = AW18sheet.cell(row=I, column=6).value
            Samensheet.cell(row=E, column=56, value=ModelInformation)
            Samensheet.cell(row=E, column=57, value=ModelSize)
            print(ModelInformation)
            print(ModelSize)
            print(KnummerSamen)
        else:
            # Count (and log) every non-matching comparison.
            Loops += 1
            print (Loops)

Samen.save('Samenvoegen.xlsx')
# -*- coding:utf-8 -*-
# Python 2 demo: issue a Google search request and inspect the response.
import requests
import urllib

# Query parameter: q = "长城" ("Great Wall").
wd = {'q': '长城'}
headers = {"User-Agent": "Mozilla/5.0"}
# `params` accepts a dict (or string) of query parameters; a dict is
# URL-encoded automatically, so no explicit urlencode() call is needed.
response = requests.get("https://www.google.com/search?", params=wd, headers=headers)
# Response body as Unicode text:
# print response.text
# Response body as raw bytes:
print response.content
# Final (fully encoded) URL of the request:
print response.url
# Character encoding detected for the response:
print response.encoding
# HTTP status code:
print response.status_code
# scheduler script
# Entry point: announce and run the wakeup routine once.
from wakeup import wakeup

print("wakeup")
wakeup()
|
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from .records import Records
class WhitelistId:
    """REST operations on a single whitelist resource, addressed by its id."""

    def __init__(self, connection, base_uri, whitelist_id):
        self.connection = connection
        self.base_uri = base_uri + "/" + whitelist_id

    def get(self):
        """Find a specific whitelist."""
        return self.connection.get(self.base_uri)

    def put(self, sponsor_id, account_id, name, description, **kwargs):
        """Update a specific Whitelist; extra keyword args become properties."""
        properties = {
            "sponsorId": sponsor_id,
            "accountId": account_id,
            "name": name,
            "description": description,
        }
        properties.update(kwargs)
        return self.connection.put(self.base_uri, json.dumps(properties))

    def delete(self):
        """Delete a specific Whitelist."""
        return self.connection.delete(self.base_uri)

    def records(self):
        """Create an Records object."""
        return Records(self.connection, self.base_uri)
"""Send a test sentence to the chatbot prediction endpoint and print the reply."""
import requests
import json

payload = {'sentence': 'hello'}
# Use the ``json=`` parameter so requests serializes the payload AND sets the
# ``Content-Type: application/json`` header; the previous ``data=json.dumps``
# form sent the right bytes but left the content type as form-encoded.
response = requests.post("https://chatbot-api52.herokuapp.com/predict", json=payload)
print(response.text)
"""Class implementing the lambda response format standards for API Gateway"""
from slat.types import JsonapiBody, JsonDict
class LambdaProxyResponse:
    """
    ensure we have the correct response format as per
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
    """

    @staticmethod
    def respond(status_code: int, resp_payload: JsonapiBody, context: object) -> JsonDict:
        """
        Build the Lambda proxy-integration response dict expected by API Gateway.

        :param status_code: HTTP status code to report
        :type status_code: int
        :param resp_payload: response body; a JsonapiBody gets the request id
            stamped into its meta section before serialization
        :type resp_payload: JsonapiBody
        :param context: Lambda context object (supplies ``aws_request_id``)
        :type context: object
        :return: proxy-integration response dictionary
        :rtype: JsonDict
        """
        # Tag JSON:API payloads with the request id for traceability.
        if isinstance(resp_payload, JsonapiBody):
            resp_payload.meta['request_id'] = context.aws_request_id

        # Empty or missing payloads serialize to an empty body string.
        body = '' if not resp_payload or resp_payload == {} else str(resp_payload)

        return {
            'statusCode': status_code,
            'body': body,
            'headers': {
                'Content-Type': 'application/json',
                'X-Hb-Request-Id': context.aws_request_id,
            },
            'multiValueHeaders': {},
            'isBase64Encoded': False,
        }
|
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import unittest
import requests
import commonl.testing
import tcfl.tc
import ttbl.fsdb
srcdir = os.path.dirname(__file__)  # directory containing this test file
# Spawn a test ttbd daemon with the "things" test configuration; its URL
# spec is used below to aim the test cases at it.
ttbd = commonl.testing.test_ttbd(config_files = [
    os.path.join(srcdir, "conf_test_things.py") ])
# use target3 and not 1 and 2, because the next test is going to be
# releasing them half way and we don't want this guy to get it by
# mistake
@tcfl.tc.target(ttbd.url_spec + " and id == 'target3'")
class _01(tcfl.tc.tc_c, unittest.TestCase):
    """Verify a thing cannot be plugged into a target we do not own."""
    def eval_(self, target):
        # Shall not be able to plug something we don't own
        with self.assertRaisesRegexp(requests.HTTPError,
                                     "400: thing1: tried to use "
                                     "non-acquired target"):
            target.thing_plug('thing1')
    @staticmethod
    def clean():
        # Fail the run if the daemon log recorded any problems
        ttbd.check_log_for_issues()
@tcfl.tc.target(ttbd.url_spec + " and id == 'thing2'", name = 'thing2')
@tcfl.tc.target(ttbd.url_spec + " and id == 'thing1'", name = 'thing1')
@tcfl.tc.target(ttbd.url_spec + " and id == 'target'")
class _02(tcfl.tc.tc_c, unittest.TestCase):
    """Plug/unplug lifecycle: verify the server's on-disk state (fsdb) and
    the thing_list() API stay consistent through plugging, unplugging and
    releasing targets/things."""
    def setup(self):
        # Report assertion failures as test failures (not blockages)
        self.exception_to_result[AssertionError] = tcfl.tc.failed_e
    def eval_01(self, target, thing1):
        # server's state for target
        fsdb_target = ttbl.fsdb.fsdb(os.path.join(ttbd.state_dir, target.id))
        things = target.thing_list()
        self.assertTrue(all([ things[thing] == False for thing in things ]),
                        "all things are not unplugged upon acquisition")
        # if we own the target and the thing, we can plug it
        target.thing_plug(thing1)
        # verify the server records the state
        self.assertEqual(fsdb_target.get("thing-" + thing1.id), 'True')
        # and the API call confirms
        things = target.thing_list()
        self.assertTrue(things[thing1.id],
                        "thing reports unplugged after plugging it")
        target.thing_unplug(thing1)
        # the server's state information will drop it
        self.assertTrue(fsdb_target.get("thing-" + thing1.id) == None)
        # and the API will reflect it
        things = target.thing_list()
        self.assertFalse(things[thing1.id],
                         msg = "thing reports unplugged after plugging it")
    def eval_02_thing_released_upon_release(self, target, thing1):
        # server's state for target
        fsdb_target = ttbl.fsdb.fsdb(os.path.join(ttbd.state_dir, target.id))
        target.thing_plug(thing1)
        things = target.thing_list()
        self.assertTrue(things[thing1.id], "thing1 is not plugged; it should")
        # when we release the thing, it becomes unplugged
        thing1.release()
        # verify the server did it
        self.assertTrue(fsdb_target.get("thing-" + thing1.id) == None)
        # and API confirms the server did it
        things = target.thing_list()
        self.assertFalse(things[thing1.id],
                         "thing not unplugged after releasing it")
    def eval_03(self, target, thing2):
        # releasing and re-acquiring the target must leave things unplugged
        target.release()
        target.acquire()
        things = target.thing_list()
        self.assertFalse(things[thing2.id],
                         "thing2 not unplugged after releasing the target")
    @staticmethod
    def clean():
        # Fail the run if the daemon log recorded any problems
        ttbd.check_log_for_issues()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the"License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
import re
import json
from vsm.api import common
from vsm.api.openstack import wsgi
from vsm.api import xmlutil
from vsm import exception
from vsm import flags
from vsm.openstack.common import log as logging
from vsm.api.views import clusters as clusters_views
from vsm.api.views import summary as summary_view
from vsm.openstack.common import jsonutils
from vsm import utils
from vsm import conductor
from vsm import scheduler
from vsm import db
from vsm.agent.cephconfigutils import CephConfigParser
LOG = logging.getLogger(__name__)  # module-level logger
FLAGS = flags.FLAGS  # global VSM configuration flags
def make_cluster(elem, detailed=False):
    """Attach the cluster XML attributes to *elem* (an xmlutil element)."""
    attributes = (
        'id',
        'name',
        'file_system',
        'journal_size',
        'size',
        'management_network',
        'ceph_public_network',
        'cluster_network',
        'primary_public_ip_netmask',
        'secondary_public_ip_netmask',
        'cluster_ip_netmask',
    )
    for attribute in attributes:
        elem.set(attribute)
    if detailed:
        # The detailed view currently adds nothing extra.
        pass
        #xmlutil.make_links(elem, 'links')
# XML namespace map: default OpenStack v1.1 namespace plus the Atom namespace.
cluster_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ClusterTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single cluster."""
    def construct(self):
        cluster_elem = xmlutil.TemplateElement('cluster', selector='cluster')
        make_cluster(cluster_elem, detailed=True)
        return xmlutil.MasterTemplate(cluster_elem, 1, nsmap=cluster_nsmap)
class ClustersTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of clusters."""
    def construct(self):
        list_root = xmlutil.TemplateElement('clusters')
        cluster_elem = xmlutil.SubTemplateElement(list_root, 'cluster',
                                                  selector='clusters')
        make_cluster(cluster_elem, detailed=True)
        return xmlutil.MasterTemplate(list_root, 1, nsmap=cluster_nsmap)
class ClusterController(wsgi.Controller):
    """The Cluster API controller for the OpenStack API.

    Exposes cluster CRUD plus lifecycle operations (create, import,
    start/stop, integrate, refresh) by delegating to the conductor and
    scheduler APIs.
    """
    _view_builder_class = clusters_views.ViewBuilder
    def __init__(self, ext_mgr):
        self.conductor_api = conductor.API()
        self.scheduler_api = scheduler.API()
        self.ext_mgr = ext_mgr
        super(ClusterController, self).__init__()
    def _get_server_search_options(self):
        """Return server search options allowed by non-admin."""
        return ('id', 'name', 'public_ip')
    @wsgi.serializers(xml=ClustersTemplate)
    def index(self, req):
        """Get cluster list."""
        search_opts = {}
        search_opts.update(req.GET)
        context = req.environ['vsm.context']
        # BUG FIX: the method must be *called* -- previously the bound method
        # object itself was passed, so the "opt not in allowed" membership
        # test in remove_invalid_options raised TypeError for non-admins.
        remove_invalid_options(context,
                               search_opts,
                               self._get_server_search_options())
        clusters = self.conductor_api.get_cluster_list(context)
        #server_list = servers.values()
        #sorted_servers = sorted(server_list,
        #                       key=lambda item: item['id'])
        return self._view_builder.index(clusters)
    #@wsgi.serializers(xml=ClustersTemplate)
    def create(self, req, body):
        """Create a cluster from the server list in the request body."""
        LOG.info("CEPH_LOG cluster create body: %s" % body)
        context = req.environ['vsm.context']
        server_list = body['cluster']['servers']
        LOG.info('Begin to call scheduler.createcluster')
        # only and should only one mds
        mds_count = 0
        for server in server_list:
            if server['is_mds'] == True:
                mds_count = mds_count + 1
        if mds_count > 1:
            raise exc.HTTPBadRequest("More than one mds.")
        # RGW with simple configuration(one rgw instance)
        # RGW with federated configuration(multiple rgw instances)
        """
        # only and should only one rgw
        rgw_count = 0
        for server in server_list:
            if server['is_rgw'] == True:
                rgw_count = rgw_count + 1
        if rgw_count > 1:
            raise exc.HTTPBadRequest("More than one rgw.")
        """
        self.scheduler_api.create_cluster(context, server_list)
        return webob.Response(status_int=202)
    def import_ceph_conf(self, req, body=None):
        """Import a ceph.conf into the DB and the ceph nodes.

        Validates the new conf against the currently deployed one (same
        osd/mon/mds sections and key fields) before importing.
        """
        LOG.info("CEPH_LOG import_ceph_conf body=%s"%body )
        context = req.environ['vsm.context']
        ceph_conf_path = body["cluster"]["ceph_conf_path"]
        cluster_name = body["cluster"]["cluster_name"]
        cluster = db.cluster_get_by_name(context,cluster_name)
        if cluster:
            ceph_conf_dict_old = CephConfigParser(FLAGS.ceph_conf, sync=False).as_dict()
            ceph_conf_dict = CephConfigParser(ceph_conf_path, sync=False).as_dict()
            check_ret = check_ceph_conf(ceph_conf_dict_old,ceph_conf_dict)
            if check_ret:
                # Validation failed; report the accumulated error text
                return {"message":"%s"%check_ret}
            self.scheduler_api.import_ceph_conf(context,cluster_id=cluster.id,ceph_conf_path=ceph_conf_path)
            return {"message":"Success"}
        else:
            return {"message":"No such cluster which named %s in DB"%cluster_name}
    def detect_cephconf(self, req, body=None):
        """Ask the scheduler to detect the ceph.conf described by *body*."""
        LOG.info("CEPH_LOG detect_cephconf body=%s"%body )
        context = req.environ['vsm.context']
        ret = self.scheduler_api.detect_cephconf(context,body)
        LOG.info('CEPH_LOG detect_cephconf get ret=%s'%ret)
        return ret
    def detect_crushmap(self, req, body=None):
        """Ask the scheduler to detect the crushmap described by *body*."""
        LOG.info("CEPH_LOG detect_crushmap body=%s"%body )
        context = req.environ['vsm.context']
        ret = self.scheduler_api.detect_crushmap(context,body)
        LOG.info('CEPH_LOG detect_crushmap get ret=%s'%ret)
        return ret
    def get_crushmap_tree_data(self, req, body=None):
        """Fetch the crushmap tree structure from the scheduler."""
        LOG.info("CEPH_LOG get_crushmap_tree_data body=%s"%body )
        context = req.environ['vsm.context']
        ret = self.scheduler_api.get_crushmap_tree_data(context,body)
        LOG.info('CEPH_LOG get_crushmap_tree_data get ret=%s'%ret)
        return ret
    def check_pre_existing_cluster(self,req,body):
        """Validate a pre-existing cluster before import."""
        LOG.info("CEPH_LOG check_pre_existing_cluster body=%s"%body )
        context = req.environ['vsm.context']
        ret = self.scheduler_api.check_pre_existing_cluster(context,body)
        LOG.info('CEPH_LOG check_pre_existing_cluster get ret=%s'%ret)
        return ret
    def import_cluster(self,req,body):
        """Import a pre-existing ceph cluster."""
        LOG.info("CEPH_LOG import_cluster body=%s"%body )
        context = req.environ['vsm.context']
        ret = self.scheduler_api.import_cluster(context,body)
        LOG.info('CEPH_LOG import_cluster get ret=%s'%ret)
        return ret
    def integrate(self, req,body=None):
        """Integrate an existing ceph cluster."""
        LOG.info("CEPH_LOG cluster integrate body" )
        context = req.environ['vsm.context']
        #server_list = body['cluster']['servers']
        LOG.info('Begin to call scheduler.integrate_cluster')
        self.scheduler_api.integrate_cluster(context)
    def start_cluster(self, req, body=None):
        """Start the cluster's nodes (all nodes when no id is given)."""
        LOG.info("CEPH_LOG start_cluster body=%s"%body )
        context = req.environ['vsm.context']
        cluster_id = body["cluster"]["id"]
        if cluster_id:
            nodes = db.init_node_get_by_cluster_id(context,cluster_id)
        else:
            nodes = db.init_node_get_all(context)
        servers = {"servers":nodes}
        self.scheduler_api.start_cluster(context,servers)
        return {"message":"Success"}
    def stop_cluster(self, req, body=None):
        """Stop the cluster's nodes (all nodes when no id is given)."""
        LOG.info("CEPH_LOG stop_cluster body=%s"%body )
        context = req.environ['vsm.context']
        cluster_id = body["cluster"]["id"]
        if cluster_id:
            nodes = db.init_node_get_by_cluster_id(context,cluster_id)
        else:
            nodes = db.init_node_get_all(context)
        servers = {"servers":nodes}
        self.scheduler_api.stop_cluster(context,servers)
        return {"message":"Success"}
    def get_ceph_health_list(self, req):
        """Return the ceph health status list from the scheduler."""
        context = req.environ['vsm.context']
        ceph_status = self.scheduler_api.get_ceph_health_list(context)
        return {"ceph_status":ceph_status}
    @wsgi.serializers(xml=ClustersTemplate)
    def show(self, req, id):
        """Show a cluster (currently returns placeholder data)."""
        LOG.info("CEPH_LOG cluster show id: %s" % id)
        return {"cluster": {"id":1,"name":"2"}}
    @wsgi.serializers(xml=ClustersTemplate)
    def update(self, req, id, body):
        """Update a cluster (currently returns placeholder data)."""
        LOG.info("CEPH_LOG cluster update body: %s" % body)
        return {"cluster": {"id":1,"name":"2"}}
    def delete(self, req, id):
        """delete cluster."""
        LOG.info("CEPH_LOG cluster delete id: %s" % id)
        return webob.Response(status_int=202)
    def summary(self, req,cluster_id=None):
        """Return the cluster summary view."""
        #LOG.info('osd-summary body %s ' % body)
        context = req.environ['vsm.context']
        #TODO: as we have only one cluster for now, the cluster_id
        #has been hardcoded. In furture, client should pass
        # the cluster id by url.
        # Renamed local from `sum` to avoid shadowing the builtin.
        if cluster_id:
            cluster_summary = db.summary_get_by_cluster_id_and_type(context, cluster_id, 'cluster')
        else:
            cluster_summary = db.summary_get_by_type_first(context, 'cluster')
        vb = summary_view.ViewBuilder()
        return vb.basic(cluster_summary, 'cluster')
    def get_service_list(self, req,cluster_id=None):
        """Return all services known to the DB."""
        context = req.environ['vsm.context']
        service = db.service_get_all(context)
        return {"services":service}
    def refresh(self, req):
        """Trigger a cluster status refresh via the scheduler."""
        context = req.environ['vsm.context']
        self.scheduler_api.cluster_refresh(context)
def create_resource(ext_mgr):
    # Factory used by the API router to wrap the controller as a WSGI resource.
    return wsgi.Resource(ClusterController(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
    """Remove search options that are not valid for non-admin API/context."""
    if context.is_admin:
        # Allow all options
        return
    # Otherwise, strip out all unknown options
    # NOTE(review): `_` is assumed to be the gettext translation function
    # installed globally elsewhere in this project -- confirm.
    unknown_options = [opt for opt in search_options
                       if opt not in allowed_search_options]
    bad_options = ", ".join(unknown_options)
    log_msg = _("Removing options '%(bad_options)s' from query") % locals()
    LOG.debug(log_msg)
    # Mutates the caller's dict in place.
    for opt in unknown_options:
        del search_options[opt]
def check_ceph_conf(ceph_conf_dict_old, ceph_conf_dict):
    """Compare an old and a new parsed ceph.conf for compatibility.

    Both arguments are dicts of section name -> {field: value}. Returns an
    empty string when compatible, otherwise a newline-separated error text
    describing the differences (osd/mon/mds section sets and key fields).

    Fixed: ``except Exception,e`` is a SyntaxError on Python 3; replaced
    with ``except Exception as e`` (valid since Python 2.6). Substring
    checks now use ``in`` instead of ``.find() != -1``.
    """
    check_result = ''
    keys_old = ceph_conf_dict_old.keys()
    keys_new = ceph_conf_dict.keys()
    # Section names look like "osd.0", "mon.a", "mds.0".
    old_osds = [key for key in keys_old if "osd." in key]
    old_mons = [key for key in keys_old if "mon." in key]
    old_mds = [key for key in keys_old if "mds." in key]
    new_osds = [key for key in keys_new if "osd." in key]
    new_mons = [key for key in keys_new if "mon." in key]
    new_mds = [key for key in keys_new if "mds." in key]
    # The daemon section sets must match exactly.
    if set(old_osds) != set(new_osds):
        check_result = '%s\n.error:The number of osd is different from the old version'%check_result
    if set(old_mons) != set(new_mons):
        check_result = '%s\n.error:The number of mon is different from the old version'%check_result
    if set(old_mds) != set(new_mds):
        check_result = '%s\n.error:The number of mds is different from the old version'%check_result
    if check_result:
        return check_result
    # Critical per-section fields must be unchanged; a missing field is
    # reported via the KeyError branch below.
    try:
        osd_field_to_check = ["osd journal","devs","host","cluster addr","public addr"]
        for osd in new_osds:
            for field in osd_field_to_check:
                if ceph_conf_dict[osd][field] != ceph_conf_dict_old[osd][field]:
                    check_result = '%s\n.error:the value of %s below section %s'%(check_result,field,osd)
        mon_field_to_check = ['mon addr','host']
        for mon in new_mons:
            for field in mon_field_to_check:
                if ceph_conf_dict[mon][field] != ceph_conf_dict_old[mon][field]:
                    check_result = '%s\n.error:the value of %s below section %s'%(check_result,field,mon)
        mds_field_to_check = ['public addr','host']
        for mds in new_mds:
            for field in mds_field_to_check:
                if ceph_conf_dict[mds][field] != ceph_conf_dict_old[mds][field]:
                    check_result = '%s\n.error:the value of %s below section %s'%(check_result,field,mds)
    except Exception as e:
        check_result = '%s\n.error:KeyError :%s'%(check_result,e)
    return check_result
|
import tokenize
from . import _checker
class Checker(
    _checker.BaseChecker,
):
    """Flag string tokens written with double quotes (error I011)."""

    @classmethod
    def check(
        cls,
        filename,
        lines,
        tokens,
        start_position_to_token,
        ast_tree,
        astroid_tree,
        all_astroid_nodes,
    ):
        # A double-quoted string either starts with '"' directly, or -- when
        # it carries a prefix such as r"" / b"" -- still ends with '"'.
        double_quoted = (
            token
            for token in tokens
            if token.type == tokenize.STRING
            and (token.string.startswith('"') or token.string.endswith('"'))
        )
        for token in double_quoted:
            row, col = token.start
            yield from cls.error_yielder.yield_error(
                error_id='I011',
                line_number=row,
                column_offset=col,
            )
|
from django import forms
from django.contrib.auth.models import User
from UserPage.models import UserProfile
class SignUpForm(forms.ModelForm):
    """Registration form for Django's built-in User model.

    Adds a password field rendered as a password input; exposes only
    username, password and email.
    """
    password = forms.CharField(widget=forms.PasswordInput())
    class Meta():
        model = User
        fields = ('username', 'password', 'email')
|
# Holds all the data of a certain column
class Cue:
    # column: an array with this structure: [title, datapoint_1, ..., datapoint_n]
    def __init__(self, column):
        # First element is the column title; remove it from the column in place.
        self.name = column.pop(0)
        # Determine whether every datapoint is numeric.
        self.numerical = self._getDataType(column)
        # Convert the datapoints to floats when the column is numeric.
        self.data = self._getTransformedData(column)
        # Pre-compute the sorted list of distinct values.
        self.levels = self._getLevels()

    # Returns the minimum value available in the data of the cue
    def getMinimum(self):
        return self.levels[0]

    # Returns the maximum value available in the data of the cue
    def getMaximum(self):
        return self.levels[-1]

    # Returns the data, sorted
    def _sortedData(self):
        return sorted(self.data)

    # Calculates the levels (i.e. all the distinct categories) of the data,
    # sorted from low to high (or alphabetically for strings)
    def _getLevels(self):
        return sorted(set(self.data))

    # Returns true if a certain datapoint is present in the data
    # datapoint: element to be searched
    # data: array to look in; defaults to the data of the current cue
    def isDatapointPresent(self, datapoint, data=None):
        if data is None:
            data = self.data
        return datapoint in data

    # Transforms the data to numbers (if possible)
    # data: an array of datapoints
    def _getTransformedData(self, data):
        # Only numeric-typed cues are converted; the list is mutated in place
        # so the caller's column object stays consistent.
        if self.numerical:
            data[:] = [float(value) for value in data]
        return data

    # Returns True if all the values in the column are numbers
    # data: an array of datapoints
    def _getDataType(self, data):
        # A value counts as numeric when it is all digits after removing at
        # most one decimal point.
        return all(value.replace('.', '', 1).isdigit() for value in data)

    # Returns the amount of times a target value appears in the cue
    def getValueCount(self, value):
        target = str(value)
        return sum(1 for datapoint in self.data if str(datapoint) == target)

    # Returns the datapoint in position 'index'
    def getDataInIndex(self, index):
        return self.data[index]

    # Returns the amount of datapoints in the cue
    def getCueLength(self):
        return len(self.data)

    # Returns the name of the cue
    def getName(self):
        return self.name

    # Returns all the possible values of the cue
    def getLevels(self):
        return self.levels

    # Returns a list of the values in the specified positions
    # positions: targeted indexes
    def getValuesInPositions(self, positions):
        return [self.data[position] for position in positions]
import numpy as np
import nibabel as nb
import pandas as pd
import sys
from nibabel.cifti2 import Cifti2Image
from sklearn.linear_model import LinearRegression
from scipy.signal import butter, filtfilt
from nilearn.signal import clean
import matplotlib.pyplot as plt
from matplotlib import gridspec as mgs
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, Normalize
import seaborn as sns
from seaborn import color_palette
def surface_filt_reg(datafile,confound,lowpass,highpass,outfilename,pre_svg,post_svg,fd,tr,
                     dvars,process_order=['DMT','REG','TMP'],filter_order=2):
    '''
    Demean/detrend, confound-regress and band-pass filter a surface BOLD
    file, writing the cleaned file plus before/after carpet-plot SVGs.

    datafile : gifti or cifti file
    tr : repetition time
    confound: confound matrix in regressors by timepoints
    lowpass: lowpass filter in Hz
    highpass: highpass filter in Hz
    outfilename: output file name
    pre_svg: svg plot name before regression
    post_svg: svg plot name after regression
    fd: framewise displacement
    dvars: dvars before regression
    process_order: element [1] selects whether temporal filtering ('TMP')
        or regression ('REG') happens second
    filter_order: NOTE(review): currently unused -- both branches
        hard-code order=2; confirm intent.
    '''
    datamatrix=read_gifti_cifti(datafile=datafile)
    dd_data=demean_detrend_data(data=datamatrix,TR=tr,order=1)
    dd_confound=demean_detrend_data(data=confound,TR=tr,order=1)
    if process_order[1] == 'TMP':
        # Filter first, then regress the equally-filtered confounds out.
        filtered_data = butter_bandpass(data=dd_data,fs=1/tr,lowpass=lowpass,highpass=highpass,order=2)
        filtered_confound = butter_bandpass(data=dd_confound,fs=1/tr,lowpass=lowpass,highpass=highpass,order=2)
        pre_davrs=compute_dvars(datat = datamatrix)
        reg_data = linear_regression(data=filtered_data,confound=filtered_confound)
        plot_svg(fdata=datamatrix,fd=fd,dvars=pre_davrs,filename=pre_svg,tr=tr)
        reg_davrs=compute_dvars(datat = reg_data)
        plot_svg(fdata=reg_data,fd=fd,dvars=reg_davrs,filename=post_svg,tr=tr)
        write_gifti_cifti(data_matrix=reg_data,template=datafile,filename=outfilename)
    elif process_order[1] == 'REG':
        # Regress first, then band-pass filter the residuals.
        reg_data = linear_regression(data=dd_data,confound=dd_confound)
        pre_davrs=compute_dvars(datat = datamatrix)
        filtered_data = butter_bandpass(data=reg_data,fs=1/tr,lowpass=lowpass,highpass=highpass,order=2)
        # BUG FIX: pass the raw data matrix as the carpet image -- previously
        # the dvars vector itself was passed as fdata, unlike the TMP branch.
        plot_svg(fdata=datamatrix,fd=fd,dvars=pre_davrs,filename=pre_svg,tr=tr)
        reg_davrs=compute_dvars(datat = filtered_data)
        plot_svg(fdata=filtered_data,fd=fd,dvars=reg_davrs,filename=post_svg,tr=tr)
        write_gifti_cifti(data_matrix=filtered_data,template=datafile,filename=outfilename)
    return outfilename
def demean_detrend_data(data, TR, order=1):
    '''
    Remove the mean and a polynomial trend from each row of *data*.

    data : voxels/vertices by timepoints array (also works for timeseries)
    TR   : repetition time in seconds
    order: polynomial order for the detrend; usually obtained from
           order = floor(1 + TR*nVOLS / 150)
    '''
    # Demean only when the data do not already look demeaned.
    if np.mean(data) > 0.00000000001:
        row_means = np.mean(data, axis=1)
        demeaned = data - np.outer(row_means, np.ones(data.shape[1]))
    else:
        demeaned = data
    # Fit and subtract a polynomial trend per row.
    timepoints = np.linspace(0, (data.shape[1] - 1) * TR, num=data.shape[1])
    trend = np.zeros_like(demeaned)
    for row in range(demeaned.shape[0]):
        coeffs = np.polyfit(timepoints, demeaned[row, :], order)
        trend[row, :] = np.polyval(coeffs, timepoints)
    return demeaned - trend
from scipy.signal import butter, filtfilt
def butter_bandpass(data, fs, lowpass, highpass, order=2):
    '''
    Band-pass filter each row of *data* with a zero-phase Butterworth filter.

    data : voxels/vertices by timepoints dimension
    fs : sampling frequency, =1/TR(s)
    lowpass : lowpass cutoff frequency (Hz)
    highpass : highpass cutoff frequency (Hz)
    order : Butterworth filter order

    The per-row mean is removed by the band-pass and added back afterwards.
    '''
    nyq = 0.5 * fs
    # np.float was removed in NumPy >= 1.20; use the builtin float instead.
    lowcut = float(highpass) / nyq
    highcut = float(lowpass) / nyq
    b, a = butter(order, [lowcut, highcut], btype='band')
    mean_data = np.mean(data, axis=1)
    y = np.zeros_like(data)
    # filtfilt runs forward and backward for zero phase shift.
    for i in range(data.shape[0]):
        y[i, :] = filtfilt(b, a, data[i, :])
    # add the row means back
    mean_datag = np.outer(mean_data, np.ones(data.shape[1]))
    return y + mean_datag
def linear_regression(data,confound):
    '''
    Regress the confound timeseries out of the data.

    both data and confound should be point/voxels/vertices by timepoints
    Returns the residuals (data minus the confound-predicted signal),
    same shape as *data*.
    '''
    # sklearn's LinearRegression fits an intercept by default, so the
    # residuals are also effectively demeaned per row.
    regr = LinearRegression()
    regr.fit(confound.T,data.T)
    y_pred = regr.predict(confound.T)
    return data - y_pred.T
def read_gifti_cifti(datafile):
    """Load a CIFTI dtseries or GIFTI func file as vertices-by-timepoints.

    datafile: path ending in '.dtseries.nii' (CIFTI-2) or '.func.gii'
    (GIFTI). Raises ValueError for any other extension -- previously an
    unknown extension fell through and raised an opaque UnboundLocalError.
    """
    if datafile.endswith('.dtseries.nii'):
        # CIFTI stores timepoints-by-vertices; transpose to vertices-by-time.
        data = nb.load(datafile).get_fdata().T
    elif datafile.endswith('.func.gii'):
        data = nb.load(datafile).agg_data()
    else:
        raise ValueError('Unsupported file type: %s' % datafile)
    return data
def write_gifti_cifti(data_matrix,template,filename):
    '''
    Write a vertices-by-timepoints matrix out as CIFTI or GIFTI.

    data matrix: vertices by timepoint
    template: path to a real file, loaded with nibabel to reuse its header
        and file map (the output format follows the template's extension)
    filename ; name of the output
    '''
    if template.endswith('.dtseries.nii'):
        from nibabel.cifti2 import Cifti2Image
        template=nb.load(template)
        # CIFTI expects timepoints-by-vertices, hence the transpose
        dataimg=Cifti2Image(dataobj=data_matrix.T,header=template.header,
                file_map=template.file_map,nifti_header=template.nifti_header)
    elif template.endswith('.func.gii'):
        template=nb.load(template)
        dataimg=nb.gifti.GiftiImage(header=template.header,file_map=template.file_map,extra=template.extra)
        # one GIFTI data array per timepoint
        for i in range(data_matrix.shape[1]):
            d_timepoint=nb.gifti.GiftiDataArray(data=np.asarray(data_matrix[:,i]),intent='NIFTI_INTENT_TIME_SERIES')
            dataimg.add_gifti_data_array(d_timepoint)
    # NOTE(review): any other extension leaves `dataimg` unbound and raises
    # UnboundLocalError here -- confirm whether more formats are expected.
    dataimg.to_filename(filename)
    return filename
def compute_dvars(datat):
    '''
    Compute DVARS: the RMS across points of the temporal difference signal.

    datat should be points by timepoints; returns one value per timepoint,
    with 0 for the first frame (no preceding frame to difference against).
    '''
    n_points = datat.shape[0]
    # Backward temporal differences, padded with a zero first column.
    diffs = np.diff(datat, axis=1)
    padded = np.hstack((np.zeros((n_points, 1)), diffs))
    # Mean square over points per timepoint, then root.
    return np.sqrt(np.square(padded).sum(axis=0) / n_points)
def plot_carpet(func_data,detrend=True, nskip=0, size=(950, 800),
                subplot=None, title=None, output_file=None, legend=False,
                tr=None):
    """
    Plot an image representation of voxel intensities across time also known
    as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
    2017 Jul 1; 154:150-158.
    Parameters
    ----------
    func_data : array-like
        Functional data; reshaped to (voxels, timepoints) for display.
    detrend : boolean, optional
        Detrend and standardize the data prior to plotting.
    nskip : int
        Number of volumes at the beginning of the scan marked as nonsteady
        state. NOTE(review): currently unused in this implementation.
    size : tuple
        Maximum (voxel, time) resolution of the plot; larger data are
        decimated to fit.
    subplot : matplotlib gridspec slot, optional
        The slot used to display the plot. If None, a fresh 1x1 GridSpec
        slot is used.
    title : string, optional
        The title displayed on the figure. NOTE(review): currently unused.
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    legend : bool
        Whether to reserve an extra grid column for a legend panel.
    tr : float , optional
        Specify the TR, if specified it uses this value. If left as None,
        # Frames is plotted instead of time.
    """
    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.
    ntsteps = func_data.shape[-1]
    data = func_data.reshape(-1, ntsteps)
    # Decimate rows/columns so the image fits within `size`.
    # NOTE(review): p_dec/t_dec are always >= 1, so both conditions are
    # always true; the step-1 slice is a harmless copy.
    p_dec = 1 + data.shape[0] // size[0]
    if p_dec:
        data = data[::p_dec, :]
    t_dec = 1 + data.shape[1] // size[1]
    if t_dec:
        data = data[:, ::t_dec]
    # Detrend data
    v = (None, None)
    if detrend:
        data = clean(data.T, t_r=tr).T
        v = (-2, 2)
    # If subplot is not defined
    if subplot is None:
        subplot = mgs.GridSpec(1, 1)[0]
    # Define nested GridSpec
    wratios = [1, 100, 20]
    gs = mgs.GridSpecFromSubplotSpec(1, 2 + int(legend), subplot_spec=subplot,
                                     width_ratios=wratios[:2 + int(legend)],
                                     wspace=0.0)
    # NOTE(review): `mycolors` is unused here; cm.get_cmap is deprecated in
    # newer matplotlib (use matplotlib.colormaps) -- confirm before removal.
    mycolors = ListedColormap(cm.get_cmap('Set1').colors[:3][::-1])
    # Carpet plot
    ax1 = plt.subplot(gs[1])
    ax1.imshow(data, interpolation='nearest', aspect='auto', cmap='gray',
               vmin=v[0], vmax=v[1])
    ax1.grid(False)
    ax1.set_yticks([])
    ax1.set_yticklabels([])
    # Set 10 frame markers in X axis
    interval = max((int(data.shape[-1] + 1) //
                    10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    ax1.set_xticks(xticks)
    if notr:
        ax1.set_xlabel('time (frame #)')
    else:
        ax1.set_xlabel('time (s)')
        # Tick labels account for the time-axis decimation factor.
        labels = tr * (np.array(xticks)) * t_dec
        ax1.set_xticklabels(['%.02f' % t for t in labels.tolist()], fontsize=10)
    # Remove and redefine spines
    for side in ["top", "right"]:
        ax1.spines[side].set_color('none')
        ax1.spines[side].set_visible(False)
    ax1.yaxis.set_ticks_position('left')
    ax1.xaxis.set_ticks_position('bottom')
    ax1.spines["bottom"].set_visible(False)
    ax1.spines["left"].set_color('none')
    ax1.spines["left"].set_visible(False)
    if output_file is not None:
        figure = plt.gcf()
        figure.savefig(output_file, bbox_inches='tight')
        plt.close(figure)
        figure = None
        return output_file
    return [ax1], gs
def confoundplot(tseries, gs_ts, gs_dist=None, name=None,
                 units=None, tr=None, hide_x=True, color='b', nskip=0,
                 cutoff=None, ylims=None):
    """Plot a single confound timeseries into grid slot *gs_ts*, annotated
    with max/mean/sigma stats and the 95th percentile line; optionally
    draw its distribution into *gs_dist*.

    NOTE(review): `nskip` is currently unused in this implementation.
    """
    # Define TR and number of frames
    notr = False
    if tr is None:
        notr = True
        tr = 1.
    ntsteps = len(tseries)
    tseries = np.array(tseries)
    # Define nested GridSpec
    gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts,
                                     width_ratios=[1, 100], wspace=0.0)
    ax_ts = plt.subplot(gs[1])
    ax_ts.grid(False)
    # Set 10 frame markers in X axis
    interval = max((ntsteps // 10, ntsteps // 5, 1))
    xticks = list(range(0, ntsteps)[::interval])
    ax_ts.set_xticks(xticks)
    if not hide_x:
        if notr:
            ax_ts.set_xlabel('time (frame #)')
        else:
            ax_ts.set_xlabel('time (s)')
            labels = tr * np.array(xticks)
            ax_ts.set_xticklabels(['%.02f' % t for t in labels.tolist()])
    else:
        ax_ts.set_xticklabels([])
    # Left-aligned series name annotation (e.g. "FD [mm]")
    if name is not None:
        if units is not None:
            name += ' [%s]' % units
        ax_ts.annotate(
            name, xy=(0.0, 0.7), xytext=(0, 0), xycoords='axes fraction',
            textcoords='offset points', va='center', ha='left',
            color=color, size=20,
            bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none',
                  'color': 'none', 'lw': 0, 'alpha': 0.8})
    # Strip the plot frame down to a bare timeseries panel
    for side in ["top", "right"]:
        ax_ts.spines[side].set_color('none')
        ax_ts.spines[side].set_visible(False)
    if not hide_x:
        ax_ts.spines["bottom"].set_position(('outward', 20))
        ax_ts.xaxis.set_ticks_position('bottom')
    else:
        ax_ts.spines["bottom"].set_color('none')
        ax_ts.spines["bottom"].set_visible(False)
    # ax_ts.spines["left"].set_position(('outward', 30))
    ax_ts.spines["left"].set_color('none')
    ax_ts.spines["left"].set_visible(False)
    # ax_ts.yaxis.set_ticks_position('left')
    ax_ts.set_yticks([])
    ax_ts.set_yticklabels([])
    # Compute display limits and summary stats over non-NaN samples only
    nonnan = tseries[~np.isnan(tseries)]
    if nonnan.size > 0:
        # Calculate Y limits
        valrange = (nonnan.max() - nonnan.min())
        def_ylims = [nonnan.min() - 0.1 * valrange,
                     nonnan.max() + 0.1 * valrange]
        if ylims is not None:
            if ylims[0] is not None:
                def_ylims[0] = min([def_ylims[0], ylims[0]])
            if ylims[1] is not None:
                def_ylims[1] = max([def_ylims[1], ylims[1]])
        # Add space for plot title and mean/SD annotation
        def_ylims[0] -= 0.1 * (def_ylims[1] - def_ylims[0])
        ax_ts.set_ylim(def_ylims)
        # Annotate stats
        maxv = nonnan.max()
        mean = nonnan.mean()
        stdv = nonnan.std()
        p95 = np.percentile(nonnan, 95.0)
    else:
        maxv = 0
        mean = 0
        stdv = 0
        p95 = 0
    stats_label = (r'max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} '
                   r'$\bullet$ $\sigma$: {sigma:.3f}').format(
        max=maxv, mean=mean, units=units or '', sigma=stdv)
    ax_ts.annotate(
        stats_label, xy=(0.98, 0.7), xycoords='axes fraction',
        xytext=(0, 0), textcoords='offset points',
        va='center', ha='right', color=color, size=10,
        bbox={'boxstyle': 'round', 'fc': 'w', 'ec': 'none', 'color': 'none',
              'lw': 0, 'alpha': 0.8}
    )
    # Annotate percentile 95
    ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=.1, color='lightgray')
    ax_ts.annotate(
        '%.2f' % p95, xy=(0, p95), xytext=(-1, 0),
        textcoords='offset points', va='center', ha='right',
        color='lightgray', size=3)
    # Draw each requested cutoff threshold as a horizontal line
    if cutoff is None:
        cutoff = []
    for i, thr in enumerate(cutoff):
        ax_ts.plot((0, ntsteps - 1), [thr] * 2,
                   linewidth=.2, color='dimgray')
        ax_ts.annotate(
            '%.2f' % thr, xy=(0, thr), xytext=(-1, 0),
            textcoords='offset points', va='center', ha='right',
            color='dimgray', size=3)
    ax_ts.plot(tseries, color=color, linewidth=1.5)
    ax_ts.set_xlim((0, ntsteps - 1))
    if gs_dist is not None:
        ax_dist = plt.subplot(gs_dist)
        # NOTE(review): seaborn.displot is a figure-level function and does
        # not accept vertical=/ax= -- this branch likely needs
        # sns.histplot/kdeplot on ax_dist; confirm with the seaborn version.
        sns.displot(tseries, vertical=True, ax=ax_dist)
        ax_dist.set_xlabel('Timesteps')
        ax_dist.set_ylim(ax_ts.get_ylim())
        ax_dist.set_yticklabels([])
        return [ax_ts, ax_dist], gs
    return ax_ts, gs
def plot_svg(fdata,fd,dvars,filename,tr=1):
    '''
    plot carpetplot with fd and dvars

    fdata: functional data matrix for the carpet panel (bottom)
    fd: framewise displacement trace (top panel)
    dvars: DVARS trace (middle panel)
    filename: output SVG path
    tr: repetition time in seconds
    '''
    fig = plt.figure(constrained_layout=False, figsize=(30, 15))
    # Three stacked rows: two thin confound panels over one tall carpet panel
    grid = mgs.GridSpec(3, 1, wspace=0.0, hspace=0.05,
                        height_ratios=[1] * (3 - 1) + [5])
    confoundplot(fd, grid[0], tr=tr, color='b', name='FD')
    confoundplot(dvars, grid[1], tr=tr, color='r', name='DVARS')
    plot_carpet(func_data=fdata,subplot=grid[-1], tr=tr,)
    fig.savefig(filename,bbox_inches="tight", pad_inches=None)
|
"""
A Vocabulary maps strings to integers, allowing for strings to be mapped to an
out-of-vocabulary token.
"""
import codecs
import gzip
import hashlib
import logging
import os
from collections import defaultdict
from enum import Enum
from pathlib import Path
from typing import Callable, Any, List, Dict, Union, Sequence, Set, Optional, Iterable
import numpy
import torch
from allennlp.common.file_utils import cached_path
from allennlp.common.tqdm import Tqdm
from allennlp.common.util import namespace_match
from allennlp.data import instance as adi # pylint: disable=unused-import
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = "@@PADDING@@"
DEFAULT_OOV_TOKEN = "@@UNKNOWN@@"
NAMESPACE_PADDING_FILE = 'non_padded_namespaces.txt'
UNK_COUNT_FILE = 'unk_count_namespaces.txt'
TOKEN_INDEX_SEP = '<(-.-)>'
class MappingMode(Enum):
    """Direction of a vocabulary mapping to initialize: token->index
    or index->token (see ``_NamespaceDependentDefaultDict.initialize_dictionary``).
    """
    token2index = 1
    index2token = 2
class _NamespaceDependentDefaultDict(defaultdict):
    """
    This is a `defaultdict
    <https://docs.python.org/2/library/collections.html#collections.defaultdict>`_ where the
    default value is dependent on the key that is passed.
    We use "namespaces" in the :class:`Vocabulary` object to keep track of several different
    mappings from strings to integers, so that we have a consistent API for mapping words, tags,
    labels, characters, or whatever else you want, into integers. The issue is that some of those
    namespaces (words and characters) should have integers reserved for padding and
    out-of-vocabulary tokens, while others (labels and tags) shouldn't. This class allows you to
    specify filters on the namespace (the key used in the ``defaultdict``), and use different
    default values depending on whether the namespace passes the filter.
    To do filtering, we take a sequence of ``non_padded_namespaces``. This is a list or tuple of
    strings that are either matched exactly against the keys, or treated as suffixes, if the
    string starts with ``*``. In other words, if ``*tags`` is in ``non_padded_namespaces`` then
    ``passage_tags``, ``question_tags``, etc. (anything that ends with ``tags``) will have the
    ``non_padded`` default value.
    Parameters
    ----------
    non_padded_namespaces : ``Sequence[str]``
        A list or tuple of strings describing which namespaces are not padded. If a namespace
        (key) is missing from this dictionary, we will use :func:`namespace_match` to see whether
        the namespace should be padded. If the given namespace matches any of the strings in this
        list, we will use ``non_padded_function`` to initialize the value for that namespace, and
        we will use ``padded_function`` otherwise.
    padded_function : ``Callable[[], Any]``
        A zero-argument function to call to initialize a value for a namespace that `should` be
        padded.
    non_padded_function : ``Callable[[], Any]``
        A zero-argument function to call to initialize a value for a namespace that should `not` be
        padded.
    padding_token : ``str``, optional (default=DEFAULT_PADDING_TOKEN)
        Token reserved for index 0 in padded namespaces.
    oov_token : ``str``, optional (default=DEFAULT_OOV_TOKEN)
        Out-of-vocabulary token; its ``@`` characters are stripped here and
        re-added by the ``@@<oov>#<i>@@`` UNK-bucket key format.
    """
    def __init__(self,
                 non_padded_namespaces: Sequence[str],
                 padded_function: Callable[[], Any],
                 non_padded_function: Callable[[], Any],
                 padding_token: str = DEFAULT_PADDING_TOKEN,
                 oov_token: str = DEFAULT_OOV_TOKEN) -> None:
        self._non_padded_namespaces = non_padded_namespaces
        self.padding_token = padding_token
        # Strip '@' so UNK-bucket keys can re-wrap as "@@<oov>#<i>@@"
        # (see RandomHashDict and initialize_dictionary below).
        self.oov_token = oov_token.replace('@', '')
        self._padded_function = padded_function
        self._non_padded_function = non_padded_function
        super(_NamespaceDependentDefaultDict, self).__init__()
    def __missing__(self, key: str):
        """Create the default value for *key* depending on whether the
        namespace matches any of the non-padded patterns."""
        if any(namespace_match(pattern, key) for pattern in self._non_padded_namespaces):
            value = self._non_padded_function()
        else:
            value = self._padded_function()
        dict.__setitem__(self, key, value)
        return value
    def initialize_dictionary(self, namespace: str, unk_num: int, mode: MappingMode):
        """Pre-seed *namespace* with padding (index 0) and ``unk_num`` UNK
        buckets; non-padded namespaces get an empty plain dict.

        ``mode`` selects the mapping direction: token2index uses a
        ``RandomHashDict`` so unseen tokens hash into a bucket; index2token
        is a plain dict from index to bucket key.
        """
        if mode == MappingMode.token2index:
            if any(namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces):
                dict.__setitem__(self, namespace, {})
            else:
                init_namespace_dictionary = RandomHashDict(unk_num=unk_num, oov_token=self.oov_token)
                init_namespace_dictionary.update({self.padding_token: 0})
                init_namespace_dictionary.add_unk_tokens()
                dict.__setitem__(self, namespace, init_namespace_dictionary)
        elif mode == MappingMode.index2token:
            if any(namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces):
                dict.__setitem__(self, namespace, {})
            else:
                init_namespace_dictionary = {0: self.padding_token}
                for i in range(unk_num):
                    init_namespace_dictionary[len(init_namespace_dictionary)] = f"@@{self.oov_token}#{str(i)}@@"
                dict.__setitem__(self, namespace, init_namespace_dictionary)
class RandomHashDict(dict):
    """A ``dict`` whose missing keys fall back to one of ``unk_num``
    UNK-bucket entries, chosen by a stable SHA-1 hash of the key.

    Bucket keys have the form ``@@<oov>#<i>@@`` where ``<oov>`` is
    ``oov_token`` with its ``@`` characters stripped.  With ``unk_num == 0``
    the class behaves exactly like a plain dict.
    """
    def __init__(self, unk_num: int = 1, oov_token: str = DEFAULT_OOV_TOKEN, **kwargs):
        super().__init__(**kwargs)
        self.unk_num = unk_num
        self.oov_token = oov_token.replace('@', '')
    def add_unk_tokens(self):
        """Append one entry per UNK bucket; ids continue after existing entries."""
        for bucket in range(self.unk_num):
            self[f"@@{self.oov_token}#{bucket}@@"] = len(self)
    def __getitem__(self, key):
        # Known keys (or hashing disabled) resolve normally; any other key
        # is redirected to its deterministic UNK bucket.
        if self.unk_num and not dict.__contains__(self, key):
            key = self.hash_string(key, self.unk_num)
        return dict.__getitem__(self, key)
    def hash_string(self, input_str, unk_num):
        """Return the UNK-bucket key for *input_str* (stable across processes,
        unlike the builtin ``hash``)."""
        digest = hashlib.sha1(input_str.encode('utf-8')).hexdigest()
        return f"@@{self.oov_token}#{int(digest, 16) % unk_num}@@"
    def __str__(self) -> str:
        return super().__str__()
def _read_pretrained_words(embeddings_filename: str) -> Set[str]:
    """Return the set of words named in a gzipped embedding file.

    Each line is expected to start with the word; only the first
    space-separated field of every line is kept.
    """
    vocabulary: Set[str] = set()
    with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:
        for raw_line in embeddings_file:
            vocabulary.add(raw_line.decode('utf-8').strip().split(' ')[0])
    return vocabulary
class ExVocabulary:
    """A per-namespace token<->index vocabulary with hashed multi-UNK support.

    Padded namespaces reserve index 0 for the padding token and the next
    ``unk_token_num[namespace]`` indices for UNK buckets (a ``RandomHashDict``
    routes unseen tokens to a bucket via a stable hash); namespaces matching
    ``non_padded_namespaces`` start empty.

    Parameters
    ----------
    counter : ``Dict[str, Dict[str, int]]``, optional
        Token counts per namespace used to populate the vocabulary.
    min_count : ``Dict[str, int]``, optional
        Minimum count required to keep a token, per namespace (default 1).
    max_vocab_size : ``Union[int, Dict[str, int]]``, optional
        Maximum number of tokens kept per namespace (most frequent first);
        a plain int applies to every namespace.
    non_padded_namespaces : ``Sequence[str]``, optional
        Namespace names (or ``*``-prefixed suffix patterns) that get no
        padding/OOV entries.
    pretrained_files : ``Dict[str, str]``, optional
        Per-namespace pretrained embedding files used to filter tokens.
    only_include_pretrained_words : ``bool``, optional
        If True, keep only tokens that also appear in the pretrained file.
    unk_token_num : ``Dict[str, int]``, optional
        Number of UNK buckets per namespace; defaults to 1 per namespace.
    """
    def __init__(self,
                 counter: Dict[str, Dict[str, int]] = None,
                 min_count: Dict[str, int] = None,
                 max_vocab_size: Union[int, Dict[str, int]] = None,
                 non_padded_namespaces: Sequence[str] = DEFAULT_NON_PADDED_NAMESPACES,
                 pretrained_files: Optional[Dict[str, str]] = None,
                 only_include_pretrained_words: bool = False,
                 unk_token_num: Dict[str, int] = None) -> None:
        self._padding_token = DEFAULT_PADDING_TOKEN
        self._oov_token = DEFAULT_OOV_TOKEN
        if not isinstance(max_vocab_size, dict):
            int_max_vocab_size = max_vocab_size
            max_vocab_size = defaultdict(lambda: int_max_vocab_size)  # type: ignore
        self._non_padded_namespaces = non_padded_namespaces
        # BUG FIX: with the default ``unk_token_num=None`` the original code
        # crashed on ``self.unk_token_num.keys()`` below, because the
        # None-check inside _initialize_dictionary ran too late.
        if unk_token_num is None:
            unk_token_num = defaultdict(lambda: 1)
        self.unk_token_num = unk_token_num
        self._initialize_dictionary(list(self.unk_token_num.keys()), non_padded_namespaces,
                                    self._padding_token, self._oov_token)
        min_count = min_count or {}
        pretrained_files = pretrained_files or {}
        if counter is not None:
            for namespace in counter:
                if namespace in pretrained_files:
                    pretrained_list = _read_pretrained_words(pretrained_files[namespace])
                else:
                    pretrained_list = None
                # Most frequent tokens first, truncated to the namespace cap.
                token_counts = list(counter[namespace].items())
                token_counts.sort(key=lambda x: x[1], reverse=True)
                max_vocab = max_vocab_size[namespace]
                if max_vocab:
                    token_counts = token_counts[:max_vocab]
                for token, count in token_counts:
                    if pretrained_list is not None:
                        if only_include_pretrained_words:
                            if token in pretrained_list and count >= min_count.get(namespace, 1):
                                self.add_token_to_namespace(token, namespace)
                        elif token in pretrained_list or count >= min_count.get(namespace, 1):
                            self.add_token_to_namespace(token, namespace)
                    elif count >= min_count.get(namespace, 1):
                        self.add_token_to_namespace(token, namespace)
    def _initialize_dictionary(self, namespaces: List[str], non_padded_namespaces, padding_token, oov_token):
        """Create the forward/backward mapping dicts and pre-seed padding and
        UNK-bucket entries for every namespace listed in ``namespaces``."""
        self._token_to_index = _NamespaceDependentDefaultDict(
            non_padded_namespaces,
            lambda: {padding_token: 0, oov_token: 1},
            lambda: {},
            padding_token,
            oov_token
        )
        self._index_to_token = _NamespaceDependentDefaultDict(
            non_padded_namespaces,
            lambda: {0: padding_token, 1: oov_token},
            lambda: {},
            padding_token,
            oov_token
        )
        if self.unk_token_num is None:
            # Defensive only — __init__ already defaults this.
            self.unk_token_num = defaultdict(lambda: 1)
        for namespace in namespaces:
            cur_unk_num = self.unk_token_num.get(namespace, 1)
            self._token_to_index.initialize_dictionary(namespace, cur_unk_num, MappingMode.token2index)
            self._index_to_token.initialize_dictionary(namespace, cur_unk_num, MappingMode.index2token)
    def save_to_files(self, directory: str) -> None:
        """
        Persist this Vocabulary to files so it can be reloaded later.
        Each namespace corresponds to one file; non-padded-namespace patterns
        and per-namespace UNK counts get their own metadata files.
        Parameters
        ----------
        directory : ``str``
            The directory where we save the serialized vocabulary.
        """
        os.makedirs(directory, exist_ok=True)
        if os.listdir(directory):
            logging.warning("vocabulary serialization directory %s is not empty", directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', 'utf-8') as namespace_file:
            for namespace_str in self._non_padded_namespaces:
                print(namespace_str, file=namespace_file)
        with codecs.open(os.path.join(directory, UNK_COUNT_FILE), 'w', 'utf-8') as unk_count_file:
            for namespace_str, unk_count_value in self.unk_token_num.items():
                print(namespace_str + '###' + str(unk_count_value), file=unk_count_file)
        for namespace, mapping in self._index_to_token.items():
            if namespace in self._token_to_index:
                # The two directions must agree before serializing.
                assert len(self._token_to_index[namespace]) == len(self._index_to_token[namespace])
            # Each namespace gets written to its own file, in index order.
            with codecs.open(os.path.join(directory, namespace + '.txt'), 'w', 'utf-8') as token_file:
                num_tokens = len(mapping)
                for i in range(num_tokens):
                    if namespace in self._token_to_index:
                        # Round-trip check: token->index->token is the identity.
                        assert mapping[i] == self._index_to_token[namespace][self._token_to_index[namespace][mapping[i]]]
                    print(mapping[i].replace('\n', '@@NEWLINE@@') + TOKEN_INDEX_SEP + str(i), file=token_file)
    @classmethod
    def from_files(cls, directory: str) -> 'ExVocabulary':
        """
        Loads a ``Vocabulary`` that was serialized using ``save_to_files``.
        Parameters
        ----------
        directory : ``str``
            The directory containing the serialized vocabulary.
        """
        logger.info("Loading token dictionary from %s.", directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', 'utf-8') as namespace_file:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]
        unk_token_num = dict()
        with codecs.open(os.path.join(directory, UNK_COUNT_FILE), 'r', 'utf-8') as unk_count_file:
            for unk_count in unk_count_file:
                namespace_str, unk_count_value = unk_count.split('###')[0], int(unk_count.split('###')[1])
                unk_token_num[namespace_str] = unk_count_value
        vocab = ExVocabulary(non_padded_namespaces=non_padded_namespaces, unk_token_num=unk_token_num)
        # Check every file in the directory, skipping the metadata files.
        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            elif namespace_filename == UNK_COUNT_FILE:
                continue
            elif namespace_filename == 'weights':
                continue  # This is used for saved weights
            namespace = namespace_filename.replace('.txt', '')
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, namespace=namespace)
        return vocab
    def set_from_file(self,
                      filename: str,
                      namespace: str = "tokens"):
        """
        Overwrite/extend one namespace from a vocabulary file written by
        ``save_to_files`` (lines of ``token<(-.-)>index``, with newlines in
        tokens escaped as ``@@NEWLINE@@``).
        Parameters
        ----------
        filename : ``str``
            The file containing the vocabulary to load, one
            ``token<(-.-)>index`` pair per line.
        namespace : ``str``, optional (default="tokens")
            What namespace should we overwrite with this vocab file?
        """
        with codecs.open(filename, 'r', 'utf-8') as input_file:
            lines = input_file.read().split('\n')
        # Be flexible about having final newline or not
        if lines and lines[-1] == '':
            lines = lines[:-1]
        for i, line in enumerate(lines):
            row = line.split(TOKEN_INDEX_SEP)
            token, index = row[0], int(row[1])
            token = token.replace('@@NEWLINE@@', '\n')
            if token not in self._token_to_index[namespace]:
                self._token_to_index[namespace][token] = index
                self._index_to_token[namespace][index] = token
            else:
                # Pre-seeded entries (padding / UNK buckets) must agree with
                # what was serialized.
                assert self._token_to_index[namespace][token] == index
                assert self._index_to_token[namespace][index] == token
        assert len(self._token_to_index) == len(self._index_to_token)
    @classmethod
    def from_instances(cls,
                       instances: Iterable['adi.Instance'],
                       min_count: Dict[str, int] = None,
                       max_vocab_size: Union[int, Dict[str, int]] = None,
                       non_padded_namespaces: Sequence[str] = DEFAULT_NON_PADDED_NAMESPACES,
                       pretrained_files: Optional[Dict[str, str]] = None,
                       only_include_pretrained_words: bool = False,
                       unk_token_num: Dict[str, int] = None,
                       exclude_namespaces=None,
                       include_namespaces=None) -> 'ExVocabulary':
        """
        Constructs a vocabulary given a collection of `Instances` and some parameters.
        We count all of the vocabulary items in the instances, then pass those counts
        and the other parameters, to :func:`__init__`. See that method for a description
        of what the other parameters do. ``exclude_namespaces`` drops counts for the
        named namespaces; ``include_namespaces`` drops counts for every other namespace.
        """
        logger.info("Fitting token dictionary from dataset.")
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        if exclude_namespaces is not None:
            for namespace in namespace_token_counts:
                if namespace in exclude_namespaces:
                    namespace_token_counts[namespace] = dict()
        if include_namespaces is not None:
            # BUG FIX: this branch previously referenced the stale loop
            # variable ``namespace`` from the exclude loop instead of
            # iterating over all namespaces itself.
            for namespace in namespace_token_counts:
                if namespace not in include_namespaces:
                    namespace_token_counts[namespace] = dict()
        print("Start counting for namespaces:")
        for namespace, counter in namespace_token_counts.items():
            if len(counter) != 0:
                print(namespace)
        return ExVocabulary(counter=namespace_token_counts,
                            min_count=min_count,
                            max_vocab_size=max_vocab_size,
                            non_padded_namespaces=non_padded_namespaces,
                            pretrained_files=pretrained_files,
                            only_include_pretrained_words=only_include_pretrained_words,
                            unk_token_num=unk_token_num)
    def is_padded(self, namespace: str) -> bool:
        """
        Returns whether or not there are padding and OOV tokens added to the given namespace.
        """
        return self._index_to_token[namespace][0] == self._padding_token
    def add_token_to_namespace(self, token: str, namespace: str = 'tokens') -> int:
        """
        Adds ``token`` to the index, if it is not already present. Either way, we return the index of
        the token.
        """
        if not isinstance(token, str):
            raise ValueError("Vocabulary tokens must be strings, or saving and loading will break."
                             " Got %s (with type %s)" % (repr(token), type(token)))
        if token not in self._token_to_index[namespace]:
            index = len(self._token_to_index[namespace])
            self._token_to_index[namespace][token] = index
            self._index_to_token[namespace][index] = token
            return index
        else:
            return self._token_to_index[namespace][token]
    def change_token_with_index_to_namespace(self, token: str, index: int, namespace: str = 'tokens') -> int:
        """
        Force ``token`` to occupy ``index`` in ``namespace``. Returns 0 if the
        pair is already set; raises ``ValueError`` if the index is taken by a
        different token or is negative.
        """
        if not isinstance(token, str):
            raise ValueError("Vocabulary tokens must be strings, or saving and loading will break."
                             " Got %s (with type %s)" % (repr(token), type(token)))
        # BUG FIX: the original tested ``index >= 0`` here, which made every
        # valid (non-negative, unused) index raise; invalid means negative.
        if index in self._index_to_token[namespace] or index < 0:
            if self._token_to_index[namespace][token] == index and \
                    self._index_to_token[namespace][index] == token:
                return 0  # Already changed
            raise ValueError(f"Index: {index} already exists or is invalid in the {namespace}, "
                             f"can not set special Token: {token}")
        if token in self._token_to_index[namespace]:
            # Token moves: drop its old reverse-mapping entry first.
            self._index_to_token[namespace].pop(self._token_to_index[namespace][token])
        self._token_to_index[namespace][token] = index
        self._index_to_token[namespace][index] = token
    def get_index_to_token_vocabulary(self, namespace: str = 'tokens') -> Dict[int, str]:
        return self._index_to_token[namespace]
    def get_token_to_index_vocabulary(self, namespace: str = 'tokens') -> Dict[str, int]:
        return self._token_to_index[namespace]
    def get_token_index(self, token: str, namespace: str = 'tokens') -> int:
        if token in self._token_to_index[namespace]:
            return self._token_to_index[namespace][token]
        elif namespace in self.unk_token_num:
            # RandomHashDict hashes the unseen token into one of the
            # namespace's UNK buckets.
            return self._token_to_index[namespace][token]
        else:
            try:
                return self._token_to_index[namespace][self._oov_token]
            except KeyError:
                logger.error('Namespace: %s', namespace)
                logger.error('Token: %s', token)
                raise
    def get_token_from_index(self, index: int, namespace: str = 'tokens') -> str:
        return self._index_to_token[namespace][index]
    def get_vocab_size(self, namespace: str = 'tokens') -> int:
        return len(self._token_to_index[namespace])
    def __eq__(self, other):
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False
    def __str__(self) -> str:
        base_string = "Vocabulary with namespaces:\n"
        non_padded_namespaces = f"\tNon Padded Namespaces: {self._non_padded_namespaces}\n"
        namespaces = [f"\tNamespace: {name}, Size: {self.get_vocab_size(name)} \n"
                      for name in self._index_to_token]
        return " ".join([base_string, non_padded_namespaces] + namespaces)
def read_normal_embedding_file(embeddings_filename: str, # pylint: disable=invalid-name
                               embedding_dim: int,
                               vocab: ExVocabulary,
                               namespace: str = "tokens") -> torch.FloatTensor:
    """Build a ``(vocab_size, embedding_dim)`` embedding matrix from a
    plain-text ``word v1 v2 ... vN`` embedding file.

    Words found in the file keep their pretrained vector; every other row is
    drawn from a normal distribution whose mean/std match the found vectors.

    Raises ``ValueError`` if no line has the expected dimension.
    """
    words_to_keep = set(vocab.get_index_to_token_vocabulary(namespace).values())
    vocab_size = vocab.get_vocab_size(namespace)
    embeddings = {}
    # First we read the embeddings from the file, only keeping vectors for the words we need.
    logger.info("Reading embeddings from file")
    with open(embeddings_filename, 'rb') as embeddings_file:
        for line in embeddings_file:
            fields = line.decode('utf-8').rstrip().split(' ')
            if len(fields) - 1 != embedding_dim:
                # Sometimes there are funny unicode parsing problems that lead to different
                # fields lengths (e.g., a word with a unicode space character that splits
                # into more than one column). We skip those lines. Note that if you have
                # some kind of long header, this could result in all of your lines getting
                # skipped. It's hard to check for that here; you just have to look in the
                # embedding_misses_file and at the model summary to make sure things look
                # like they are supposed to.
                logger.warning("Found line with wrong number of dimensions (expected %d, was %d): %s",
                               embedding_dim, len(fields) - 1, line)
                continue
            word = fields[0]
            if word in words_to_keep:
                vector = numpy.asarray(fields[1:], dtype='float32')
                embeddings[word] = vector
    if not embeddings:
        raise ValueError("No embeddings of correct dimension found; you probably "
                         "misspecified your embedding_dim parameter, or didn't "
                         "pre-populate your Vocabulary")
    all_embeddings = numpy.asarray(list(embeddings.values()))
    embeddings_mean = float(numpy.mean(all_embeddings))
    embeddings_std = float(numpy.std(all_embeddings))
    # Now we initialize the weight matrix for an embedding layer, starting with random vectors,
    # then filling in the word vectors we just read.
    logger.info("Initializing pre-trained embedding layer")
    embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
                                                                            embeddings_std)
    for i in Tqdm.tqdm(range(0, vocab_size)):
        word = vocab.get_token_from_index(i, namespace)
        # If we don't have a pre-trained vector for this word, we'll just leave this row alone,
        # so the word has a random initialization.
        if word in embeddings:
            embedding_matrix[i] = torch.FloatTensor(embeddings[word])
        else:
            logger.debug("Word %s was not found in the embedding file. Initialising randomly.", word)
    # The weight matrix is initialized, so we construct and return the actual Embedding.
    return embedding_matrix
def build_vocab_embeddings(vocab: ExVocabulary, external_embd_path, embd_dim, namespaces=None, saved_path=None):
    """Read one pretrained embedding matrix per file in *external_embd_path*
    and, when *saved_path* is given, persist the vocabulary and weights.

    Parameters
    ----------
    vocab : ``ExVocabulary``
        Vocabulary whose tokens index the embedding rows.
    external_embd_path : str or list of str
        Embedding file path(s).
    embd_dim : int
        Expected embedding dimension.
    namespaces : str or list of str, optional
        Namespace for each embedding file (default ``'tokens'`` for all).
    saved_path : str or ``Path``, optional
        Directory to save the vocabulary plus a ``weights/`` subdirectory.

    Returns the list of weight tensors, one per embedding file.
    """
    if not isinstance(external_embd_path, list):
        external_embd_path = [external_embd_path]
    if namespaces is None:
        namespaces = ['tokens'] * len(external_embd_path)
    elif not isinstance(namespaces, list):
        namespaces = [namespaces] * len(external_embd_path)
    else:
        assert len(namespaces) == len(external_embd_path)
    weight_list = []
    for external_embd_file, namespace in zip(external_embd_path, namespaces):
        weight = read_normal_embedding_file(external_embd_file,
                                            embd_dim, vocab, namespace=namespace)
        weight_list.append(weight)
    assert len(weight_list) == len(external_embd_path)
    if saved_path is not None:
        # BUG FIX: accept plain strings too — the original applied ``/``
        # directly to ``saved_path`` (TypeError for str).
        saved_path = Path(saved_path)
        vocab.save_to_files(str(saved_path))
        (saved_path / "weights").mkdir(parents=True)
        # BUG FIX: the save loop now only runs when a destination was given;
        # previously it executed unconditionally and crashed on saved_path=None.
        for weight, external_embd_file in zip(weight_list, external_embd_path):
            embd_name = Path(external_embd_file).stem
            torch.save(weight, saved_path / "weights" / embd_name)
    return weight_list
def load_vocab_embeddings(saved_path):
    """Load an ``ExVocabulary`` and its weight tensors saved by
    ``build_vocab_embeddings``.

    Returns ``(vocab, weight_dict)`` where ``weight_dict`` maps each file
    name under ``saved_path/weights`` to its tensor.
    """
    # BUG FIX: coerce to Path up front; the original's
    # ``Path(saved_path / "weights")`` raised TypeError for str input.
    saved_path = Path(saved_path)
    vocab = ExVocabulary.from_files(saved_path)
    weight_dict = {}
    for file in (saved_path / "weights").iterdir():
        weight_dict[str(file.name)] = torch.load(file)
    return vocab, weight_dict
if __name__ == '__main__':
    # Smoke test: unseen tokens are hashed into one of 10 UNK buckets,
    # deterministically per token.
    demo = RandomHashDict(unk_num=10, oov_token=DEFAULT_OOV_TOKEN)
    demo.update({'@@PADDING@@': 0})
    demo.add_unk_tokens()
    print(demo)
    for token in ('hello', 'yes', 'ho', 'no', 'hello1', 'yes2', 'ho3', 'no4'):
        print(demo[token])
|
# String identifiers naming the available penalty modes.
NO_PENALTY = 'no_penalty'
HARD_CODED_PENALTY = 'hard_coded_penalty'
UNCERTAINTY_PENALTY = 'uncertainty_penalty'
POLICY_ENTROPY_PENALTY = 'policy_entropy_penalty'
from module import Module
# Config paths managed by this module (consumed by Module; presumably
# relative to the user's home directory — TODO confirm against Module).
CONFIGS = [
    '.config/kitty'
]
# Program name -> shell command used to install it.
PROGRAMS = {
    'kitty': 'brew install --cask kitty',
}
# Names of other modules this one depends on (none).
DEPENDENCIES = [
]
class Kitty(Module):
    """Setup module for the Kitty terminal emulator.

    Wires the kitty config path, its install command, and the (empty)
    dependency list into the generic ``Module`` machinery.
    """
    def __init__(self):
        super().__init__(
            'kitty',
            'Kitty terminal',
            CONFIGS,
            PROGRAMS,
            DEPENDENCIES,
        )
|
# Relational Operators
# Relational operators are used to evaluate conditions inside if statements.
# Some examples of relational operators are:
# == -> equals
# >= -> greater than/equal to
# <=, etc.

# BUG FIX: the line "= = -> equals" was left uncommented in the original,
# making the whole script a SyntaxError. It is now a comment:
# == -> equals
integer = int(input("Enter your integers"))
if integer == 18:
    print("Equal")
else:
    print("Not Equal")
# >= -> greater than/equal to
a = int(input("enter your number "))
if a >= 20:
    print("true")
else:
    print("false")
# < -> less than (prints "true" when b >= 20, "false" otherwise)
b = int(input("Enter your number "))
if b < 20:
    print("false")
else:
    print("true")
# REPEATED MATRIX VALUES
# O(M * N) time and O(N) space, where M -> number of rows and
# N -> number of columns in input matrix
def repeatedMatrixValues(matrix):
    """Return the first-row values that appear in every row and every column.

    Results come back in first-row order (dicts preserve insertion order).
    """
    # For each distinct value of the first row, tally how many rows plus
    # how many columns contain it; a value in every row AND every column
    # reaches exactly rows + cols.
    tally = {value: 0 for value in matrix[0]}
    rows, cols = len(matrix), len(matrix[0])
    for row in matrix:
        # A set gives O(1) membership and counts each value at most
        # once per row, regardless of duplicates within the row.
        present = set(row)
        for value in tally:
            if value in present:
                tally[value] += 1
    for j in range(cols):
        present = {matrix[i][j] for i in range(rows)}
        for value in tally:
            if value in present:
                tally[value] += 1
    return [value for value, hits in tally.items() if hits == rows + cols]
# Demo: element-wise addition of two complex double-precision vectors
# using pyJvsip (VSIPL bindings).
import pyJvsip as pv
N=6
# A: random complex vector of length N (7 is presumably the RNG seed,
# which keeps the OUTPUT transcript below reproducible — confirm in pyJvsip docs).
A = pv.create('cvview_d',N).randn(7)
# B: same shape as A, filled with 5.0; C: zero-filled result vector.
B = A.empty.fill(5.0)
C = A.empty.fill(0.0)
print('A = '+A.mstring('%+.2f'))
print('B = '+B.mstring('%+.2f'))
# Element-wise C = A + B; the result is written into C.
pv.add(A,B,C)
print('C = A+B')
print('C = '+C.mstring('%+.2f'))
""" OUTPUT
A = [+0.16+0.50i -0.21-0.75i -0.56-0.09i \
     +1.15+0.45i +0.10+0.43i +0.63-1.05i]
B = [+5.00+0.00i +5.00+0.00i +5.00+0.00i \
     +5.00+0.00i +5.00+0.00i +5.00+0.00i]
C = A+B
C = [+5.16+0.50i +4.79-0.75i +4.44-0.09i \
     +6.15+0.45i +5.10+0.43i +5.63-1.05i]
"""
"""Z terminal interface
Like ATI Session Manager.
Command usage:
zti [-h] [--nolog] [--noztirc | --rcfile rcfile] [host]
positional arguments:
host hostname[:port] to connect/go to
optional arguments:
-h, --help show this help message and exit
--nolog Do not set LOGDEST to zti.log
--noztirc Do not SOURCE .ztirc in home directory
--rcfile rcfile Filename to run using SOURCE
Create a command plugin by creating a "zti.commands" entry
point through setup.py that takes a single argument of a
command line string.
Environment variables used:
COLORTERM (see _termlib.py)
ESCDELAY
SESSION_PS_SIZE (see tnz.py)
TERM_PROGRAM (see _termlib.py)
TNZ_COLORS (see tnz.py)
TNZ_LOGGING (see tnz.py)
ZTI_AUTOSIZE
ZTI_SECLEVEL (see tnz.py)
ZTI_TITLE
_BPX_TERMPATH (see _termlib.py)
Copyright 2021 IBM Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
if __name__ == "__main__":
    # We want to support starting by 'python -m tnz.zti', but that can
    # generally be problematic because the zti module is imported as
    # __main__. Subsequent imports of tnz.zti will import as tnz.zti -
    # treating it as a different module. That causes a __main__.Zti and
    # a tnz.zti.Zti class. Since there is the intention for there to be
    # a single instance of Zti static data, it is undesirable to have
    # both instances of Zti. Route the main in tnz.zti (not __main__)
    # to use the one desired instance of the Zti class. Consider
    # eventually dropping support for 'python -m tnz.zti' so that we can
    # avoid this.
    from tnz import zti
    raise SystemExit(zti.main())
import atexit
import cmd
import ctypes
import logging
import os
import platform
import signal
import socket
import sys
import tempfile
import threading
import time
import traceback
from . import _sigx as sigx
from ._termlib import Term as curses
from ._util import session_ps_14bit
from . import ati
from . import rexx
from . import tnz
from . import __version__
__author__ = "Neil Johnson"
class Zti(cmd.Cmd):
"""
The Zti class provides Z terminal interface (3270) function to
users of an ascii character terminal. For users of ATI, this
class performs the function of the ATI session manager and error
handler.
"""
    def __del__(self):
        """Zti destructor.

        Ends any background wait before the instance goes away
        (``__bg_wait_end`` is defined later in this class).
        """
        self.__bg_wait_end()
        # do not switch to shell mode here
        # doing so may log and logging may encounter errors
        # if this is being done during shutdown
    def __init__(self, stdin=None, stdout=None):
        """A Zti instance is a line-oriented and full-screen ascii
        character terminal interface to 3270 telnet sessions.
        Tab is used for the readline completion key.
        The optional arguments stdin and stdout specify the input and
        output file objects that the Cmd instance or subclass
        instance will use for input and output. If not specified,
        they will default to sys.stdin and sys.stdout.
        """
        super().__init__(stdin=stdin, stdout=stdout)
        # Resolve effective streams; cmd.Cmd must not use readline's
        # raw input when an explicit stdin is supplied.
        if stdin:
            self.use_rawinput = False
            self.__stdin = stdin
        else:
            self.__stdin = sys.stdin
        if stdout:
            self.__stdout = stdout
        else:
            self.__stdout = sys.stdout
        self.autosize = False
        if os.getenv("ZTI_AUTOSIZE"):
            self.autosize = True
        self.pend_intro = None
        self.__dirty_ranges = []
        self.single_session = False
        # Screen/rendering state
        self.colors = 768
        self.cv2attr = {}
        self.stdscr = self._stdscr  # class-level default; presumably None until a screen exists — confirm
        self.rewrite = True
        self.rewrite_cursor = True
        self.rewrite_keylock = True
        self.rewrite_status = True
        self.twin_beg = None
        self.twin_loc = None
        self.twin_end = None
        # Background-processing / terminal-mode state
        self.__lock = threading.Lock()
        self.__bg = False
        self.__thread = None
        self.__tty_mode = 0  # 0=shell, 1=prog
        self.__has_color = None
        self.__prog_curs_vis = 0  # cursor invisible in prog mode
        self.__cur_curs_vis = 1  # current cursor state
        self.__shell_mousemask = None
        self.ddmrecv = False  # allow host-initiated ind$file get
        self.ddmsend = False  # allow host-initiated ind$file put
        self.__plugin_kwargs = {}
        self.__plugin_goto = ""
        self.__tb_count = 0
        self.__in_wait = False
        self.__in_script = False
        self.__wait_rc = None
        self.__tracing = False
        self.registered_atexit = False
        # File-transfer (download) tracking
        self.downloads = []
        self.__downcnt = 0
        self.downloadaction = False
        # Session-selector display state
        self.__sessel_y = None
        self.__sessel_x = 0
        self.__sessel = []
        # Platform-specific input sources: on Windows, stdin and SIGWINCH
        # are not selectable, so they start unselected.
        self.__stdin_selected = False
        if _osname == "Windows":
            self.__stdin_selectr = None
            self.__sigwinch_selected = False
        else:
            self.__stdin_selectr = self.__stdin
            self.__sigwinch_selected = True
        # First instance becomes the class-wide singleton.
        if self._zti is None:
            Zti._zti = self
        self.__install_plugins()
# Methods
def atexit(self):
while self.downloads:
download = self.downloads.pop()
download.remove()
    def cmdloop(self, intro=None):
        """Override cmd.Cmd.cmdloop to handle KeyboardInterrupt.

        Ctrl+C restarts the loop instead of killing the process; any other
        exception runs ``postloop`` before propagating.
        """
        self.__shell_mode()
        # If commands were queued, defer a pending intro so the queue is
        # processed first; it will be shown on a later loop entry.
        if self.cmdqueue:
            if intro is None:
                intro = self.intro
        if intro is None:
            intro = self.pend_intro
            if intro is not None:
                self.pend_intro = intro
                intro = ""
        while True:
            try:
                # Pass the intro only on the first iteration.
                text = intro
                intro = ""
                super().cmdloop(intro=text)
                return
            except KeyboardInterrupt:
                self.__shell_mode()
                print("\nKeyboardInterrupt")
            except BaseException:
                self.postloop()
                raise
def complete_goto(self, text, line, begidx, endidx):
textu = text.upper()
textl = len(text)
line_lower = line.islower()
line_upper = line.isupper()
slist = ati.ati.sessions.split()
completions = []
for session in slist:
if session.startswith(textu):
if line_lower:
completion = text + session[textl:].lower()
elif line_upper:
completion = text + session[textl:].upper()
else:
completion = text + session[textl:]
completions.append(completion)
return completions
def complete_say(self, text, line, begidx, endidx):
textu = text.upper()
textl = len(text)
line_lower = line.islower()
line_upper = line.isupper()
klist = ati.ati.keys()
completions = []
for akey in klist:
if akey.startswith(textu):
if line_lower:
completion = text + akey[textl:].lower()
elif line_upper:
completion = text + akey[textl:].upper()
else:
completion = text + akey[textl:]
completions.append(completion)
return completions
def default(self, line):
"""Override cmd.Cmd.default
"""
value = ati.value(line)
print(" >>> "+repr(value))
    def do_EOF(self, arg):
        # Not intended to be used as a command. Performs action when
        # /dev/null is redirected to standard input and when Ctrl+D
        # is pressed. This is intentionally not a docstring so that
        # EOF will not appear as a 'documented command'.
        # Returning True stops the command loop.
        return True  # see postcmd
def do_abort(self, arg):
"""Abort the running program.
This is modelled after the ATI Abort error recovery
selection. ABORT aborts the error handler. ATI exits
immediately with a return code of 127. Note that
ATI would have returned 999.
"""
if not self.__in_script:
print("Not in a program. Use EXIT to exit.")
return
raise _ZtiAbort(127)
    def do_autosize(self, arg):
        # Toggle terminal autosizing. Intentionally not a docstring so
        # AUTOSIZE does not appear as a 'documented command'.
        # ready for testing?
        # ``curses`` doubles as a sentinel default: if the Term class has
        # no ``prog_maxyx`` attribute at all, autosize is unsupported.
        if getattr(curses, "prog_maxyx", curses) is curses:
            print(">>> AUTOSIZE not available.")
            return
        autosize = not self.autosize
        self.autosize = autosize
        if not autosize:
            # Turning autosize off clears any forced program screen size.
            curses.prog_maxyx = None
        print(f"autosize = {self.autosize}")
    def do_disc(self, arg):
        # The intent would be to model this after the ATI DISC
        # interactive trace command. DISC disconnects the parent ID,
        # leaving ATI running disconnected, and leaving the session
        # logged on and connected to the parent. This is
        # intentionally not a docstring so that DISC will not appear
        # as a 'documented command'. Currently a stub.
        print(">>> DISC command not supported.")
def do_discard(self, arg):
"""Discard a downloaded file.
Usage:
discard
discard all
discard number
With no parameters, the oldest download is discarded.
When 'all' is specified, all downloads are discarded.
When a number is specified, the indexed download is discarded.
"""
if len(self.downloads) <= 0:
print(">>> No downloads to discard")
self.__bg_wait_end() # needed
if arg.lower() == "all":
while self.downloads:
download = self.downloads.pop()
download.remove()
else:
if not arg:
arg = "0"
try:
i = int(arg)
except ValueError:
print(">>> No download "+repr(arg))
return
try:
download = self.downloads.pop(i)
except IndexError:
print(">>> No download "+repr(i))
return
download.remove()
def do_display(self, arg):
# Debug display for current session.
# This is intentionally not a docstring so that DISPLAY will not
# appear as a 'documented command'.
self.__bg_wait_end()
tns = ati.ati.get_tnz()
if tns:
print(tns.scrstr())
def do_downloads(self, arg):
"""List downloads.
"""
self.__bg_wait_end()
self.__downcnt = len(self.downloads)
for i in range(0, self.__downcnt):
download = self.downloads[i]
print(str(i)+" "+repr(download))
os.environ["d"+str(i)] = download.file.name
    def do_drop(self, arg):
        """Drop an ATI-like variable.
        Usage:
            drop NAME
        This is modelled after the DROP function of the ATI GOTO EXEC
        that is often used with the ATI Session Manager. DROP
        disconnects a session.
        This is also modelled after the ATI DROP statement.
        """
        self.__bg_wait_end()
        # Dropping SESSION ends single-session mode so zti does not
        # exit automatically when the session list changes.
        if self.single_session and "SESSION" in arg.upper().split():
            self.single_session = False
        ati.drop(arg)
def do_exit(self, arg):
"""Exit to the system.
This is modelled after the ATI EXIT command/statement. EXIT
causes ATI processing to be halted.
1 - EXIT causes control (and any return code) to be passed to
the next ATI program specified on the execution line. If
no other programs follow, the return code is returned to
the caller of ATI.
2 - If an included programs exits, control is not returned to
the calling program, but to the next program listed on
the execution line.
3 - Task file execution terminates when either an EXIT is
encountered or after the last statement in the file is
executed
4 - A return code is optional; if not specified, it defaults
to 0.
"""
return True # see postcmd
    def do_fields(self, arg):
        # Debug fields for current session.
        # This is intentionally not a docstring so that FIELDS will not
        # appear as a 'documented command'.
        # Prints each field's (start, end) address pair and its text.
        self.__bg_wait_end()
        tns = ati.ati.get_tnz()
        if not tns:
            return
        saddr = 0
        eaddr = 0
        first = True
        for faddr, _ in tns.fields():
            if first:
                # remember the first field start; the final (wrapped)
                # field is printed after the loop using it as the end
                first = False
                eaddr = faddr
            else:
                text = tns.scrstr(saddr, faddr)
                print(repr((saddr, faddr)), repr(text))
            saddr = faddr
        # last field wraps around to the first field start
        text = tns.scrstr(saddr, eaddr)
        print(repr((saddr, eaddr)), repr(text))
    def do_goto(self, arg):
        """Command to go to a session in full-screen.
        This is modelled after the ATI GOTO interactive trace
        command. GOTO suspends execution and transfers control to the
        indicated session. If no session is supplied, the active
        session is used. If the selected session does not exist, a
        warning is issued. ATI execution resumes upon returning
        (through use of hot-key, disconnection, logoff, etc.). GOTO
        ? will show all sessions under the parent ID.
        This is also modelled after the ATI GOTO EXEC that is often
        used with the ATI Session Manager:
        <TODO need syntax diagram>
        session
            The name of a session to which control is transferred.
        userid
            A user ID on the same node as the current node. The
            session name is simply userid.
        userid AT node
            Information by which the session name is hashed. The
            session name is made by concatenating the last five
            characters of userid, an = sign, and the sixth and
            seventh characters of node. For example, MICHAELG AT
            STLVM3 becomes HAELG=3, and RONB AT STLVM1 becomes
            RONB=1.
        nickname
            A nickname from a NAMES file on the A-disk where a user
            ID and node are stored that specify the session to which
            control is to be transferred.
        ?
            Lists the available current session names.
        DROP
            Disconnects a session. See DROP command.
        INIT
            Initiated a session (brings up a new one).
        QUIET
            Suppresses informational messages.
        RENAME
            Changes the name of the current session to the session
            name specified. See RENAME command.
        This is NOT modelled after the ATI GOTO statement.
        """
        self.__bg_wait_end()
        self.__downcnt = len(self.downloads)
        sessions = ati.ati.sessions
        session = ati.ati.session
        oldsize = ati.ati["SESSION_PS_SIZE"]
        newsize = oldsize
        if arg == "" and sessions == "":
            # no argument and no sessions: create default session "A"
            ati.ati.session = "A"
            session = "A"
        elif arg == "":
            # no argument: reselect the current session
            ati.ati.session = session
        elif arg == "?":
            print(sessions)
            return
        elif rexx.words(arg) == 1:
            # single word: session name or host[:port]
            port = None
            hostname = arg
            sesname = arg
            if ":" in arg:
                parts = arg.split(":", 1)
                hostname = parts[0]
                sesname = parts[0]
                port = parts[1]
            if "." not in hostname:
                # qualify a bare hostname with the local domain
                fqdn = socket.getfqdn()
                fqdn = fqdn.split(".")
                if len(fqdn) > 1:
                    hostname += "."+".".join(fqdn[1:])
            if "." in hostname:
                parts = hostname.split(".", 1)
                sesname = parts[0]
            if sesname != arg:
                # derived name: uppercase it and make it unique
                basename = sesname
                sesname = sesname.upper()
                i = 1
                while " "+sesname+" " in " "+sessions+" ":
                    sesname = basename+"~"+str(i)
                    i += 1
            if port is not None:
                oldport = ati.ati["SESSION_PORT"]
                if oldport != port:
                    ati.set("SESSION_PORT", port)
            session_host = ati.ati["SESSION_HOST"]
            if session_host is None or sesname != arg:
                ati.set("SESSION_HOST", hostname)
            if not oldsize:
                # honor SESSION_PS_SIZE environment hints
                # NOTE(review): MAX255 uses max(columns - 17, 255),
                # i.e. at least 255 columns, while FULL255 caps with
                # min(columns, 255) -- confirm this asymmetry is
                # intentional.
                if os.environ.get("SESSION_PS_SIZE", "") == "MAX":
                    (columns, lines) = os.get_terminal_size()
                    lines -= 4
                    columns = min(columns - 17, 160)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "MAX255":
                    (columns, lines) = os.get_terminal_size()
                    lines -= 4
                    columns = max(columns - 17, 255)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "FULL":
                    (columns, lines) = os.get_terminal_size()
                    columns = min(columns, 160)  # 160 for ispf
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "FULL255":
                    (columns, lines) = os.get_terminal_size()
                    columns = min(columns, 255)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
            # selecting the session creates it if needed
            ati.ati.session = sesname
            if self.single_session and ati.ati.rc == 0:
                if ati.ati.session != ati.ati.sessions:
                    self.single_session = False
            # restore SESSION_HOST/SESSION_PORT for later sessions
            if session_host is None:
                ati.drop("SESSION_HOST")
            elif sesname != arg:
                ati.set("SESSION_HOST", session_host)
            if port is not None:
                if oldport != port:
                    if oldport is None:
                        ati.drop("SESSION_PORT")
                    else:
                        ati.set("SESSION_PORT", oldport)
        else:
            # two words: session-name hostname[:port]
            port = None
            hostname = rexx.subword(arg, 2)
            sesname = rexx.word(arg, 1)
            if ":" in hostname:
                parts = hostname.split(":", 1)
                hostname = parts[0]
                port = parts[1]
            if "." not in hostname:
                # qualify a bare hostname with the local domain
                fqdn = socket.getfqdn()
                fqdn = fqdn.split(".")
                if len(fqdn) > 1:
                    hostname += "."+".".join(fqdn[1:])
            if port is not None:
                oldport = ati.ati["SESSION_PORT"]
                if oldport != port:
                    ati.set("SESSION_PORT", port)
            oldhost = ati.ati["SESSION_HOST"]
            if oldhost != hostname:
                ati.set("SESSION_HOST", hostname)
            if not oldsize:
                # honor SESSION_PS_SIZE environment hints (same logic
                # as the single-word branch above)
                if os.environ.get("SESSION_PS_SIZE", "") == "MAX":
                    (columns, lines) = os.get_terminal_size()
                    lines -= 4
                    columns = min(columns - 17, 160)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "MAX255":
                    (columns, lines) = os.get_terminal_size()
                    lines -= 4
                    columns = max(columns - 17, 255)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "FULL":
                    (columns, lines) = os.get_terminal_size()
                    columns = min(columns, 160)  # 160 for ispf
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
                elif os.environ.get("SESSION_PS_SIZE", "") == "FULL255":
                    (columns, lines) = os.get_terminal_size()
                    columns = min(columns, 255)
                    lines, columns = session_ps_14bit(lines, columns)
                    newsize = f"{lines}x{columns}"
                    ati.set("SESSION_PS_SIZE", newsize)
            # selecting the session creates it if needed
            ati.ati.session = sesname
            if self.single_session and ati.ati.rc == 0:
                if ati.ati.session != ati.ati.sessions:
                    self.single_session = False
            # restore SESSION_HOST/SESSION_PORT/SESSION_PS_SIZE
            if oldhost != hostname:
                if oldhost is None:
                    ati.drop("SESSION_HOST")
                else:
                    ati.set("SESSION_HOST", oldhost)
            if port is not None:
                if oldport != port:
                    if oldport is None:
                        ati.drop("SESSION_PORT")
                    else:
                        ati.set("SESSION_PORT", oldport)
            if oldsize != newsize:
                if oldsize is None:
                    ati.drop("SESSION_PS_SIZE")
                else:
                    ati.set("SESSION_PS_SIZE", oldsize)
        if ati.ati.seslost:
            self.__session_check()
            return
        if len(self.cmdqueue) > 0:  # if more from SOURCE command
            return
        try:
            self.ddmrecv = True  # allow host-initiated ind$file get
            self.ddmsend = True  # allow host-initiated ind$file put
            self.__prog_mode()
            try:
                # run the full-screen session until hot-key/etc.
                r2d2rv = self.__r2d2(self.stdscr, _WAIT_GOTO, -1)
            except tnz.TnzTerminalError:
                self.print_stack(exc=True)
                print("Maybe set SESSION_PS_SIZE?")
                r2d2rv = None
            except tnz.TnzError:
                self.print_stack(exc=True)
                r2d2rv = None
        finally:
            self.ddmrecv = False  # ind$file get unexpected
            self.ddmsend = False  # ind$file put unexpected
        if r2d2rv == 10:  # ddmdata
            self.shell_mode()
            tns = ati.ati.get_tnz()
            ddmdata = tns.ddmdata
            ddmdict = dict(tns.ddmdict)
            tns.ddmdata = None
            tns.ddmdict.clear()
            plugin_name = ddmdict.get("plugin")
            if plugin_name:
                # queue the plugin command and discard its download
                del ddmdict["plugin"]
                self.__plugin_kwargs = ddmdict
                self.cmdqueue.append(f"{plugin_name}\n")
                downi = len(self.downloads) - 1
                self.cmdqueue.append(f"discard {downi}\n")
                self.__plugin_goto = ati.ati.session
            else:
                print(ddmdata)
            return
        if r2d2rv == 11:  # downloadaction
            actiontaken = False
            for download in self.downloads:
                if download.actiontaken:
                    continue
                if download.downloadaction:
                    actiontaken = True
                    download.actiontaken = True
                    self.__shell_mode()
                    cmd = download.downloadaction
                    os.system(cmd)
            if actiontaken:
                # return to the session after the action commands
                self.cmdqueue += ["goto "+ati.ati.session+"\n"]
        self.__session_check()
def do_ha(self, arg):
# The intent would be to model this after the ATI HA
# interactive trace command. HA terminates ATI execution.
# This is intentionally not a docstring so that HA will not
# appear as a 'documented command'.
print(">>> HA command not supported. Consider EXIT.")
def do_host(self, arg):
# The intent would be to model this after the ATI HOST
# interactive trace command. HOST executes a CMS or CP
# command. The command is passed to CMS for evaluation.
# This is intentionally not a docstring so that HA will not
# appear as a 'documented command'. The use of the word
# HOST is quite overused and has become ambiguous.
print(">>> HOST command not supported. Consider SHELL.")
def do_lines(self, arg):
self.__bg_wait_end()
tns = ati.ati.get_tnz()
if not tns:
return
lines = tns.readlines
if lines is None:
tns.start_readlines()
lines = tns.readlines
tns.readlines = []
if lines:
print(*lines, sep="\n")
def do_receive(self, arg):
"""Received a downloaded file.
"""
if not arg:
print(">>> Destination file is a required argument")
return
if len(self.downloads) <= 0:
print(">>> No downloads to receive")
self.__bg_wait_end() # needed
download = self.downloads[0]
name = download.file.name
dest = arg
if rexx.words(arg) > 1:
if rexx.word(arg, 1).lower() == "ascii":
dest = rexx.subword(arg, 2)
with open(name, "r+b") as file:
bstr = file.read()
if bstr.endswith(b'\x1a'):
bstr = bstr[:-1]
bstr = bstr.replace(b'\r\n', b'\n')
cstr = bstr.decode('iso8859-1') # ?
with open(dest, "w", encoding="utf-8") as file2:
file2.write(cstr)
dest = None
if dest:
import shutil
try:
shutil.copyfile(name, dest)
except Exception:
self.print_stack(exc=True)
self.downloads.pop(0)
download.remove()
def do_rename(self, arg):
"""Rename the current session.
This is modelled after the RENAME function of the ATI GOTO
EXEC that is often used with the ATI Session Manager. RENAME
changes the name of the current session to the session name
specified.
"""
if not arg:
print("new session name required")
return
self.__bg_wait_end()
ati.rename(arg)
def do_return(self, arg):
"""Return to the program.
This is modelled after the ATI Continue Executing error
recovery selection. RETURN lets the ATI program continue
executing at the line following the WAIT statement that timed
out.
This return to script execution function is also performed by
ATI when processing an unrecognized command during
interactive tracing.
"""
if not self.__in_script:
print("Not in a program. Use EXIT to exit.")
return
return -1 # or True?
    def do_say(self, arg):
        """Print the value of an ATI-like variable.
        This should be modelled after the ATI SAY interactive trace
        command. SAY displays the value of a specified identifier,
        using the following format:
        Variable identifier = 'xxxxx'
        When label identifier IS AN ACTIVE WHEN BLOCK PRIORITY=pri
        When label identifier IS AN INACTIVE WHEN BLOCK
        Statement label identifier IS A STATEMENT LABEL
        Undefined identifier IS A NONEXISTENT IDENTIFIER
        This is NOT modelled after the ATI SAY statement.
        """
        if not arg:
            print("variable name required")
            return
        name = rexx.word(arg, 1).upper()
        # MAXWAIT/SHOWLINE cannot be read back
        if name in ("MAXWAIT", "SHOWLINE"):
            print(f"{name} IS WRITE-ONLY")
            return
        self.__bg_wait_end()
        value = ati.ati[arg]
        if value is not None:
            if value is True:
                print(name+" = True")
            elif value is False:
                print(name+" = False")
            elif callable(value):
                # callables are WHEN callbacks; presumably .active and
                # .pri[0] expose state and priority -- TODO confirm
                if value.active:
                    print(name +
                          " is an active when callback priority=%d" %
                          value.pri[0])
                else:
                    print(name+" is an inactive WHEN callback")
            else:
                value = str(value)
                print(name+" = "+repr(value))
        else:
            print(name+" is a nonexistent identifier")
    def do_session(self, arg):
        """Get information about the current session.
        Prints connection, code page, size, and capability details
        for the current tnz session, or NONE when there is none.
        """
        self.__bg_wait_end()
        tns = ati.ati.get_tnz()
        if not tns:
            print("NONE")
            return
        pnaddr, pnport = tns.getpeername()
        print(f" SESSION_HOST={pnaddr}")
        print(f" SESSION_PORT={pnport}")
        lu_name = tns.lu_name
        if lu_name:
            print(f" SESSION_LU_NAME={lu_name}")
        print(f" SESSION_CODE_PAGE={tns.codec_info[0].name}")
        print(f" SESSION_PS_SIZE={tns.amaxrow}x{tns.amaxcol}")
        print(f" SESSION_TN_ENHANCED={tns.tn3270e:d}")
        print(f" SESSION_DEVICE_TYPE={tns.terminal_type}")
        if tns.alt:
            print(" Alternate code page IBM-"+str(tns.cp_01))
        else:
            print(" Alternate code page not supported")
        print(" socket type: "+repr(tns.getsockettype()))
        if tns.extended_color_mode():
            print(" Extended color mode")
        else:
            print(" Basic color mode")
def do_set(self, arg):
"""Set the value of an ATI-like variable.
This is loosely modelled after the ATI SET statement.
"""
if not arg:
print("variable name required")
return
self.__bg_wait_end()
name = rexx.word(arg, 1)
value = rexx.subword(arg, 2)
ati.set(name, value)
if self.single_session:
sessions = ati.ati.sessions
if sessions and ati.ati.session != sessions:
self.single_session = False
def do_shell(self, arg):
"""Execute a shell/system command.
Usage:
shell command
!command
"""
os.system(arg)
def do_show(self, arg):
"""Show the current session.
This is modelled after the ATI SHOW interactive trace
command. SHOW displays the screen of the current session
immediately in the case where DISPLAY is set to HOST or
HOSTCODE and the host screen is being displayed due to an
interactive trace command being entered.
"""
self.__bg_wait_end()
self.__prog_mode()
self.__r2d2(self.stdscr, _WAIT_FOREVER, -1)
def do_size(self, arg):
# Get/Set terminal size
# This is intentionally not a docstring so that SIZE will not
# appear as a 'documented command'. Need to refine this command.
self.__shell_mode(init=True)
# curses.update_lines_cols() not in z/OS python3.6
maxy, maxx = self.stdscr.getmaxyx()
if arg:
print(">>> SIZE setting not implemented.")
return
print("TTY rows, cols: "+repr(maxy)+", "+repr(maxx))
def do_skip(self, arg):
"""Skip current wait.
This is modelled after the ATI SKIP interactive trace
command. SKIP can be used only when ATI is executing a WAIT
statement. It is used to force a WAIT expression to be
satisfied, setting the return code of the WAIT to 1.
"""
if not self.__in_wait:
print("Not in a wait")
return
self.__wait_rc = -2
return 0
def do_source(self, arg):
"""Execute a file of commands.
Usage:
source filename
"""
if not arg:
print(">>> Source file is a required argument")
return
try:
with open(os.path.expanduser(arg)) as myfile:
self.cmdqueue += myfile.readlines()
except FileNotFoundError:
print(">>> Source file "+repr(arg)+" not found.")
    def do_strs(self, arg):
        # Debug strings for current session.
        # This is intentionally not a docstring so that STRS will not
        # appear as a 'documented command'.
        # Dumps, per field, the address range, extended-highlight,
        # foreground, and background plane bytes plus the field
        # attribute, then each character group within the field.
        self.__bg_wait_end()
        tns = ati.ati.get_tnz()
        if not tns:
            return
        faddr, _ = tns.next_field(0, offset=0)
        if faddr < 0:
            # unformatted screen: scan the whole buffer
            faddr = 0
            sa0 = 0
            ea0 = 0
        else:
            sa0 = (faddr + 1) % tns.buffer_size
            ea0 = sa0
        print("adr1" +
              "-adr2" +
              " eh" +
              " fg" +
              " bg" +
              " field attribute or text")
        for sa1, ea1 in tns.char_addrs(sa0, ea0):
            # NOTE(review): faddr is reassigned below each iteration,
            # so the faddr < 0 branch looks unreachable after the
            # first pass -- confirm intent.
            if faddr < 0:
                faddr = 0
            else:
                faddr = (sa1 - 1) % tns.buffer_size
            fav = tns.plane_fa[faddr]
            feh = tns.plane_eh[faddr]
            ffg = tns.plane_fg[faddr]
            fbg = tns.plane_bg[faddr]
            print(rexx.right(str(faddr), 4) +
                  "-"+rexx.right(str(ea1), 4) +
                  " "+rexx.right(rexx.substr(hex(feh), 3), 2, "0") +
                  " "+rexx.right(rexx.substr(hex(ffg), 3), 2, "0") +
                  " "+rexx.right(rexx.substr(hex(fbg), 3), 2, "0") +
                  " "+rexx.right(rexx.substr(hex(fav), 3), 2, "0") +
                  " ("+tns.fav_repr(fav)+")")
            for sa2, ea2 in tns.group_addrs(sa1, ea1):
                ceh = tns.plane_eh[sa2]
                cfg = tns.plane_fg[sa2]
                cbg = tns.plane_bg[sa2]
                print(rexx.right(str(sa2), 4) +
                      "-"+rexx.right(str(ea2), 4) +
                      " "+rexx.right(rexx.substr(hex(ceh), 3), 2, "0") +
                      " "+rexx.right(rexx.substr(hex(cfg), 3), 2, "0") +
                      " "+rexx.right(rexx.substr(hex(cbg), 3), 2, "0") +
                      " "+repr(tns.scrstr(sa2, ea2).rstrip()))
def do_timeout(self, arg):
"""Timeout current wait in program.
This is modelled after the ATI TIMEOUT interactive trace
command. TIMEOUT can be used only when ATI is executing a
WAIT statement. It is used to force a WAIT timeout, and it
sets the return code of the WAIT to 0, meaning that the
expression was NOT satisfied.
"""
if not self.__in_wait:
print("Not in a wait")
return
self.__wait_rc = -1
return 0 # timeout - WAIT expression NOT satisfied
def do_trace(self, arg):
"""TRACE command
The intent would be to model this after the ATI TRACE
interactive trace command. TRACE ON sets the ATI internal
variable DISPLAY to ALL and begins interactive (single-step)
tracing mode. TRACE n will cause the next n ATI statements to
be traced independent of the trace mode).
"""
ati.ati.trace = arg
self.__tracing = arg.upper() == "ON"
if self.__tracing:
return -1 # or True?
    def do_upload(self, arg):
        # Upload a local file to the current session.
        # Intentionally not a docstring so that UPLOAD will not
        # appear as a 'documented command'.
        self.__bg_wait_end()
        tns = ati.ati.get_tnz()
        if not tns:
            print("No session")
            return
        try:
            # NOTE(review): the file object is not closed here;
            # presumably tns.upload() takes ownership -- confirm.
            file = open(os.path.expanduser(arg), mode="rb")
        except FileNotFoundError:
            print(">>> Upload file "+repr(arg)+" not found.")
            return
        tns.upload(file)
        # go full-screen so the transfer can be watched
        return self.do_goto("")
def emptyline(self):
"""Override cmd.Cmd.emptyline. Without the override, Cmd will
repeat the last command. With this override, an empty line will
behave more like a typical shell.
"""
if self.__tracing:
return -1 # or True?
return
def erase(self, _):
"""Erase terminal screen
Called by tnz.py.
"""
if self.rewrite:
return
_logger.debug("schedule rewrite for erase")
self.rewrite = True
def extended_color(self, _):
"""Enter extended color mode
"""
if self.rewrite:
return
_logger.debug("schedule rewrite for extended color")
self.rewrite = True
    def field(self, tns, addr):
        """Field defined
        Called by tnz.py. The attribute byte position itself is
        redrawn as a blank; the rest of the field is marked dirty so
        its characters get refreshed with the new attributes.
        """
        if self.rewrite:
            return
        buffer_size = tns.buffer_size
        epos = addr + 1  # next position
        addr1 = (epos) % buffer_size  # next address
        # the attribute position displays as a blank
        self.__write_blanks(tns, addr, addr1)
        self.__clean_range(addr, epos)
        eaddr, _ = tns.next_field(addr)
        if eaddr == addr1:
            return  # no data in field
        if eaddr < 0:
            eaddr = addr  # only field that exists
        self.__dirty_range(addr1, eaddr, tns=tns)
    def help_keys(self):
        """Process the HELP KEYS command.
        Prints the mapping from PC keyboard keys to 3270 keys.
        """
        print("""KEYS:
 ATTN - Ctrl+C or Alt+A
 CLEAR - Ctrl+L or Alt+C
 DROP session - Ctrl+W
 ENTER - Enter
 End - End
 eraseeof - Ctrl+K or Shift+End
 Escape - Esc
 Next session - Alt+PgDn
 Open URL - Ctrl+O or mouse click
 PA1 - Alt+1 or Alt+Insert
 PA2 - Alt+2 or Alt+Home
 PA3 - Alt+3
 PF1-12 - F1-12
 PF13-24 - Shift+F1-12
 Prev session - Alt+PgUp
 Word Left - Alt+Left
 Word Right - Alt+Right
""")
    def help_vars(self):
        """Process the HELP VARS command.
        Prints the variables consulted when a new session is made.
        """
        print("""Variables used when creating a new session:
 SESSION_CODE_PAGE - code page, e.g. cp037
 SESSION_LU_NAME - LU name for TN3270E CONNECT
 SESSION_HOST - tcp/ip hostname
 SESSION_PORT - tcp/ip port, default is 992
 SESSION_PS_SIZE - terminal size, e.g. 62x160
 SESSION_SSL - set to 0 to not force SSL
 SESSION_TN_ENHANCED - set to 1 allow TN3270E
 SESSION_DEVICE_TYPE - device-type, e.g. IBM-DYNAMIC
""")
    def onerror(self):
        """Ati error handler
        Enters the interactive command loop so the user can choose a
        recovery action; returns the wait return code selected by
        the command that ended the loop (see __wait_rc).
        """
        self.print_stack()
        intro = """
ERROR RECOVERY FOR WAIT TIMEOUT
 - abort (exit program with RC=127)
 - goto (go to session, Esc to return)
 - return (resume with wait RC=1)
 - timeout (resume with wait RC=0)
This recovery process was initiated because the program used
ONERROR=1. Consider the above commands to recover. The above stack
trace provides context for where the error occurred. Use the HELP and
HELP KEYS commands for more information.
"""
        # save/restore state so nested command loops behave
        in_script = self.__in_script
        in_wait = self.__in_wait
        try:
            self.__in_script = True
            self.__in_wait = True
            self.__wait_rc = None
            self.cmdloop(intro)
        finally:
            self.__in_script = in_script
            self.__in_wait = in_wait
        return self.__wait_rc
def pause(self):
"""Pause script
"""
self.shell_mode()
in_script = self.__in_script
in_wait = self.__in_wait
try:
self.__in_script = True
self.__in_wait = False
self.cmdloop("")
finally:
self.__in_script = in_script
self.__in_wait = in_wait
    def postcmd(self, stop, line):
        """Override cmd.Cmd.postcmd
        A truthy stop ends the command loop; 'exit' and single-
        session exhaustion terminate the whole process via
        SystemExit. Otherwise refresh the prompt state and restart
        the background keep-alive before the next prompt.
        """
        if stop is not False and stop is not None:
            if self.stdscr:
                self.__shell_mode()
            if line == "exit":
                self.postloop()
                raise SystemExit()
            if line == "EOF":
                print("")  # newline
            return True
        if len(self.cmdqueue) <= 0:  # if ready for prompt
            if self.stdscr:
                self.__shell_mode()
            # in single-session mode, exit when the session is gone:
            # RC 10 when it was lost, RC 0 when it ended normally
            if self.single_session and not ati.ati.sessions:
                self.postloop()
                if ati.ati.seslost:
                    raise SystemExit(10)
                else:
                    raise SystemExit(0)
            if self.pend_intro is not None:
                self.intro = self.pend_intro
                self.pend_intro = None
                self.stdout.write(str(self.intro)+"\n")
            # announce downloads that arrived since the last prompt
            if self.__downcnt < len(self.downloads):
                print("downloads:")
                self.do_downloads("")
            self.__update_prompt()  # according to SESSION
            self.__bg_wait_start()
        return False
    def postloop(self):
        """Override cmd.Cmd.postloop
        Command prompt terminating: stop the background keep-alive
        thread and restore normal (non-curses) terminal mode.
        """
        self.__bg_wait_end()
        if self.stdscr:
            self.__shell_mode()
def precmd(self, line):
"""Override cmd.Cmd.precmd behavior. Make it so that the case
of the command does not matter by always 'executing' the
lowercase command name.
"""
if line == "EOF": # Ctrl+D pressed or end of input
return line
cmd = rexx.word(line, 1)
return line.replace(cmd, cmd.lower(), 1)
    def preloop(self):
        """Override cmd.Cmd.preloop
        Initialize: build the prompt from the current SESSION and
        start the background keep-alive thread.
        """
        self.__update_prompt()  # according to SESSION
        self.__bg_wait_start()
def print_stack(self, exc=False):
self.__shell_mode()
try:
current_tb_count = len(traceback.extract_stack())
limit = current_tb_count - self.__tb_count
if limit <= 1:
limit = None
if exc:
traceback.print_exc(limit=limit)
else:
traceback.print_stack(limit=limit)
except Exception:
print("<stack trace unavailable>")
    def shell_mode(self):
        """Public wrapper: restore normal (non-curses) terminal mode."""
        self.__shell_mode()
    def show(self):
        # Display what is currently known about the session screen.
        # FIXME - wait (even with timeout=0
        # will get updates from host
        # this methods wants to NOT get
        # updates from host - just display
        # what we currently know
        self.wait(0)
    def showline(self, text):
        """ATI-like function for when SHOWLINE is set.
        Writes text on the bottom terminal line, truncated to the
        visible host-window width. Returns True when displayed,
        False when there is no terminal window to write to.
        """
        if self.twin_beg is None:
            return False
        if not self.stdscr:
            return False
        self.__prog_mode()
        maxy, _ = self.stdscr.getmaxyx()
        # x positions of the host window: begin, end, and location
        xpos1, _ = self.twin_beg
        xpos2, _ = self.twin_end
        xpos3, _ = self.twin_loc
        if len(text) > xpos2-xpos1:
            text = text[:xpos2-xpos1]  # clip to window width
        self.__tty_write(maxy-1, xpos3, text, 0)
        self.stdscr.refresh()
        return True
    def wait(self, timeout=0, keylock=False):
        """Wait for the screen to change
        or keylock to change to zero.
        Called by ATI to wait/pause/sleep
        when DISPLAY is ALL, HOST, or HOSTCODE.
        Return values:
        0 The wait time expired
        1 The condition was satisfied
        12 Session lost or undefined
        Any other outcome (e.g. user hot-key) pauses the program and
        enters the interactive command loop; the command that ends
        the loop determines the returned wait code.
        """
        self.__prog_mode()
        if timeout == 0:
            waitc = _WAIT_NONE
        elif keylock:
            waitc = _WAIT_KEYLOCK
        else:
            waitc = _WAIT_SCREEN
        r2d2rv = self.__r2d2(self.stdscr, waitc, timeout)
        if r2d2rv in (0, 1, 12):  # if timeout, satisfied, seslost
            # refresh the display before returning if anything changed
            if ((self.rewrite or
                    self.__dirty_ranges or
                    self.rewrite_cursor or
                    self.rewrite_keylock or
                    self.rewrite_status)):
                self.__display(self.stdscr, False)
                self.__refresh()
            return r2d2rv  # return timeout, satisfied, seslost
        self.shell_mode()
        # save/restore state so nested command loops behave
        in_script = self.__in_script
        in_wait = self.__in_wait
        try:
            self.__in_script = True
            if not keylock:
                self.__in_wait = True
            self.__wait_rc = None
            self.print_stack()
            intro = """
Program execution paused
 - abort (exit program with RC=127)
 - exit (exit to the system)
 - goto (go to session, Esc to return)
 - return (resume program with wait RC=1)
 - say (display the value of the specified variable)
 - shell (execute a shell/system command)
 - show (display the current session)
 - timeout (resume program with wait RC=0)
Consider the above commands for program interaction. The above stack
trace provides context for where the program was interrupted. Use the
HELP and HELP KEYS commands for more information.
"""
            self.cmdloop(intro)
        finally:
            self.__in_wait = in_wait
            self.__in_script = in_script
        _logger.debug("Zti.wait returning %r", self.__wait_rc)
        return self.__wait_rc
    def write(self, tns, faddr, saddr, eaddr):
        """tnz calls when data written
        Called by tnz.py. Redraws the character groups in
        [saddr, eaddr) and marks that span clean; the span may wrap
        past the end of the buffer.
        """
        if self.rewrite:
            return
        for saddr1, eaddr1 in tns.group_addrs(saddr, eaddr):
            self.__write_group(tns, faddr, saddr1, eaddr1)
        if saddr < eaddr:
            self.__clean_range(saddr, eaddr)
        else:
            # wrapped span: clean tail then head
            self.__clean_range(saddr, tns.buffer_size)
            if eaddr != 0:
                self.__clean_range(0, eaddr)
    def write_data(self, tns, daddr, dlen, force=False):
        """Write data
        Called by tnz.py. Locates the field containing daddr and
        delegates to write() for the dlen characters starting there.
        """
        if self.rewrite:
            return
        if not force:
            self.rewrite_cursor = True  # assume needed
        faddr, _ = tns.field(daddr)
        if faddr < 0:
            faddr = 0  # unformatted screen
        caddr2 = (daddr + dlen) % tns.buffer_size
        return self.write(tns, faddr, daddr, caddr2)
    def write_data_prep(self, tns, daddr, dlen):
        """Write data prep for when data may overlay field(s)
        Called by tnz.py.
        If data wipes out a field, it may change the attributes
        of existing characters that are now in the "merged" field.
        This will make sure such existing characters get refreshed.
        """
        if self.rewrite or dlen <= 0:
            return
        faddr1, _ = tns.field(daddr)
        if faddr1 < 0:
            return  # unformatted screen: nothing to merge
        buffer_size = tns.buffer_size
        if dlen >= buffer_size:
            return  # whole buffer rewritten anyway
        if faddr1 != daddr:  # if first char not wiping out field
            caddr2 = (daddr+dlen-1) % buffer_size
            faddr2, _ = tns.field(caddr2)
            if faddr1 == faddr2:  # if no fields being wiped
                return
        # mark the remainder of the merged field dirty, up to the
        # next surviving field attribute (or wrap to daddr)
        saddr = (daddr+dlen) % buffer_size
        eaddr, _ = tns.next_field(saddr, daddr)
        if eaddr < 0:
            self.__dirty_range(saddr, daddr, tns=tns)
        else:
            self.__dirty_range(saddr, eaddr, tns=tns)
# Private methods
    def __bg_wait(self):
        """Keep sessions alive
        while at the command prompt.
        Runs on the background thread started by __bg_wait_start;
        loops until __bg is cleared or no sessions remain, waiting
        on one session at a time in round-robin fashion.
        """
        ztl = []  # sessions already waited on in this round
        while True:
            self.__lock.acquire()
            run_in_bg = self.__bg
            if run_in_bg:
                sessions = ati.ati.sessions.split()
                if not sessions:
                    self.__bg = False
                    run_in_bg = False
            self.__lock.release()
            if not run_in_bg:
                return
            # TODO FIXME what if all sessions lost?
            # choose a Zti to use for wait
            # round robin that choice
            # round robin might only matter
            # if seslost is not handled properly
            # which it is not yet
            # NOTE(review): when every session is already in ztl the
            # loop falls through with tns=None and ztn holding the
            # last session; presumably that restarts the round --
            # confirm ztn is always bound before use.
            for session in sessions:
                tns = ati.ati.get_tnz(session)
                if tns not in ztl:
                    break
                ztn = tns
                tns = None
            if tns:
                ztl.append(tns)
            else:
                tns = ztn
                ztl = [ztn]  # start a new round
            tns.wait()
def __bg_wait_start(self):
"""Ensure the background
thread is running to keep
sessions alive while at
the command prompt.
"""
self.__lock.acquire()
run_in_bg = self.__bg
self.__lock.release()
if run_in_bg:
return # already running
if not ati.ati.sessions:
return # no sessions
self.__bg = True
self.__thread = threading.Thread(target=self.__bg_wait)
self.__thread.start()
def __bg_wait_end(self):
"""Ensure the background
thread that may use
sessions is NOT running.
Needed so that command
processing can use the
sessions. And needed
so that application
can shut down.
"""
if self.__thread is None:
return # not running
self.__lock.acquire()
self.__bg = False
self.__lock.release()
tnz.wakeup_wait()
self.__thread.join()
self.__thread = None
def __clean_range(self, start, end):
"""See __dirty_range.
"""
if start >= end:
raise ValueError(f"{start} >= {end}")
for i, (sidx, eidx) in enumerate(self.__dirty_ranges):
if start == sidx and end == eidx:
self.__dirty_ranges.pop(i)
return
if start <= sidx and end >= eidx:
self.__dirty_ranges.pop(i)
elif start <= sidx and end > sidx:
self.__dirty_ranges[i] = (end, eidx)
elif start < eidx and end >= eidx:
self.__dirty_ranges[i] = (sidx, start)
elif start > sidx and end < eidx:
self.__dirty_ranges[i] = (sidx, start)
self.__dirty_ranges.append((end, eidx))
return
def __color_setup(self):
if self.__has_color is True:
pass
elif self.__has_color is False:
return
elif not curses.has_colors():
self.__has_color = False
return
try:
self.__set_colors()
self.__has_color = True
except Exception:
if self.__has_color:
raise
self.__has_color = False
    def __dirty_range(self, start, end, tns=None):
        """Mark a range of screen addresses as dirty.
        Remote commands/orders explicitly update various
        addressable character locations on the screen.
        Some of these operations result in implicit
        updates to presentation of characters on the
        screen. Explicit updates are typically handled
        explicitly. The set of dirty ranges built
        by this method are used to handle the implicit
        updates.
        """
        if start >= end:
            # wrapped span: only legal with a tns (buffer size known)
            if not tns:
                raise ValueError(f"{start} >= {end}")
            if end:
                self.__dirty_range(0, end)
            self.__dirty_range(start, tns.buffer_size)
            return
        start1 = start
        end1 = end
        # merge with any overlapping tracked ranges
        # NOTE(review): popping from __dirty_ranges while iterating
        # with enumerate skips the entry after each removal; the
        # recursive re-merge below appears to compensate -- confirm.
        for i, (sidx, eidx) in enumerate(self.__dirty_ranges):
            if start >= sidx and end <= eidx:
                return  # already covered
            if start <= sidx and end >= eidx:
                self.__dirty_ranges.pop(i)
            elif start <= sidx and end >= sidx:
                self.__dirty_ranges.pop(i)
                end1 = eidx
            elif start <= eidx and end >= eidx:
                self.__dirty_ranges.pop(i)
                start1 = sidx
        if start1 != start or end1 != end:
            # grew while merging: rescan with the widened span
            return self.__dirty_range(start1, end1)
        self.__dirty_ranges.append((start, end))
    def __display(self, window, showcursor):
        """Render the 3270 terminal screen into the curses window.

        window -- curses window to draw into
        showcursor -- when True, also draw the session-selector
            column (the hardware cursor itself is positioned by the
            caller, not here)

        Consumes and resets the rewrite/rewrite_cursor/
        rewrite_keylock/rewrite_status flags and the dirty-range
        list to redraw only what changed.
        """
        _logger.debug("begin __display")
        # snapshot and clear the redraw flags up front
        rewrite = self.rewrite
        rewrite_cursor = self.rewrite_cursor or rewrite
        rewrite_keylock = self.rewrite_keylock or rewrite
        rewrite_status = self.rewrite_status or rewrite
        self.rewrite = False
        self.rewrite_cursor = False
        self.rewrite_keylock = False
        self.rewrite_status = False
        session = ati.ati.session
        tns = ati.ati.get_tnz()
        # color only when both the host session and the terminal
        # support at least 8 colors
        has_color = min(tns.colors, self.colors) >= 8
        # Determine where host screen will displayed and what fits
        row1 = 1
        col1 = 1
        maxrow = tns.maxrow
        maxcol = tns.maxcol
        currow = tns.curadd // maxcol + 1
        curcol = tns.curadd % maxcol + 1
        if self.autosize:
            # set size of terminal to maxrow X maxcol
            # but maybe not exactly... try to maintain
            # the aspect ratio
            arows, acols = self.autosize
            aspect1 = arows / acols
            # candidates: bare screen, +9 cols (session selector),
            # +4 rows (header/footer), and both
            srows, scols = self.__scale_size(maxrow, maxcol)
            sr2, sc2 = self.__scale_size(maxrow, maxcol + 9)
            sr3, sc3 = self.__scale_size(maxrow + 4, maxcol)
            sr4, sc4 = self.__scale_size(maxrow + 4, maxcol + 9)
            rat = srows / scols
            rat2 = sr2 / sc2
            rat3 = sr3 / sc3
            rat4 = sr4 / sc4
            # pick the candidate whose aspect ratio is closest to
            # the real terminal's
            if abs(aspect1-rat2) < abs(aspect1-rat):
                srows, scols = sr2, sc2
                rat = rat2
            if abs(aspect1-rat3) < abs(aspect1-rat):
                srows, scols = sr3, sc3
                rat = rat3
            if abs(aspect1-rat4) < abs(aspect1-rat):
                srows, scols = sr4, sc4
                rat = rat4
            # consumed by the platform curses layer — TODO confirm
            curses.prog_maxyx = srows, scols
        if rewrite:
            window.clear()
        acs_hline = curses.ACS_HLINE
        rows, cols = window.getmaxyx()
        row1 = 1
        col1 = 1
        # check for room for header (and footer)
        room_for_header = True
        if (maxrow + 3) > rows:
            room_for_header = False
        # center the host screen in the (larger) window
        if rows > maxrow:
            row1 = 1 + ((rows - maxrow) // 2)
        if cols > maxcol:
            col1 = 1 + ((cols - maxcol) // 2)
        if room_for_header and row1 <= 3:
            row1 = 4
        xpos = col1 - 1
        ypos = row1 - 1
        endy = ypos + maxrow
        self.twin_beg = (0, 0)
        self.twin_end = (min(maxcol, cols), min(maxrow, rows))
        self.twin_loc = (xpos, ypos)
        # Draw session selector
        if showcursor and rewrite:
            sesx = col1 + maxcol
            if sesx + 8 <= cols: # if fits
                sesx = cols - 8
                sessions = ati.ati.sessions.split()
                sescnt = len(sessions)
                curidx = self.__sessel_y
                if curidx is None:
                    curidx = len(sessions) // 2 # current in middle
                elif curidx >= sescnt:
                    curidx = sescnt - 1
                if curidx >= rows:
                    curidx = rows - 1
                # rotate the session list so the current session
                # lands at row curidx
                idx = sessions.index(session)
                sessions2 = (sessions[idx-curidx:] +
                             sessions[:idx-curidx])
                sessions2 = sessions2[:rows]
                self.__sessel_x = sesx
                self.__sessel = sessions2
                for sesy, ses in enumerate(sessions2):
                    if ses == session:
                        text = rexx.left("*" + ses, 9)
                        if has_color:
                            attr = self.cv2attr[(247, 0)] # white
                        else:
                            attr = curses.A_BOLD
                    else:
                        text = rexx.left(" " + ses, 9)
                        if has_color:
                            attr = self.cv2attr[(244, 0)] # green
                        else:
                            attr = curses.A_NORMAL
                    self.__tty_write(sesy, sesx, text, attr)
        # Window/Icon title
        if rewrite or rewrite_status:
            if os.getenv("ZTI_TITLE") == "1":
                if hasattr(window, "set_title"):
                    window.set_title(session)
        # Draw header (and footer)
        if (room_for_header and
                (rewrite or
                 rewrite_cursor or
                 rewrite_keylock or
                 rewrite_status)):
            _logger.debug("before drawing header/footer")
            keylock = ati.value("KEYLOCK", trace=False)
            dashes = rexx.copies("\u2500", min(cols, maxcol))
            if rewrite:
                window.hline(ypos - 3, xpos, acs_hline, len(dashes))
            if tns.ddm_in_progress():
                # file transfer status line instead of cursor/size
                statlen = 42
                if maxrow >= 100:
                    statlen += 2
                if maxcol >= 100:
                    statlen += 2
                if rewrite or rewrite_status:
                    window.attron(curses.A_REVERSE)
                    bytecnt = "?"
                    download = tns.download()
                    if hasattr(download, "bytecnt"):
                        bytecnt = download.bytecnt
                    msg = f"File transfer in progress ({bytecnt} bytes)"
                    msg = rexx.left(msg, statlen)
                    self.__tty_write(ypos - 2, xpos, msg)
                    window.attroff(curses.A_REVERSE)
                tsx = xpos + statlen
            else:
                # "Cursor= (r,c), Size= (R,C), KeyLock= k, Session= s"
                if rewrite or rewrite_status:
                    self.__tty_write(ypos - 2, xpos, "Cursor= (")
                rlen = len(str(maxrow))
                clen = len(str(maxcol))
                if rewrite or rewrite_cursor or rewrite_status:
                    self.__tty_write(ypos-2, xpos+9,
                                     rexx.right(str(currow), rlen) +
                                     "," +
                                     rexx.right(str(curcol), clen))
                tsx = xpos + 9 + clen + 1 + rlen
                if rewrite or rewrite_status:
                    self.__tty_write(ypos-2, tsx,
                                     "), Size= (" +
                                     rexx.right(str(maxrow), rlen) +
                                     "," +
                                     rexx.right(str(maxcol), clen) +
                                     "), ")
                tsx += 10 + rlen + 1 + clen + 3
                if rewrite or rewrite_status or rewrite_keylock:
                    if "1" == keylock:
                        window.attron(curses.A_REVERSE)
                    self.__tty_write(ypos-2, tsx,
                                     "KeyLock= "+keylock)
                    if "1" == keylock:
                        window.attroff(curses.A_REVERSE)
                tsx += 10
                if rewrite:
                    self.__tty_write(", Session= ")
                    self.__tty_write(rexx.left(session, 12))
                tsx += 23
                if rewrite or rewrite_status:
                    self.__tty_write(ypos-2, tsx, ati.ati.time)
            if rewrite:
                window.hline(ypos-1, xpos, acs_hline, len(dashes))
                if (row1 + maxrow) <= rows: # if room for footer
                    window.hline(endy, xpos, acs_hline, len(dashes))
            _logger.debug("after drawing header/footer")
        # Draw 3270 terminal screen
        dirty_ranges = self.__dirty_ranges
        if dirty_ranges:
            if not rewrite:
                # incremental redraw of just the dirty groups
                buffer_size = tns.buffer_size
                for sidx, eidx in dirty_ranges:
                    eidx %= buffer_size
                    faddr, _ = tns.field(sidx)
                    for saddr, eaddr in tns.group_addrs(sidx, eidx):
                        self.__write_group(tns, faddr, saddr, eaddr)
            dirty_ranges.clear()
        if rewrite:
            _logger.debug("before drawing terminal")
            faddr, _ = tns.next_field(tns.buffer_size-2)
            if faddr < 0: # if no fields
                self.write(tns, 0, 0, 0)
            else:
                # walk all character groups, filling field-attribute
                # gaps with blanks
                paddr = faddr
                xaddr = faddr
                for saddr, eaddr in tns.char_addrs(faddr):
                    if paddr == xaddr:
                        xaddr = saddr
                    self.__write_blanks(tns, paddr, saddr)
                    paddr = eaddr
                    faddr = (saddr-1) % tns.buffer_size
                    self.write(tns, faddr, saddr, eaddr)
                self.__write_blanks(tns, paddr, xaddr)
            _logger.debug("after drawing terminal")
        _logger.debug("end __display")
def __endwin(self):
selectr = self.__stdin_selectr
if selectr and self.__stdin_selected:
self.__stdin_selected = False
tnz.selector_del(selectr)
if _osname != "Windows":
sigx.del_handler(tnz.wakeup_wait)
curses.endwin()
def __install_plugins(self):
try:
from importlib.metadata import entry_points
except ImportError: # must be Python <3.8
self.plugins = ""
return
plugins = []
zti_plugins = entry_points().get("zti.commands", [])
for entry in zti_plugins:
name = entry.name
plugins.append(name)
def do_plugin(arg, entry=entry, **kwargs):
plugin = entry.load()
self.__bg_wait_end()
tb_count = self.__tb_count
plugin_goto = self.__plugin_goto
self.__plugin_goto = ""
kwargs.update(**self.__plugin_kwargs)
self.__plugin_kwargs.clear()
try:
try:
self.__tb_count = len(traceback.extract_stack())
except Exception:
pass
with ati.ati.new_program(share_sessions=True):
plugin(arg, **kwargs)
except _ZtiAbort:
pass
except Exception:
ati.say(f"{name} failed")
self.print_stack(exc=True)
else:
if plugin_goto:
self.cmdqueue.append(f"GOTO {plugin_goto}\n")
finally:
self.__tb_count = tb_count
sessions = ati.ati.sessions.split()
if sessions:
if ati.ati.session not in sessions:
ati.ati.session = sessions[0]
def help_plugin(entry=entry):
plugin = entry.load()
self.__shell_mode()
plugin("--help")
setattr(self, f"do_{name}", do_plugin)
setattr(self, f"help_{name}", help_plugin)
self.plugins = " ".join(plugins)
def __key_data(self, tns, data):
try:
tns.key_data(data, zti=self)
except UnicodeEncodeError:
curses.flash()
curses.beep()
_logger.exception("cannot encode")
def __paste_data(self, tns, data):
try:
tns.paste_data(data, zti=self)
except UnicodeEncodeError:
curses.flash()
curses.beep()
_logger.exception("cannot encode")
def __prep_wait(self):
if hasattr(curses, "selectr"):
selectr = curses.selectr
else:
selectr = self.__stdin_selectr
stdin_selected = self.__stdin_selected
if stdin_selected:
old_selectr = self.__stdin_selectr
else:
old_selectr = None
if selectr is not old_selectr:
if stdin_selected:
self.__stdin_selected = False
tnz.selector_del(old_selectr)
if selectr:
tnz.selector_set(selectr, data=self)
self.__stdin_selected = True
self.__stdin_selectr = selectr
    def __prog_mode(self):
        """Set the TTY to prog mode.
        Initialize curses if necessary.

        Idempotent: returns immediately when already in prog mode.
        On first use, initializes curses (cbreak/noecho/keypad),
        enables mouse events, sets up colors, and enables bracketed
        paste (non-Windows).  Records the new mode in
        self.__tty_mode (1 = prog).
        """
        if self.__tty_mode == 1: # prog
            return
        if self.autosize:
            # refresh the cached terminal size before (re)drawing
            (columns, lines) = os.get_terminal_size()
            self.autosize = (lines, columns)
        if not self.stdscr:
            # Initialize curses
            # The ESC key is important since it
            # allows the user to drop back to the
            # command line. The curses default delay
            # to wait to see if ESC (x1b) is the
            # start of an escape sequence or just
            # an escape key feels too long. Set
            # a more reasonable value. Note that
            # this must be done BEFORE curses.initscr
            # (or wrapper) is called. And the
            # following line of code will only set
            # it if it is not already set in the
            # environment.
            os.environ.setdefault("ESCDELAY", "25")
            try:
                self.stdscr = curses.initscr()
                Zti._stdscr = self.stdscr
                curses.cbreak()
                curses.noecho()
                curses.def_prog_mode()
                self.__endwin()
                self.stdscr.keypad(True)
                self.stdscr.notimeout(0) # faster ESC?
                # TODO only set up mouse if NOT "show" mode?
                mousemask = (curses.BUTTON1_CLICKED |
                             curses.BUTTON1_DOUBLE_CLICKED)
                masks = curses.mousemask(mousemask)
                if masks != 0: # if successful
                    if self.__shell_mousemask is None:
                        # remember previous mask so shell mode can
                        # restore it
                        self.__shell_mousemask = masks[1]
                self.__color_setup()
            except Exception:
                # failed mid-init: leave curses cleanly and reset
                if self.stdscr:
                    self.__endwin()
                    self.stdscr = None
                raise
        else:
            self.stdscr.clear() # avoid showing old screen
            self.stdscr.refresh() # for above clear
            # TODO only set up mouse if NOT "show" mode?
            mousemask = (curses.BUTTON1_CLICKED |
                         curses.BUTTON1_DOUBLE_CLICKED)
            masks = curses.mousemask(mousemask)
            if masks != 0: # if successful
                if self.__shell_mousemask is None:
                    self.__shell_mousemask = masks[1]
            self.__color_setup()
        curses.flushinp()
        if _osname != "Windows":
            self.__stdout.write("\x1b[?2004h") # bracketed paste ON
            self.__stdout.flush()
            if self.__sigwinch_selected:
                # wake blocked waits on terminal resize
                sigx.add_handler(signal.SIGWINCH, tnz.wakeup_wait)
        self.rewrite = True
        self.__tty_mode = 1
    def __r2d2(self, win, waitc, timeout):
        """Read and Display 3270 Terminal

        Main interactive loop: repeatedly reads tty input, applies
        keys/mouse to the current session, and repaints the screen.

        win -- curses window used for input and display
        waitc -- a _WAIT_* constant controlling how long the loop
            runs and what satisfies the wait
        timeout -- seconds to wait (must be > 0 unless waitc implies
            0 or forever)

        Returns 0 on timeout, 1 when the wait condition is
        satisfied, 10 when ddm (file transfer) data is pending,
        11 for a download action, 12 when the session is lost, or
        a key string (e.g. ESC) for the caller to process.
        """
        # See https://en.wikipedia.org/wiki/Table_of_keyboard_shortcuts
        # when assigning keyboard shortcuts.
        show = (waitc != _WAIT_GOTO)
        if waitc == _WAIT_NONE:
            timeout = 0
        elif waitc == _WAIT_FOREVER:
            timeout = -1
        elif waitc == _WAIT_GOTO:
            timeout = -1
        elif timeout <= 0:
            raise ValueError("Expected timeout>0")
        insmode = False
        if show:
            self.__prog_curs_vis = 0
        else:
            self.__prog_curs_vis = 1
        tout = timeout
        if timeout > 0:
            etime = time.time() + timeout
        self.__refresh()
        refresh = False
        paste = None
        try:
            while True:
                if self.cmdqueue: # plugin added cmd to process?
                    self.shell_mode()
                    return 1 # timeout?
                # non-blocking poll first so pending input is
                # processed before repainting
                cstr = self.__tty_read(win, 0, refresh=refresh)
                refresh = None
                if (cstr == "" and # session change
                        (waitc == _WAIT_KEYLOCK or
                         waitc == _WAIT_SCREEN)):
                    # TODO be more specific about
                    # keylock and screen change
                    return 1 # condition satisfied
                if not cstr:
                    tns = ati.ati.get_tnz()
                    if not tns:
                        return 12
                    if tns.seslost:
                        return 12
                    if tns.ddmdata or tns.ddmdict:
                        return 10 # have ddmdata
                    if self.downloadaction:
                        self.downloadaction = False
                        return 11 # have download action
                    if ((self.rewrite or
                         self.__dirty_ranges or
                         self.rewrite_cursor or
                         self.rewrite_keylock or
                         self.rewrite_status)):
                        self.__display(win, not show)
                        # show cursor
                        if not show:
                            begx, begy = self.twin_beg
                            endx, endy = self.twin_end
                            currow = tns.curadd // tns.maxcol + 1
                            curcol = tns.curadd % tns.maxcol + 1
                            if (currow > begy and
                                    currow <= endy and
                                    curcol > begx and
                                    curcol <= endx):
                                if insmode:
                                    self.__prog_curs_vis = 2 # very visible
                                else:
                                    self.__prog_curs_vis = 1 # visible
                                xpos, ypos = self.twin_loc
                                _logger.debug("before win.move")
                                win.move(ypos+currow-1, xpos+curcol-1)
                                _logger.debug("after win.move")
                    if self.cmdqueue: # plugin added cmd to process?
                        self.shell_mode()
                        return 1 # timeout?
                    # blocking read for the remaining timeout
                    cstr = self.__tty_read(win, tout)
                if waitc in (_WAIT_KEYLOCK, _WAIT_SCREEN):
                    if cstr == "":
                        # TODO be more specific about
                        # keylock and screen change
                        return 1 # condition satisfied
                if cstr is None:
                    return 0 # timeout
                tns = ati.ati.get_tnz()
                if not tns:
                    return 12 # seslost
                if tns.seslost:
                    return 12 # seslost
                if tns.ddmdata or tns.ddmdict:
                    return 10 # have ddmdata
                if self.downloadaction:
                    self.downloadaction = False
                    return 11 # have download action
                # check for Alt+letter shortcut
                altc = 0
                if isinstance(cstr, str):
                    if ((cstr and cstr.startswith('\x1b') and
                         len(cstr) == 2 and
                         cstr.lower() == cstr) or
                            (cstr and cstr.startswith("ALT_") and
                             len(cstr) == 5 and
                             str.isalpha(cstr[-1]))):
                        # scan the top row for an underlined match
                        # of the letter (in the host code page)
                        maxcol = tns.maxcol
                        alet1 = cstr[-1].lower()
                        alet2 = alet1.upper()
                        elet1 = tns.codec_info[0].encode(alet1)[0][0]
                        elet2 = tns.codec_info[0].encode(alet2)[0][0]
                        for i in range(0, maxcol):
                            if ((tns.plane_dc[i] != elet1 and
                                 tns.plane_dc[i] != elet2)):
                                continue
                            exn = tns.plane_eh[i]
                            if (exn & 0x0C0) == 0x0C0: # if underline
                                altc = i+1
                                break
                # process input
                if not cstr: # session update
                    pass
                elif cstr is True:
                    # maybe resize?
                    (maxy, maxx) = win.getmaxyx()
                    (columns, lines) = os.get_terminal_size()
                    if (maxy != lines) or (maxx != columns):
                        win.clear()
                        win.noutrefresh()
                        win.resize(lines, columns)
                        self.rewrite = True
                elif cstr == "\x1b" or cstr == "KEY_ESC": # ESC
                    _logger.debug("keyed Esc")
                    return cstr
                elif cstr == "KEY_RESIZE":
                    _logger.warning("KEY_RESIZE")
                    self.rewrite = True
                    try:
                        curses.resize_term(0, 0) # hack for Windows
                    except Exception:
                        pass
                    (maxy, maxx) = win.getmaxyx()
                    (columns, lines) = os.get_terminal_size()
                    if (maxy != lines) or (maxx != columns):
                        win.resize(lines, columns)
                        win.erase()
                        win.noutrefresh()
                    elif show:
                        curses.flash()
                elif cstr.startswith("\x1b[200~"): # bracketed paste
                    paste = cstr[6:]
                    if paste.endswith("\x1b[201~"):
                        paste = paste[:-6]
                        if paste:
                            if tns.pwait or tns.system_lock_wait:
                                curses.flash()
                                curses.beep()
                            else:
                                self.__paste_data(tns, paste)
                        paste = None
                elif cstr.endswith("\x1b[201~"): # bracketed paste end
                    paste += cstr[:-6]
                    if paste:
                        if tns.pwait or tns.system_lock_wait:
                            curses.flash()
                            curses.beep()
                        else:
                            self.__paste_data(tns, paste)
                    paste = None
                elif paste is not None: # more bracketed paste data
                    paste += cstr
                elif cstr == "KEY_SIC":
                    _logger.debug("keyed Shift+Insert")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    elif _osname != "Windows":
                        curses.flash()
                        curses.beep()
                    else:
                        # Windows: read clipboard via Win32 API
                        paste = None
                        fmt = 13 # CF_UNICODETEXT
                        k32 = ctypes.windll.kernel32
                        k32.GlobalLock.argtypes = [ctypes.c_void_p]
                        k32.GlobalLock.restype = ctypes.c_void_p
                        k32.GlobalUnlock.argtypes = [ctypes.c_void_p]
                        u32 = ctypes.windll.user32
                        u32.GetClipboardData.restype = ctypes.c_void_p
                        u32.OpenClipboard(0)
                        try:
                            if u32.IsClipboardFormatAvailable(fmt):
                                data = u32.GetClipboardData(fmt)
                                data_locked = k32.GlobalLock(data)
                                paste = ctypes.wstring_at(data_locked)
                                k32.GlobalUnlock(data_locked)
                        finally:
                            u32.CloseClipboard()
                        if paste:
                            self.__paste_data(tns, paste)
                        paste = None
                elif cstr == "\x0c": # Ctrl+L
                    _logger.debug("keyed Ctrl+L")
                    self.rewrite = True
                    (maxy, maxx) = win.getmaxyx()
                    (columns, lines) = os.get_terminal_size()
                    if (maxy != lines) or (maxx != columns):
                        # assume Ctrl+L is for size change
                        win.resize(lines, columns)
                        win.erase()
                        win.noutrefresh()
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.clear()
                elif len(cstr) == 1 and cstr.isprintable():
                    keylock = ati.value("KEYLOCK", trace=False)
                    if keylock == "1":
                        curses.flash()
                    else:
                        if insmode:
                            tns.key_ins_data(cstr, zti=self)
                        else:
                            self.__key_data(tns, cstr)
                elif cstr == "\r":
                    _logger.debug("keyed Enter")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.enter()
                        self.rewrite_keylock = True
                elif cstr == "\n":
                    _logger.debug("keyed Shift+Enter")
                    tns.key_newline()
                elif cstr == "\t":
                    _logger.debug("keyed Tab")
                    tns.key_tab(zti=self)
                    self.rewrite_cursor = True
                elif (cstr == "\b" or
                      cstr == "KEY_BACKSPACE" or
                      cstr == "\x7f"):
                    _logger.debug("keyed Backspace")
                    tns.key_backspace(zti=self)
                    self.rewrite_cursor = True
                elif (cstr == "\x0b" or # Ctrl+K
                      cstr == "\x1b[4~" or # Shift+End
                      cstr == "\x1b[F" or # Shift+End
                      cstr == "KEY_SEND"): # Shift+End
                    _logger.debug("keyed Shift+End or Ctrl+K")
                    tns.key_eraseeof(zti=self)
                elif cstr == "KEY_END": # End
                    _logger.debug("keyed End")
                    tns.key_end()
                    self.rewrite_cursor = True
                elif (cstr == "\x1b[1~" or
                      cstr == "\x1b H" or
                      cstr == "KEY_HOME"):
                    _logger.debug("keyed Home")
                    tns.key_home(zti=self)
                    self.rewrite_cursor = True
                elif cstr == "KEY_DC":
                    _logger.debug("keyed Delete")
                    tns.key_delete(zti=self)
                elif (cstr == "KEY_BTAB" or # Shift+Tab
                      cstr == "\x1b[~"): # Shift+Tab Windows->ssh
                    _logger.debug("keyed Shift+Tab")
                    tns.key_backtab(zti=self)
                    self.rewrite_cursor = True
                elif altc > 0:
                    # Alt+letter: jump to the matching underlined
                    # top-row item and press Enter
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.set_cursor_position(1, altc)
                        tns.enter()
                        self.rewrite_keylock = True
                        self.rewrite_cursor = True
                elif cstr == "KEY_PPAGE": # PgUp
                    _logger.debug("keyed PgUp")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf7()
                        self.rewrite_keylock = True
                elif cstr == "KEY_NPAGE": # PgDn
                    _logger.debug("keyed PgDn")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf8()
                        self.rewrite_keylock = True
                elif cstr == "KEY_UP":
                    _logger.debug("keyed Up")
                    tns.key_curup(zti=self)
                    self.rewrite_cursor = True
                elif cstr == "KEY_DOWN":
                    _logger.debug("keyed Dn")
                    tns.key_curdown(zti=self)
                    self.rewrite_cursor = True
                elif cstr == "KEY_LEFT":
                    _logger.debug("keyed Left")
                    tns.key_curleft(zti=self)
                    self.rewrite_cursor = True
                elif cstr == "KEY_RIGHT":
                    _logger.debug("keyed Right")
                    tns.key_curright(zti=self)
                    self.rewrite_cursor = True
                elif cstr in ("\x1b\x1b[D", # Alt+LEFT
                              "\x1bb", # Alt+LEFT (Terminal.app)
                              "\x1b[1;3D"): # Alt+LEFT (Windows)
                    _logger.debug("keyed Alt+Left")
                    tns.key_word_left()
                    self.rewrite_cursor = True
                elif cstr in ("\x1b\x1b[C", # Alt+RIGHT
                              "\x1bf", # Alt+RIGHT (Terminal.app)
                              "\x1b[1;3C"): # Alt+RIGHT (Windows)
                    _logger.debug("keyed Alt+Right")
                    tns.key_word_right()
                    self.rewrite_cursor = True
                elif cstr == "KEY_IC":
                    _logger.debug("keyed Insert")
                    insmode = (not insmode)
                    if insmode:
                        self.__prog_curs_vis = 2 # very visible
                    else:
                        self.__prog_curs_vis = 1 # visible
                elif (cstr == "\x1b1" or # ESC+1 (Alt+1)
                      cstr == "ALT_1" or # ESC+1 (Alt+1)
                      cstr == "\x1bKEY_IC" or # ESC+Insert (Alt+Insert)
                      cstr == "ALT_INS"): # Alt+Insert
                    _logger.debug("keyed Alt+1 or Alt+Insert")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pa1()
                        self.rewrite_keylock = True
                elif (cstr == "\x1b2" or # ESC+2 (Alt+2)
                      cstr == "ALT_2" or # ESC+2 (Alt+2)
                      cstr == "\x1b\x1b[1~" or # ESC+Home (Alt+Home)
                      cstr == "ALT_HOME"): # Alt+Home
                    _logger.debug("keyed Alt+2 or Alt+Home")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pa2()
                        self.rewrite_keylock = True
                elif (cstr == "\x1b3" or # ESC+3 (Alt+3)
                      cstr == "ALT_3"): # ESC+3 (Alt+3)
                    _logger.debug("keyed Alt+3")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pa3()
                        self.rewrite_keylock = True
                elif (cstr == "\x1ba" or # ESC+a (Alt+A)
                      cstr == "ALT_A" or # ESC+a (Alt+A)
                      cstr == "\x03"): # Ctrl+C
                    _logger.debug("keyed Alt+A or Ctrl+C")
                    tns.attn()
                elif (cstr == "\x1bc" or # ESC+c (Alt+c)
                      cstr == "ALT_C"): # ESC+c (Alt+c)
                    _logger.debug("keyed Alt+C")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.clear()
                        self.rewrite = True
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(1)" or
                      cstr == "\x1b[11~"):
                    _logger.debug("keyed F1")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf1()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(2)" or
                      cstr == "\x1b[12~"):
                    _logger.debug("keyed F2")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf2()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(3)" or
                      cstr == "\x1b[13~"):
                    _logger.debug("keyed F3")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf3()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(4)" or
                      cstr == "\x1b[14~"):
                    _logger.debug("keyed F4")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf4()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(5)":
                    _logger.debug("keyed F5")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf5()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(6)":
                    _logger.debug("keyed F6")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf6()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(7)":
                    _logger.debug("keyed F7")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf7()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(8)":
                    _logger.debug("keyed F8")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf8()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(9)":
                    _logger.debug("keyed F9")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf9()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(10)":
                    _logger.debug("keyed F10")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf10()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(11)":
                    _logger.debug("keyed F11")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf11()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(12)":
                    _logger.debug("keyed F12")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf12()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(13)":
                    _logger.debug("keyed Shift+F1")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf13()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(14)":
                    _logger.debug("keyed Shift+F2")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf14()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(15)" or # Shift+F3
                      cstr == "\x1b[25~"): # Shift+F3
                    _logger.debug("keyed Shift+F3")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf15()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(16)" or # Shift+F4
                      cstr == "\x1b[26~"): # Shift+F4
                    _logger.debug("keyed Shift+F4")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf16()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(17)" or # Shift+F5
                      cstr == "\x1b[28~"): # Shift+F5
                    _logger.debug("keyed Shift+F5")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf17()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(18)" or # Shift+F6
                      cstr == "\x1b[29~"): # Shift+F6
                    _logger.debug("keyed Shift+F6")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf18()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(19)" or # Shift+F7
                      cstr == "\x1b[31~"): # Shift+F7
                    _logger.debug("keyed Shift+F7")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf19()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(20)" or # Shift+F8
                      cstr == "\x1b[32~"): # Shift+F8
                    _logger.debug("keyed Shift+F8")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf20()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(21)" or # Shift+F9
                      cstr == "\x1b[33~"): # Shift+F9
                    _logger.debug("keyed Shift+F9")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf21()
                        self.rewrite_keylock = True
                elif (cstr == "KEY_F(22)" or # Shift+F10
                      cstr == "\x1b[34~"): # Shift+F10
                    _logger.debug("keyed Shift+F10")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf22()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(23)":
                    _logger.debug("keyed Shift+F11")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf23()
                        self.rewrite_keylock = True
                elif cstr == "KEY_F(24)":
                    _logger.debug("keyed Shift+F12")
                    if tns.pwait or tns.system_lock_wait:
                        curses.flash()
                        curses.beep()
                    else:
                        tns.pf24()
                        self.rewrite_keylock = True
                elif (cstr == "\x1b\x1b[5~" or # Alt+PgUp Putty
                      cstr == "\x1b[5;3~" or # Alt+PgUp Git Bash
                      cstr == "kPRV3" or # Alt_PgUp Windows->ssh
                      cstr == "ALT_PGUP" or # Alt+PgUp Windows
                      cstr == "\x1bKEY_PPAGE"): # Alt+PgUp z/OS
                    _logger.debug("keyed Alt+PgUp")
                    # switch to the previous session (wraps around)
                    session = ati.ati.session
                    sessions = ati.ati.sessions
                    pos = rexx.wordpos(session, sessions)
                    pos -= 1
                    if pos < 1:
                        pos = rexx.words(sessions)
                    session = rexx.word(sessions, pos)
                    ati.ati.session = session
                    if ati.ati.seslost:
                        return 12
                    self.rewrite = True
                elif (cstr == "\x1b\x1b[6~" or # Alt+PgDn Putty
                      cstr == "\x1b[6;3~" or # Alt+PgDn Git Bash
                      cstr == "kNXT3" or # Alt_PgDn Windows->ssh
                      cstr == "ALT_PGDN" or # Alt+PgDn Windows
                      cstr == "\x1bKEY_NPAGE"): # Alt+PgDn z/OS
                    _logger.debug("keyed Alt+PgDn")
                    # switch to the next session (wraps around)
                    session = ati.ati.session
                    sessions = ati.ati.sessions
                    pos = rexx.wordpos(session, sessions)
                    pos += 1
                    if pos > rexx.words(sessions):
                        pos = 1
                    session = rexx.word(sessions, pos)
                    ati.ati.session = session
                    if ati.ati.seslost:
                        return 12
                    self.rewrite = True
                elif cstr == "\x17": # Ctrl+W
                    _logger.debug("keyed Ctrl+W")
                    # drop (close) the current session
                    ati.drop("SESSION")
                    sessions = ati.ati.sessions
                    if not sessions:
                        return 12
                    session = ati.ati.session
                    ati.ati.session = session
                    if ati.ati.seslost:
                        return 12
                    self.rewrite = True
                elif cstr == "\x0f": # Ctrl+O
                    _logger.debug("keyed Ctrl+O")
                    # open URL under the cursor in a browser
                    wstr = tns.word_at(tns.curadd)
                    import webbrowser
                    if wstr.startswith("http://"):
                        webbrowser.open_new_tab(wstr)
                    elif wstr.startswith("https://"):
                        webbrowser.open_new_tab(wstr)
                    else:
                        curses.flash()
                        curses.beep()
                elif cstr == "KEY_MOUSE":
                    mouserr = False
                    try:
                        mid, mpx, mpy, mpz, mbstate = curses.getmouse()
                    except Exception:
                        mouserr = True
                    if ((not mouserr and
                         mpx >= self.twin_loc[0] and
                         mpy >= self.twin_loc[1] and
                         mpx < (self.twin_loc[0] + self.twin_end[0]) and
                         mpy < (self.twin_loc[1] + self.twin_end[1]))):
                        # click inside the 3270 screen area
                        xpos = mpx - self.twin_loc[0]
                        ypos = mpy - self.twin_loc[1]
                        xpos += self.twin_beg[0]
                        ypos += self.twin_beg[1]
                        if ((mbstate &
                                curses.BUTTON1_DOUBLE_CLICKED) != 0):
                            if tns.pwait or tns.system_lock_wait:
                                curses.flash()
                                curses.beep()
                            else:
                                tns.set_cursor_position(ypos+1, xpos+1)
                                tns.enter()
                                self.rewrite_keylock = True
                                self.rewrite_cursor = True
                        elif (mbstate &
                              curses.BUTTON1_CLICKED) != 0:
                            addr = (ypos * tns.maxcol) + xpos
                            if tns.is_protected(addr):
                                wstr = tns.word_at(addr)
                                if ((wstr.startswith("http://") or
                                     wstr.startswith("https://"))):
                                    import webbrowser
                                    webbrowser.open_new_tab(wstr)
                                else:
                                    tns.set_cursor_position(ypos+1,
                                                            xpos+1)
                                    self.rewrite_cursor = True
                            elif tns.is_pen_detectable(addr):
                                # right action ?
                                tns.set_cursor_position(ypos+1, xpos+1)
                                tns.enter()
                                self.rewrite_keylock = True
                                self.rewrite_cursor = True
                            else:
                                tns.set_cursor_position(ypos+1, xpos+1)
                                self.rewrite_cursor = True
                    elif (not mouserr and
                          mpx > self.__sessel_x and
                          mpy < len(self.__sessel)):
                        # click on the session-selector column
                        session = self.__sessel[mpy]
                        self.__sessel_y = mpy
                        if session:
                            oldses = ati.ati.session
                            if session == oldses:
                                session = None
                            if session:
                                ati.ati.session = session
                                if ati.ati.seslost:
                                    return
                            self.rewrite = True
                    else:
                        if not mouserr:
                            _logger.debug("KEY_MOUSE: %r",
                                          (mid, mpx, mpy, mpz, mbstate))
                        curses.flash()
                elif cstr == "\x00":
                    pass # Windows key? handled by Windows
                else:
                    _logger.warning("Unknown key: %r", cstr)
                if tout == 0:
                    return 0 # timeout
                if tout > 0:
                    tout = max(0, etime-time.time())
        finally:
            pass
def __refresh(self):
# put cursor in desired state before we may suspend
if self.__cur_curs_vis != self.__prog_curs_vis:
if self.__prog_curs_vis == 0:
self.stdscr.leaveok(True)
curses.curs_set(0)
else:
self.stdscr.leaveok(False)
curses.curs_set(self.__prog_curs_vis)
self.__cur_curs_vis = self.__prog_curs_vis
self.stdscr.refresh()
def __scale_size(self, maxrow, maxcol):
arows, acols = self.autosize
aspect1 = arows / acols
aspect2 = maxrow / maxcol
if aspect1 == aspect2:
srows = maxrow
scols = maxcol
elif aspect1 > aspect2:
srows = round(maxcol * aspect1)
scols = maxcol
else:
srows = maxrow
scols = round(maxrow / aspect1)
return srows, scols
def __session_check(self):
while 1:
seslost = ati.ati.seslost
if seslost:
self.__shell_mode()
exc_info = ati.ati.ses_exc
if exc_info:
msg = "".join(traceback.format_exception(*exc_info))
msg = msg.rstrip()
ati.say(msg)
self.do_say("SESLOST")
tns = ati.ati.get_tnz()
if not tns:
break
if not seslost and not tns.seslost:
break
session = ati.ati.session
ati.ati.session = session
    def __set_colors(self):
        """Create curses color pairs for the 3270 color values.

        Populates self.cv2attr, mapping (fg, bg) tuples of 3270
        color values (0 and 241-247) to curses attributes.  Chooses
        the closest available palette based on curses.COLORS.  May
        raise when curses cannot do color setup (the caller,
        __color_setup, handles that).
        """
        curses.start_color()
        curses.use_default_colors()
        colors = curses.COLORS
        color_pairs = curses.COLOR_PAIRS
        self.colors = colors
        _logger.debug("colors = %d", colors)
        _logger.debug("color_pairs = %d", color_pairs)
        # IBM PCOMM default colors:
        # name r, g, b
        # black 0, 0, 0 000000
        # red 240, 24, 24 f01818
        # green 36, 216, 48 24d830
        # yellow 255, 255, 0 ffff00
        # blue 120, 144, 240 7890f0
        # pink 255, 0, 255 ff00ff
        # turquoise 88, 240, 240 58f0f0
        # white 255, 255, 255 ffffff
        #
        # Converting from 256 to 1000:
        # name r, g, b
        # black 0, 0, 0
        # red 941, 94, 94
        # green 141, 847, 188
        # yellow 1000, 1000, 0
        # blue 471, 565, 941
        # pink 1000, 0, 1000
        # turquoise 345, 941, 941
        # white 1000, 1000, 1000
        #
        # curses defines the following:
        # COLOR_BLACK
        # COLOR_BLUE
        # COLOR_CYAN
        # COLOR_GREEN
        # COLOR_MAGENTA
        # COLOR_RED
        # COLOR_WHITE
        # COLOR_YELLOW
        #
        # approximate mapping from 3270 to curses:
        # COLOR_BLACK black
        # COLOR_BLUE blue
        # COLOR_CYAN turquoise
        # COLOR_GREEN green
        # COLOR_MAGENTA pink
        # COLOR_RED red
        # COLOR_WHITE white
        # COLOR_YELLOW yellow
        #
        # For 16 + 36*r + 6*g + b...
        # Converting from 256 to 5:
        # name r, g, b * **
        # black 0, 0, 0 16 16
        # red 4, 0, 0 160 196
        # green 0, 4, 0 40 77
        # yellow 5, 5, 0 226 226
        # blue 2, 2, 4 104 111
        # pink 5, 0, 5 201 201
        # turquoise 1, 4, 4 80 123
        # white 5, 5, 5 231 231
        # * Using FLOOR function
        # ** Using ROUND function
        if colors >= 264:
            # enough colors to define our own exact PCOMM palette
            black_rgb = (0, 0, 0)
            red_rgb = (941, 94, 94)
            green_rgb = (141, 847, 188)
            yellow_rgb = (1000, 1000, 0)
            blue_rgb = (471, 565, 941)
            pink_rgb = (1000, 0, 1000)
            turquoise_rgb = (345, 941, 941)
            white_rgb = (1000, 1000, 1000)
            color_start = 256
            curses.init_color(color_start, *black_rgb)
            curses.init_color(color_start + 1, *blue_rgb)
            curses.init_color(color_start + 2, *red_rgb)
            curses.init_color(color_start + 3, *pink_rgb)
            curses.init_color(color_start + 4, *green_rgb)
            curses.init_color(color_start + 5, *turquoise_rgb)
            curses.init_color(color_start + 6, *yellow_rgb)
            curses.init_color(color_start + 7, *white_rgb)
            cv2color = {0: color_start, # default/black
                        241: color_start + 1, # blue
                        242: color_start + 2, # red
                        243: color_start + 3, # pink
                        244: color_start + 4, # green
                        245: color_start + 5, # turquoise
                        246: color_start + 6, # yellow
                        247: color_start + 7, # white
                        }
        elif colors >= 256:
            # xterm-256color: nearest palette entries
            cv2color = {0: 16, # default/black
                        241: 104, # blue
                        242: 196, # red
                        243: 201, # pink
                        244: 40, # green
                        245: 80, # turquoise
                        246: 226, # yellow
                        247: 231, # white
                        }
        else:
            # basic 8/16-color terminal: approximate mapping
            cv2color = {0: curses.COLOR_BLACK, # default/black
                        241: curses.COLOR_BLUE, # blue
                        242: curses.COLOR_RED, # red
                        243: curses.COLOR_MAGENTA, # pink
                        244: curses.COLOR_GREEN, # green
                        245: curses.COLOR_CYAN, # turquoise
                        246: curses.COLOR_YELLOW, # yellow
                        247: curses.COLOR_WHITE, # white
                        }
        # NOTE: pair numbering depends on the dict insertion order
        # above; do not reorder the cv2color entries.
        color_pair = 0
        for fgid, fg_v in cv2color.items():
            if fgid == 0:
                fg_v = cv2color[244] # green default?
            for bgid, bg_v in cv2color.items():
                if fg_v == bg_v:
                    continue
                color_pair += 1
                curses.init_pair(color_pair, fg_v, bg_v)
                attr = curses.color_pair(color_pair)
                self.cv2attr[(fgid, bgid)] = attr
    def __shell_mode(self, init=False):
        """Set the TTY to shell mode.
        When init==True, curses will
        be initialized if it is not
        yet initialized... and
        perform what is needed to
        get the current terminal
        size recognized.

        Records the new mode in self.__tty_mode (0 = shell).
        """
        if init:
            # need to make sure that
            # terminal size is refreshed
            # At least on z/OS, seem to
            # have to call initscr to
            # get that to happen.
            if not self.stdscr:
                pass
            elif self.__tty_mode == 0:
                # already in shell mode; force re-init of curses
                self.stdscr = None
            else:
                self.__endwin()
                self.stdscr = None
            # bounce through prog mode so curses (re)initializes
            self.__prog_mode()
            self.__endwin()
            if self.__shell_mousemask is not None:
                # restore the mouse mask prog mode replaced
                curses.mousemask(self.__shell_mousemask)
            curses.flushinp()
            self.__tty_mode = 0
            self.__cur_curs_vis = 1 # endwin set
            return
        if self.__tty_mode == 0: # shell
            return
        if _osname != "Windows":
            self.__stdout.write("\x1b[?2004l") # bracketed paste OFF
            self.__stdout.flush()
        self.__endwin()
        if self.__shell_mousemask is not None:
            curses.mousemask(self.__shell_mousemask)
        curses.flushinp()
        self.__cur_curs_vis = 1 # endwin set
        self.__tty_mode = 0
        # I have seen behavior where traceback.print_stack
        # does not end up printing anything without doing the
        # following flush.
        self.__stdout.flush()
def __tty_read(self, win, timeout, refresh=None):
    """Read from tty.

    Considers session i/o while waiting for input from tty.

    Returns a null string if something happened with the
    current session, True when another selected source (e.g.
    wakeup_fd) fired, None when a zero timeout expires, the
    key string read, or "\\x03" on KeyboardInterrupt.
    """
    if refresh or (timeout != 0 and refresh is None):
        self.__refresh()  # push pending screen updates before waiting
    try:
        tns = ati.ati.get_tnz()
        k = None
        win.timeout(0)  # make getkey non-blocking
        other_selected = False
        self.__prep_wait()
        if self.__stdin_selectr:
            tout = None  # stdin is selectable; wait can block until activity
        else:
            tout = 0.04  # effective keyboard response time
        if timeout is not None and timeout >= 0:
            etime = time.time() + timeout  # absolute deadline
            if tout is None or timeout < tout:
                tout = timeout
        else:
            etime = None  # no deadline
        while True:
            try:
                k = win.getkey()
                if k == "\x1b":  # ESC - collect rest of escape sequence
                    while True:
                        k += win.getkey()
                        # TODO stop loop if char says so
            except curses.error:
                pass  # no (more) input available right now
            if k is not None:
                return k
            if other_selected:
                # must be for wakeup_fd
                return True
            waitrv = self.__wait(tns, tout, zti=self)
            if waitrv is True:
                return ""  # session activity occurred
            if waitrv is None:
                # stdin or wakeup_fd
                other_selected = True
            if tout == 0:
                return None  # zero timeout expired with no input
            if etime is not None:
                tout = min(tout, max(0, etime-time.time()))
    except KeyboardInterrupt:
        return "\x03"  # Ctrl+c
def __tty_write(self, *argv, spos=0, epos=None):
    """Write to the tty (terminal).

    Parameters similar to addstr:
        __tty_write(text[, attr])
        __tty_write(y, x, text[, attr])
    text may be a string or bytes
    y is the zero-based row
    x is the zero-based column
    attr is the curses attributes
    spos/epos select the [spos:epos] slice of text to write.
    When y and x are not specified, text is
    written at the current cursor position.
    When attr is not specified, text is written
    with the current attributes.
    """
    self.__prog_mode()
    if self.__cur_curs_vis != 0:
        # hide the cursor while painting
        self.stdscr.leaveok(True)
        curses.curs_set(0)
        self.__cur_curs_vis = 0
    if len(argv) <= 2:
        # form: __tty_write(text[, attr]) - write at current position
        instr = argv[0]
        if len(argv) > 1:
            attr = argv[1]
        else:
            attr = None
        if spos == 0 and (epos is None or epos == len(instr)):
            if attr is None:
                self.stdscr.addstr(instr)
            else:
                self.stdscr.addstr(instr, attr)
        elif epos is None:
            if attr is None:
                self.stdscr.addstr(instr[spos:])
            else:
                self.stdscr.addstr(instr[spos:], attr)
        else:
            if attr is None:
                self.stdscr.addstr(instr[spos:epos])
            else:
                self.stdscr.addstr(instr[spos:epos], attr)
    else:  # len(argv) >= 3: form __tty_write(y, x, text[, attr])
        ypos = argv[0]
        xpos = argv[1]
        instr = argv[2]
        if len(argv) > 3:
            attr = argv[3]
        else:
            attr = None
        rows, cols = self.stdscr.getmaxyx()
        if ypos >= rows:
            return  # entirely below the screen
        if xpos >= cols:
            return  # entirely right of the screen
        if epos is None:
            width = len(instr) - spos
        else:
            width = epos - spos
        if width > (cols - xpos):  # tail of string too long
            epos = spos + (cols - xpos)  # shorten
        insch = None
        if ypos >= (rows - 1) and (xpos + width) >= (cols - 1):
            # curses does not like use of bottom right:
            # addstr into the bottom-right cell errors/scrolls, so
            # hold back the final character and insch it afterwards
            if epos is None:
                epos = len(instr) - 1
            else:
                epos -= 1
            insch = instr[epos:epos+1]  # bottom-right char
        if spos == 0 and (epos is None or epos == len(instr)):
            if attr is None:
                self.stdscr.addstr(ypos, xpos, instr)
            else:
                self.stdscr.addstr(ypos, xpos, instr, attr)
        elif epos is None:
            if attr is None:
                self.stdscr.addstr(ypos, xpos, instr[spos:])
            else:
                self.stdscr.addstr(ypos, xpos, instr[spos:], attr)
        elif attr is None:
            self.stdscr.addstr(ypos, xpos, instr[spos:epos])
        else:
            self.stdscr.addstr(ypos, xpos, instr[spos:epos], attr)
        if insch:  # if skipped bottom-right char
            # use insch to fill in bottom-right char
            if attr is None:
                self.stdscr.insch(insch)
            else:
                self.stdscr.insch(insch, attr)
def __update_prompt(self):
    """Refresh the command prompt from the active SESSION name,
    prefixing "(paused) " while a script is being run."""
    prompt = "Session: " + ati.ati.session + "> "
    if self.__in_script:
        prompt = "(paused) " + prompt
    self.prompt = prompt
def __wait(self, tns, timeout=0, zti=None, key=None):
    """Prepare for waiting, then delegate to tns.wait and return its result."""
    self.__prep_wait()
    result = tns.wait(timeout, zti=zti, key=key)
    return result
def __write_blanks(self, tns, saddr, eaddr):
    """Write blanks over the buffer address range - used to paint
    positions where field attributes are (they display as blanks).
    """
    has_color = min(tns.colors, self.colors) >= 8
    # Determine location on screen
    xpos1, ypos1 = self.twin_beg
    xpos2, ypos2 = self.twin_end
    xpos, ypos = self.twin_loc
    if has_color:
        attr = self.cv2attr[(0, 0)]  # default fg/bg pair
    else:
        attr = curses.A_NORMAL
    # iterate one row-span at a time over the address range
    for rsp, rep in tns.iterow(saddr, eaddr):
        absy = rsp // tns.maxcol
        if absy < ypos1 or absy >= ypos2:
            continue  # row not inside the visible window
        gsx = rsp % tns.maxcol
        gex = (rep-1) % tns.maxcol
        if gex < xpos1 or gsx >= xpos2:
            continue  # columns entirely outside the visible window
        # Trim displayed characters, if needed
        if gsx < xpos1:
            rsp += xpos1 - gsx
            # NOTE(review): setting gsx to the window's RIGHT edge
            # (xpos2) after a left-side trim looks wrong - xpos1 was
            # presumably intended. Confirm against upstream tnz.
            gsx = xpos2
        if gex >= xpos2:
            rep -= gex - xpos2 + 1
        gsx += xpos   # window-relative -> screen column
        absy += ypos  # window-relative -> screen row
        clen = rep - rsp
        blanks = " " * clen
        self.__tty_write(absy, gsx, blanks, attr)
def __write_group(self, tns, faddr, caddr1, endpos):
    """Write group of field characters.

    The group of characters all have the
    same attributes and they are on a single
    line/row.  faddr is the owning field's attribute address;
    caddr1/endpos bound the character range.
    """
    has_color = min(tns.colors, self.colors) >= 8
    # Determine location on screen
    xpos1, ypos1 = self.twin_beg
    xpos2, ypos2 = self.twin_end
    xpos, ypos = self.twin_loc
    absy = caddr1 // tns.maxcol
    if absy < ypos1 or absy >= ypos2:
        return  # row not inside the visible window
    gsx = caddr1 % tns.maxcol
    gex = (endpos - 1) % tns.maxcol
    if gex < xpos1 or gsx >= xpos2:
        return  # columns entirely outside the visible window
    # Trim displayed characters, if needed
    if gsx < xpos1:
        caddr1 += xpos1 - gsx
        # NOTE(review): xpos1 looks intended here (same pattern as
        # __write_blanks) - confirm against upstream tnz.
        gsx = xpos2
    if gex >= xpos2:
        endpos -= gex - xpos2 + 1
    gsx += xpos   # window-relative -> screen column
    absy += ypos  # window-relative -> screen row
    # Determine attributes - get field attributes
    fattr = tns.plane_fa[faddr]
    f_display = tns.is_displayable_attr(fattr)
    if not f_display:
        # non-displayable field: paint blanks instead of the text
        clen = endpos - caddr1
        blanks = " " * clen
        if has_color:
            attr = self.cv2attr[(0, 0)]
        else:
            attr = curses.A_INVIS  # ?
        self.__tty_write(absy, gsx, blanks, attr)
        return
    f_protected = tns.is_protected_attr(fattr)
    f_intensified = tns.is_intensified_attr(fattr)
    f_normal = tns.is_normal_attr(fattr)
    f_eh = tns.plane_eh[faddr]
    f_fg = tns.plane_fg[faddr]
    f_bg = tns.plane_bg[faddr]
    # Determine attributes - get character attributes
    c_eh = tns.plane_eh[caddr1]
    c_fg = tns.plane_fg[caddr1]
    c_bg = tns.plane_bg[caddr1]
    # Determine attributes - combine attributes
    # (nonzero character-level attributes override field-level ones)
    if c_eh:
        aeh = c_eh
    else:
        aeh = f_eh
    if c_fg:
        afg = c_fg
    else:
        afg = f_fg
    if c_bg:
        abg = c_bg
    else:
        abg = f_bg
    attr = 0
    if aeh == 0:  # Default
        pass
    elif aeh == 240:  # xF0 Normal
        attr |= curses.A_NORMAL
    elif aeh == 241:  # xF1 Blink
        attr |= curses.A_BLINK
    elif aeh == 242:  # xF2 Reverse Video
        attr |= curses.A_REVERSE
    elif aeh == 244:  # xF4 Underscore
        attr |= curses.A_UNDERLINE
    if afg == 0:
        # no explicit color: derive one from the field attributes
        if f_intensified and not f_protected:
            if tns.extended_color_mode():
                afg = 247  # F7 White
            else:
                afg = 242  # F2 Red
        elif f_normal and f_protected:
            if tns.extended_color_mode():
                afg = 244  # F4 Green
            else:
                afg = 245  # F5 Turquoise
        elif f_intensified and f_protected:
            afg = 247  # F7 White
    if afg == 0:
        afg = 244  # F4 Green (fallback default)
    if has_color:
        # NOTE(review): ^= toggles bits already set in attr; |= may
        # have been intended (matches the additive style above) -
        # confirm against upstream tnz.
        attr ^= self.cv2attr[(afg, abg)]
    # Write to screen
    if endpos == tns.buffer_size:
        text = tns.scrstr(caddr1, 0)  # 0 means wrap to buffer end
    else:
        text = tns.scrstr(caddr1, endpos)
    self.__tty_write(absy, gsx, text, attr)
# Readonly properties
@property
def prog_mode(self):
    """Read-only boolean. True for prog (curses) mode. False for shell mode.
    """
    return self.__tty_mode != 0
class Download():
    """
    A Download instance represents a file downloaded from
    the remote system.

    Data is spooled into a NamedTemporaryFile.  The instance
    tracks transfer statistics (byte count, start/end time) and
    an optional post-download action parsed from the ".ztiv"/
    ".ztie" command that initiated the download.
    """

    def __del__(self):
        # Best-effort cleanup of the temporary file.
        self.remove()

    def __init__(self, zti, tns):
        """
        Create a Download instance associated with the input
        Zti instance and Tnz instance.
        """
        # delete=False is really for Windows
        # in Windows, cannot do much with a file
        # until it is closed. So, does not help
        # to delete it when it is closed.
        if not zti.registered_atexit:
            zti.registered_atexit = True
            atexit.register(zti.atexit)
        self.start_time = time.time()
        self.end_time = None       # set on close()
        self.bytecnt = 0           # total bytes written via write()
        self.name = tns.name
        self.failed = False
        self.closed = False
        self.removed = False
        self.lastcmd = tns.lastcmd
        self.downloadaction = None  # command to run on the file, if any
        self.actiontaken = False
        self.zti = zti
        self.tns = tns
        self.file = tempfile.NamedTemporaryFile(delete=False)
        zti.downloads.append(self)
        lastcmd = self.lastcmd
        # ".ztiv <cmd>"/".ztie <cmd>" requests an action against the
        # downloaded file; append the temp file name to that command.
        if (((lastcmd.lower().startswith(".ztiv ") or
              lastcmd.lower().startswith(".ztie ")) and
             rexx.words(lastcmd) > 1)):
            self.downloadaction = " ".join([
                rexx.subword(lastcmd, 2), self.file.name])

    # Methods

    def close(self):
        """Close the spool file and record the end time (idempotent)."""
        if self.end_time is None:
            self.end_time = time.time()
        if not self.closed:
            self.closed = True
            self.tns = None
            self.file.close()
            zti = self.zti
            # flag the owning Zti that a download action is pending
            if zti and not zti.downloadaction:
                if self.downloadaction:
                    zti.downloadaction = True

    def error(self):
        """Mark the download as failed and close it."""
        self.failed = True
        self.close()

    def flush(self):
        self.file.flush()

    def read(self, length):
        return self.file.read(length)

    def remove(self):
        """Close and delete the temporary file, then detach from Zti."""
        self.close()
        if not self.removed:
            try:
                os.remove(self.file.name)
            except PermissionError:
                # e.g. Windows with the file still in use; keep it
                traceback.print_exc()
            except FileNotFoundError:
                self.removed = True  # already gone - treat as removed
            else:
                self.removed = True
        if self.removed and self.zti:
            zti = self.zti
            self.zti = None
            try:
                zti.downloads.remove(self)
            except ValueError:
                pass  # already detached

    def start_upload(self):
        """Reopen the spooled file for reading (to upload it)."""
        self.file = open(self.file.name, "rb")

    def write(self, data):
        self.file.write(data)
        self.bytecnt += len(data)

    # Private methods

    def __repr__(self):
        # Summarize status, pending action, and transfer rate.
        if self.failed:
            stat = "Failed"
        elif self.closed:
            stat = ""
        elif self.tns.seslost:
            # session went away mid-download
            self.failed = True
            self.close()
            stat = "Failed"
        else:
            stat = "Open"
        if self.downloadaction:
            actcmd = self.downloadaction
        else:
            actcmd = self.lastcmd
        if self.actiontaken:
            actdone = "+"
        else:
            actdone = "_"
        if self.end_time and not stat:
            total_time = self.end_time - self.start_time
            rate = self.bytecnt / total_time
            if rate < 1024:
                stat = f"({rate} B/s)"
            elif rate < 10*1024*1024:
                rate //= 1024
                stat = f"({rate} KB/s)"
            else:
                rate //= 1024*1024
                stat = f"({rate} MB/s)"
        mtime = time.ctime(os.path.getmtime(self.file.name))
        return " ".join((str(mtime),
                         self.file.name,
                         self.name,
                         actdone,
                         actcmd,
                         stat))
# Internal data and other attributes
_zti = None     # presumably the singleton instance returned by create() - confirm where it is set
_stdscr = None  # curses screen saved by create()
class _ZtiAbort(BaseException):
    """Internal abort signal.

    Derives from BaseException so generic ``except Exception``
    handlers do not swallow it.
    """
    pass
# Functions
def create():
    """Return the zti instance, creating it if curses allows.

    Returns None when curses cannot be initialized (e.g. no
    usable terminal).
    """
    if Zti._zti:
        return Zti._zti
    try:
        stdscr = curses.initscr()
    except curses.error:
        _logger.exception("initscr error")
        return None
    Zti._stdscr = stdscr
    try:
        try:
            curses.cbreak()
        except curses.error:
            _logger.exception("cbreak error")
            return None
    finally:
        # always leave the terminal in shell state after probing
        curses.endwin()
    return Zti()
def main():
    """Process zti command.

    Command-line entry point: parses arguments, optionally runs the
    user's rc file via SOURCE, queues an initial GOTO when a host is
    given, and enters the interactive command loop.
    """
    from argparse import ArgumentParser
    parser = ArgumentParser(
        prog="zti",
        description="Z Terminal Interface",
        add_help=True)
    if __version__:
        parser.add_argument("--version",
                            action="version",
                            version=f"%(prog)s {__version__}")
    parser.add_argument("--nolog",
                        action="store_true",
                        help="Do not set LOGDEST to zti.log")
    # --noztirc and --rcfile are mutually exclusive
    rcgroup = parser.add_mutually_exclusive_group()
    rcgroup.add_argument("--noztirc",
                         action="store_true",
                         help="Do not SOURCE .ztirc in home directory")
    rcgroup.add_argument("--rcfile",
                         metavar="rcfile",
                         default="~/.ztirc",
                         help="Filename to run using SOURCE")
    parser.add_argument("host", nargs="?",
                        help="hostname[:port] to connect/go to")
    args = parser.parse_args()
    if not args.nolog:
        ati.ati.logdest = "zti.log"
    zti = Zti()
    ati.ati.maxlostwarn = 0
    if not args.noztirc:
        try:
            zti.do_source(args.rcfile)
        except Exception:
            # best-effort: presumably a bad rc file should not block
            # startup - errors are ignored
            pass
    if args.host:
        # start connected to the requested host
        zti.cmdqueue.append(" ".join(("goto", args.host)))
        zti.single_session = True
    if args.rcfile != "~/.ztirc":
        # an explicit rc file also implies single-session operation
        zti.single_session = True
    intro = """
Welcome to the Z terminal interface!
Enter the GOTO hostname command to get started and
use the Esc key to get back to this command prompt.
Use the HELP and HELP KEYS commands for more information.
"""
    if zti.plugins:
        intro = f"{intro}Installed plugins: {zti.plugins}\n"
    else:
        intro = f"{intro}No plugins installed.\n"
    zti.cmdloop(intro)
# Private data
# Enumeration of internal wait states (what event is being awaited).
_WAIT_NONE = 0
_WAIT_KEYLOCK = 1
_WAIT_SCREEN = 2
_WAIT_FOREVER = 3
_WAIT_GOTO = 4
_WAIT_DSR = 5
_WAIT_MORE = 6
_WAIT_HOLDING = 7
_WAIT_NOT_MORE = 8
_WAIT_NOT_HOLDING = 9
_osname = platform.system()  # e.g. "Windows"/"Linux"; used for OS-specific TTY handling
_logger = logging.getLogger("tnz.zti")  # module logger
|
# This program prints and counts how many unique words are in the romeo.txt file
txt = "c:/Users/TechFast Australia/Dropbox/Programming/Python/List Exercises/romeo.txt"
uniqueWords = []  # unique words, in order of first appearance
seen = set()      # set mirror of uniqueWords for O(1) membership tests
# "with" guarantees the file is closed even if an error occurs
with open(txt, "r") as f:
    for line in f:                 # loop through romeo.txt line by line
        for word in line.split():  # split each line into words
            if word not in seen:   # only record first occurrence
                seen.add(word)
                uniqueWords.append(word)
print("There are " + str(len(uniqueWords)) + " unique words used in this file.")
print(uniqueWords)
"""团队习题集实现
Revision ID: 22ed937444ea
Revises: aacb30cf5f40
Create Date: 2020-10-27 16:49:50.311575
"""
import ormtypes
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '22ed937444ea'        # this migration
down_revision = 'aacb30cf5f40'   # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the nullable JSON-pickle
    ``team_problemsets`` column to the ``team`` table."""
    problemsets_column = sa.Column(
        'team_problemsets', ormtypes.json_pickle.JsonPickle(), nullable=True)
    op.add_column('team', problemsets_column)
def downgrade():
    """Revert the migration: drop the ``team_problemsets`` column
    from the ``team`` table."""
    op.drop_column('team', 'team_problemsets')
|
import requests
import json
class AlreadySent(Exception):
    """Raised when a Query is modified after its results were fetched."""
    pass
class Query:
    """Builder for an XTDB datalog query executed against a Node.

    Typical use: ``Node().find("?e").where("?e :attr ?v")`` then
    iterate the Query for results.
    """
    def __init__(self, uri="http://localhost:3000", node=None):
        # uri is only consulted when no Node instance is supplied
        if node:
            self.node = node
        else:
            self.node = Node(uri)
        self._find_clause = None    # text of the :find clause
        self._where_clauses = []    # accumulated :where clause texts
        self._values = None         # result list once the query has run
        self.error = None           # pretty-printed JSON error, if any
    def find(self, clause):
        """Set the :find clause; raises AlreadySent once results exist."""
        if self._values:
            raise AlreadySent()
        self._find_clause = clause
        return self
    def where(self, clause):
        """Append a :where clause; raises AlreadySent once results exist."""
        if self._values:
            raise AlreadySent()
        self._where_clauses.append(clause)
        return self
    def values(self):
        """Build the EDN query, send it, and return the result list.

        A dict response is treated as an error: it is stored
        (pretty-printed) in ``self.error`` and [] is returned.
        """
        if not self._where_clauses:
            raise Exception("No Where Clause")
        _query = """
{
:find [%s]
:where [
[%s]
]
}
""" % (self._find_clause, ']\n['.join(self._where_clauses))
        result = self.node.query(_query)
        if type(result) is dict:
            self.error = json.dumps(result, indent=4, sort_keys=True)
            return []
        return result
    def __iter__(self):
        # runs the query; iterating again re-sends it
        self._values = self.values()
        return self
    def __next__(self):
        # NOTE(review): pops from the end, so results are yielded in
        # reverse of the order the server returned them - confirm
        # whether that ordering is intentional.
        if len(self._values):
            return self._values.pop()
        else:
            raise StopIteration
class Node:
    """Thin client for the XTDB HTTP API rooted at ``uri``."""

    def __init__(self, uri="http://localhost:3000"):
        self.uri = uri

    def find(self, find_clause):
        """Start a Query against this node with the given :find clause."""
        return Query(node=self).find(find_clause)

    def call_rest_api(self, action, params):
        """GET ``<uri>/_xtdb/<action>`` with query params; return parsed JSON."""
        endpoint = "{}/_xtdb/{}".format(self.uri, action)
        headers = {
            "Content-Type": "application/json; charset=utf-8",
            "Accept": "application/json",
        }
        return requests.get(endpoint, headers=headers, params=params).json()

    def submitTx(self, tx):
        """POST a transaction to submit-tx; return the parsed JSON response."""
        endpoint = "{}/_xtdb/{}".format(self.uri, "submit-tx")
        headers = {
            "Content-Type": "application/json; charset=utf-8",
            "Accept": "application/json",
        }
        return requests.post(endpoint, headers=headers, json=tx).json()

    def put(self, rec):
        """Submit a single put operation for the given record."""
        return self.submitTx({"tx-ops": [["put", rec]]})

    def delete(self, where):
        """Submit a single delete operation."""
        return self.submitTx({"tx-ops": [["delete", where]]})

    def evict(self, id):
        """Submit a single evict operation for the given entity id."""
        return self.submitTx({"tx-ops": [["evict", id]]})

    def query(self, query):
        """POST an EDN query (must be a ``{...}`` form); return parsed JSON."""
        query = query.strip()
        # basic syntax check
        assert query.startswith("{") and query.endswith("}")
        endpoint = "{}/_xtdb/{}".format(self.uri, "query")
        headers = {"Accept": "application/json", "Content-Type": "application/edn"}
        rsp = requests.post(endpoint, headers=headers, data="{:query %s}" % query)
        return rsp.json()

    # -- TODO: explicit optional params for these, as named Python kwargs.
    # One-line wrappers over call_rest_api, one per REST action.
    def status(self, params):
        return self.call_rest_api("status", params)

    def entity(self, params):
        return self.call_rest_api("entity", params)

    def entityHistoryTrue(self, params):
        return self.call_rest_api("entity?history=true", params)

    def entityTx(self, params):
        return self.call_rest_api("entity-tx", params)

    def attributeStats(self, params):
        return self.call_rest_api("attribute-stats", params)

    def sync(self, params):
        return self.call_rest_api("sync", params)

    def awaitTx(self, params):
        return self.call_rest_api("await-tx", params)

    def awaitTxTime(self, params):
        return self.call_rest_api("await-tx-time", params)

    def txLog(self, params):
        return self.call_rest_api("tx-log", params)

    def txCommitted(self, params):
        return self.call_rest_api("tx-committed", params)

    def latestCompletedTx(self, params):
        return self.call_rest_api("latest-completed-tx", params)

    def latestSubmittedTx(self, params):
        return self.call_rest_api("latest-submitted-tx", params)

    def activeQueries(self, params):
        return self.call_rest_api("active-queries", params)

    def recentQueries(self, params):
        return self.call_rest_api("recent-queries", params)

    def slowestQueries(self, params):
        return self.call_rest_api("slowest-queries", params)
|
# import sys
# import json
import time
from User import User
from SerialTransceiver import SerialTransceiver
import _thread
if __name__ == "__main__":
    # userinfo = getinfo()
    # user = User(int(userinfo['instanceID'],0),userinfo['port'])
    user = User()  # gather the user's info (instance ID / serial port)
    #user.print()
    serialTransceiver = SerialTransceiver(user)
    # receive on a background thread; send on the main thread
    _thread.start_new_thread(serialTransceiver.recieve,())
    while True:  # send loop runs until the process is killed
        #time.sleep(1)
        serialTransceiver.sendMsg()
    # (kept for reference) earlier raw-pyserial send/receive loop:
    # usermsg = input(f'0x0{user.instanceID}:')
    # ser.write(f'0x0{user.instanceID} {time.asctime( time.localtime(time.time()))}: {usermsg}\n'.encode('utf8'))
    # # Wait until there is data waiting in the serial buffer
    # if ser.in_waiting > 0:
    #     # Read data out of the buffer until a carraige return / new line is found
    #     serialString = ser.readline()
    #     # Print the contents of the serial data
    #     try:
    #         print(serialString.decode("utf8"))
    #     except:
    #         pass
|
#!/usr/bin/env python3
import subprocess
import os
from shutil import copy
import sys
import shutil
import time
from itertools import cycle
import logging
from logging import info, error
from typing import NamedTuple, List, Sequence
import json
from src.file_io import File
from src.liberty_parser import LibertyParser, LibertyResult
from src.netlist import Netlist
from src.cirkopt_json import ObjectEncoder
LIBERATE_DEFAULT_CMD: str = "liberate"
LiberateCompletedProcess = NamedTuple(
"LiberateCompletedProcess",
[("args", List[str]), ("returncode", int), ("stdout", str)],
)
def _waiting_animation(complete_condition, refresh_rate_Hz: int = 10) -> None:
if logging.getLogger().getEffectiveLevel() > logging.INFO:
# Don't print anything
while complete_condition() is None:
# Wait for completion
pass
return
# Copy of [1]
# [1] https://stackoverflow.com/questions/22029562/python-how-to-make-simple-animated-loading-while-process-is-running
for c in cycle(["|", "/", "-", "\\"]):
if complete_condition() is not None:
break
loading_msg = "Running Liberate..." + c
sys.stdout.write("\r" + loading_msg)
sys.stdout.flush()
time.sleep(1 / refresh_rate_Hz)
# Clear text then print message
sys.stdout.write("\r" + " " * len(loading_msg) + "\r")
info("Liberate completed.")
def _run_liberate(
    candidates: Sequence[Netlist],
    tcl_script: str,
    liberate_dir: str,
    netlist_dir: str,
    liberate_log: str,
    out_dir: str,
    ldb_name: str,
    liberate_cmd: str = LIBERATE_DEFAULT_CMD,
) -> LiberateCompletedProcess:
    """Run Cadence Liberate.

    Characterizes SPICE (.sp) files and generates a Liberty library
    (.lib or .ldb).  The candidate netlists are written to
    netlist_dir and their cell names are handed to the TCL script
    through environment variables.

    Raises FileNotFoundError/NotADirectoryError for bad paths and
    subprocess.CalledProcessError when Liberate exits non-zero;
    exits the process when liberate_cmd is not executable.
    """
    # TODO: Run setup script before
    if not os.path.isfile(tcl_script):
        raise FileNotFoundError(tcl_script)
    if not os.path.isdir(liberate_dir):
        raise NotADirectoryError(liberate_dir)
    if shutil.which(liberate_cmd) is None:
        error(
            f"'{liberate_cmd}' does not appear to be an executable, "
            + "did you forget to source the setup script?"
        )
        sys.exit()
    if not os.path.isdir(out_dir):
        info(f"Creating output directory {out_dir}")
        os.mkdir(out_dir)
    if not os.path.isdir(netlist_dir):
        info(f"Creating netlist working directory {netlist_dir}")
        os.mkdir(netlist_dir)
    # write one .sp file per candidate for Liberate to pick up
    for netlist in candidates:
        netlist_file = File(os.path.join(netlist_dir, netlist.cell_name + ".sp"))
        netlist.persist(netlist_file)
    cell_names = ",".join(tuple(netlist.cell_name for netlist in candidates))
    info("Running liberate.")
    with open(
        file=liberate_log,
        mode="w",
    ) as log_file:  # TODO: use MockFile interface
        # stdout/stderr both go to the log file, so r.stdout is None
        r: subprocess.Popen = subprocess.Popen(
            args=[liberate_cmd, tcl_script],
            stderr=subprocess.STDOUT,
            stdout=log_file,
            text=True,
            env={
                # pass run parameters to the TCL script via environment
                **os.environ,
                "NETLIST_DIR": netlist_dir,
                "OUT_DIR": out_dir,
                "CELL_NAMES": cell_names,
                "LDB_NAME": ldb_name,
                "LIBERATE_DIR": liberate_dir,
            },
        )
        # show a spinner until poll() reports completion
        _waiting_animation(complete_condition=r.poll)
    # Convert to CompletedProcess so we can check the return code
    results: subprocess.CompletedProcess = subprocess.CompletedProcess(
        args=r.args,
        returncode=r.returncode,
        stdout=r.stdout,
        stderr=r.stderr,
    )
    results.check_returncode()
    return LiberateCompletedProcess(
        args=results.args, returncode=results.returncode, stdout=results.stdout
    )
def liberate_simulator(
    candidates: Sequence[Netlist],
    iteration: int,
    tcl_script: str,
    liberate_dir: str,
    out_dir: str,
) -> LibertyResult:
    """Characterize one optimizer iteration's candidates with Liberate.

    Creates out_dir/iteration-<n>/, serializes the candidates to
    candidates.json there, runs Liberate, and parses the resulting
    Liberty library.
    """
    run_folder = os.path.join(out_dir, "iteration-" + str(iteration))
    if not os.path.isdir(run_folder):
        info(f"Creating output directory {run_folder}")
        os.mkdir(run_folder)
    # Serialize candidates
    json_file: File = File(os.path.join(run_folder, "candidates.json"))
    candidates_json: str = json.dumps(candidates, indent=4, cls=ObjectEncoder)
    json_file.write(candidates_json)
    netlist_dir = os.path.join(run_folder, "netlist")
    # Place current log in same place to allow `tail -f`
    liberate_log_working = os.path.join(out_dir, "liberate.log")
    ldb_name = "CIRKOPT"
    # Run simulations
    _run_liberate(
        candidates=candidates,
        tcl_script=tcl_script,
        liberate_dir=liberate_dir,
        netlist_dir=netlist_dir,
        liberate_log=liberate_log_working,
        out_dir=run_folder,
        ldb_name=ldb_name,
    )
    # Keep copy so to prevent overwritting by the next iteration
    copy(liberate_log_working, run_folder)
    # Parse results
    ldb_path = os.path.join(run_folder, "lib", ldb_name + ".lib")
    ldb_file = File(ldb_path)
    parser = LibertyParser()
    return parser.parse(ldb_file)
|
#import requests
import geopandas as gpd
import pandas as pd
from .utils_geo import generate_tiles, geometry_to_gdf, generate_manifest, generate_folium_choropleth_map, get_html_iframe
from shapely.geometry import shape, MultiPolygon, Polygon
from .settings import DEFAULT_CRS, DEFAULT_COORDS
#save manifest as wkt to reduce size of manifest
class CollectionOsm:
    """
    This is the main CollectionOsm class. This collection class will produce
    a tile manifest at a specific zoom level to keep track of the OSM
    retrieving process.

    Parameters
    ----------
    geometry: shapely.geometry.Polygon or shapely.geometry.MultiPolygon
        geographic boundaries to fetch geometries within.
        If None, default coordinates will be set to
        [(-180,-85), (180, -85), (180, 85), (-180, 85), (-180, -85)]
    zoom: int
        zoom level used to generate the tiles
    crs: str
        the starting CRS of the passed-in geometry. If None, it will be
        set to "EPSG:4326"
    geom_tiles: bool
        if True the manifest will be generated for the tile geometry.
        False will provide the manifest for the input geometry.

    Returns
    ----------
    manifest: geopandas.GeoDataFrame
        manifest geodataframe.
    """
    def __init__(self, geometry=None, zoom=5, crs=None, geom_tiles=True):
        self.zoom = zoom
        self.crs = crs or DEFAULT_CRS
        self.tiles = None
        self.geometry = geometry or Polygon(DEFAULT_COORDS)
        self.geom_tiles = geom_tiles
        # generate geometry and tile GeoDataFrames
        self.geometry_gdf = self.get_geom_gdf()
        self.tiles_gdf = self.get_tiles_gdf()
        # generate manifest and its folium visualization
        self.manifest = self.get_manifest()
        # NOTE: "vizzualise" (sic) is part of the public interface, so the
        # misspelling is kept for backward compatibility.
        self.vizzualise = self.get_choropleth_map()
    # methods
    def _repr_html_(self):
        # Jupyter-friendly representation: embed the folium map in an iframe.
        return get_html_iframe(self.vizzualise)
    def get_tiles_gdf(self):
        """
        Generate tiles for the configured zoom level.

        Uses self.crs as the coordinate system for the output tiles
        and self.zoom as the zoom level.

        Returns
        --------
        gdf: geopandas.GeoDataFrame
        """
        gdf = generate_tiles(crs=self.crs, zoom=self.zoom)
        return gdf
    def get_geom_gdf(self):
        """
        Parse the input geometry (self.geometry, a shapely Polygon or
        MultiPolygon) into a geopandas.GeoDataFrame in self.crs.

        Returns
        -------
        gdf: geopandas.GeoDataFrame
        """
        gdf = geometry_to_gdf(geometry=self.geometry, crs=self.crs)
        return gdf
    def get_manifest(self):
        """
        Generate the geopandas.GeoDataFrame used for tracking the OSM
        retrieving process, combining self.geometry_gdf and
        self.tiles_gdf.  When self.geom_tiles is True the manifest is
        generated for the tile geometry, otherwise for the input
        geometry.

        Returns
        --------
        manifest: geopandas.GeoDataFrame
        """
        manifest = generate_manifest(geometry=self.geometry_gdf, tiles=self.tiles_gdf, geom_tiles=self.geom_tiles)
        return manifest
    def get_choropleth_map(self):
        # Build a folium choropleth map of the manifest tiles.
        folium_map = generate_folium_choropleth_map(gdf=self.manifest)
        return folium_map
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 11:20:21 2020
@author: Shivani
"""
#1.PRE-PROCESSING
# Open the file containing the genome (closed at the end of the script).
MT=open("D:\MEng\SPRING\INTRO BIOINFOR\MTG.txt", "r")
contents=MT.read()  # store contents of file in an object
# Forward strand: keep only the A/T/G/C bases (drops newlines and any
# other characters, exactly as the original char-by-char loop did).
forward = ''.join(base for base in contents if base in 'ATGC')
# Complementary strand: walk the sequence in reverse, complement each
# base, and again drop any non-ATGC characters.
_COMPLEMENT = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
rev = ''.join(_COMPLEMENT[base] for base in reversed(contents) if base in _COMPLEMENT)
#-----------------------------------------------------------------------------
#initialize objects
#arrays to store ORFs that store positions of start and stop codons
#(one start/stop pair of lists per reading frame; "C" = complementary strand)
ORF1start=[]
ORF1stop=[]
ORF2start=[]
ORF2stop=[]
ORF3start=[]
ORF3stop=[]
ORF1Cstart=[]
ORF1Cstop=[]
#arrays to organize start and stop codons in their respective reading frames
#(filled by findRF; consumed by getORF)
RF1start=[]
RF2start=[]
RF3start=[]
RF1stop=[]
RF2stop=[]
RF3stop=[]
RF1Cstart=[]
RF2Cstart=[]
RF3Cstart=[]
RF1Cstop=[]
RF2Cstop=[]
RF3Cstop=[]
#----------------------------------------------------------------------------
#2. FIND START AND STOP CODONS
# str.startswith accepts a tuple of prefixes, which replaces the
# original duplicated bitwise-| chains of startswith calls.
START_CODONS = ("ATG", "ATA")
STOP_CODONS = ("TAA", "TAG", "AGA", "AGG")
#finding positions of start codons (every index, all three frames)
startFor = [i for i in range(len(forward)) if forward.startswith(START_CODONS, i)]
startRev = [i for i in range(len(rev)) if rev.startswith(START_CODONS, i)]
#finding positions of stop codons
stopFor = [i for i in range(len(forward)) if forward.startswith(STOP_CODONS, i)]
stopRev = [i for i in range(len(rev)) if rev.startswith(STOP_CODONS, i)]
#--------------------------------------------------------------------------------
#3. ASSIGN START AND STOP CODONS TO READING FRAMES:
def _assign_by_frame(positions, frame0, frame1, frame2):
    """Append each codon position to the list for its reading frame.

    The frame is the position modulo 3 (0 -> frame0, 1 -> frame1,
    2 -> frame2).  The lists are mutated in place.
    """
    frames = (frame0, frame1, frame2)
    for pos in positions:
        frames[pos % 3].append(pos)

def findRF(startFor, stopFor, startRev, stopRev):
    """Assign start/stop codon positions from both strands to the
    module-level reading-frame lists (RF1start ... RF3Cstop).

    Replaces four copies of the same if/elif-on-modulo loop with a
    shared helper.
    """
    _assign_by_frame(startFor, RF1start, RF2start, RF3start)
    _assign_by_frame(stopFor, RF1stop, RF2stop, RF3stop)
    _assign_by_frame(startRev, RF1Cstart, RF2Cstart, RF3Cstart)
    _assign_by_frame(stopRev, RF1Cstop, RF2Cstop, RF3Cstop)
#-----------------------------------------------------------------------------
#4. FIND ORFS IN ALL READING FRAMES
def getORF(startframe, stopframe, ORFstart, ORFstop):
    """Pair each start codon with the next stop codon after it.

    Both input lists must be ascending.  Matching pairs are appended
    to ORFstart/ORFstop in place (several starts may share one stop).
    """
    i = 0
    j = 0
    while i < len(startframe) and j < len(stopframe):
        if startframe[i] > stopframe[j]:
            j += 1
        elif startframe[i] < stopframe[j]:
            ORFstart.append(startframe[i])
            ORFstop.append(stopframe[j])
            i += 1
        else:
            # Equal positions previously advanced neither index, which
            # made the loop spin forever; treat the stop as unusable.
            j += 1
#-----------------------------------------------------------------------------
#5. GET NON-OVERLAPPING ORFs
def getnonoverlapORFs(ORFstop, ORFstart):
    """Collapse overlapping ORFs to unique start/stop positions.

    NOTE(review): mutates ORFstart in place; every start at or before
    a given stop is overwritten with the first such start (``latest``
    is captured before the while loop), and duplicates are then
    removed via set().  The intent appears to be "keep one ORF per
    stop codon", but the index handling looks fragile - confirm
    against the expected biology before reuse.

    Returns (sorted unique starts, sorted unique stops).
    """
    ORFstopn = list(set(ORFstop))  # unique stop positions
    ORFstopn.sort()
    j=0
    for i in ORFstopn:
        latest = ORFstart[j]  # first remaining start for this stop
        while(ORFstart[j] <= i):
            ORFstart[j] = latest  # collapse onto the first start
            if(j<len(ORFstart)-1):
                j +=1
            else:
                break
    ORFstartn = list(set(ORFstart))  # unique (collapsed) starts
    ORFstartn.sort()
    return (ORFstartn,ORFstopn)
#-----------------------------------------------------------------------------
#6. FILTERING ORFS BASED ON LENGTH AND GC CONTENT
def filterORFs(strand, ORFstartn, ORFstopn):
    """Print putative genes: ORFs longer than 200 bp whose GC content
    exceeds 30%."""
    for start, stop in zip(ORFstartn, ORFstopn):
        length = stop + 3 - start
        if length <= 200:
            continue  # too short to be a putative gene
        seq = strand[start:stop + 3]
        gc_percent = ((seq.count('C') + seq.count('G')) / len(seq)) * 100
        if gc_percent > 30:
            print("PUTATIVE GENE START (bp):" + str(start))
            print("PUTATIVE GENE END (bp):" + str(stop + 3))
            print("Length=" + str(length))
            print("% GC Content=" + str(gc_percent))
            print(seq)
            print("---------------xxxxxx------------------")
#-----------------------------------------------------------------------------
#7. FUNCTION CALLS FOR READING FRAMES
# Assign every start/stop codon position to its reading frame.  This must
# run exactly once: a second identical findRF call used to appear before
# the complementary-strand section, which appended every position to the
# RF* lists twice and corrupted the -1 frame results.
findRF(startFor, stopFor, startRev, stopRev)
# a.FORWARD STRAND
print("READING FRAME +1")
getORF(RF1start, RF1stop, ORF1start, ORF1stop)
ORF1startn, ORF1stopn = getnonoverlapORFs(ORF1stop, ORF1start)
filterORFs(forward, ORF1startn, ORF1stopn)
print("READING FRAME +2")
getORF(RF2start, RF2stop, ORF2start, ORF2stop)
ORF2startn, ORF2stopn = getnonoverlapORFs(ORF2stop, ORF2start)
filterORFs(forward, ORF2startn, ORF2stopn)
print("READING FRAME +3")
getORF(RF3start, RF3stop, ORF3start, ORF3stop)
ORF3startn, ORF3stopn = getnonoverlapORFs(ORF3stop, ORF3start)
filterORFs(forward, ORF3startn, ORF3stopn)
#b.COMPLEMENTARY STRAND
print("READING FRAME -1")
getORF(RF1Cstart, RF1Cstop, ORF1Cstart, ORF1Cstop)
ORFstartn, ORFstopn = getnonoverlapORFs(ORF1Cstop, ORF1Cstart)
filterORFs(rev, ORFstartn, ORFstopn)
#-----------------------------------------------------------------------------
#8.ACCURACY OF PREDICTION
total=0
#Actual values computed as a sum of length + Percent GC content for each of the 13 genes
Actual=[1003.7,1084.99,1588.24,730.2,246.61,725.2,830.56,386.17,340.1,1422.27,1856.92,567.67,1187.28]
#Predicted/Epected values computed as a sum of length + Percent GC content for each of the 13 genes
Predicted=[1004.64,981.35, 1588.23,730.19 ,246.61 , 725.19, 896.83,747.83 ,343.46 , 1468.78, 1865.75,570.42 ,1206.99 ]
#assign score based on difference between actual and predicted value:
#the bigger the difference, the lower the score.  The original ranges
#left differences in [10, 15] unscored (they fell through to score 1
#while larger differences in (15, 20] scored 2); the bands below are
#contiguous and monotonic.
for i, j in zip(Actual, Predicted):
    diff = abs(i - j)
    if diff < 5:
        score = 4
    elif diff < 10:
        score = 3
    elif diff <= 20:
        score = 2
    else:
        score = 1
    total += score
#compute accuracy
print("Score earned (out of "+ str(4*len(Actual))+ ")="+ str(total))
print("Accuracy(in %)=Score earned/Total score=")
print(total/(4*len(Actual))*100)
#------------------------------------------------------------------------------
#close file
MT.close()
|
import unittest
from io import StringIO
from unittest import mock
from scrapy.utils import trackref
class Foo(trackref.object_ref):
    """Tracked test class; instances are recorded in trackref.live_refs."""
    pass
class Bar(trackref.object_ref):
    """Second tracked test class, distinct from Foo for per-class counts."""
    pass
class TrackrefTestCase(unittest.TestCase):
    """Tests for scrapy.utils.trackref live-reference tracking."""
    def setUp(self):
        # start each test with no tracked objects
        trackref.live_refs.clear()
    def test_format_live_refs(self):
        # keep local references alive so the objects stay "live"
        o1 = Foo()  # NOQA
        o2 = Bar()  # NOQA
        o3 = Foo()  # NOQA
        self.assertEqual(
            trackref.format_live_refs(),
            '''\
Live References
Bar 1 oldest: 0s ago
Foo 2 oldest: 0s ago
''')
        # ignore=Foo suppresses that class from the report
        self.assertEqual(
            trackref.format_live_refs(ignore=Foo),
            '''\
Live References
Bar 1 oldest: 0s ago
''')
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_print_live_refs_empty(self, stdout):
        trackref.print_live_refs()
        self.assertEqual(stdout.getvalue(), 'Live References\n\n\n')
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_print_live_refs_with_objects(self, stdout):
        o1 = Foo()  # NOQA
        trackref.print_live_refs()
        self.assertEqual(stdout.getvalue(), '''\
Live References
Foo 1 oldest: 0s ago\n\n''')
    def test_get_oldest(self):
        # creation order determines "oldest" per class name
        o1 = Foo()  # NOQA
        o2 = Bar()  # NOQA
        o3 = Foo()  # NOQA
        self.assertIs(trackref.get_oldest('Foo'), o1)
        self.assertIs(trackref.get_oldest('Bar'), o2)
        self.assertIsNone(trackref.get_oldest('XXX'))
    def test_iter_all(self):
        o1 = Foo()  # NOQA
        o2 = Bar()  # NOQA
        o3 = Foo()  # NOQA
        self.assertEqual(
            set(trackref.iter_all('Foo')),
            {o1, o3},
        )
|
from .models import Profile, User, QA
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from .utils import crypt, get_key, create_user_key
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Attach a fresh Profile to every newly created User."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User is saved."""
    profile = instance.profile
    profile.save()
@receiver(pre_save, sender=QA)
def pre_save_qa(sender, instance, *args, **kwargs):
    """Normalise the question and answer text to lower case before saving."""
    for field_name in ("question", "answer"):
        setattr(instance, field_name, getattr(instance, field_name).lower())
@receiver(post_save, sender=Profile)
def post_save_profile(sender, instance, *args, **kwargs):
    """Generate and store an encrypted user key the first time a Profile is saved.

    The ``if not instance.user_key`` guard ensures the nested ``instance.save()``
    (which re-fires this same post_save signal) terminates once the key is set.
    """
    # BUG FIX: removed leftover debug output (`print('IIIII')`) that fired on
    # every Profile save.
    if not instance.user_key:
        instance.user_key = crypt(get_key('TODOAPP_KEY'), message=create_user_key(instance))
        instance.save()
# def post_save_user(sender, instance, *args, **kwargs):
# try:
# instance.profile
# if not instance.profile:
# print(instance)
# instance.profile = Profile(user=instance)
# instance.save()
# post_save.connect(post_save_user, User) |
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyghmi.ipmi.oem.lenovo import inventory
# Field layout of one CPU entry in the Lenovo OEM inventory response
# (consumed by parse_cpu_info below). Per struct-format conventions:
# "B" = uint8, "13s"/"30s"/"3s" = fixed-width strings, "<I" = little-endian
# uint32, "h" = int16; the trailing reserved field is parsed but excluded
# from the reported output.
cpu_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("Cores", "B"),
    inventory.EntryField("Threads", "B"),
    inventory.EntryField("Manufacturer", "13s"),
    inventory.EntryField("Family", "30s"),
    inventory.EntryField("Model", "30s"),
    inventory.EntryField("Stepping", "3s"),
    inventory.EntryField("Maximum Frequency", "<I",
                         valuefunc=lambda v: str(v) + " MHz"),
    inventory.EntryField("Reserved", "h", include=False))
def parse_cpu_info(raw):
    """Parse one raw CPU inventory entry according to the cpu_fields layout."""
    return inventory.parse_inventory_category_entry(raw, cpu_fields)
def get_categories():
    """Return the inventory category table: name -> id format, parser and IPMI request."""
    # Raw IPMI request that retrieves the CPU inventory category.
    cpu_request = {
        "netfn": 0x06,
        "command": 0x59,
        "data": (0x00, 0xc1, 0x01, 0x00),
    }
    cpu_category = {
        "idstr": "CPU {0}",
        "parser": parse_cpu_info,
        "command": cpu_request,
    }
    return {"cpu": cpu_category}
|
"""
File for structure checks:
- Duplicates
- Physical Structure
- Molecule in structure checks
"""
import os,json
import numpy as np
from pymove import Structure,StructDict
from pymove.io import read,write
from pymatgen.analysis.structure_matcher import (StructureMatcher,
ElementComparator,
SpeciesComparator,
FrameworkComparator)
from pymove.libmpi.base import _JobParallel
class pymatgen_compare():
    """Callable structure comparator backed by pymatgen's StructureMatcher.

    Arguments
    ---------
    pymatgen_kw: dict or None
        Keyword arguments forwarded to StructureMatcher. When None, a copy of
        DEFAULT_KW is used.
    """
    # Default StructureMatcher settings; copied per instance, never mutated.
    DEFAULT_KW = {
        "ltol": 0.2,
        "stol": 0.3,
        "angle_tol": 5,
        "primitive_cell": True,
        "scale": True,
        "attempt_supercell": False,
    }

    def __init__(self, pymatgen_kw=None):
        ## BUG FIX: the original used a mutable dict literal as the default
        ## argument, so every instance shared (and could corrupt) the same
        ## settings dict. Use a None sentinel and copy the defaults instead.
        if pymatgen_kw is None:
            pymatgen_kw = dict(self.DEFAULT_KW)
        self.kw = pymatgen_kw

    def __call__(self, struct1, struct2):
        """Return True if the two pymove structures match under StructureMatcher.fit."""
        sm = StructureMatcher(
            **self.kw,
            comparator=SpeciesComparator())
        pstruct1 = struct1.get_pymatgen_structure()
        pstruct2 = struct2.get_pymatgen_structure()
        return sm.fit(pstruct1, pstruct2)
class DuplicateCheck():
    """
    Checks if there are duplicate structures in an entire structure dictionary.
    Duplicate check may be run in a serial or parallel way using MPI. This is
    automatically detected and handled by the program.

    Arguments
    ---------
    struct_dict: StructDict
        Dictionary containing all structures that should be checked for
        duplicates.
    mode: str
        Mode of operation. Can be one of pair or complete. Pair will do the
        minimal number of comparisons (n choose 2). Complete will always
        compare each structure to every other structure in the struct_dict.
    compare_fn: callable
        Callable that performs the comparison. This function can be arbitrary,
        but it must take in two structures as arguments and return True if
        the structures are duplicates, or False if they are not. See the
        pymatgen_compare class for an example of what this might look like.
    """
    def __init__(self,
                 struct_dict=None,
                 mode="pair",
                 compare_fn=pymatgen_compare()):
        ## BUG FIX: the original default was a mutable dict literal ({}),
        ## shared across instances. None is used as a sentinel instead.
        if struct_dict is None:
            struct_dict = {}
        self.struct_dict = struct_dict
        self.compare_fn = compare_fn
        self._check_mode(mode)
        self.duplicates_dict = {}
        if len(struct_dict) != 0:
            self._comparisons()
        ## Instantiate JobParallel in case DuplicateCheck is called with MPI
        self.jp = _JobParallel()

    def _check_mode(self, mode):
        """Validate the requested mode against the supported ones."""
        self.modes = ["pair", "complete"]
        if mode not in self.modes:
            ## BUG FIX: the error message referenced `self.modess` (typo),
            ## which raised AttributeError instead of the intended Exception.
            raise Exception("DuplicateCheck mode {} is not available. "
                            .format(mode) +
                            "Please use one of {}.".format(self.modes))
        else:
            self.mode = mode

    def _comparisons(self):
        """
        Get the dictionary of comparisons that have to be made for pair mode
        execution, and (re)initialise the duplicates_dict.
        """
        ### Just doing this to get the correct shape and index values for a
        ##    pairwise comparison: the upper triangle (k=1) of an NxN grid
        ##    enumerates every unordered pair exactly once.
        temp = np.arange(0, len(self.struct_dict), 1)
        square = temp + temp[:, None]
        idx = np.triu_indices(n=square.shape[0],
                              k=1,
                              m=square.shape[1])
        #### Build the dictionary of indices that each structure must be
        ##    compared to for pairwise comparison.
        keys = [x for x in self.struct_dict.keys()]
        comparison_dict = {}
        for key in keys:
            comparison_dict[key] = []
        for idx_pair in zip(idx[0], idx[1]):
            key = keys[idx_pair[0]]
            comparison_dict[key].append(idx_pair[1])
        self.comparisons = comparison_dict
        for key in self.struct_dict.keys():
            self.duplicates_dict[key] = []

    def calc(self, struct_obj):
        """
        General calc wrapper

        Arguments
        ---------
        struct_obj: str,dict,Structure
            Can be a string, a dictionary, or a Structure.
            str: Assume this is a directory path.
            dict: Structure dictionary
            Structure: Currently not supported
        """
        if type(struct_obj) == str:
            if not os.path.isdir(struct_obj):
                raise Exception("{} is not a directory".format(struct_obj))
            if self.jp.size == 1:
                self.struct_dict = read(struct_obj)
                self._comparisons()
                self.calc_dict(self.struct_dict)
            else:
                ### Just read onto rank 0 for efficiency. Jobs will be
                ### communicated later.
                if self.jp.rank == 0:
                    self.struct_dict = read(struct_obj)
                    self._comparisons()
                    ## Don't need any arguments for parallel implementation
                    self.calc_dict(None)
                else:
                    ## Don't need any arguments for parallel implementation
                    self.calc_dict(None)
        elif type(struct_obj) == StructDict or \
                type(struct_obj) == dict:
            self.calc_dict(struct_obj)
        elif type(struct_obj) == Structure:
            self.calc_struct(struct_obj)

    def calc_dict(self, struct_dict):
        """Run the duplicate check over a structure dictionary (serial or MPI)."""
        if self.jp.size == 1:
            ## Serial Mode
            if len(self.struct_dict) == 0:
                self.struct_dict = struct_dict
                self._comparisons()
            for struct_id, struct in struct_dict.items():
                self.calc_struct(struct)
        else:
            ## Parallel Mode Calculation
            self.get_job_list()
            self.jp.calc()

    def calc_struct(self, struct):
        """Dispatch to the mode-specific per-structure implementation."""
        ## Safer than the original eval()-based dispatch; mode is validated
        ## in _check_mode so the attribute is guaranteed to exist.
        getattr(self, "calc_struct_{}".format(self.mode))(struct)

    def calc_struct_pair(self, struct):
        """
        Pair mode implementation.
        """
        keys = [x for x in self.struct_dict.keys()]
        if struct.struct_id not in keys:
            raise Exception("Structure ID {} was not found "
                            .format(struct.struct_id) +
                            "in the DuplicateCheck.struct_dict.")
        struct_dup_pool = [struct.struct_id]
        for idx in self.comparisons[struct.struct_id]:
            struct2 = self.struct_dict[keys[idx]]
            if self.compare(struct, struct2):
                struct_dup_pool.append(struct2.struct_id)
                print(struct.struct_id, struct2.struct_id)
        ## Now update the duplicates dict of all found duplicates with the
        #  same values
        for struct_id in struct_dup_pool:
            self.duplicates_dict[struct_id] += struct_dup_pool
            # Only use unique values
            self.duplicates_dict[struct_id] = \
                np.unique(self.duplicates_dict[struct_id]).tolist()

    def calc_struct_complete(self, struct):
        """
        Compare structure to all other structures in the structure dictionary.
        """
        raise Exception("Complete mode is not implemented yet.")

    def compare(self, struct1, struct2):
        """
        Compare structures using the configured comparison callable.
        """
        return self.compare_fn(struct1, struct2)

    def _unique_duplicates(self):
        """Collapse duplicates_dict into the list of unique duplicate groups."""
        id_used = []
        unique = []
        for name, duplicates in self.duplicates_dict.items():
            if len(duplicates) == 1:
                ## Only the structure itself: no duplicates found.
                continue
            elif name in id_used:
                ## Group already emitted via another member.
                continue
            else:
                unique.append(duplicates)
                id_used.extend(duplicates)
        return unique

    def _dump(self, file_name):
        """Serialise per-structure duplicates and unique groups to JSON."""
        output_dict = {}
        output_dict["struct"] = self.duplicates_dict
        output_dict["dups"] = self._unique_duplicates()
        with open(file_name, "w") as f:
            f.write(json.dumps(output_dict, indent=4))

    def write(self, file_name="duplicates.json"):
        """
        Output a format for the duplicates. Outputs all the duplicates
        for each structure and a section for only the unique list of
        duplicates found in the duplicates_dict.
        """
        ## Parallel Mode
        if self.jp.size > 1:
            self.parallel_write(file_name)
            return
        self._dump(file_name)

    def get_job_list(self):
        """
        Obtains job list for JobParallel calculation.
        """
        ## Use rank 0 to communicate jobs
        if self.jp.rank == 0:
            keys = [x for x in self.struct_dict.keys()]
            job_list = []
            for struct1_id, value in self.comparisons.items():
                for idx in value:
                    struct2_id = keys[idx]
                    job_list.append([self.compare_fn,
                                     {"struct1": self.struct_dict[struct1_id],
                                      "struct2": self.struct_dict[struct2_id]}])
            self.jp.job_list = job_list
        ## Send jobs from rank 0
        self.jp.send_jobs()
        return

    def parallel_write(self, file_name="duplicates.json"):
        """Rank-0 collection of parallel comparison results followed by JSON output."""
        if self.jp.rank != 0:
            return
        all_results = self.jp.all_results
        if all_results is None:
            raise Exception("Cannot call write without calculating.")
        for entry in all_results:
            settings = entry[0]
            result = entry[1]
            if result:
                arguments = settings[1]
                struct1_id = arguments["struct1"].struct_id
                struct2_id = arguments["struct2"].struct_id
                ## Update both in duplicates dict
                self.duplicates_dict[struct1_id].append(struct2_id)
                self.duplicates_dict[struct2_id].append(struct1_id)
        ## Prepend each structure's own id so groups match the serial format.
        for name, duplicates in self.duplicates_dict.items():
            self.duplicates_dict[name] = [name] + self.duplicates_dict[name]
        ### Then same algorithm as serial write
        self._dump(file_name)
if __name__ == "__main__":
    from pymove.io import read, write
    ## NOTE(review): hard-coded local test paths — only runnable on the
    ## author's machine.
    test_dir = "/Users/ibier/Research/Results/Hab_Project/genarris-runs/GIYHUR/20191103_Full_Relaxation/GIYHUR_Relaxed_spg"
    s = read(test_dir)
    keys = [x for x in s.keys()]
    keys = keys[0:5]
    test_s = {}
    for entry in keys:
        test_s[entry] = s[entry]
    test_s = read("/Users/ibier/Research/Results/Hab_Project/GAtor-runs/BZOXZT/20200726_Multi_GAtor_Report_Test/Dup_Check_Test/test")
    dc = DuplicateCheck(test_s)
    ### Parallel Testing
    ## BUG FIX: get_job_list() populates dc.jp.job_list internally and
    ## returns None; assigning its return value clobbered the job list it
    ## had just built and sent.
    dc.get_job_list()
    # dc.jp.calc()
|
import gi
gi.require_version("Gtk","3.0")
from gi.repository import Gtk
import ntpath
from pathlib import Path
class sushape(Gtk.Box):
    """GTK notebook tab wrapping the Seismic Unix `sushape` command.

    Builds the command line from the Glade-defined entry widgets, previews it
    via the parent, runs it per input file in the embedded terminal, and can
    append it to the parent's workflow.
    """

    def __init__(self, parent):
        self.parent = parent
        self.glade_file = str(Path(__file__).parent / "sushape.glade")
        self.builder = Gtk.Builder()
        self.builder.add_from_file(self.glade_file)
        self.main_box = self.builder.get_object("sushape")
        self.title_box = self.builder.get_object("tab_title")
        self.builder.connect_signals(self)
        # Parameter entry widgets from the Glade layout.
        self.entry1 = self.builder.get_object("w")
        self.entry2 = self.builder.get_object("d")
        self.entry3 = self.builder.get_object("pnoise")
        self.entry4 = self.builder.get_object("nshape")
        self.showshaper = self.builder.get_object("showshaper")
        self.entry5 = self.builder.get_object("shape_file")
        self.wfile = self.builder.get_object("wfile")
        self.dfile = self.builder.get_object("dfile")
        self.output_files = self.builder.get_object("output_files")
        self.append_wf_cmd = ""
        self.parameters = ""

    def set_parameters(self, widget):
        """Collect widget values into the sushape parameter string and preview the command."""
        self.parameters = ""
        if self.entry1.get_text() != "" and self.entry2.get_text() != "":
            str1 = self.entry1.get_text().split(',')
            str2 = self.entry2.get_text().split(',')
            if len(str1) == len(str2):
                self.parameters = "w=" + self.entry1.get_text() + " d=" + self.entry2.get_text() + " "
            else:
                self.parent.send_message("'d' and 'w' Array lengths must be same", 1)
        else:
            try:
                self.parameters += "wfile=" + ntpath.basename(self.wfile.get_filename()) + " "
                self.parameters += "dfile=" + ntpath.basename(self.dfile.get_filename()) + " "
            except Exception:
                # get_filename() returns None when no file is selected, and
                # ntpath.basename(None) raises TypeError.
                # (Narrowed the original bare `except:` so Ctrl-C isn't swallowed;
                #  also fixed the "choosen" typo in the user-facing message.)
                self.parent.send_message("Required Parameters not chosen", 1)
                self.parameters = ""
        if self.parameters != "":
            if self.entry3.get_text() != "":
                self.parameters += "pnoise=" + self.entry3.get_text() + " "
            if self.entry4.get_text() != "":
                self.parameters += "nshape=" + self.entry4.get_text() + " "
            if self.showshaper.get_active():
                self.parameters += "showshaper=1 "
                self.command = "sushape < infile " + self.parameters + " >" + "outfile >&" + self.entry5.get_text()
            else:
                self.command = "sushape < infile " + self.parameters + " >" + "outfile"
            self.append_wf_cmd = "sushape < $infile " + self.parameters + " >" + "$prefix.$infile"
            self.parent.send_command(self.command, 1)

    def run(self, widget):
        """Run sushape once per selected input file inside the embedded terminal."""
        self.temp = self.output_files.get_text()
        if self.temp != "":
            self.parent.output_files = self.temp.split(',')
        else:
            # NOTE(review): execution continues with the previously stored
            # parent.output_files here — confirm whether an early return is
            # intended after this warning.
            self.parent.send_message("No Output File names given", 1)
        if self.parent.input_files == []:
            self.parent.send_message("No Input Files selected", 1)
            return True
        if len(self.parent.input_files) == len(self.parent.output_files):
            self.parent.console.set_current_page(2)
            for i, input_file in enumerate(self.parent.input_files):
                if self.showshaper.get_active():
                    cmd = "sushape" + " < " + input_file + " " + self.parameters \
                        + " > " + self.parent.output_files[i] + "> &" + self.entry5.get_text()
                else:
                    cmd = "sushape" + " < " + input_file + " " + self.parameters \
                        + " > " + self.parent.output_files[i]
                self.parent.terminal.run_command(cmd + "\n")
                self.parent.history.append(cmd)
            self.parent.refresh_file_ex(self.parent.props.curr_project)
        else:
            self.parent.send_message("No. of Output Files not equal to No. of Input Files", 1)

    def refresh(self, widget):
        """Reset all tab inputs to their defaults."""
        self.append_wf_cmd = ""
        self.parameters = ""
        self.output_files.set_text("")
        self.parent.output_files = []
        self.entry1.set_text("")
        self.entry2.set_text("")
        # BUG FIX: the original referenced `self.vfile`, which is never
        # defined on this class (AttributeError). Clear the actual file
        # choosers, guarding against an empty selection (None).
        if self.wfile.get_filename():
            self.wfile.unselect_filename(self.wfile.get_filename())
        if self.dfile.get_filename():
            self.dfile.unselect_filename(self.dfile.get_filename())

    def append(self, widget):
        """Append the last previewed command to the current project's workflow."""
        if self.parent.props.curr_project != None:
            if self.parameters != "":
                self.parent.workflow_append(self.append_wf_cmd, 1)
            else:
                self.parent.send_message("No Parameters chosen", 1)
        else:
            self.parent.send_message("Can not append command without opening a Project", 1)

    def help(self, widget):
        """Show the SUSHAPE help dialog defined in the Glade file."""
        builder = Gtk.Builder()
        builder.add_from_file(self.glade_file)
        help_dlg = builder.get_object("help")
        help_dlg.set_title("SUSHAPE Help")
        help_dlg.show_all()

    def destroy(self, widget):
        """Tear down the tab and mark it closed in the parent's tab registry."""
        self.main_box.destroy()
        self.parent.tabs['SUSHAPE'] = 0
class nb_page(Gtk.Box):
    """Notebook-page adapter exposing the sushape tab's content and title widgets."""
    def __init__(self,parent):
        # Build the tool UI once; the notebook consumes .page and .title.
        self.main_class = sushape(parent)
        self.page = self.main_class.main_box
        self.title = self.main_class.title_box
|
from cuescience_shop.models import Client, Address, Order
from natspec_utils.decorators import TextSyntax
from cart.cart import Cart
from django.test.client import Client as TestClient
class ClientTestSupport(object):
    """NatSpec-driven helpers for building shop fixtures inside a TestCase."""

    def __init__(self, test_case):
        self.test_case = test_case
        self.client = TestClient()

    @TextSyntax("Create address #1 #2 #3 #4", types=["str", "str", "str", "str"], return_type="Address")
    def create_address(self, street, number, postcode, city):
        """Persist and return an Address built from the four components."""
        address = Address(
            street=street,
            number=number,
            postcode=postcode,
            city=city,
        )
        address.save()
        return address

    @TextSyntax("Create client #1 #2", types=["str", "str", "Address"], return_type="Client")
    def create_client(self, first_name, last_name, address):
        """Persist and return a Client using *address* for both shipping and billing."""
        client = Client(
            first_name=first_name,
            last_name=last_name,
            shipping_address=address,
            billing_address=address,
        )
        client.save()
        return client

    @TextSyntax("Create order", types=["Client"], return_type="Order")
    def create_order(self, client):
        """Create an empty cart via the test HTTP client and wrap it in a saved Order."""
        cart_wrapper = Cart(self.client)
        cart_wrapper.create_cart()
        order = Order(client=client, cart=cart_wrapper.cart)
        order.save()
        return order

    @TextSyntax("Assert client number is #1", types=["str", "Client"])
    def assert_client_number(self, client_number, client):
        """Delegate the client-number equality check to the wrapped TestCase."""
        self.test_case.assertEqual(client_number, client.client_number)

    @TextSyntax("Assert order number is #1", types=["str", "Order"])
    def assert_order_number(self, order_number, order):
        """Delegate the order-number equality check to the wrapped TestCase."""
        self.test_case.assertEqual(order_number, order.order_number)
|
from torch import tensor, long
def tensor_loader(*adatas, protein_boolean, sampler, device, celltypes = None, categories = None):
    """Dispatch to the tri-head loader when cell-type labels are supplied, else the dual-head one."""
    use_trihead = celltypes is not None and categories is not None
    if use_trihead:
        return tensor_loader_trihead(
            *adatas,
            protein_boolean=protein_boolean,
            sampler=sampler,
            device=device,
            celltypes=celltypes,
            categories=categories,
        )
    return tensor_loader_dualhead(
        *adatas,
        protein_boolean=protein_boolean,
        sampler=sampler,
        device=device,
    )
class tensor_loader_dualhead:
    """Yield batches of [per-adata tensors..., protein-boolean tensor, None].

    All adatas must share the same number of observations; the sampler yields
    (row indices, boolean-set index) pairs, one per batch.
    """
    def __init__(self, *adatas, protein_boolean, sampler, device):
        assert all(adata.shape[0] == adatas[0].shape[0] for adata in adatas)
        self.arrs = [adata.X for adata in adatas]
        self.bools = protein_boolean
        self.sampler = sampler
        self.device = device

    def __iter__(self):
        for idxs, bool_set in self.sampler:
            batch = [tensor(arr[idxs], device=self.device) for arr in self.arrs]
            batch.append(tensor(self.bools[bool_set], device=self.device))
            # Trailing None keeps the batch layout aligned with the tri-head loader.
            batch.append(None)
            yield batch

    def __len__(self):
        return len(self.sampler)
class tensor_loader_trihead:
    """Yield batches of [per-adata tensors..., protein-boolean tensor, cell-type label tensor].

    celltypes holds a per-observation label; categories maps each label to an
    integer class index emitted as a long tensor.
    """
    def __init__(self, *adatas, protein_boolean, sampler, device, celltypes, categories):
        assert all(adata.shape[0] == adatas[0].shape[0] for adata in adatas)
        self.arrs = [adata.X for adata in adatas]
        self.celltypes, self.categories = celltypes, categories
        self.bools = protein_boolean
        self.sampler = sampler
        self.device = device

    def __iter__(self):
        for idxs, bool_set in self.sampler:
            batch = [tensor(arr[idxs], device=self.device) for arr in self.arrs]
            batch.append(tensor(self.bools[bool_set], device=self.device))
            labels = [self.categories[cat] for cat in self.celltypes[idxs]]
            batch.append(tensor(labels, device=self.device, dtype=long))
            yield batch

    def __len__(self):
        return len(self.sampler)
class tensor_loader_basic:
    """Yield fixed-size batches of adata.X rows as tensors; the last batch may be short."""
    def __init__(self, adata, batch_size, device):
        # Copy so later mutation of the AnnData matrix cannot affect batches.
        self.arr = adata.X.copy()
        self.batch_size = batch_size
        self.device = device

    def __iter__(self):
        pending = []
        for row in range(len(self.arr)):
            pending.append(row)
            if len(pending) == self.batch_size:
                yield tensor(self.arr[pending], device=self.device)
                pending = []
        if pending:
            yield tensor(self.arr[pending], device=self.device)

    def __len__(self):
        # Number of observations, not number of batches.
        return len(self.arr)
"""
Remove annotatation kmers
- Integrate multiple background kmer files (eg. somtic, somatic_and_germline, germline, ref annotation
- Add a column in graph (foreground) kmer file to indicate if the kmer also appear in given annotation kmer set
"""
import pandas as pd
import logging
import os
import sys
from .io_ import read_pq_with_dict
from .io_ import save_pd_toparquet
def mode_samplespecif(arg):
    """Mark or remove foreground kmers that also occur in the annotation (background) sets.

    Builds a unique background kmer set from ``arg.annot_kmer_files`` (cached
    at ``arg.bg_file_path``), then for each existing junction kmer file either
    drops background kmers (``arg.remove_bg``) or appends an ``is_neo_flag``
    column that is True for kmers absent from the background.
    """
    logging.info(">>>>>>>>> Remove annotation: Start")
    bg = [os.path.exists(kmer_file) for kmer_file in arg.annot_kmer_files]
    fg = [os.path.exists(junction_kmer_file) for junction_kmer_file in arg.junction_kmer_files]
    ## BUG FIX: the two error messages were swapped — `bg` tracks the
    ## annotation kmer files and `fg` the sample/junction kmer files.
    if not sum(bg):
        logging.error("None of the annotation kmer files exists, consider changing --annot_kmer_file")
        sys.exit(1)
    if not sum(fg):
        logging.error("None of the sample kmer files exists, consider changing --junction_kmer_files")
        sys.exit(1)
    bg_kmer_set = set()
    if not os.path.exists(arg.bg_file_path):
        ## Aggregate the union of all available annotation kmer sets.
        for kmer_file in arg.annot_kmer_files:
            if os.path.exists(kmer_file):
                logging.info("...consider annotation file:{}".format(kmer_file))
                f = read_pq_with_dict(kmer_file, ['kmer'])
                bg_kmer_set.update(f['kmer'])
            else:
                logging.info("WARNING annotation file: {} does not exist".format(kmer_file))
        ## BUG FIX: `compression=compression` referenced an undefined name
        ## (NameError); use the same codec as the foreground output below.
        save_pd_toparquet(arg.bg_file_path, pd.DataFrame(bg_kmer_set, columns=['kmer'], dtype='str'),
                          compression='SNAPPY', verbose=True)
        logging.info("generated unique background kmer file in {} \n ".format(arg.bg_file_path))
    else:
        bg_kmer_set = set(read_pq_with_dict(arg.bg_file_path, ['kmer'])['kmer'])
        logging.info("reading unique background kmer file in {} \n ".format(arg.bg_file_path)
                     )
    for junction_kmer_file in arg.junction_kmer_files:
        if os.path.exists(junction_kmer_file):
            logging.info("...consider foreground file:{}".format(junction_kmer_file))
            kmer_df = read_pq_with_dict(junction_kmer_file, ['kmer']).to_pandas()
            ## bg_flag is True for kmers unique to the foreground (not in background).
            uniq_ref_kmer_set = set(kmer_df['kmer']).difference(bg_kmer_set)
            bg_flag = kmer_df['kmer'].apply(lambda x: x in uniq_ref_kmer_set)
            if arg.remove_bg:
                kmer_df = kmer_df[bg_flag]
            else:
                kmer_df['is_neo_flag'] = bg_flag
            output_file_path = os.path.join(arg.output_dir, junction_kmer_file.split('/')[-1].replace('.pq', '') + '_' + arg.output_suffix + '.pq')
            save_pd_toparquet( output_file_path, kmer_df,
                              compression='SNAPPY', verbose=True)
            logging.info("output bg-removed kmer file : {} \n ".format(output_file_path))
        else:
            logging.info("WARNING foreground file: {} does not exist".format(junction_kmer_file))
    logging.info(">>>>>>>>> Remove annotation: Finish\n")
|
# Provider carrying Thrift source files for a rule and its dependencies.
ThriftInfo = provider(fields = [
    "srcs",  # The source files in this rule
    "transitive_srcs",  # the transitive version of the above
])
|
# Membership operators: `in` / `not in` report whether a value occurs in a sequence.
my_list = [1, 2, 3, "oh no"]

if "oh no" not in my_list:
    print("oh no is not an element in my_list")
else:
    print("oh no is an element in my_list")

if 4 in my_list:
    print("4 is an element in my_list")
else:
    print("4 is not an element in my_list")
|
class F(object):
    """Root of the demo hierarchy."""
    pass
class E(F):
    """Direct subclass of F."""
    pass
class G(E, F):
    """Diamond leaf inheriting from both E and F.

    BUG FIX: the original base order was (F, E). Because E is a subclass of
    F, the C3 linearisation requires E before F; listing F first makes
    Python raise ``TypeError: Cannot create a consistent method resolution
    order`` at class-creation time. Subclass-before-superclass order (E, F)
    linearises to (G, E, F, object).
    """
    pass
|
from datetime import datetime
import pytest
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group
from django.contrib.sites.models import Site
from django.test import TestCase, RequestFactory
from molo.commenting.models import MoloComment
from molo.core.models import ArticlePage, SectionPage
from molo.core.tests.base import MoloTestCaseMixin
from molo.surveys.models import SurveysIndexPage
from wagtail_personalisation.models import Segment
from molo.surveys.models import (
PersonalisableSurveyFormField, PersonalisableSurvey)
from ..rules import CommentCountRule, ProfileDataRule
@pytest.mark.django_db
class TestProfileDataRuleSegmentation(TestCase, MoloTestCaseMixin):
    """Tests for ProfileDataRule.test_user against profile gender and user date fields."""

    def setUp(self):
        self.mk_main()
        self.request_factory = RequestFactory()
        # Fabricate a request with a logged-in user
        # so we can use it to test the segment rule
        user = get_user_model().objects \
            .create_user(username='tester',
                         email='tester@example.com',
                         password='tester')
        self.request = self.request_factory.get('/')
        self.request.user = user

    def _set_gender(self, gender):
        """Set the request user's profile gender and persist it.

        BUG FIX: the gender lives on the profile, so the *profile* must be
        saved; the original setters saved the (unchanged) user object, so the
        gender change was never persisted.
        """
        self.request.user.profile.gender = gender
        self.request.user.profile.save()

    def set_user_to_male(self):
        self._set_gender('m')

    def set_user_to_female(self):
        self._set_gender('f')

    def set_user_to_unspecified(self):
        self._set_gender('-')

    def test_unspecified_passes_unspecified_rule(self):
        self.set_user_to_unspecified()
        unspecified_rule = ProfileDataRule(
            field='profiles.userprofile__gender', value='-')
        self.assertTrue(unspecified_rule.test_user(self.request))

    def test_male_passes_male_rule(self):
        self.set_user_to_male()
        male_rule = ProfileDataRule(field='profiles.userprofile__gender',
                                    value='m')
        self.assertTrue(male_rule.test_user(self.request))

    def test_female_passes_female_rule(self):
        self.set_user_to_female()
        female_rule = ProfileDataRule(field='profiles.userprofile__gender',
                                      value='f')
        self.assertTrue(female_rule.test_user(self.request))

    def test_unspecified_fails_female_rule(self):
        self.set_user_to_unspecified()
        female_rule = ProfileDataRule(field='profiles.userprofile__gender',
                                      value='f')
        self.assertFalse(female_rule.test_user(self.request))

    def test_female_fails_unspecified_rule(self):
        self.set_user_to_female()
        unspecified_rule = ProfileDataRule(
            field='profiles.userprofile__gender', value='-')
        self.assertFalse(unspecified_rule.test_user(self.request))

    def test_male_fails_unspecified_rule(self):
        self.set_user_to_male()
        unspecified_rule = ProfileDataRule(
            field='profiles.userprofile__gender', value='-')
        self.assertFalse(unspecified_rule.test_user(self.request))

    def test_unexisting_profile_field_fails(self):
        """A rule naming a non-existent field must surface FieldDoesNotExist."""
        rule = ProfileDataRule(field='auth.User__non_existing_field',
                               value='l')
        with self.assertRaises(FieldDoesNotExist):
            rule.test_user(self.request)

    def test_not_implemented_model_raises_exception(self):
        """A rule naming an unknown app/model must surface LookupError."""
        rule = ProfileDataRule(field='lel.not_existing_model__date_joined',
                               value='2')
        with self.assertRaises(LookupError):
            rule.test_user(self.request)

    def test_not_logged_in_user_fails(self):
        rule = ProfileDataRule(field='auth.User__date_joined',
                               value='2012-09-23')
        self.request.user = AnonymousUser()
        self.assertFalse(rule.test_user(self.request))

    def test_none_value_on_related_field_fails(self):
        rule = ProfileDataRule(field='auth.User__date_joined',
                               value='2012-09-23')
        self.request.user.date_joined = None
        self.assertFalse(rule.test_user(self.request))

    def test_none_value_with_not_equal_rule_field_passes(self):
        # A None field value trivially differs from the rule's value.
        rule = ProfileDataRule(field='auth.User__date_joined',
                               operator=ProfileDataRule.NOT_EQUAL,
                               value='2012-09-23')
        self.request.user.date_joined = None
        self.assertTrue(rule.test_user(self.request))
@pytest.mark.django_db
class TestProfileDataRuleValidation(TestCase):
    """Validation-time (full_clean) checks for ProfileDataRule."""

    def setUp(self):
        self.segment = Segment.objects.create()

    def test_invalid_regex_value_raises_validation_error(self):
        """An unparsable regex value must be rejected with a descriptive message."""
        rule = ProfileDataRule(segment=self.segment,
                               operator=ProfileDataRule.REGEX,
                               field='aith.User__date_joined',
                               value='[')
        with self.assertRaises(ValidationError) as context:
            rule.full_clean()
        # BUG FIX: `self.failIf` is a deprecated unittest alias that was
        # removed in Python 3.12; the manual found-flag loop is replaced by
        # assertTrue(any(...)).
        self.assertTrue(any(
            msg.startswith('Regular expression error')
            for msg in context.exception.messages))

    def test_age_operator_on_non_date_field_raises_validation_error(self):
        rule = ProfileDataRule(segment=self.segment,
                               operator=ProfileDataRule.OF_AGE,
                               field='profiles.UserProfile__gender',
                               value='1')
        with self.assertRaises(ValidationError) as context:
            rule.full_clean()
        self.assertIn('You can choose age operators only on date and '
                      'date-time fields.', context.exception.messages)

    def test_age_operator_on_negative_numbers_raises_validation_error(self):
        rule = ProfileDataRule(segment=self.segment,
                               operator=ProfileDataRule.OF_AGE,
                               field='profiles.UserProfile__date_of_birth',
                               value='-1')
        with self.assertRaises(ValidationError) as context:
            rule.full_clean()
        self.assertIn('Value has to be non-negative since it represents age.',
                      context.exception.messages)
class TestCommentCountRuleSegmentation(TestCase, MoloTestCaseMixin):
    """Tests for CommentCountRule.test_user across comment counts and operators."""
    def setUp(self):
        self.mk_main()
        self.request_factory = RequestFactory()
        self.request = self.request_factory.get('/')
        # The rule is evaluated against the request's user; a second user
        # exists to verify other users' comments are not counted.
        self.request.user = get_user_model().objects.create_user(
            username='tester', email='tester@example.com', password='tester')
        self.other_user = get_user_model().objects.create_user(
            username='other', email='other@example.com', password='other')
        self.section = SectionPage(title='test section')
        self.section_index.add_child(instance=self.section)
        self.article = self.add_article('first')
        self.other_article = self.add_article('other')
    def add_article(self, title):
        """Create and save a draft article under the test section."""
        new_article = ArticlePage(title=title)
        self.section.add_child(instance=new_article)
        new_article.save_revision()
        return new_article
    def add_comment(self, user, article, **kwargs):
        """Attach a MoloComment by *user* to *article*; kwargs pass through (e.g. is_removed)."""
        return MoloComment.objects.create(
            comment="test comment",
            user=user,
            site=Site.objects.get_current(),
            content_type=article.content_type,
            object_pk=article.id,
            submit_date=datetime.now(),
            **kwargs
        )
    def test_user_passes_rule_when_they_comment(self):
        rule = CommentCountRule(count=1)
        self.add_comment(self.request.user, self.article)
        self.assertTrue(rule.test_user(self.request))
    def test_other_user_doesnt_get_counted(self):
        # Only the request user's comments count toward the rule.
        rule = CommentCountRule(count=1)
        self.add_comment(self.request.user, self.article)
        self.add_comment(self.other_user, self.article)
        self.assertTrue(rule.test_user(self.request))
    def test_user_fails_rule_when_they_comment_too_much(self):
        # Default operator matches the exact count, so two comments fail count=1.
        rule = CommentCountRule(count=1)
        self.add_comment(self.request.user, self.article)
        self.add_comment(self.request.user, self.article)
        self.assertFalse(rule.test_user(self.request))
    def test_user_fails_rule_when_they_dont_comment_enough(self):
        rule = CommentCountRule(count=2)
        self.add_comment(self.request.user, self.article)
        self.assertFalse(rule.test_user(self.request))
    def test_user_passes_rule_when_they_comment_multiple_articles(self):
        # Comments are counted across articles, not per article.
        rule = CommentCountRule(count=2)
        self.add_comment(self.request.user, self.article)
        self.add_comment(self.request.user, self.other_article)
        self.assertTrue(rule.test_user(self.request))
    def test_user_fails_rule_when_comment_removed(self):
        # Removed comments are excluded from the count.
        rule = CommentCountRule(count=1)
        self.add_comment(self.request.user, self.article, is_removed=True)
        self.assertFalse(rule.test_user(self.request))
    def test_user_passes_lt(self):
        rule = CommentCountRule(count=1, operator=CommentCountRule.LESS_THAN)
        self.assertTrue(rule.test_user(self.request))
    def test_user_fails_lt(self):
        rule = CommentCountRule(count=1, operator=CommentCountRule.LESS_THAN)
        self.add_comment(self.request.user, self.article)
        self.assertFalse(rule.test_user(self.request))
    def test_user_passes_gt(self):
        rule = CommentCountRule(count=1, operator=CommentCountRule.GREATER_THAN)
        self.add_comment(self.request.user, self.article)
        self.add_comment(self.request.user, self.article)
        self.assertTrue(rule.test_user(self.request))
    def test_user_fails_gt(self):
        rule = CommentCountRule(count=1, operator=CommentCountRule.GREATER_THAN)
        self.add_comment(self.request.user, self.article)
        self.assertFalse(rule.test_user(self.request))
|
from .unsupervised_model import unsupervised_model |
'''
@queenjobo @ksamocha
16/08/2019
Functions to assign weights to different variant classes
'''
import pandas as pd
import logging as lg
import numpy as np
import itertools
from scipy import stats
import sys
def get_loess_weight_dic(wdicpath, cqs, print_flag):
    '''
    Build the LOESS enrichment weight lookup for the given consequence classes.

    Returns a tuple of:
      - dict mapping "shethigh(T/F)" + "constrained(T/F)" + CADD score rounded
        to 3 figures -> LOESS enrichment weight,
      - the maximum weight for each of the four (shet, constraint) subsets,
      - the maximum CADD score covered by the table.
    (print_flag is accepted for interface consistency with the callers.)
    '''
    table = pd.read_table(wdicpath)
    table = table[table.cq.isin(cqs)]
    # split rows by missense-constraint status
    constrained_rows = table[table.constrained == True]
    unconstrained_rows = table[table.constrained == False]

    def _key_weight_map(rows, shethigh_flag, constrained_flag, column):
        # key = shethigh flag + constrained flag + CADD score rounded to 3 figures
        keys = shethigh_flag + constrained_flag + rows.score.round(3).astype(str)
        return dict(zip(keys, rows[column]))

    # merge the four subset dictionaries into one lookup
    alldic = {}
    alldic.update(_key_weight_map(constrained_rows, "True", "True", "shethigh"))
    alldic.update(_key_weight_map(constrained_rows, "False", "True", "shetlow"))
    alldic.update(_key_weight_map(unconstrained_rows, "True", "False", "shethigh"))
    alldic.update(_key_weight_map(unconstrained_rows, "False", "False", "shetlow"))

    return (alldic,
            constrained_rows.shethigh.max(),
            constrained_rows.shetlow.max(),
            unconstrained_rows.shethigh.max(),
            unconstrained_rows.shetlow.max(),
            table.score.max())
def get_missense_weights(rates,wdicpath,print_flag):
    '''
    get weights for missense variants

    Builds a lookup key (shethigh + constrained + rounded CADD score) per
    missense row of `rates`, maps it through the LOESS weight dictionary, and
    falls back to the subset maximum when the CADD score exceeds the table's
    range. NOTE: the np.where chain below is order-dependent — the key column
    must be built before it is mapped, and the caddmax fallbacks must run
    after the map. Non-missense rows keep their existing weight (or 'NaN').
    '''
    cqname = ["missense","missense_variant"]
    wdic,shcm,slcm,shum,slum,caddmax = get_loess_weight_dic(wdicpath,cqname, print_flag)
    if 'weight' in rates.columns:
        # weight column has already been defined, so only replace for consequences under study
        rates['weight'] = np.where(rates.cq.isin(cqname), rates.shethigh.astype(str) + rates.constrained.astype(str) + rates.score.round(3).astype(str), rates['weight'])
    else:
        rates['weight'] = np.where(rates.cq.isin(cqname), rates.shethigh.astype(str) + rates.constrained.astype(str) + rates.score.round(3).astype(str), 'NaN')
    # translate the composed key into the actual LOESS weight
    rates['weight'] = np.where(rates.cq.isin(cqname), rates['weight'].map(wdic), rates['weight'])
    #when cadd score exceeds bin replace with max for that subset
    rates['weight'] = np.where((rates.constrained == True) & (rates.shethigh == True) & (rates.score > caddmax) & (rates.cq.isin(cqname)), shcm, rates['weight'])
    rates['weight'] = np.where((rates.constrained == True) & (rates.shethigh == False) & (rates.score > caddmax) & (rates.cq.isin(cqname)), slcm, rates['weight'])
    rates['weight'] = np.where((rates.constrained == False) & (rates.shethigh == True) & (rates.score > caddmax) & (rates.cq.isin(cqname)), shum, rates['weight'])
    rates['weight'] = np.where((rates.constrained == False) & (rates.shethigh == False) & (rates.score > caddmax) & (rates.cq.isin(cqname)), slum, rates['weight'])
    return(rates)
def get_nonsense_weights(rates,wdicpath, print_flag):
    '''
    get weights for nonsense variants

    Mirrors get_missense_weights for stop-gained variants: keys
    str(shethigh) + str(constrained) + str(round(score, 3)) are mapped
    through the LOESS weight dictionary, and scores beyond the binned
    maximum fall back to the unconstrained subset maxima.

    NOTE(review): unlike get_missense_weights, only the constrained ==
    False fallbacks are applied below; shcm/slcm are unpacked but never
    used. Confirm whether the constrained fallbacks are intentionally
    omitted for nonsense variants.
    '''
    cqname = ["nonsense","stop_gained"]
    wdic,shcm,slcm,shum,slum,caddmax = get_loess_weight_dic(wdicpath,cqname, print_flag)
    if 'weight' in rates.columns:
        # weight column has already been defined, so only replace for consequences under study
        rates['weight'] = np.where(rates.cq.isin(cqname), rates.shethigh.astype(str) + rates.constrained.astype(str) + rates.score.round(3).astype(str), rates['weight'])
    else:
        rates['weight'] = np.where(rates.cq.isin(cqname), rates.shethigh.astype(str) + rates.constrained.astype(str) + rates.score.round(3).astype(str), 'NaN')
    # map the composite keys to LOESS weights (unmatched keys become NaN)
    rates['weight'] = np.where(rates.cq.isin(cqname), rates['weight'].map(wdic), rates['weight'])
    if print_flag:
        sys.stderr.write('after line two\n{0}\n\n'.format(rates.head()))
    # scores past the binned maximum get the unconstrained subset maxima
    rates['weight'] = np.where((rates.constrained == False) & (rates.shethigh == True) & (rates.score > caddmax) & (rates.cq.isin(cqname)), shum, rates['weight'])
    rates['weight'] = np.where((rates.constrained == False) & (rates.shethigh == False) & (rates.score > caddmax) & (rates.cq.isin(cqname)), slum, rates['weight'])
    return(rates)
def get_other_weight_dic(wdicpath):
    '''
    Build a lookup of class-level weights for non-missense consequences
    (synonymous, splice_lof, inframe, frameshift).

    Keys are str(shethigh) + "False" + consequence name; values are the
    shethigh/shetlow enrichment for that consequence class.
    '''
    table = pd.read_table(wdicpath)
    other = table[table.cq.isin(["synonymous","splice_lof","inframe","frameshift"])]
    lookup = dict(zip("TrueFalse" + other.cq, other.shethigh))
    lookup.update(zip("FalseFalse" + other.cq, other.shetlow))
    return lookup
def get_other_weights(rates, wdicpath):
    '''
    get weights for non missense variants - just enrichment of entire class
    as opposed to stratified by CADD

    Assumes rates already has a 'weight' column (get_missense_weights runs
    first in get_weights). Mutates and returns rates.

    Fix: replaced chained assignment (rates.weight.loc[...] = ... and an
    inplace replace on the column view) with rates.loc[mask, 'weight'] and
    a column reassignment; the chained forms may be silently dropped under
    pandas copy-on-write.
    '''
    othercq = ["synonymous","splice_lof","inframe","frameshift"]
    con = get_other_weight_dic(wdicpath)
    mask = rates.cq.isin(othercq)
    # key: shethigh flag + constrained flag + consequence name (row-aligned)
    rates.loc[mask, 'weight'] = (
        rates.shethigh.astype(str) + rates.constrained.astype(str) + rates.cq
    )
    rates['weight'] = rates['weight'].replace(con)
    return(rates)
def get_indel_weights(wdicpath):
    '''Rows of the weight table whose consequence mentions "frame"
    (frameshift / inframe indels).'''
    table = pd.read_table(wdicpath)
    return table[table.cq.str.contains("frame")]
def get_weights(rates, wdicpath, print_flag):
    '''
    Populate the 'weight' column of rates for every consequence class and
    collect the indel weight table.

    Applies missense, then nonsense, then class-level ("other") weights,
    and returns (rates, indel_weights).
    '''
    for assign in (get_missense_weights, get_nonsense_weights):
        rates = assign(rates, wdicpath, print_flag)
    rates = get_other_weights(rates, wdicpath)
    return (rates, get_indel_weights(wdicpath))
|
from .transforms import (
Compose,
ImageResize,
RandomHorizontalFlip,
RandomVerticalFlip,
ImageNormalization,
ToTensor
)
__all__ = [
'Compose',
'ImageResize',
'RandomHorizontalFlip',
'RandomVerticalFlip',
'ImageNormalization',
'ToTensor'
]
|
#!/usr/bin/env python3
import os
import sys
from hashlib import md5
import Bio.SeqIO
from iggtools.common.argparser import add_subcommand, SUPPRESS
from iggtools.common.utils import tsprint, InputStream, retry, command, multithreading_map, find_files, upload, pythonpath
from iggtools.models.uhgg import UHGG, get_uhgg_layout, unified_genome_id, destpath
from iggtools.params import outputs
CONCURRENT_GENOME_IMPORTS = 20
@retry
def find_files_with_retry(f):
    """find_files with automatic retries; occasional aws s3 failures make a
    plain call unreliable (see the retry note further down in this file)."""
    return find_files(f)
def decode_genomes_arg(args, genomes):
    """
    Expand the --genomes argument into a sorted list of genome ids.

    args.genomes may be "ALL" (case-insensitive), a comma-separated list of
    genome ids, and/or slices of the form "i:n" selecting genomes whose
    numeric suffix (after stripping "GUT_GENOME") is congruent to i mod n.

    Fixes: the bare `except:` was narrowed to `except Exception:`, and the
    error message now references args.genomes instead of the loop variable
    `g`, which is unbound when the failure happens before the loop (e.g.
    args.genomes is None) and would mask the real exception with a
    NameError.
    """
    selected_genomes = set()
    try:  # pylint: disable=too-many-nested-blocks
        if args.genomes.upper() == "ALL":
            selected_genomes = set(genomes)
        else:
            for g in args.genomes.split(","):
                if ":" not in g:
                    selected_genomes.add(g)
                else:
                    i, n = g.split(":")
                    i = int(i)
                    n = int(n)
                    assert 0 <= i < n, f"Genome class and modulus make no sense: {i}, {n}"
                    for gid in genomes:
                        gid_int = int(gid.replace("GUT_GENOME", ""))
                        if gid_int % n == i:
                            selected_genomes.add(gid)
    except Exception:
        tsprint(f"ERROR: Genomes argument is not a list of genome ids or slices: {args.genomes}")
        raise
    return sorted(selected_genomes)
# 1. Occasional failures in aws s3 cp require a retry.
# 2. In future, for really large numbers of genomes, we may prefer a separate wave of retries for all first-attempt failures.
# 3. The Bio.SeqIO.parse() code is CPU-bound and thus it's best to run this function in a separate process for every genome.
@retry
def clean_genome(genome_id, representative_id):
    """
    Rewrite a raw genome FASTA with normalized contig headers.

    Streams the raw genome file (located via get_uhgg_layout with the
    representative id) and writes "<genome_id>.fna" where each contig
    header encodes the unified genome id, the contig ordinal, the length
    in kb, and the last 6 hex digits of the sequence's md5.
    Returns the output filename.
    """
    raw_genome = get_uhgg_layout(representative_id, "fna.lz4", genome_id)["raw_genome_file"]
    output_genome = f"{genome_id}.fna"
    with open(output_genome, 'w') as o_genome, \
        InputStream(raw_genome, check_path=False) as genome:
        for sn, rec in enumerate(Bio.SeqIO.parse(genome, 'fasta')):
            # uppercase before hashing and writing, so equivalent sequences
            # that differ only in case get the same header hash
            contig_seq = str(rec.seq).upper()
            contig_len = len(contig_seq)
            ugid = unified_genome_id(genome_id)
            contig_hash = md5(contig_seq.encode('utf-8')).hexdigest()[-6:]
            new_contig_id = f"{ugid}_C{sn}_L{contig_len/1000.0:3.1f}k_H{contig_hash}"
            o_genome.write(f">{new_contig_id}\n{contig_seq}\n")
    return output_genome
def import_uhgg(args):
    """Dispatch to master or slave mode depending on --zzz_slave_toc."""
    if not args.zzz_slave_toc:
        import_uhgg_master(args)
    else:
        import_uhgg_slave(args)
def import_uhgg_master(args):
    """
    Orchestrate genome imports: for each selected genome, spawn a slave
    subcommand in its own work directory and upload its log on completion.
    """
    # Fetch table of contents from s3.
    # This will be read separately by each species build subcommand, so we make a local copy.
    local_toc = os.path.basename(outputs.genomes)
    command(f"rm -f {local_toc}")
    command(f"aws s3 cp --only-show-errors {outputs.genomes} {local_toc}")
    db = UHGG(local_toc)
    species_for_genome = db.genomes
    def genome_work(genome_id):
        # Import one genome via a recursive slave invocation; skipped if the
        # destination already exists and --force was not given.
        assert genome_id in species_for_genome, f"Genome {genome_id} is not in the database."
        species_id = species_for_genome[genome_id]
        dest_file = destpath(get_uhgg_layout(species_id, "fna", genome_id)["imported_genome_file"])
        msg = f"Importing genome {genome_id} from species {species_id}."
        if find_files_with_retry(dest_file):
            if not args.force:
                tsprint(f"Destination {dest_file} for genome {genome_id} already exists.  Specify --force to overwrite.")
                return
            msg = msg.replace("Importing", "Reimporting")
        tsprint(msg)
        logfile = get_uhgg_layout(species_id, "", genome_id)["imported_genome_log"]
        slave_log = os.path.basename(logfile)
        slave_subdir = f"{species_id}__{genome_id}"
        if not args.debug:
            # start from a clean work directory unless we are debugging
            command(f"rm -rf {slave_subdir}")
        if not os.path.isdir(slave_subdir):
            command(f"mkdir {slave_subdir}")
        # Recursive call via subcommand.  Use subdir, redirect logs.
        slave_cmd = f"cd {slave_subdir}; PYTHONPATH={pythonpath()} {sys.executable} -m iggtools import_uhgg --genome {genome_id} --zzz_slave_mode --zzz_slave_toc {os.path.abspath(local_toc)} {'--debug' if args.debug else ''} &>> {slave_log}"
        with open(f"{slave_subdir}/{slave_log}", "w") as slog:
            slog.write(msg + "\n")
            slog.write(slave_cmd + "\n")
        try:
            command(slave_cmd)
        finally:
            # Cleanup should not raise exceptions of its own, so as not to interfere with any
            # prior exceptions that may be more informative.  Hence check=False.
            upload(f"{slave_subdir}/{slave_log}", destpath(logfile), check=False)
            if not args.debug:
                command(f"rm -rf {slave_subdir}", check=False)
    genome_id_list = decode_genomes_arg(args, species_for_genome)
    multithreading_map(genome_work, genome_id_list, num_threads=CONCURRENT_GENOME_IMPORTS)
def import_uhgg_slave(args):
    """
    https://github.com/czbiohub/iggtools/wiki

    Slave-mode worker: clean a single genome's FASTA and upload it to its
    destination, after wiping any partial prior upload at that path.
    Must only be invoked by import_uhgg_master via --zzz_slave_mode.
    """
    violation = "Please do not call import_uhgg_slave directly.  Violation"
    assert args.zzz_slave_mode, f"{violation}:  Missing --zzz_slave_mode arg."
    assert os.path.isfile(args.zzz_slave_toc), f"{violation}:  File does not exist: {args.zzz_slave_toc}"
    db = UHGG(args.zzz_slave_toc)
    representatives = db.representatives
    species_for_genome = db.genomes
    genome_id = args.genomes
    species_id = species_for_genome[genome_id]
    representative_id = representatives[species_id]
    dest = destpath(get_uhgg_layout(species_id, "fna", genome_id)["imported_genome_file"])
    # clear any leftover partial upload before re-uploading
    command(f"aws s3 rm --recursive {os.path.dirname(dest)}")
    cleaned = clean_genome(genome_id, representative_id)
    upload(cleaned, dest)
def register_args(main_func):
    """
    Register the 'import_uhgg' subcommand and its CLI arguments.
    Used as a decorator on main; returns main_func unchanged.
    """
    subparser = add_subcommand('import_uhgg', main_func, help='import selected genomes from UHGG')
    subparser.add_argument('--genomes',
                           dest='genomes',
                           required=False,
                           help="genome[,genome...] to import;  alternatively, slice in format idx:modulus, e.g. 1:30, meaning import genomes whose ids are 1 mod 30;  or, the special keyword 'all' meaning all genomes")
    subparser.add_argument('--zzz_slave_toc',
                           dest='zzz_slave_toc',
                           required=False,
                           help=SUPPRESS) # "reserved to pass table of contents from master to slave"
    return main_func
@register_args
def main(args):
    """Subcommand entry point: log the invocation, then run the import."""
    tsprint(f"Executing iggtools subcommand {args.subcommand} with args {vars(args)}.")
    import_uhgg(args)
|
from urllib.parse import urlencode


def make_url(url, url_params):
    """
    Build *url* with *url_params* encoded as its query string.

    ``url_params["fields"]`` may be either a ready-made string or an
    iterable of field names, which is joined with commas before encoding.

    Fix: the original tested ``isinstance(params, str)`` — ``params`` is
    always a dict, so the check was always False and a string ``fields``
    value got character-joined ("name" -> "n,a,m,e"). We now test the
    ``fields`` value itself.
    """
    params = {**url_params}
    if "fields" in params and not isinstance(params["fields"], str):
        # join an iterable of field names into the comma-separated form
        params["fields"] = ",".join(params["fields"])
    return "{}?{}".format(url, urlencode(params))
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Demonstrates eelbrain topographic plotting (TopoButterfly, Topomap,
# SensorMap) on the MNE sample dataset.
# This script can be executed from the Terminal using $ pythonw
# dataset: mne_sample
from eelbrain import *
# NOTE(review): `w` appears unused below -- confirm before removing
w = 3
# load sample data (auditory-modality cases only, sensor-space data)
ds = datasets.get_mne_sample(sub="modality=='A'", sns=True)
# interactive butterfly plot with linked topography
plot.TopoButterfly('meg', ds=ds)
# plot topography at a specific time point
plot.Topomap("meg.sub(time=.07)", ds=ds)
# plot topography at a specific time point by condition
plot.Topomap("meg.sub(time=.07)", 'side', ds=ds)
# plot topography at a specific time point for one condition;
# clip with a circular outline
plot.Topomap("meg.sub(side=='L', time=.07)", ds=ds, clip='circle')
# plot topography with a specific projection
plot.Topomap("meg.sub(time=.07)", proj='back', ds=ds)
# same but without clipping the map
plot.Topomap("meg.sub(time=.07)", proj='back', clip=False, ds=ds)
# mark sensors (all sensors with z coordinate < 0)
y = ds['meg'].sub(time=.07).mean('case')
mark = ds['meg'].sensor.z < 0
plot.Topomap(y, mark=mark)
# different cmap, and manually adjusted head outline
plot.Topomap(y, head_radius=(0.3, 0.33), head_pos=0.02, cmap='jet')
# sensor map with sensor names as labels
plot.SensorMap(y, mark=mark, head_radius=(0.3, 0.33), head_pos=0.02,
               labels='name')
# wider sensor map with the same marked sensors
p = plot.SensorMap(y, mark=mark, w=9, head_radius=(0.3, 0.33), head_pos=0.02)
# move overlapping labels apart
p.separate_labels()
# run the GUI if the script is executed from the shell
if __name__ == '__main__':
    gui.run()
|
# -*- coding: utf-8 -*-
"""TargetArea serializer module.
"""
from django.conf import settings
from django.contrib.postgres.fields.jsonb import KeyTextTransform
from django.core.cache import cache
from django.db import connection
from django.db.models import Case, Count, F, IntegerField, Sum, Value, When
from django.db.models.functions import Cast, Coalesce
from rest_framework import serializers
from rest_framework_gis.fields import GeometryField
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from mspray.apps.main.models.location import Location
from mspray.apps.main.models.spray_day import SprayDay
from mspray.apps.main.models.spray_operator import SprayOperatorDailySummary
from mspray.apps.main.models.spraypoint import SprayPoint
from mspray.apps.main.models.target_area import TargetArea
from mspray.apps.main.utils import (
get_ta_in_location,
parse_spray_date,
sprayable_queryset,
)
from mspray.libs.common_tags import MOBILISED_FIELD, SENSITIZED_FIELD
FUNCTION_TEMPLATE = "%(function)s(%(expressions)s AS FLOAT)"
SPATIAL_QUERIES = settings.MSPRAY_SPATIAL_QUERIES
TA_LEVEL = settings.MSPRAY_TA_LEVEL
REASON_FIELD = settings.MSPRAY_UNSPRAYED_REASON_FIELD
REASON_REFUSED = settings.MSPRAY_UNSPRAYED_REASON_REFUSED
REASONS = settings.MSPRAY_UNSPRAYED_REASON_OTHER.copy()
REASONS.pop(REASON_REFUSED)
REASON_OTHER = REASONS.keys()
HAS_UNIQUE_FIELD = getattr(settings, "MSPRAY_UNIQUE_FIELD", None)
LOCATION_SPRAYED_PERCENTAGE = getattr(
settings, "LOCATION_SPRAYED_PERCENTAGE", 90
)
SPRAY_AREA_INDICATOR_SQL = """
SELECT
SUM(CASE WHEN "other" > 0 THEN 1 ELSE 0 END) AS "other",
SUM(CASE WHEN "not_sprayable" > 0 THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN "found" > 0 THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN "sprayed" > 0 THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN "new_structures" > 0 THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN "not_sprayed" > 0 THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN "refused" >0 THEN 1 ELSE 0 END) AS "refused" FROM
(
SELECT
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = true) THEN 0 ELSE 1 END) AS "other",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."household_id" IS NOT NULL) THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN ("main_sprayday"."data" ? 'newstructure/gps' AND "main_sprayday"."sprayable" = true) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = true AND "main_sprayday"."household_id" IS NULL) THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "refused"
FROM "main_sprayday" LEFT OUTER JOIN "main_spraypoint" ON ("main_sprayday"."id" = "main_spraypoint"."sprayday_id") WHERE ("main_sprayday"."location_id" = %s) GROUP BY "main_sprayday"."id"
) AS "sub_query";
""" # noqa
SPRAY_AREA_INDICATOR_SQL_DATE_FILTERED = """
SELECT
SUM(CASE WHEN "other" > 0 THEN 1 ELSE 0 END) AS "other",
SUM(CASE WHEN "not_sprayable" > 0 THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN "found" > 0 THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN "sprayed" > 0 THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN "new_structures" > 0 THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN "not_sprayed" > 0 THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN "refused" >0 THEN 1 ELSE 0 END) AS "refused" FROM
(
SELECT
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = true) THEN 0 ELSE 1 END) AS "other",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."household_id" IS NOT NULL) THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN ("main_sprayday"."data" ? 'newstructure/gps' AND "main_sprayday"."sprayable" = true) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = true AND "main_sprayday"."household_id" IS NULL) THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "refused"
FROM "main_sprayday" LEFT OUTER JOIN "main_spraypoint" ON ("main_sprayday"."id" = "main_spraypoint"."sprayday_id") WHERE ("main_sprayday"."location_id" = %s AND "main_sprayday"."spray_date" <= %s::date) GROUP BY "main_sprayday"."id"
) AS "sub_query";
""" # noqa
SPRAY_AREA_INDICATOR_SQL_WEEK = """
SELECT
SUM(CASE WHEN "other" > 0 THEN 1 ELSE 0 END) AS "other",
SUM(CASE WHEN "not_sprayable" > 0 THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN "found" > 0 THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN "sprayed" > 0 THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN "new_structures" > 0 THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN "not_sprayed" > 0 THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN "refused" >0 THEN 1 ELSE 0 END) AS "refused" FROM
(
SELECT
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = true) THEN 0 ELSE 1 END) AS "other",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL) THEN 1 ELSE 0 END) AS "not_sprayable",
SUM(CASE WHEN ("main_sprayday"."data" ? 'osmstructure:way:id' AND "main_sprayday"."sprayable" = false AND "main_spraypoint"."id" IS NOT NULL) THEN 0 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "found",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "sprayed",
SUM(CASE WHEN ("main_sprayday"."data" ? 'newstructure/gps' AND "main_sprayday"."sprayable" = true) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL) THEN 1 WHEN (("main_sprayday"."data" ? 'osmstructure:node:id' OR "main_sprayday"."data" ? 'newstructure/gps_osm_file:node:id') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NULL AND "main_sprayday"."was_sprayed" = true) THEN 1 ELSE 0 END) AS "new_structures",
SUM(CASE WHEN ("main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "not_sprayed",
SUM(CASE WHEN (("main_sprayday"."data" @> '{"osmstructure:notsprayed_reASon": "refused"}' OR "main_sprayday"."data" @> '{"newstructure/gps_osm_file:notsprayed_reason": "refused"}') AND "main_sprayday"."sprayable" = true AND "main_spraypoint"."id" IS NOT NULL AND "main_sprayday"."was_sprayed" = false) THEN 1 ELSE 0 END) AS "refused"
FROM "main_sprayday" LEFT OUTER JOIN "main_spraypoint" ON
("main_sprayday"."id" = "main_spraypoint"."sprayday_id") WHERE
("main_sprayday"."location_id" = %s AND EXTRACT('week' FROM "main_sprayday"."spray_date") <= %s) GROUP BY "main_sprayday"."id"
) AS "sub_query";
""" # noqa
def dictfetchall(cursor):
    """Return every remaining row from *cursor* as a list of
    column-name -> value dicts."""
    names = [column[0] for column in cursor.description]
    return [dict(zip(names, values)) for values in cursor.fetchall()]
def cached_queryset_count(key, queryset, query=None, params=None):
    """
    Return a cached count for *queryset*, computing and caching on miss.

    When *query* (raw SQL) is given, it is executed with *params* and the
    last row's ``id`` attribute is used as the count. The cache is
    bypassed while settings.DEBUG is on.

    Fix: replaced the mutable default argument ``params=[]`` with None.
    """
    if params is None:
        params = []
    count = cache.get(key) if settings.DEBUG is False else None
    if count is not None:
        return count
    if query is None:
        count = queryset.count()
    else:
        # the raw aggregate query yields the count in the id column
        for item in queryset.raw(query, params):
            count = item.id
    if count is None:
        count = 0
    cache.set(key, count)
    return count
def get_spray_data(obj, context):
    """
    Gather spray indicator counts for a location.

    For RHC/district levels, returns a values queryset annotated with
    per-child-location sums (found/sprayed/not_sprayed/not_sprayable/
    new_structures/refused/other/structures). For lower levels (spray
    areas), runs a raw SQL aggregate for the single location and returns
    one dict of indicator totals.

    context may carry 'request' (for an optional spray_date upper bound)
    and 'week_number' (upper bound on the week of spray_date).
    """
    request = context.get("request")
    spray_date = parse_spray_date(request) if request else None
    week_number = context.get("week_number")
    # NOTE(review): isinstance(obj, dict) would be the idiomatic check here
    if type(obj) == dict:
        loc = Location.objects.get(pk=obj.get("pk"))
    else:
        loc = obj
    if loc.level in ["RHC", "district"]:
        # aggregate over all spray days in the district/RHC, grouped by
        # child location
        if loc.level == "district":
            kwargs = {"district": loc}
        else:
            kwargs = {"rhc": loc}
        if week_number:
            kwargs["spray_date__week__lte"] = week_number
        qs = SprayDay.objects.filter(**kwargs)
        if spray_date:
            qs = qs.filter(spray_date__lte=spray_date)
        return qs.values("location").annotate(
            found=Sum(
                Case(
                    When(spraypoint__isnull=False, sprayable=True, then=1),
                    When(
                        spraypoint__isnull=True,
                        sprayable=True,
                        was_sprayed=True,
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            sprayed=Sum(
                Case(
                    When(
                        was_sprayed=True,
                        sprayable=True,
                        spraypoint__isnull=False,
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            not_sprayed=Sum(
                Case(
                    When(
                        was_sprayed=False,
                        spraypoint__isnull=False,
                        sprayable=True,
                        household__isnull=False,
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            not_sprayable=Sum(
                Case(
                    When(
                        sprayable=False,
                        data__has_key="osmstructure:way:id",
                        spraypoint__isnull=False,
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            new_structures=Sum(
                Case(
                    When(
                        sprayable=True,
                        spraypoint__isnull=False,
                        data__has_key="newstructure/gps",
                        then=1,
                    ),
                    When(
                        sprayable=True,
                        spraypoint__isnull=False,
                        data__has_key="osmstructure:node:id",
                        then=1,
                    ),
                    When(
                        sprayable=True,
                        spraypoint__isnull=False,
                        household__isnull=True,
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            refused=Sum(
                Case(
                    When(
                        sprayable=True,
                        was_sprayed=False,
                        spraypoint__isnull=False,
                        data__contains={REASON_FIELD: REASON_REFUSED},
                        then=1,
                    ),
                    default=0,
                    output_field=IntegerField(),
                )
            ),
            other=Sum(
                Case(
                    When(
                        sprayable=True,
                        was_sprayed=False,
                        spraypoint__isnull=False,
                        data__contains={REASON_FIELD: REASON_REFUSED},
                        then=0,
                    ),
                    When(sprayable=True, was_sprayed=True, then=0),
                    default=1,
                    output_field=IntegerField(),
                )
            ),
            structures=F("location__structures"),
        )
    # spray-area level: aggregate with raw SQL for this single location
    kwargs = {}
    # if not week_number:
    #     week_number = int(timezone.now().strftime('%W'))
    if week_number:
        kwargs["spray_date__week__lte"] = week_number
    # NOTE(review): this queryset appears unused — the raw SQL below does
    # the actual work; confirm before removing
    queryset = loc.sprayday_set.filter(**kwargs)
    if spray_date:
        queryset = queryset.filter(spray_date__lte=spray_date)
    cursor = connection.cursor()
    if spray_date:
        cursor.execute(
            SPRAY_AREA_INDICATOR_SQL_DATE_FILTERED,
            [loc.id, spray_date.strftime("%Y-%m-%d")],
        )
    elif week_number:
        cursor.execute(SPRAY_AREA_INDICATOR_SQL_WEEK, [loc.id, week_number])
    else:
        cursor.execute(SPRAY_AREA_INDICATOR_SQL, [loc.id])
    results = dictfetchall(cursor)
    return results[0]
def get_duplicates(obj, was_sprayed, spray_date=None):
    """
    Returns SprayDay queryset for structures visited more than once.

    obj may be a Location instance or a dict carrying its 'pk' (or
    'location') key. When was_sprayed is False, structures that were
    eventually sprayed, or deduplicated via spraypoint, are excluded.

    Fix: isinstance(obj, dict) instead of type(obj) == dict.
    """
    def _duplicates(spray_status):
        # structures (grouped by osmid) recorded more than once with the
        # given spray status, optionally limited to spray_date
        qs = loc.sprayday_set
        if spray_date:
            qs = qs.filter(spray_date__lte=spray_date)
        return (
            qs.filter(osmid__isnull=False, was_sprayed=spray_status)
            .values("osmid")
            .annotate(dupes=Count("osmid"))
            .filter(dupes__gt=1)
        )
    loc = (
        Location.objects.get(pk=obj.get("pk") or obj.get("location"))
        if isinstance(obj, dict)
        else obj
    )
    dupes = _duplicates(was_sprayed)
    if not was_sprayed:
        sprayed = _duplicates(True).values_list("osmid", flat=True)
        dupes = dupes.exclude(spraypoint__isnull=False).exclude(
            osmid__in=sprayed
        )
    return dupes
def count_duplicates(obj, was_sprayed, spray_date=None):
    """
    Count repeat visits to structures, excluding each structure's first
    visit.
    """
    duplicates = get_duplicates(obj, was_sprayed, spray_date)
    return sum(entry.get("dupes") - 1 for entry in duplicates)
def count_if(data, percentage, debug=False):
    """Count rows whose sprayed/structures coverage meets *percentage*.

    Each row's effective structure count is its baseline structures minus
    non-sprayable ones, plus newly found structures and (when the row has
    a location pk) duplicate sprayed visits.
    """
    matched = 0
    for row in data:
        pk = row.get("location")
        sprayed = row.get("sprayed") or 0
        not_sprayable = row.get("not_sprayable") or 0
        new_structures = row.get("new_structures") or 0
        structures = row.get("structures") or 0
        if debug:
            print(
                pk,
                "Sprayed:",
                sprayed,
                "S:",
                structures,
                "NW:",
                new_structures,
                "NS:",
                not_sprayable,
                "D:",
                count_duplicates(row, True) if pk else "",
            )
        structures = structures - not_sprayable + new_structures
        if pk is not None:
            structures += count_duplicates(row, True)
        if structures != 0 and round(sprayed * 100 / structures) >= percentage:
            matched += 1
    return matched
def get_spray_area_count(location, context=None):
    """
    Return (sprayed, structures) for *location*.

    structures is the baseline structure count adjusted by newly found
    structures, duplicate sprayed visits, and non-sprayable structures.

    Fix: replaced the mutable default argument ``context=dict()`` with
    None.
    """
    if context is None:
        context = {}
    data = get_spray_data(location, context)
    sprayed = data.get("sprayed")
    structures = (
        location.structures
        + (data.get("new_structures") or 0)
        + count_duplicates(location, True)
        - (data.get("not_sprayable") or 0)
    )
    return sprayed, structures
def get_spray_area_stats(location, context=None):
    """
    Return (indicator data, adjusted structure count) for *location*.

    Same structure adjustment as get_spray_area_count but returns the
    whole indicator dict instead of only the sprayed count.

    Fix: replaced the mutable default argument ``context=dict()`` with
    None.
    """
    if context is None:
        context = {}
    data = get_spray_data(location, context)
    structures = (
        location.structures
        + (data.get("new_structures") or 0)
        + count_duplicates(location, True)
        - (data.get("not_sprayable") or 0)
    )
    return data, structures
def count_key_if_percent(obj, key, percentage, context=None):
    """
    Recursively count sub-locations whose spray coverage meets *percentage*.

    For an RHC location, counts its direct children whose
    sprayed/structures ratio is at least *percentage*; for higher levels,
    recurses into child locations and sums the counts.

    Note: *key* is accepted but not used by the computation; it is kept
    for interface compatibility with existing callers.

    Fixes: mutable default argument ``context=dict()`` and the
    non-idiomatic ``type(obj) == dict`` check.
    """
    if context is None:
        context = {}

    def count_for(location):
        # tally direct children that meet the coverage threshold
        counter = 0
        for child in location.location_set.all():
            sprayed, structures = get_spray_area_count(child, context)
            ratio = round((sprayed * 100) / structures) if sprayed else 0
            counter += 1 if ratio >= percentage else 0
        return counter

    loc = Location.objects.get(pk=obj.get("pk")) if isinstance(obj, dict) else obj
    if loc.level == "RHC":
        return count_for(loc)
    return sum(
        count_key_if_percent(child, key, percentage, context)
        for child in loc.location_set.all()
    )
class TargetAreaMixin(object):
def get_queryset(self, obj):
qs = SprayDay.objects.filter(
location__pk__in=list(get_ta_in_location(obj))
)
if HAS_UNIQUE_FIELD:
qs = qs.filter(pk__in=SprayPoint.objects.values("sprayday"))
return qs
    def get_rich_queryset(self, obj):
        """
        Adds various annotations to the queryset

        Each annotation casts a key of the submission JSON ('data') to an
        integer: first the sprayed population/room/net counts, then the
        unsprayed equivalents.
        """
        qs = self.get_queryset(obj)
        # sprayed
        qs = qs.annotate(
            sprayed_females=Cast(
                KeyTextTransform("sprayable/sprayed/sprayed_females", "data"),
                IntegerField(),
            ),
            sprayed_males=Cast(
                KeyTextTransform("sprayable/sprayed/sprayed_males", "data"),
                IntegerField(),
            ),
            sprayed_pregwomen=Cast(
                KeyTextTransform(
                    "sprayable/sprayed/sprayed_pregwomen", "data"
                ),
                IntegerField(),
            ),
            sprayed_childrenU5=Cast(
                KeyTextTransform(
                    "sprayable/sprayed/sprayed_childrenU5", "data"
                ),
                IntegerField(),
            ),
            sprayed_totalpop=Cast(
                KeyTextTransform("sprayable/sprayed/sprayed_totalpop", "data"),
                IntegerField(),
            ),
            sprayed_rooms=Cast(
                KeyTextTransform("sprayable/sprayed/sprayed_rooms", "data"),
                IntegerField(),
            ),
            sprayed_roomsfound=Cast(
                KeyTextTransform(
                    "sprayable/sprayed/sprayed_roomsfound", "data"
                ),
                IntegerField(),
            ),
            sprayed_nets=Cast(
                KeyTextTransform("sprayable/sprayed/sprayed_nets", "data"),
                IntegerField(),
            ),
            sprayed_total_uNet=Cast(
                KeyTextTransform(
                    "sprayable/sprayed/sprayed_total_uNet", "data"
                ),
                IntegerField(),
            ),
        )
        # unsprayed
        qs = qs.annotate(
            unsprayed_children_u5=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/population/unsprayed_children_u5",
                    "data",
                ),
                IntegerField(),
            ),
            unsprayed_females=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/unsprayed_females", "data"
                ),
                IntegerField(),
            ),
            unsprayed_males=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/unsprayed_males", "data"
                ),
                IntegerField(),
            ),
            unsprayed_totalpop=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/unsprayed_totalpop", "data"
                ),
                IntegerField(),
            ),
            unsprayed_pregnant_women=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/population/unsprayed_pregnant_women",  # noqa
                    "data",
                ),
                IntegerField(),
            ),
            unsprayed_roomsfound=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/population/unsprayed_roomsfound",
                    "data",
                ),
                IntegerField(),
            ),
            unsprayed_nets=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/population/unsprayed_nets", "data"
                ),
                IntegerField(),
            ),
            unsprayed_total_uNet=Cast(
                KeyTextTransform(
                    "sprayable/unsprayed/population/unsprayed_total_uNet",
                    "data",
                ),
                IntegerField(),
            ),
        )
        return qs
def get_spray_queryset(self, obj):
pk = obj["pk"] if isinstance(obj, dict) else obj.pk
key = "_spray_queryset_{}".format(pk)
if hasattr(self, key):
return getattr(self, key)
qs = sprayable_queryset(self.get_queryset(obj))
setattr(self, key, qs)
return qs
def get_spray_dates(self, obj):
if obj:
queryset = self.get_spray_queryset(obj)
return (
queryset.values_list("spray_date", flat=True)
.order_by("spray_date")
.distinct()
)
def get_visited_total(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
if level == TA_LEVEL:
visited_found = self.get_found(obj)
else:
visited_found = count_key_if_percent(
obj, "sprayed", 20, self.context
)
return visited_found
def get_found(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
location = (
Location.objects.get(pk=obj.get("pk"))
if isinstance(obj, dict)
else obj
)
data = get_spray_data(obj, self.context)
request = self.context.get("request")
spray_date = parse_spray_date(request) if request else None
week_number = self.context.get("week_number")
if spray_date or week_number:
if level == TA_LEVEL:
found = data.get("found") or 0
else:
found = data.count()
else:
found = location.visited_found
return found
def get_visited_sprayed(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
if level == TA_LEVEL:
data = get_spray_data(obj, self.context)
visited_sprayed = data.get("sprayed") or 0
else:
visited_sprayed = count_key_if_percent(
obj, "sprayed", LOCATION_SPRAYED_PERCENTAGE, self.context
)
return visited_sprayed
def get_visited_not_sprayed(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
data = get_spray_data(obj, self.context)
if level == TA_LEVEL:
visited_not_sprayed = data.get("not_sprayed") or 0
else:
visited_not_sprayed = (
data.aggregate(r=Sum("not_sprayed")).get("r") or 0
)
return visited_not_sprayed
def get_visited_refused(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
data = get_spray_data(obj, self.context)
if level == TA_LEVEL:
refused = data.get("refused") or 0
else:
refused = data.aggregate(r=Sum("refused")).get("r") or 0
return refused
def get_visited_other(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
data = get_spray_data(obj, self.context)
if level == TA_LEVEL:
other = data.get("other") or 0
else:
other = data.aggregate(r=Sum("other")).get("r") or 0
return other
    def get_not_visited(self, obj):
        """Return the number of structures not yet visited.

        Adjusts the baseline structure count by subtracting
        non-sprayable structures and adding newly found ones, then
        subtracts the found count.
        """
        if obj:
            structures = (
                obj["structures"] if isinstance(obj, dict) else obj.structures
            )
            data = get_spray_data(obj, self.context)
            level = obj["level"] if isinstance(obj, dict) else obj.level
            if level == TA_LEVEL:
                not_sprayable = data.get("not_sprayable") or 0
                new_structures = data.get("new_structures") or 0
                structures -= not_sprayable
                # NOTE(review): duplicates are only added at TA level,
                # not in the aggregate branch below — presumably
                # intentional; confirm with the reporting spec.
                structures += new_structures + count_duplicates(obj, True)
                count = data.get("found") or 0
            else:
                not_sprayable = (
                    data.aggregate(r=Sum("not_sprayable")).get("r") or 0
                )
                new_structures = (
                    data.aggregate(r=Sum("new_structures")).get("r") or 0
                )
                count = data.aggregate(r=Sum("found")).get("r") or 0
                structures -= not_sprayable
                structures += new_structures
            return structures - count
def get_bounds(self, obj):
bounds = []
if obj:
if isinstance(obj, dict):
bounds = [
obj.get("xmin"),
obj.get("ymin"),
obj.get("xmax"),
obj.get("ymax"),
]
elif obj.geom:
bounds = list(obj.geom.boundary.extent)
return bounds
def get_district_name(self, obj):
return obj.get("name") if isinstance(obj, dict) else obj.name
def get_targetid(self, obj):
return obj.get("pk") if isinstance(obj, dict) else obj.pk
def get_new_structures(self, obj):
if obj:
level = obj["level"] if isinstance(obj, dict) else obj.level
data = get_spray_data(obj, self.context)
if level == TA_LEVEL:
new_structures = data.get("new_structures") or 0
else:
new_structures = (
data.aggregate(r=Sum("new_structures")).get("r") or 0
)
return new_structures
return 0
    def get_structures(self, obj):
        """Return structures on ground in location.

        With a spray-date or week-number filter active, the baseline
        count is adjusted from the filtered spray data; otherwise the
        precomputed ``structures_on_ground`` value is returned.
        """
        structures = (
            obj.get("structures") if isinstance(obj, dict) else obj.structures
        )
        level = obj["level"] if isinstance(obj, dict) else obj.level
        # Resolve to a Location instance when given a values() dict.
        location = (
            Location.objects.get(pk=obj.get("pk"))
            if isinstance(obj, dict)
            else obj
        )
        data = get_spray_data(obj, self.context)
        request = self.context.get("request")
        spray_date = parse_spray_date(request) if request else None
        week_number = self.context.get("week_number")
        if spray_date or week_number:
            if level == TA_LEVEL:
                not_sprayable = data.get("not_sprayable") or 0
                new_structures = data.get("new_structures") or 0
                structures -= not_sprayable
                # Sprayed duplicates count as extra structures at TA level.
                structures += new_structures + count_duplicates(
                    obj, was_sprayed=True
                )
            else:
                not_sprayable = (
                    data.aggregate(r=Sum("not_sprayable")).get("r") or 0
                )
                new_structures = (
                    data.aggregate(r=Sum("new_structures")).get("r") or 0
                )
                structures -= not_sprayable
                structures += new_structures
        else:
            structures = location.structures_on_ground
        return structures
    def get_total_structures(self, obj):
        """Alias of :meth:`get_structures` for the total_structures field."""
        return self.get_structures(obj)
def get_not_sprayable(self, obj):
data = get_spray_data(obj, self.context)
level = obj["level"] if isinstance(obj, dict) else obj.level
if level == TA_LEVEL:
not_sprayable = data.get("not_sprayable") or 0
else:
not_sprayable = (
data.aggregate(r=Sum("not_sprayable")).get("r") or 0
)
return not_sprayable
def get_sprayed_total_uNet(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_total_uNet_sum=Coalesce(
Sum("sprayed_total_uNet"), Value(0)
)
)
return sum_dict["sprayed_total_uNet_sum"]
def get_sprayed_nets(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_nets_sum=Coalesce(Sum("sprayed_nets"), Value(0))
)
return sum_dict["sprayed_nets_sum"]
def get_sprayed_roomsfound(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_roomsfound_sum=Coalesce(
Sum("sprayed_roomsfound"), Value(0)
)
)
return sum_dict["sprayed_roomsfound_sum"]
def get_sprayed_rooms(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_rooms_sum=Coalesce(Sum("sprayed_rooms"), Value(0))
)
return sum_dict["sprayed_rooms_sum"]
def get_sprayed_totalpop(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_totalpop_sum=Coalesce(Sum("sprayed_totalpop"), Value(0))
)
return sum_dict["sprayed_totalpop_sum"]
def get_sprayed_childrenU5(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_childrenU5_sum=Coalesce(
Sum("sprayed_childrenU5"), Value(0)
)
)
return sum_dict["sprayed_childrenU5_sum"]
def get_sprayed_pregwomen(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_pregwomen_sum=Coalesce(Sum("sprayed_pregwomen"), Value(0))
)
return sum_dict["sprayed_pregwomen_sum"]
def get_sprayed_males(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_males_sum=Coalesce(Sum("sprayed_males"), Value(0))
)
return sum_dict["sprayed_males_sum"]
def get_sprayed_females(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
sprayed_females_sum=Coalesce(Sum("sprayed_females"), Value(0))
)
return sum_dict["sprayed_females_sum"]
def get_unsprayed_nets(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_nets_sum=Coalesce(Sum("unsprayed_nets"), Value(0))
)
return sum_dict["unsprayed_nets_sum"]
def get_unsprayed_total_uNet(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_total_uNet_sum=Coalesce(
Sum("unsprayed_total_uNet"), Value(0)
)
)
return sum_dict["unsprayed_total_uNet_sum"]
def get_unsprayed_roomsfound(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_roomsfound_sum=Coalesce(
Sum("unsprayed_roomsfound"), Value(0)
)
)
return sum_dict["unsprayed_roomsfound_sum"]
def get_unsprayed_pregnant_women(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_pg_women_sum=Coalesce(
Sum("unsprayed_pregnant_women"), Value(0)
)
)
return sum_dict["unsprayed_pg_women_sum"]
def get_unsprayed_totalpop(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_totalpop_sum=Coalesce(
Sum("unsprayed_totalpop"), Value(0)
)
)
return sum_dict["unsprayed_totalpop_sum"]
def get_unsprayed_males(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_males_sum=Coalesce(Sum("unsprayed_males"), Value(0))
)
return sum_dict["unsprayed_males_sum"]
def get_unsprayed_females(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_females_sum=Coalesce(Sum("unsprayed_females"), Value(0))
)
return sum_dict["unsprayed_females_sum"]
def get_unsprayed_children_u5(self, obj):
qs = self.get_rich_queryset(obj)
sum_dict = qs.aggregate(
unsprayed_children_u5_sum=Coalesce(
Sum("unsprayed_children_u5"), Value(0)
)
)
return sum_dict["unsprayed_children_u5_sum"]
def get_total_nets(self, obj):
sprayed_nets = self.get_sprayed_nets(obj)
unsprayed_nets = self.get_unsprayed_nets(obj)
return sprayed_nets + unsprayed_nets
def get_total_uNet(self, obj):
"""
Total people covered by nets
"""
sprayed_uNet = self.get_sprayed_total_uNet(obj)
unsprayed_uNet = self.get_unsprayed_total_uNet(obj)
return sprayed_uNet + unsprayed_uNet
def get_total_rooms(self, obj):
sprayed_rooms = self.get_sprayed_roomsfound(obj)
unsprayed_rooms = self.get_unsprayed_roomsfound(obj)
return sprayed_rooms + unsprayed_rooms
class TargetAreaQueryMixin(TargetAreaMixin):
    """Variant of TargetAreaMixin that computes counts with raw SQL over
    the JSON ``data`` column, cached via ``cached_queryset_count``.
    """

    def get_found(self, obj):
        """Return the sum of ``number_sprayable`` for TAs inside *obj*."""
        if obj:
            pk = obj["pk"] if isinstance(obj, dict) else obj.pk
            key = "%s_found" % pk  # per-location cache key
            queryset = self.get_spray_queryset(obj)
            query = (
                "SELECT SUM((data->>'number_sprayable')::int) as id "
                "FROM main_sprayday WHERE location_id IN %s"
            )
            location_pks = list(get_ta_in_location(obj))
            if len(location_pks) == 0:
                return 0
            params = [tuple(location_pks)]
            return cached_queryset_count(key, queryset, query, params)

    def get_visited_sprayed(self, obj):
        """Return the sum of DDT- plus Deltamethrin-sprayed structures."""
        if obj:
            pk = obj["pk"] if isinstance(obj, dict) else obj.pk
            key = "%s_visited_sprayed" % pk
            queryset = self.get_spray_queryset(obj)
            query = (
                "SELECT SUM((data->>'sprayed/sprayed_DDT')::int + "
                "(data->>'sprayed/sprayed_Deltamethrin')::int) as id "
                "FROM main_sprayday WHERE location_id IN %s"
            )
            location_pks = list(get_ta_in_location(obj))
            if len(location_pks) == 0:
                return 0
            params = [tuple(location_pks)]
            return cached_queryset_count(key, queryset, query, params)

    def get_visited_not_sprayed(self, obj):
        """Return the sum of sprayable-but-not-sprayed structures."""
        if obj:
            pk = obj["pk"] if isinstance(obj, dict) else obj.pk
            key = "%s_visited_not_sprayed" % pk
            queryset = self.get_spray_queryset(obj)
            query = (
                "SELECT SUM((data->>'sprayed/sprayable_notsprayed')::int)"
                " as id FROM main_sprayday WHERE location_id IN %s"
            )
            location_pks = list(get_ta_in_location(obj))
            if len(location_pks) == 0:
                return 0
            params = [tuple(location_pks)]
            return cached_queryset_count(key, queryset, query, params)

    def get_visited_refused(self, obj):
        """Return not-sprayed counts where the recorded reason is 'refused'."""
        if obj:
            pk = obj["pk"] if isinstance(obj, dict) else obj.pk
            key = "%s_visited_refused" % pk
            queryset = self.get_spray_queryset(obj)
            # Same sum as get_visited_not_sprayed, filtered by reason.
            query = (
                "SELECT SUM((data->>'sprayed/sprayable_notsprayed')::int)"
                " as id FROM main_sprayday WHERE location_id IN %s"
                " AND data->>%s = %s"
            )
            location_pks = list(get_ta_in_location(obj))
            if len(location_pks) == 0:
                return 0
            params = [tuple(location_pks), REASON_FIELD, REASON_REFUSED]
            return cached_queryset_count(key, queryset, query, params)

    def get_visited_other(self, obj):
        """Return not-sprayed counts where the reason is any 'other' value."""
        if obj:
            pk = obj["pk"] if isinstance(obj, dict) else obj.pk
            key = "%s_visited_other" % pk
            queryset = self.get_spray_queryset(obj)
            # Same sum as get_visited_not_sprayed, filtered by reason set.
            query = (
                "SELECT SUM((data->>'sprayed/sprayable_notsprayed')::int)"
                " as id FROM main_sprayday WHERE location_id IN %s"
                " AND data->>%s IN %s"
            )
            location_pks = list(get_ta_in_location(obj))
            if len(location_pks) == 0:
                return 0
            params = [tuple(location_pks), REASON_FIELD, tuple(REASON_OTHER)]
            return cached_queryset_count(key, queryset, query, params)
class SprayOperatorDailySummaryMixin(object):
    """
    Adds bottle-count aggregates from SprayOperatorDailySummary.
    """

    def get_sop_summary_queryset(self, obj):
        """Return daily summaries matching *obj*'s spray form ids.

        The queryset is annotated with integer casts of the four bottle
        counters stored in the JSON ``data`` field.
        """
        sprayday_qs = self.get_queryset(obj)
        formids = sprayday_qs.annotate(
            sprayformid=KeyTextTransform("sprayformid", "data")
        ).values_list("sprayformid", flat=True)
        # De-duplicate and drop missing form ids (set comprehension
        # instead of list(set([...]))).
        formids = list({x for x in formids if x is not None})
        qs = SprayOperatorDailySummary.objects.filter(
            spray_form_id__in=formids
        )
        qs = qs.annotate(
            bottles_accounted=Cast(
                KeyTextTransform("bottles_accounted", "data"), IntegerField()
            ),
            bottles_empty=Cast(
                KeyTextTransform("bottles_empty", "data"), IntegerField()
            ),
            bottles_full=Cast(
                KeyTextTransform("bottles_full", "data"), IntegerField()
            ),
            bottles_start=Cast(
                KeyTextTransform("bottles_start", "data"), IntegerField()
            ),
        )
        return qs

    def _sum_bottle_field(self, obj, field):
        """Sum *field* over the summary queryset.

        Coalesce guarantees a non-None result (0 on an empty queryset),
        so no post-hoc None check is needed.
        """
        key = "{}_sum".format(field)
        totals = self.get_sop_summary_queryset(obj).aggregate(
            **{key: Coalesce(Sum(field), Value(0))}
        )
        return totals[key]

    def get_bottles_start(self, obj):
        """Sum of ``bottles_start`` across matching daily summaries."""
        return self._sum_bottle_field(obj, "bottles_start")

    def get_bottles_full(self, obj):
        """Sum of ``bottles_full`` across matching daily summaries."""
        return self._sum_bottle_field(obj, "bottles_full")

    def get_bottles_accounted(self, obj):
        """Sum of ``bottles_accounted`` across matching daily summaries."""
        return self._sum_bottle_field(obj, "bottles_accounted")

    def get_bottles_empty(self, obj):
        """Sum of ``bottles_empty`` across matching daily summaries."""
        return self._sum_bottle_field(obj, "bottles_empty")
class TargetAreaSerializer(TargetAreaMixin, serializers.ModelSerializer):
    """Serializes a spray-area Location with visit/spray statistics."""

    targetid = serializers.SerializerMethodField()
    district_name = serializers.SerializerMethodField()
    district = serializers.SerializerMethodField()
    district_pk = serializers.SerializerMethodField()
    rhc = serializers.SerializerMethodField()
    rhc_pk = serializers.SerializerMethodField()
    level = serializers.ReadOnlyField()
    structures = serializers.SerializerMethodField()
    total_structures = serializers.IntegerField()
    num_new_structures = serializers.IntegerField()
    found = serializers.SerializerMethodField()
    visited_total = serializers.SerializerMethodField()
    visited_sprayed = serializers.SerializerMethodField()
    visited_not_sprayed = serializers.SerializerMethodField()
    visited_refused = serializers.SerializerMethodField()
    visited_other = serializers.SerializerMethodField()
    not_visited = serializers.SerializerMethodField()
    bounds = serializers.SerializerMethodField()
    spray_dates = serializers.SerializerMethodField()
    is_sensitized = serializers.NullBooleanField()
    sensitized = serializers.SerializerMethodField()
    is_mobilised = serializers.NullBooleanField()
    mobilised = serializers.SerializerMethodField()

    class Meta:
        fields = (
            "targetid",
            "district_name",
            "found",
            "structures",
            "visited_total",
            "visited_sprayed",
            "visited_not_sprayed",
            "visited_refused",
            "visited_other",
            "not_visited",
            "bounds",
            "spray_dates",
            "level",
            "num_of_spray_areas",
            "total_structures",
            "district",
            "rhc",
            "district_pk",
            "rhc_pk",
            "num_new_structures",
            "is_sensitized",
            "sensitized",
            "is_mobilised",
            "mobilised",
            "priority",
        )
        model = Location

    def get_district(self, obj):
        """Return the grandparent (district) name, or None if missing."""
        if obj:
            try:
                return (
                    obj.get("parent__parent__name")
                    if isinstance(obj, dict)
                    else obj.parent.parent.name
                )
            except AttributeError:
                # Parent chain incomplete (e.g. parent is None); a bare
                # except here used to hide unrelated errors as well.
                pass

    def get_district_pk(self, obj):
        """Return the grandparent (district) pk, or None if missing."""
        if obj:
            try:
                return (
                    obj.get("parent__parent__pk")
                    if isinstance(obj, dict)
                    else obj.parent.parent.pk
                )
            except AttributeError:
                pass

    def get_rhc(self, obj):
        """Return the parent (RHC) name, or None if missing."""
        if obj:
            try:
                return (
                    obj.get("parent__name")
                    if isinstance(obj, dict)
                    else obj.parent.name
                )
            except AttributeError:
                pass

    def get_rhc_pk(self, obj):
        """Return the parent (RHC) pk, or None if missing."""
        if obj:
            try:
                return (
                    obj.get("parent__pk")
                    if isinstance(obj, dict)
                    else obj.parent.pk
                )
            except AttributeError:
                pass

    def get_sensitized(self, obj):  # pylint: disable=no-self-use
        """Return the sensitization value of the first sensitization visit."""
        location = obj
        if isinstance(obj, dict):
            location = Location.objects.get(pk=obj["pk"])
        queryset = location.sv_spray_areas
        sensitized = queryset.first()
        if sensitized:
            return sensitized.data.get(SENSITIZED_FIELD)
        return ""

    def get_mobilised(self, obj):  # pylint: disable=no-self-use
        """Return the mobilisation value of the first mobilisation visit."""
        location = obj
        if isinstance(obj, dict):
            location = Location.objects.get(pk=obj["pk"])
        queryset = location.mb_spray_areas
        mobilisation = queryset.first()
        if mobilisation:
            return mobilisation.data.get(MOBILISED_FIELD)
        return ""
class TargetAreaRichSerializer(
    TargetAreaMixin,
    SprayOperatorDailySummaryMixin,
    serializers.ModelSerializer,
):
    """
    Detailed spray data for target area
    """

    district = serializers.SerializerMethodField()
    found = serializers.SerializerMethodField()
    visited_sprayed = serializers.SerializerMethodField()
    visited_not_sprayed = serializers.SerializerMethodField()
    sprayed_total_uNet = serializers.SerializerMethodField()
    sprayed_nets = serializers.SerializerMethodField()
    sprayed_roomsfound = serializers.SerializerMethodField()
    sprayed_rooms = serializers.SerializerMethodField()
    sprayed_totalpop = serializers.SerializerMethodField()
    sprayed_childrenU5 = serializers.SerializerMethodField()
    sprayed_pregwomen = serializers.SerializerMethodField()
    sprayed_males = serializers.SerializerMethodField()
    sprayed_females = serializers.SerializerMethodField()
    unsprayed_nets = serializers.SerializerMethodField()
    unsprayed_total_uNet = serializers.SerializerMethodField()
    unsprayed_roomsfound = serializers.SerializerMethodField()
    unsprayed_pregnant_women = serializers.SerializerMethodField()
    unsprayed_totalpop = serializers.SerializerMethodField()
    unsprayed_males = serializers.SerializerMethodField()
    unsprayed_females = serializers.SerializerMethodField()
    unsprayed_children_u5 = serializers.SerializerMethodField()
    bottles_start = serializers.SerializerMethodField()
    bottles_empty = serializers.SerializerMethodField()
    bottles_full = serializers.SerializerMethodField()
    bottles_accounted = serializers.SerializerMethodField()
    total_nets = serializers.SerializerMethodField()
    total_uNet = serializers.SerializerMethodField()
    total_rooms = serializers.SerializerMethodField()

    class Meta:
        fields = [
            "name",
            "district",
            "found",
            "visited_sprayed",
            "visited_not_sprayed",
            "sprayed_total_uNet",
            "sprayed_nets",
            "sprayed_roomsfound",
            "sprayed_rooms",
            "sprayed_totalpop",
            "sprayed_childrenU5",
            "sprayed_pregwomen",
            "sprayed_males",
            "sprayed_females",
            "unsprayed_nets",
            "unsprayed_total_uNet",
            "unsprayed_roomsfound",
            "unsprayed_pregnant_women",
            "unsprayed_totalpop",
            "unsprayed_males",
            "unsprayed_females",
            "unsprayed_children_u5",
            "bottles_start",
            "bottles_empty",
            "bottles_full",
            "bottles_accounted",
            "total_nets",
            "total_uNet",
            "total_rooms",
        ]
        model = Location

    def get_district(self, obj):
        """Return the grandparent (district) name, or None if missing."""
        if obj:
            try:
                return (
                    obj.get("parent__parent__name")
                    if isinstance(obj, dict)
                    else obj.parent.parent.name
                )
            except AttributeError:
                # Parent chain incomplete (e.g. parent is None); the old
                # bare except also hid unrelated errors.
                pass
class TargetAreaQuerySerializer(
    TargetAreaQueryMixin, serializers.ModelSerializer
):
    """Spray-area statistics computed with the raw-SQL query mixin."""

    targetid = serializers.SerializerMethodField()
    district_name = serializers.SerializerMethodField()
    level = serializers.ReadOnlyField()
    structures = serializers.SerializerMethodField()
    found = serializers.SerializerMethodField()
    visited_total = serializers.SerializerMethodField()
    visited_sprayed = serializers.SerializerMethodField()
    visited_not_sprayed = serializers.SerializerMethodField()
    visited_refused = serializers.SerializerMethodField()
    visited_other = serializers.SerializerMethodField()
    not_visited = serializers.SerializerMethodField()
    bounds = serializers.SerializerMethodField()
    spray_dates = serializers.SerializerMethodField()

    class Meta:
        fields = (
            "targetid",
            "district_name",
            "found",
            "structures",
            "visited_total",
            "visited_sprayed",
            "visited_not_sprayed",
            "visited_refused",
            "visited_other",
            "not_visited",
            "bounds",
            "spray_dates",
            "level",
            "priority",
        )
        model = TargetArea
class GeoTargetAreaSerializer(TargetAreaMixin, GeoFeatureModelSerializer):
    """GeoJSON serializer for spray areas with visit/spray statistics."""

    # 'id' reuses the targetid lookup so the GeoJSON feature id matches.
    id = serializers.SerializerMethodField("get_targetid")
    targetid = serializers.SerializerMethodField()
    district_name = serializers.SerializerMethodField()
    level = serializers.ReadOnlyField()
    structures = serializers.SerializerMethodField()
    total_structures = serializers.IntegerField()
    num_new_structures = serializers.IntegerField()
    found = serializers.SerializerMethodField()
    visited_total = serializers.SerializerMethodField()
    visited_sprayed = serializers.SerializerMethodField()
    visited_not_sprayed = serializers.SerializerMethodField()
    visited_refused = serializers.SerializerMethodField()
    visited_other = serializers.SerializerMethodField()
    not_visited = serializers.SerializerMethodField()
    is_sensitized = serializers.NullBooleanField()
    is_mobilised = serializers.NullBooleanField()
    geom = GeometryField()

    class Meta:
        fields = (
            "targetid",
            "structures",
            "visited_total",
            "visited_sprayed",
            "visited_not_sprayed",
            "visited_refused",
            "visited_other",
            "not_visited",
            "level",
            "district_name",
            "found",
            "total_structures",
            "num_new_structures",
            "num_of_spray_areas",
            "id",
            "is_sensitized",
            "is_mobilised",
        )
        model = Location
        geo_field = "geom"
class GeoHealthFacilitySerializer(GeoFeatureModelSerializer):
    """GeoJSON serializer exposing only a Location's geometry."""

    class Meta:
        model = Location
        geo_field = "geom"
|
from rest_framework import viewsets
from .models import Note, NotedModel
from .serializers import NoteSerializer, NotedModelSerializer
from rest_framework.permissions import IsAuthenticatedOrReadOnly
class NoteViewSet(viewsets.ModelViewSet):
    """CRUD API for Note objects.

    Read access is public; write operations require authentication.
    """

    permission_classes = (IsAuthenticatedOrReadOnly,)
    queryset = Note.objects.all()
    serializer_class = NoteSerializer
class NotedModelViewSet(viewsets.ModelViewSet):
    """CRUD API for NotedModel objects.

    Read access is public; write operations require authentication.
    """

    permission_classes = (IsAuthenticatedOrReadOnly,)
    queryset = NotedModel.objects.all()
    serializer_class = NotedModelSerializer
|
'''
# This is an 80 character line #
INTENT: Pick out the cluster edges so that you can analyze edge statistics
1. Cluster algorithm
2. Count nearest neighbors in largest cluster only
--- Break here and plot this data ---
3. Look at contiguous set of low neighbor points (cluster them)
4. Perform usual statistics on this set (maybe the MSD?)
'''
import sys
# Command-line arguments; the script expects exactly five (no validation).
pe_a = int(sys.argv[1])                     # activity (Peclet number) of species A
pe_b = int(sys.argv[2])                     # activity (Peclet number) of species B
part_perc_a = int(sys.argv[3])              # percentage of A particles
part_frac_a = float(part_perc_a) / 100.0    # fraction of A particles
hoomd_path = str(sys.argv[4])               # local path to hoomd-blue
gsd_path = str(sys.argv[5])                 # local path to gsd
sys.path.append(hoomd_path)                 # ensure hoomd is in your python path
sys.path.append(gsd_path)                   # ensure gsd is in your python path
import hoomd
from hoomd import md
from hoomd import deprecated
import gsd
from gsd import hoomd
from gsd import pygsd
import freud
from freud import parallel
from freud import box
from freud import density
from freud import cluster
import numpy as np
import matplotlib.pyplot as plt
def getDistance(point1, point2x, point2y):
    """Return the Euclidean distance from *point1* (x, y) to (point2x, point2y)."""
    dx = point2x - point1[0]
    dy = point2y - point1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
# Load the file, get output name style.
# NOTE(review): ``hoomd`` here is ``gsd.hoomd`` (the later import shadows
# the hoomd-blue package), so ``hoomd.open`` reads a .gsd trajectory.
try:
    in_file = "pa" + str(pe_a) + \
              "_pb" + str(pe_b) + \
              "_xa" + str(part_perc_a) + \
              ".gsd"
    # Base name for all output files
    out_file = "diam_pa" + str(pe_a) + \
               "_pb" + str(pe_b) + \
               "_xa" + str(part_perc_a)
    f = hoomd.open(name=in_file, mode='rb')  # open gsd file with hoomd
except Exception:
    # Fall back to the "_ep1" naming scheme when the plain name fails.
    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt.
    in_file = "pa" + str(pe_a) + \
              "_pb" + str(pe_b) + \
              "_xa" + str(part_perc_a) + \
              "_ep1" + \
              ".gsd"
    # File to write all data to
    out_file = "diam_pa" + str(pe_a) + \
               "_pb" + str(pe_b) + \
               "_xa" + str(part_perc_a) + \
               "_ep1"
    f = hoomd.open(name=in_file, mode='rb')  # open gsd file with hoomd
# Trajectory bookkeeping: only the final frame is analyzed.
dumps = len(f)                               # number of timesteps dumped
start = dumps - 1                            # first frame to read (last frame;
                                             # the dead ``start = 0`` removed)
end = dumps                                  # one past the last frame to read
positions = np.zeros((end), dtype=np.ndarray)   # per-frame position arrays
types = np.zeros((end), dtype=np.ndarray)       # per-frame particle types
box_data = np.zeros((1), dtype=np.ndarray)      # box dimensions
timesteps = np.zeros((end), dtype=np.float64)   # timestep of each frame
# Get relevant data from .gsd file
with hoomd.open(name=in_file, mode='rb') as t:
    snap = t[0]
    box_data = snap.configuration.box
    for iii in range(start, end):
        snap = t[iii]                              # snapshot of frame
        types[iii] = snap.particles.typeid         # get types
        positions[iii] = snap.particles.position   # get positions
        timesteps[iii] = snap.configuration.step   # get timestep
timesteps -= timesteps[0]                          # get rid of brownian run time
# Get number of each type of particle
partNum = len(types[start])
part_A = int(partNum * part_frac_a)
part_B = partNum - part_A
# Feed data into freud analysis software
l_box = box_data[0]        # box edge length (assumes a square 2D box)
h_box = l_box / 2.0        # half box length
a_box = l_box * l_box      # box area
f_box = box.Box(Lx = l_box, Ly = l_box, is2D = True)    # make freud box
my_clust = cluster.Cluster(box = f_box,                 # init cluster class
                           rcut = 5.0)                  # distance to search
c_props = cluster.ClusterProperties(box = f_box)        # compute properties
# Parameters for sorting dense from dilute:
# a cluster counts as "dense" above the smaller of the two thresholds.
min_size_abso = 1000
min_size_perc = 0.05 * partNum  # minimum cluster size 5% of all particles
min_size = min_size_abso if min_size_abso < min_size_perc else min_size_perc
# Make the mesh (bin width equals the WCA cutoff distance)
r_cut = 1.122
sizeBin = r_cut
nBins = int(l_box / sizeBin)
nBins += 1  # account for integer rounding
### LOOP THROUGH GSD FRAMES AND ANALYZE ###
for j in range(start, end):
    # Mesh array: particle indices bucketed into an nBins x nBins grid
    binParts = [[[] for b in range(nBins)] for a in range(nBins)]
    # Per-particle count of touching neighbors.
    # ``np.int`` was removed in NumPy 1.24; use the builtin int.
    neighbors = np.zeros(partNum, dtype=int)
    # Easier accessor for this frame's positions
    pos = positions[j]
    # Put particles in their respective bins
    for k in range(0, partNum):
        # Shift origin to the box corner, then compute mesh indices
        tmp_posX = pos[k][0] + h_box
        tmp_posY = pos[k][1] + h_box
        x_ind = int(tmp_posX / sizeBin)
        y_ind = int(tmp_posY / sizeBin)
        binParts[x_ind][y_ind].append(k)
    # Compute distances; each pair is examined twice (once per member)
    for k in range(0, partNum):
        tmp_posX = pos[k][0] + h_box
        tmp_posY = pos[k][1] + h_box
        x_ind = int(tmp_posX / sizeBin)
        y_ind = int(tmp_posY / sizeBin)
        # Indices of the surrounding bins
        l_bin = x_ind - 1   # left
        r_bin = x_ind + 1   # right
        b_bin = y_ind - 1   # bottom
        t_bin = y_ind + 1   # top
        if r_bin == nBins:
            r_bin -= nBins  # periodic wrap
        if t_bin == nBins:
            t_bin -= nBins  # periodic wrap
        h_list = [l_bin, x_ind, r_bin]  # horizontal bin indices
        v_list = [b_bin, y_ind, t_bin]  # vertical bin indices
        # Loop through the 3x3 neighborhood of bins
        for h in range(0, len(h_list)):
            for v in range(0, len(v_list)):
                # Positional offsets for periodic images
                wrapX = 0.0
                wrapY = 0.0
                if h == 0 and h_list[h] == -1:
                    wrapX -= l_box
                if h == 2 and h_list[h] == 0:
                    wrapX += l_box
                if v == 0 and v_list[v] == -1:
                    wrapY -= l_box
                if v == 2 and v_list[v] == 0:
                    wrapY += l_box
                # Compute distance between particles
                for b in range(0, len(binParts[h_list[h]][v_list[v]])):
                    ref = binParts[h_list[h]][v_list[v]][b]
                    r = getDistance(pos[k],
                                    pos[ref][0] + wrapX,
                                    pos[ref][1] + wrapY)
                    r = round(r, 4)  # round value to 4 decimal places
                    # Touching particles are neighbors; the lower bound
                    # excludes a particle pairing with itself (r == 0)
                    if 0.1 < r <= 1.0:
                        neighbors[k] += 1
    # Keep particles with nMin..nMax neighbors (cluster-edge candidates)
    edgeX = []
    edgeY = []
    neigh = []
    nMin = 2
    nMax = 3
    for k in range(0, partNum):
        if nMin <= neighbors[k] <= nMax:
            edgeX.append(pos[k][0])
            edgeY.append(pos[k][1])
            neigh.append(neighbors[k])
    # freud expects 3D coordinates; z stays 0
    neigh_pos = np.zeros((len(edgeX), 3), dtype=np.float64)
    neigh_pos[:, 0] = edgeX[:]
    neigh_pos[:, 1] = edgeY[:]
    # Feed the reduced particle set to the cluster algorithm
    my_clust.computeClusters(neigh_pos)
    ids = my_clust.getClusterIdx()          # per-particle cluster id
    c_props.computeProperties(neigh_pos, ids)
    clust_size = c_props.getClusterSizes()  # size of each cluster, by id
    # Query array flagging clusters of sufficient size
    # (``np.int`` removed in NumPy 1.24; use the builtin int)
    q_clust = np.zeros((len(clust_size)), dtype=int)
    clust_num = 0   # number of sufficiently large clusters
    lcID = -1       # id of the largest cluster (-1 when none qualifies,
                    # so the frame no longer raises NameError below)
    l_clust = 0     # size of the largest cluster found so far
    for k in range(0, len(clust_size)):
        if clust_size[k] > min_size:
            q_clust[k] = 1
            clust_num += 1
            if clust_size[k] > l_clust:
                l_clust = clust_size[k]
                # BUG fix: ``clust_size`` is indexed by cluster id, so k
                # *is* the cluster id. The old code stored ids[k] — the
                # cluster id of particle k — which is unrelated.
                lcID = k
    # Collect positions of particles in the largest edge cluster
    lc_posX = []
    lc_posY = []
    for k in range(0, len(ids)):
        if ids[k] == lcID:
            lc_posX.append(neigh_pos[k][0])
            lc_posY.append(neigh_pos[k][1])
    plt.scatter(lc_posX, lc_posY, s=0.5, edgecolors='none')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(-h_box, h_box)
    plt.ylim(-h_box, h_box)
    plt.title('Largest edge cluster')
    plt.savefig('lcEdge_' + out_file + '_frame' + str(j) + '.png', dpi=1000)
|
import chainer
import torch
import chainer_pytorch_migration as cpm
def _named_children(link):
    """Yield (name, child_link) pairs for a Chain; nothing for plain Links."""
    assert isinstance(link, chainer.Link)
    if not isinstance(link, chainer.Chain):
        return
    for child_name in link._children:
        yield child_name, getattr(link, child_name)
def _named_params(link):
    """Yield (name, parameter) pairs registered on *link*."""
    assert isinstance(link, chainer.Link)
    for param_name in link._params:
        yield param_name, getattr(link, param_name)
# Corresponding to ``torch._C._nn._parse_to``.
def _parse_to(*args, device=None, dtype=None, non_blocking=False):
args = list(args)
if len(args) > 0:
if isinstance(args[0], torch.Tensor):
tensor = args.pop(0)
device = tensor.device
dtype = tensor.dtype
elif isinstance(args[0], torch.dtype):
dtype = args.pop(0)
elif isinstance(args[0], (str, torch.device)):
device = args.pop(0)
if len(args) > 0 and isinstance(args[0], torch.dtype):
dtype = torch.dtype(args.pop(0))
else:
raise TypeError('Received an invalid combination of arguments.')
if len(args) > 0:
non_blocking = bool(args.pop(0))
if len(args) > 0:
raise TypeError('Received an invalid combination of arguments.')
if device is not None:
device = torch.device(device)
return device, dtype, non_blocking
def _setattr_recursive(obj, name, value):
attr_list = name.split('.')
for attr in attr_list[:-1]:
obj = getattr(obj, attr)
setattr(obj, attr_list[-1], value)
class LinkAsTorchModel(torch.nn.Module):
    '''Converts a Chainer Link to a PyTorch module.

    The parameters of the link are automatically
    wrapped using `ChainerParameter` and added
    to the module as its own parameters.

    Args:
        link (:class:`chainer.Link`): A link. Must have been initialized.
    '''

    def __init__(self, link, **kwargs):
        super().__init__()
        # '_device' is an internal keyword used when recursing into
        # child links.
        device = kwargs.pop('_device', None)
        uninitialized_params = [
            n for n, p in sorted(_named_params(link)) if p.array is None]
        if uninitialized_params:
            raise RuntimeError(
                'Link with uninitialized parameters cannot be wrapped with '
                'LinkAsTorchModel. '
                'Please initialize parameters before wrapping, by feeding a '
                'dummy batch to the Chainer model, for example. '
                'Uninitialized params: [{}]'.format(
                    ', '.join(repr(n) for n in uninitialized_params)))
        # Recursively wrap child links as child modules.
        for name, child in _named_children(link):
            child_module = LinkAsTorchModel(child, _device=device)
            setattr(self, name, child_module)
        # Register each Chainer parameter as a shared-memory torch view.
        for name, param in sorted(_named_params(link)):
            if device is not None:
                param.to_device(device)
            setattr(self, name, ChainerParameter(param))
        self.link = link

    def forward(self, *input):
        # The computation graph should be done in Chainer.
        # Forward converts the input tensors to numpy/cupy arrays
        # as accepted by Chainer.
        # The return value should be a tensor as well.
        input = [cpm.tensor.asarray(x) if isinstance(x, torch.Tensor)
                 else x for x in input]
        outputs = self.link.forward(*input)
        ret = self.__as_tensor(outputs)
        return ret

    def __as_tensor(self, value):
        # Recursively convert chainer.Variable results to torch tensors,
        # preserving tuple/list structure; other values pass through.
        if isinstance(value, tuple):
            return tuple(self.__as_tensor(x) for x in value)
        if isinstance(value, list):
            return [self.__as_tensor(x) for x in value]
        if isinstance(value, chainer.Variable):
            return _ChainerTensor(value)
        return value

    def to(self, *args, **kwargs):
        """Move parameters to a device; dtype/non_blocking unsupported."""
        device, dtype, non_blocking = _parse_to(*args, **kwargs)
        chainer_device = cpm.to_chainer_device(device)
        if dtype is not None:
            raise NotImplementedError
        if non_blocking:
            raise NotImplementedError
        # Re-wrap each parameter after moving the underlying Chainer
        # parameter: the torch view must be recreated for the new device.
        for name, value in self.named_parameters():
            assert isinstance(value, ChainerParameter)
            param = value._param
            param.to_device(chainer_device)
            value = ChainerParameter(param)
            _setattr_recursive(self, name, value)
        return self
class Optimizer(torch.optim.Optimizer):
    """Wraps a torch optimizer so it can drive ``ChainerParameter``s.

    Before each ``step`` the gradients computed on the Chainer side are
    copied into the torch gradient buffers; ``zero_grad`` clears both.
    """

    def __init__(self, base_optimizer):
        assert isinstance(base_optimizer, torch.optim.Optimizer)
        super().__init__(base_optimizer.param_groups, base_optimizer.defaults)
        self._base_optimizer = base_optimizer

    def __getattr__(self, name):
        # Forward everything except the overridden members to the
        # wrapped optimizer.
        if name in ('step', 'zero_grad', '_base_optimizer'):
            return object.__getattribute__(self, name)
        return getattr(self._base_optimizer, name)

    def step(self, closure=None):
        """Sync Chainer grads into torch buffers, then delegate step."""
        for param_group in self._base_optimizer.param_groups:
            for param in param_group['params']:
                assert isinstance(param, ChainerParameter)
                param.grad.copy_(cpm.astensor(param._param.grad))
        self._base_optimizer.step(closure)

    def zero_grad(self):
        """Zero both the torch and the Chainer gradient buffers."""
        self._base_optimizer.zero_grad()
        for param_group in self._base_optimizer.param_groups:
            for param in param_group['params']:
                assert isinstance(param, ChainerParameter)
                param._param.zerograd()
class _ChainerTensor(torch.Tensor):
    '''
    Torch tensor from which backprop can be performed.

    Shares memory with the wrapped :class:`chainer.Variable`;
    :meth:`backward` delegates to Chainer's autograd.
    '''

    def __new__(cls, variable):
        assert isinstance(variable, chainer.Variable)
        # Build a torch tensor sharing the variable's array memory,
        # then rebrand it as a _ChainerTensor.
        obj = cpm.astensor(variable.array)
        obj.__class__ = cls
        return obj

    def __init__(self, variable):
        self._variable = variable

    def backward(self, gradient=None, retain_graph=None, create_graph=False):
        """Backprop through the Chainer graph (retain_graph unsupported)."""
        assert retain_graph is None or retain_graph == False  # True not supported
        assert self._variable is not None
        var = self._variable
        if gradient is not None:
            var.grad = cpm.tensor.asarray(gradient)
        var.backward(
            enable_double_backprop=create_graph,
        )

    def zero_(self):
        # Zero both the torch view and the underlying Chainer array.
        super().zero_()
        self._variable.array[...] = 0
class ChainerParameter(torch.nn.Parameter):
    '''Wraps a Chainer parameter for use with a PyTorch optimizer.

    It is used to share the data, and more importantly, the gradient memory
    buffer between Chainer and PyTorch, since :class:`chainer.Parameter.grad`
    may be reassigned a new buffer after each backward. Computational graphs
    must be constructed and backpropagated through on the Chainer-side.

    Args:
        param (:class:`chainer.Parameter`): A parameter to convert.
    Returns:
        A :class:`ChainerParameter`.
    '''

    # Cached _ChainerTensor view of the Chainer gradient (lazily built in
    # the `grad` property; reset by `zero_`).
    __grad = None

    def __new__(cls, param):
        # Share the parameter's data buffer with the torch tensor.
        return super().__new__(cls, cpm.astensor(param.array))

    def __init__(self, param):
        super().__init__()
        # Underlying chainer.Parameter whose grad buffer we mirror.
        self._param = param

    @property
    def grad(self):
        # Lazily wrap the Chainer gradient; returns None while the Chainer
        # side has no gradient yet.
        if self.__grad is None:
            if self._param.grad is not None:
                self.__grad = _ChainerTensor(self._param.grad_var)
        return self.__grad

    @grad.setter
    def grad(self, g):
        if self._param.grad is not None:
            # Copy into the existing shared buffer via the wrapped view.
            self.grad[...] = g
        else:
            self._param.grad = cpm.asarray(g)

    def zero_(self):
        # Zero data on both sides and drop the cached grad wrapper, since
        # cleargrad() detaches the Chainer gradient buffer.
        super().zero_()
        self._param.cleargrad()
        self._param.array[...] = 0
        self.__grad = None
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
# Mapping of Indian state / union-territory names (including common spelling
# variants and footnote-marked forms found in the source data, e.g.
# "Jharkhand#") to their ISO 3166-2:IN codes. "all India" variants map to
# the country code "IN".
INDIA_ISO_CODES = {
    "Andhra Pradesh": "IN-AP",
    "Andhra Pradesh Old": "IN-AP",
    "Arunachal Pradesh": "IN-AR",
    "Assam": "IN-AS",
    "Bihar": "IN-BR",
    "Chattisgarh": "IN-CT",
    "Chhattisgarh": "IN-CT",
    "Goa": "IN-GA",
    "Gujarat": "IN-GJ",
    "Haryana": "IN-HR",
    "Himachal Pradesh": "IN-HP",
    "Jharkhand": "IN-JH",
    "Jharkhand#": "IN-JH",
    "Karnataka": "IN-KA",
    "Kerala": "IN-KL",
    "Madhya Pradesh": "IN-MP",
    "Madhya Pradesh#": "IN-MP",
    "Maharashtra": "IN-MH",
    "Manipur": "IN-MN",
    "Meghalaya": "IN-ML",
    "Mizoram": "IN-MZ",
    "Nagaland": "IN-NL",
    "Nagaland#": "IN-NL",
    "Odisha": "IN-OR",
    "Punjab": "IN-PB",
    "Rajasthan": "IN-RJ",
    "Sikkim": "IN-SK",
    "Tamil Nadu": "IN-TN",
    "Tamilnadu": "IN-TN",
    "Telengana": "IN-TG",
    "Telangana": "IN-TG",
    "Tripura": "IN-TR",
    "Uttarakhand": "IN-UT",
    "Uttar Pradesh": "IN-UP",
    "West Bengal": "IN-WB",
    "Andaman and Nicobar Islands": "IN-AN",
    "Andaman & Nicobar Islands": "IN-AN",
    "Andaman & N. Island": "IN-AN",
    "A & N Islands": "IN-AN",
    "Chandigarh": "IN-CH",
    "Dadra and Nagar Haveli": "IN-DN",
    "Dadra & Nagar Haveli": "IN-DN",
    "Dadar Nagar Haveli": "IN-DN",
    "Daman and Diu": "IN-DD",
    "Daman & Diu": "IN-DD",
    "Delhi": "IN-DL",
    "Jammu and Kashmir": "IN-JK",
    "Jammu & Kashmir": "IN-JK",
    "Ladakh": "IN-LA",
    "Lakshadweep": "IN-LD",
    "Lakshwadeep": "IN-LD",
    "Pondicherry": "IN-PY",
    "Puducherry": "IN-PY",
    "Puduchery": "IN-PY",
    "Dadra and Nagar Haveli and Daman and Diu": "IN-DN_DD",
    "all India": "IN",
    "all-India": "IN",
    "All India": "IN"
}
# Template MCF (TMCF) node mapping each csv row's isoCode column to a Place.
TMCF_ISOCODE = """Node: E:{dataset_name}->E0
typeOf: schema:Place
isoCode: C:{dataset_name}->isoCode
"""

# TMCF node emitted once per StatVar column; ties each observation to the
# Place entity (E0), the Date column, and the StatVar's value column.
TMCF_NODES = """
Node: E:{dataset_name}->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:indianNHM/{statvar}
measurementMethod: dcs:NHM_HealthInformationManagementSystem
observationAbout: C:{dataset_name}->E0
observationDate: C:{dataset_name}->Date
observationPeriod: "P1Y"
value: C:{dataset_name}->{statvar}
"""

# MCF StatisticalVariable definition emitted once per distinct StatVar.
MCF_NODES = """Node: dcid:indianNHM/{statvar}
description: "{description}"
typeOf: dcs:StatisticalVariable
populationType: schema:Person
measuredProperty: dcs:indianNHM/{statvar}
statType: dcs:measuredValue
"""
class NHMDataLoaderBase(object):
    """
    An object to clean .xls files under 'data/' folder and convert it to csv.

    Attributes:
        data_folder: folder containing all the data files
        dataset_name: name given to the dataset
        cols_dict: dictionary containing column names in the data files mapped to StatVars
                   (keys contain column names and values contains StatVar names)
        final_csv_path: path the cleaned csv is written to
    """

    def __init__(self, data_folder, dataset_name, cols_dict, final_csv_path):
        """
        Constructor
        """
        self.data_folder = data_folder
        self.dataset_name = dataset_name
        self.cols_dict = cols_dict
        self.final_csv_path = final_csv_path
        self.raw_df = None
        # Ignoring the first 3 elements (State, isoCode, Date) in the dictionary map
        self.cols_to_extract = list(self.cols_dict.keys())[3:]

    def generate_csv(self):
        """
        Class method to preprocess the data file for each available year,
        extract the columns and map the columns to schema.
        The dataframe is saved as a csv file at ``final_csv_path``.

        Returns:
            pandas dataframe: Cleaned dataframe.
        """
        frames = []
        # Loop through each year file
        for file in os.listdir(self.data_folder):
            fname, fext = os.path.splitext(file)  # fname contains year of the file
            # date is set as March of every year
            date = ''.join(['20', fname[-2:], '-03'])
            if fext == '.xls':
                # Reading .xls file as html and preprocessing multiindex
                self.raw_df = pd.read_html(os.path.join(self.data_folder,
                                                        file))[0]
                self.raw_df.columns = self.raw_df.columns.droplevel()
                cleaned_df = pd.DataFrame()
                cleaned_df['State'] = self.raw_df['Indicators']['Indicators.1']
                cleaned_df['isoCode'] = cleaned_df['State'].map(INDIA_ISO_CODES)
                cleaned_df['Date'] = date
                # If no columns specified, extract all except first two (index and state name)
                if not self.cols_to_extract:
                    self.cols_to_extract = list(
                        set(self.raw_df.columns.droplevel(-1)[2:]))
                # Extract specified columns from raw dataframe if they exist
                for col in self.cols_to_extract:
                    if col in self.raw_df.columns:
                        cleaned_df[col] = self.raw_df[col][fname]
                frames.append(cleaned_df)
        # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed
        # in pandas 2.0; concatenate the per-year frames in one pass instead.
        df_full = pd.DataFrame(columns=list(self.cols_dict.keys()))
        if frames:
            df_full = pd.concat([df_full] + frames, ignore_index=True)
        # Converting column names according to schema and saving it as csv
        df_full.columns = df_full.columns.map(self.cols_dict)
        df_full.to_csv(self.final_csv_path, index=False)
        return df_full

    def create_mcf_tmcf(self):
        """
        Class method to generate MCF and TMCF files for the current dataset.
        """
        tmcf_file = "{}.tmcf".format(self.dataset_name)
        mcf_file = "{}.mcf".format(self.dataset_name)

        with open(tmcf_file, 'w+') as tmcf, open(mcf_file, 'w+') as mcf:
            # Writing isoCODE entity
            tmcf.write(TMCF_ISOCODE.format(dataset_name=self.dataset_name))
            # Keeping track of written StatVars (a set gives O(1) membership).
            # Some columns in NHM_FamilyPlanning have same StatVar for multiple cols
            statvars_written = set()
            # Writing nodes for each StatVar
            for idx, variable in enumerate(self.cols_to_extract):
                if self.cols_dict[variable] not in statvars_written:
                    # Writing TMCF
                    tmcf.write(
                        TMCF_NODES.format(dataset_name=self.dataset_name,
                                          index=idx + 1,
                                          statvar=self.cols_dict[variable]))
                    # Writing MCF
                    mcf.write(
                        MCF_NODES.format(
                            statvar=self.cols_dict[variable],
                            description=str(variable).capitalize()))
                    statvars_written.add(self.cols_dict[variable])
|
#!/usr/bin/env python
# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Generate a data set of toy examples. Examples can be cylinders, spheres, blocks, diamonds.
Distortions may be applied, e.g., noise to the radius ground truths.
Settings are configured in configs file.
"""
import plotting as plg
import os
import shutil
import warnings
import time
from multiprocessing import Pool
import numpy as np
import pandas as pd
import data_manager as dmanager
# Silence numpy divide-by-zero runtime warnings (raised during the toy-data
# computations below) by message pattern.
for msg in ["RuntimeWarning: divide by zero encountered in true_divide.*",]:
    warnings.filterwarnings("ignore", msg)
class ToyGenerator(object):
    """ Generator of toy data set.
    A train and a test split with certain nr of samples are created and saved to disk. Samples can contain varying
    number of objects. Objects have shapes cylinder or block (diamond, ellipsoid, torus not fully implemented).

    self.mp_args holds image split and id, objects are then randomly drawn into each image. Multi-processing is
    enabled for parallel creation of images, final .npy-files can then be converted to .npz.
    """

    def __init__(self, cf):
        """
        :param cf: configs file holding object specifications and output directories.
        """
        self.cf = cf
        self.n_train, self.n_test = cf.n_train_samples, cf.n_test_samples
        self.sample_size = cf.pre_crop_size
        self.dim = len(self.sample_size)
        # Per-class radius sampling ranges; background class (id 0) excluded.
        self.class_radii = np.array([label.radius for label in self.cf.pp_classes if label.id!=0])
        self.class_id2label = {label.id: label for label in self.cf.pp_classes}
        self.mp_args = []
        # count sample ids consecutively over train, test splits within on dataset (one shape kind)
        self.last_s_id = 0
        for split in ["train", "test"]:
            self.set_splits_info(split)

    def set_splits_info(self, split):
        """ Set info for data set splits, i.e., directory and nr of samples.
        :param split: name of split, in {"train", "test"}.
        """
        out_dir = os.path.join(self.cf.pp_rootdir, split)
        os.makedirs(out_dir, exist_ok=True)
        n_samples = self.n_train if "train" in split else self.n_test
        # Exact (undistorted) GT is only required for the test split.
        req_exact_gt = "test" in split
        self.mp_args += [[out_dir, self.last_s_id+running_id, req_exact_gt] for running_id in range(n_samples)]
        self.last_s_id+= n_samples

    def generate_sample_radii(self, class_ids, shapes):
        """Sample per-object radii from each class's configured radius range."""
        # the radii set in labels are ranges to sample from in the form [(min_x,min_y,min_z), (max_x,max_y,max_z)]
        all_radii = []
        for ix, cl_radii in enumerate([self.class_radii[cl_id - 1].transpose() for cl_id in class_ids]):
            if "cylinder" in shapes[ix] or "block" in shapes[ix]:
                # maintain 2D aspect ratio
                sample_radii = [np.random.uniform(*cl_radii[0])] * 2
                assert len(sample_radii) == 2, "upper sr {}, cl_radii {}".format(sample_radii, cl_radii)
                if self.cf.pp_place_radii_mid_bin:
                    # Snap the sampled radius to the center value of its
                    # regression bin (config-provided mapping).
                    bef_conv_r = np.copy(sample_radii)
                    bin_id = self.cf.rg_val_to_bin_id(bef_conv_r)
                    assert np.isscalar(bin_id)
                    sample_radii = self.cf.bin_id2rg_val[bin_id]*2
                    assert len(sample_radii) == 2, "mid before sr {}, sr {}, rgv2bid {}, cl_radii {}, bid2rgval {}".format(bef_conv_r, sample_radii, bin_id, cl_radii,
                                                                                                                          self.cf.bin_id2rg_val[bin_id])
            else:
                raise NotImplementedError("requested object shape {}".format(shapes[ix]))
            if self.dim == 3:
                assert len(sample_radii) == 2, "lower sr {}, cl_radii {}".format(sample_radii, cl_radii)
                #sample_radii += [np.random.uniform(*cl_radii[2])]
                sample_radii = np.concatenate((sample_radii, np.random.uniform(*cl_radii[2], size=1)))
            all_radii.append(sample_radii)
        return all_radii

    def apply_gt_distort(self, class_id, radii, radii_divs, outer_min_radii=None, outer_max_radii=None):
        """ Apply a distortion to the ground truth (gt). This is motivated by investigating the effects of noisy labels.
        GTs that can be distorted are the object radii and ensuing GT quantities like segmentation and regression
        targets.
        :param class_id: class id of object.
        :param radii: radii of object. This is in the abstract sense, s.t. for a block-shaped object radii give the side
            lengths.
        :param radii_divs: radii divisors, i.e., fractions to take from radii to get inner radii of hole-shaped objects,
            like a torus.
        :param outer_min_radii: min radii assignable when distorting gt.
        :param outer_max_radii: max radii assignable when distorting gt.
        :return:
        """
        applied_gt_distort = False
        # Each configured ambiguity fires independently with its configured
        # probability (first entry of the ambiguity tuple).
        for ambig in self.class_id2label[class_id].gt_distortion:
            if self.cf.ambiguities[ambig][0] > np.random.rand():
                if ambig == "outer_radius":
                    radii = radii * abs(np.random.normal(1., self.cf.ambiguities["outer_radius"][1]))
                    applied_gt_distort = True
                if ambig == "radii_relations":
                    radii = radii * abs(np.random.normal(1.,self.cf.ambiguities["radii_relations"][1],size=len(radii)))
                    applied_gt_distort = True
                if ambig == "inner_radius":
                    radii_divs = radii_divs * abs(np.random.normal(1., self.cf.ambiguities["inner_radius"][1]))
                    applied_gt_distort = True
                if ambig == "radius_calib":
                    if self.cf.ambigs_sampling=="uniform":
                        radii = abs(np.random.uniform(outer_min_radii, outer_max_radii))
                    elif self.cf.ambigs_sampling=="gaussian":
                        distort = abs(np.random.normal(1, scale=self.cf.ambiguities["radius_calib"][1], size=None))
                        assert len(radii) == self.dim, "radii {}".format(radii)
                        radii *= [distort, distort, 1.] if self.cf.pp_only_distort_2d else distort
                    applied_gt_distort = True
        return radii, radii_divs, applied_gt_distort

    def draw_object(self, img, seg, undistorted_seg, ics, regress_targets, undistorted_rg_targets, applied_gt_distort,
                    roi_ix, class_id, shape, radii, center):
        """ Draw a single object into the given image and add it to the corresponding ground truths.
        :param img: image (volume) to hold the object.
        :param seg: pixel-wise labelling of the image, possibly distorted if gt distortions are applied.
        :param undistorted_seg: certainly undistorted, i.e., exact segmentation of object.
        :param ics: indices which mark the positions within the image.
        :param regress_targets: regression targets (e.g., 2D radii of object), evtly distorted.
        :param undistorted_rg_targets: undistorted regression targets.
        :param applied_gt_distort: boolean, whether or not gt distortion was applied.
        :param roi_ix: running index of object in whole image.
        :param class_id: class id of object.
        :param shape: shape of object (e.g., whether to draw a cylinder, or block, or ...).
        :param radii: radii of object (in an abstract sense, i.e., radii are side lengths in case of block shape).
        :param center: center of object in image coordinates.
        :return: img, seg, undistorted_seg, regress_targets, undistorted_rg_targets, applied_gt_distort, which are now
            extended or amended to reflect the new object.
        """
        # radius_calib blur applies only if the config has ambiguities AND this
        # class is configured for gt distortion including 'radius_calib'.
        radii_blur = hasattr(self.cf, "ambiguities") and hasattr(self.class_id2label[class_id],
                                                                 "gt_distortion") and 'radius_calib' in \
                     self.class_id2label[class_id].gt_distortion

        if radii_blur:
            blur_width = self.cf.ambiguities['radius_calib'][1]
            if self.cf.ambigs_sampling == "uniform":
                # sqrt(12) presumably converts the gaussian scale to the width
                # of a uniform distribution with the same variance — TODO confirm.
                blur_width *= np.sqrt(12)
            if self.cf.pp_only_distort_2d:
                outer_max_radii = np.concatenate((radii[:2] + blur_width * radii[:2], [radii[2]]))
                outer_min_radii = np.concatenate((radii[:2] - blur_width * radii[:2], [radii[2]]))
                #print("belt width ", outer_max_radii - outer_min_radii)
            else:
                outer_max_radii = radii + blur_width * radii
                outer_min_radii = radii - blur_width * radii
        else:
            outer_max_radii, outer_min_radii = radii, radii

        if "ellipsoid" in shape or "torus" in shape:
            # sphere equation: (x-h)**2 + (y-k)**2 - (z-l)**2 = r**2
            # ellipsoid equation: ((x-h)/a)**2+((y-k)/b)**2+((z-l)/c)**2 <= 1; a, b, c the "radii"/ half-length of principal axes
            obj = ((ics - center) / radii) ** 2
        elif "diamond" in shape:
            # diamond equation: (|x-h|)/a+(|y-k|)/b+(|z-l|)/c <= 1
            obj = abs(ics - center) / radii
        elif "cylinder" in shape:
            # cylinder equation:((x-h)/a)**2 + ((y-k)/b)**2 <= 1 while |z-l| <= c
            obj = ((ics - center).astype("float64") / radii) ** 2
            # set z values s.t. z slices outside range are sorted out
            obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= radii[2], 0., 1.1)
            if radii_blur:
                inner_obj = ((ics - center).astype("float64") / outer_min_radii) ** 2
                inner_obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= outer_min_radii[2], 0., 1.1)
                outer_obj = ((ics - center).astype("float64") / outer_max_radii) ** 2
                outer_obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= outer_max_radii[2], 0., 1.1)
                # radial dists: sqrt( (x-h)**2 + (y-k)**2 + (z-l)**2 )
                obj_radial_dists = np.sqrt(np.sum((ics - center).astype("float64")**2, axis=1))
        elif "block" in shape:
            # block equation: (|x-h|)/a+(|y-k|)/b <= 1 while |z-l| <= c
            obj = abs(ics - center) / radii
            obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= radii[2], 0., 1.1)
            if radii_blur:
                inner_obj = abs(ics - center) / outer_min_radii
                inner_obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= outer_min_radii[2], 0., 1.1)
                outer_obj = abs(ics - center) / outer_max_radii
                outer_obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= outer_max_radii[2], 0., 1.1)
                obj_radial_dists = np.sum(abs(ics - center), axis=1).astype("float64")
        else:
            raise Exception("Invalid object shape '{}'".format(shape))

        # create the "original" GT, i.e., the actually true object and draw it into undistorted seg.
        obj = (np.sum(obj, axis=1) <= 1)
        obj = obj.reshape(seg[0].shape)
        # Drop z-slices whose in-plane extent is at or below the configured
        # minimum 2d radius.
        slices_to_discard = np.where(np.count_nonzero(np.count_nonzero(obj, axis=0), axis=0) <= self.cf.min_2d_radius)[0]
        obj[..., slices_to_discard] = 0
        undistorted_radii = np.copy(radii)
        undistorted_seg[class_id][obj] = roi_ix + 1
        obj = obj.astype('float64')

        if radii_blur:
            # Build a soft intensity "belt" between the inner and outer radii:
            # intensity falls linearly from 1 to pp_blur_min_intensity.
            inner_obj = np.sum(inner_obj, axis=1) <= 1
            outer_obj = (np.sum(outer_obj, axis=1) <= 1) & ~inner_obj
            obj_radial_dists[outer_obj] = obj_radial_dists[outer_obj] / max(obj_radial_dists[outer_obj])
            intensity_slope = self.cf.pp_blur_min_intensity - 1.
            # intensity(r) = (i(r_max)-i(0))/r_max * r + i(0), where i(0)==1.
            obj_radial_dists[outer_obj] = obj_radial_dists[outer_obj] * intensity_slope + 1.
            inner_obj = inner_obj.astype('float64')
            #outer_obj, obj_radial_dists = outer_obj.reshape(seg[0].shape), obj_radial_dists.reshape(seg[0].shape)
            inner_obj += np.where(outer_obj, obj_radial_dists, 0.)
            obj = inner_obj.reshape(seg[0].shape)

        if not np.any(obj):
            print("An object was completely discarded due to min 2d radius requirement, discarded slices: {}.".format(
                slices_to_discard))
        # draw the evtly blurred obj into image.
        img += obj * (class_id + 1.)

        if hasattr(self.cf, "ambiguities") and hasattr(self.class_id2label[class_id], "gt_distortion"):
            radii_divs = [None]  # dummy since not implemented yet
            radii, radii_divs, applied_gt_distort = self.apply_gt_distort(class_id, radii, radii_divs,
                                                                          outer_min_radii, outer_max_radii)
            if applied_gt_distort:
                # Re-derive the (now distorted) segmentation mask from the
                # distorted radii.
                if "ellipsoid" in shape or "torus" in shape:
                    obj = ((ics - center) / radii) ** 2
                elif 'diamond' in shape:
                    obj = abs(ics - center) / radii
                elif "cylinder" in shape:
                    obj = ((ics - center) / radii) ** 2
                    obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= radii[2], 0., 1.1)
                elif "block" in shape:
                    obj = abs(ics - center) / radii
                    obj[:, -1] = np.where(abs((ics - center)[:, -1]) <= radii[2], 0., 1.1)
                obj = (np.sum(obj, axis=1) <= 1).reshape(seg[0].shape)
                obj[..., slices_to_discard] = False

        # Regression targets per the class's configured regression mode.
        if self.class_id2label[class_id].regression == "radii":
            regress_targets.append(radii)
            undistorted_rg_targets.append(undistorted_radii)
        elif self.class_id2label[class_id].regression == "radii_2d":
            regress_targets.append(radii[:2])
            undistorted_rg_targets.append(undistorted_radii[:2])
        elif self.class_id2label[class_id].regression == "radius_2d":
            regress_targets.append(radii[:1])
            undistorted_rg_targets.append(undistorted_radii[:1])
        else:
            regress_targets.append(self.class_id2label[class_id].regression)
            undistorted_rg_targets.append(self.class_id2label[class_id].regression)

        seg[class_id][obj.astype('bool')] = roi_ix + 1
        return img, seg, undistorted_seg, regress_targets, undistorted_rg_targets, applied_gt_distort

    def create_sample(self, args):
        """ Create a single sample and save to file. One sample is one image (volume) containing none, one, or multiple
        objects.
        :param args: out_dir: directory where to save sample, s_id: id of the sample.
        :return: specs that identify this single created image
        """
        out_dir, s_id, req_exact_gt = args
        print('processing {} {}'.format(out_dir, s_id))
        # Background is gaussian noise clipped at zero.
        img = np.random.normal(loc=0.0, scale=self.cf.noise_scale, size=self.sample_size)
        img[img<0.] = 0.
        # one-hot-encoded seg
        seg = np.zeros((self.cf.num_classes+1, *self.sample_size)).astype('uint8')
        undistorted_seg = np.copy(seg)
        applied_gt_distort = False

        if hasattr(self.cf, "pp_empty_samples_ratio") and self.cf.pp_empty_samples_ratio >= np.random.rand():
            # generate fully empty sample
            class_ids, regress_targets, undistorted_rg_targets = [], [], []
        else:
            class_choices = np.repeat(np.arange(1, self.cf.num_classes+1), self.cf.max_instances_per_class)
            n_insts = np.random.randint(1, self.cf.max_instances_per_sample + 1)
            class_ids = np.random.choice(class_choices, size=n_insts, replace=False)
            shapes = np.array([self.class_id2label[cl_id].shape for cl_id in class_ids])
            all_radii = self.generate_sample_radii(class_ids, shapes)

            # reorder s.t. larger objects are drawn first (in order to not fully cover smaller objects)
            order = np.argsort(-1*np.prod(all_radii,axis=1))
            class_ids = class_ids[order]; all_radii = np.array(all_radii)[order]; shapes = shapes[order]

            regress_targets, undistorted_rg_targets = [], []
            # indices ics equal positions within img/volume
            ics = np.argwhere(np.ones(seg[0].shape))
            for roi_ix, class_id in enumerate(class_ids):
                radii = all_radii[roi_ix]
                # enforce distance between object center and image edge relative to radii.
                margin_r_divisor = (2, 2, 4)
                center = [np.random.randint(radii[dim] / margin_r_divisor[dim], img.shape[dim] -
                                            radii[dim] / margin_r_divisor[dim]) for dim in range(len(img.shape))]
                img, seg, undistorted_seg, regress_targets, undistorted_rg_targets, applied_gt_distort = \
                    self.draw_object(img, seg, undistorted_seg, ics, regress_targets, undistorted_rg_targets, applied_gt_distort,
                                     roi_ix, class_id, shapes[roi_ix], radii, center)

        fg_slices = np.where(np.sum(np.sum(np.sum(seg,axis=0), axis=0), axis=0))[0]
        if self.cf.pp_create_ohe_seg:
            img = img[np.newaxis]
        else:
            # choosing rois to keep by smaller radius==higher prio needs to be ensured during roi generation,
            # smaller objects need to be drawn later (==higher roi id)
            seg = seg.max(axis=0)
            seg_ids = np.unique(seg)
            if len(seg_ids) != len(class_ids) + 1:
                # in this case an object was completely covered by a succeeding object
                print("skipping corrupt sample")
                print("seg ids {}, class_ids {}".format(seg_ids, class_ids))
                return None
            if not applied_gt_distort:
                assert np.all(np.flatnonzero(img>0) == np.flatnonzero(seg>0))
                assert np.all(np.array(regress_targets).flatten()==np.array(undistorted_rg_targets).flatten())

        # save the img
        out_path = os.path.join(out_dir, '{}.npy'.format(s_id))
        np.save(out_path, img.astype('float16'))

        # exact GT
        if req_exact_gt:
            if not self.cf.pp_create_ohe_seg:
                undistorted_seg = undistorted_seg.max(axis=0)
            np.save(os.path.join(out_dir, '{}_exact_seg.npy'.format(s_id)), undistorted_seg)
        else:
            # if hasattr(self.cf, 'ambiguities') and \
            #         np.any([hasattr(label, "gt_distortion") and len(label.gt_distortion)>0 for label in self.class_id2label.values()]):
            # save (evtly) distorted GT
            np.save(os.path.join(out_dir, '{}_seg.npy'.format(s_id)), seg)

        return [out_dir, out_path, class_ids, regress_targets, fg_slices, undistorted_rg_targets, str(s_id)]

    def create_sets(self, processes=os.cpu_count()):
        """ Create whole training and test set, save to files under given directory cf.out_dir.
        :param processes: nr of parallel processes.
        """
        print('starting creation of {} images.'.format(len(self.mp_args)))
        # Snapshot the configs used for this dataset next to the outputs.
        shutil.copyfile("configs.py", os.path.join(self.cf.pp_rootdir, 'applied_configs.py'))
        pool = Pool(processes=processes)
        imgs_info = pool.map(self.create_sample, self.mp_args)
        # create_sample returns None for corrupt (skipped) samples.
        imgs_info = [img for img in imgs_info if img is not None]
        pool.close()
        pool.join()
        print("created a total of {} samples.".format(len(imgs_info)))

        self.df = pd.DataFrame.from_records(imgs_info, columns=['out_dir', 'path', 'class_ids', 'regression_vectors',
                                                                'fg_slices', 'undistorted_rg_vectors', 'pid'])
        # One info_df per split directory.
        for out_dir, group_df in self.df.groupby("out_dir"):
            group_df.to_pickle(os.path.join(out_dir, 'info_df.pickle'))

    def convert_copy_npz(self):
        """ Convert a copy of generated .npy-files to npz and save in .npz-directory given in configs.
        """
        if hasattr(self.cf, "pp_npz_dir") and self.cf.pp_npz_dir:
            for out_dir, group_df in self.df.groupby("out_dir"):
                rel_dir = os.path.relpath(out_dir, self.cf.pp_rootdir).split(os.sep)
                npz_out_dir = os.path.join(self.cf.pp_npz_dir, str(os.sep).join(rel_dir))
                print("npz out dir: ", npz_out_dir)
                os.makedirs(npz_out_dir, exist_ok=True)
                group_df.to_pickle(os.path.join(npz_out_dir, 'info_df.pickle'))
                dmanager.pack_dataset(out_dir, npz_out_dir, recursive=True, verbose=False)
        else:
            print("Did not convert .npy-files to .npz because npz directory not set in configs.")
if __name__ == '__main__':
    import configs as cf
    cf = cf.Configs()

    total_stime = time.time()

    # Generate train/test splits, optionally convert copies to .npz.
    toy_gen = ToyGenerator(cf)
    toy_gen.create_sets()
    toy_gen.convert_copy_npz()

    # Report total wall-clock runtime as h:mm:ss.
    mins, secs = divmod((time.time() - total_stime), 60)
    h, mins = divmod(mins, 60)
    t = "{:d}h:{:02d}m:{:02d}s".format(int(h), int(mins), int(secs))
    print("{} total runtime: {}".format(os.path.split(__file__)[1], t))
|
"""
Check SFP status and configure SFP
This script covers test case 'Check SFP status and configure SFP' in the SONiC platform test plan:
https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
"""
import logging
import re
import os
import time
import copy
from ansible_host import ansible_host
def parse_output(output_lines):
    """
    @summary: For parsing command output. The output lines should have format 'key value'.
    @param output_lines: Command output lines
    @return: Returns result in a dictionary
    """
    parsed = {}
    for raw_line in output_lines:
        tokens = raw_line.split()
        # Only lines made of exactly two whitespace-separated fields count.
        if len(tokens) == 2:
            key, value = tokens
            parsed[key] = value
    return parsed
def parse_eeprom(output_lines):
    """
    @summary: Parse the SFP eeprom information from command output
    @param output_lines: Command output lines
    @return: Returns result in a dictionary mapping interface name to its
             eeprom status string
    """
    res = {}
    for line in output_lines:
        if re.match(r"^Ethernet\d+: .*", line):
            # Bug fix: split on the first ':' only, so status strings that
            # themselves contain colons are not truncated.
            fields = line.split(":", 1)
            res[fields[0]] = fields[1].strip()
    return res
def test_check_sfp_status_and_configure_sfp(localhost, ansible_adhoc, testbed):
    """
    @summary: Check SFP status and configure SFP

    This case is to use the sfputil tool and show command to check SFP status and configure SFP. Currently the
    only configuration is to reset SFP. Commands to be tested:
    * sfputil show presence
    * show interface transceiver presence
    * sfputil show eeprom
    * show interface transceiver eeprom
    * sfputil reset <interface name>
    """
    hostname = testbed['dut']
    ans_host = ansible_host(ansible_adhoc, hostname)
    localhost.command("who")
    # Connection graph tells us which DUT interfaces are actually cabled.
    lab_conn_graph_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \
                                       "../../ansible/files/lab_connection_graph.xml")
    conn_graph_facts = localhost.conn_graph_facts(host=hostname, filename=lab_conn_graph_file).\
        contacted['localhost']['ansible_facts']

    cmd_sfp_presence = "sudo sfputil show presence"
    cmd_sfp_eeprom = "sudo sfputil show eeprom"
    cmd_sfp_reset = "sudo sfputil reset"
    cmd_xcvr_presence = "show interface transceiver presence"
    cmd_xcvr_eeprom = "show interface transceiver eeprom"

    logging.info("Check output of '%s'" % cmd_sfp_presence)
    sfp_presence = ans_host.command(cmd_sfp_presence)
    # [2:] skips the leading lines of the table output (presumably the
    # header rows).
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check output of '%s'" % cmd_xcvr_presence)
    xcvr_presence = ans_host.command(cmd_xcvr_presence)
    parsed_presence = parse_output(xcvr_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_xcvr_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"

    logging.info("Check output of '%s'" % cmd_sfp_eeprom)
    sfp_eeprom = ans_host.command(cmd_sfp_eeprom)
    parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'"
        assert parsed_eeprom[intf] == "SFP EEPROM detected"

    logging.info("Check output of '%s'" % cmd_xcvr_eeprom)
    xcvr_eeprom = ans_host.command(cmd_xcvr_eeprom)
    parsed_eeprom = parse_eeprom(xcvr_eeprom["stdout_lines"])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_eeprom, "Interface is not in output of '%s'" % cmd_xcvr_eeprom
        assert parsed_eeprom[intf] == "SFP EEPROM detected"

    logging.info("Test '%s <interface name>'" % cmd_sfp_reset)
    for intf in conn_graph_facts["device_conn"]:
        reset_result = ans_host.command("%s %s" % (cmd_sfp_reset, intf))
        assert reset_result["rc"] == 0, "'%s %s' failed" % (cmd_sfp_reset, intf)
    time.sleep(120)  # Wait some time for SFP to fully recover after reset

    logging.info("Check sfp presence again after reset")
    sfp_presence = ans_host.command(cmd_sfp_presence)
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
def test_check_sfp_low_power_mode(localhost, ansible_adhoc, testbed):
    """
    @summary: Check SFP low power mode

    This case is to use the sfputil tool command to check and set SFP low power mode
    * sfputil show lpmode
    * sfputil lpmode off
    * sfputil lpmode on
    """
    hostname = testbed['dut']
    ans_host = ansible_host(ansible_adhoc, hostname)
    localhost.command("who")
    # Connection graph tells us which DUT interfaces are actually cabled.
    lab_conn_graph_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \
                                       "../../ansible/files/lab_connection_graph.xml")
    conn_graph_facts = localhost.conn_graph_facts(host=hostname, filename=lab_conn_graph_file).\
        contacted['localhost']['ansible_facts']

    cmd_sfp_presence = "sudo sfputil show presence"
    cmd_sfp_show_lpmode = "sudo sfputil show lpmode"
    cmd_sfp_set_lpmode = "sudo sfputil lpmode"

    logging.info("Check output of '%s'" % cmd_sfp_show_lpmode)
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    # [2:] skips the leading lines of the table output (presumably the
    # header rows).
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
    # Remember the original lpmode of every port so it can be restored below.
    original_lpmode = copy.deepcopy(parsed_lpmode)
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    logging.info("Try to change SFP lpmode")
    for intf in conn_graph_facts["device_conn"]:
        # Toggle each port to the opposite of its original mode.
        new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on"
        lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
        assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
    time.sleep(10)

    logging.info("Check SFP lower power mode again after changing SFP lpmode")
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    logging.info("Try to change SFP lpmode")
    for intf in conn_graph_facts["device_conn"]:
        # Restore each port back to its original mode.
        new_lpmode = original_lpmode[intf].lower()
        lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
        assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
    time.sleep(10)

    logging.info("Check SFP lower power mode again after changing SFP lpmode")
    lpmode_show = ans_host.command(cmd_sfp_show_lpmode)
    parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
        assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"

    logging.info("Check sfp presence again after setting lpmode")
    sfp_presence = ans_host.command(cmd_sfp_presence)
    parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
    for intf in conn_graph_facts["device_conn"]:
        assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
        assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
|
# Copyright 2014 MadeiraCloud LTD.
import logging
from cliff.command import Command
class Delete(Command):
    "Delete your stack, locally or on AWS"

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        # Extend the base cliff parser with an optional positional stack id.
        parser = super(Delete, self).get_parser(prog_name)
        parser.add_argument('stack_id', nargs='?', default='')
        return parser

    def take_action(self, parsed_args):
        # Deletion is not implemented yet; just report that to the user.
        self.app.stdout.write('stack delete TO-DO!\n')
|
# coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, Mathijs Dumon
# All rights reserved.
# Complete license can be found in the LICENSE file.
from contextlib import contextmanager
from mvc.models.base import Model
from pyxrd.generic.controllers import ObjectListStoreController
from pyxrd.mixture.models import InSituBehaviour, insitu_behaviours
from pyxrd.mixture.views import EditInSituBehaviourView
from pyxrd.mixture.views.add_insitu_behaviour_view import AddInSituBehaviourView
from .edit_insitu_behaviour_controller import EditInSituBehaviourController
from .add_insitu_behaviour_controller import AddInSituBehaviourController
class InSituBehavioursController(ObjectListStoreController):
    """Controller for the list of in-situ behaviours of a project.

    Fixes vs. previous revision: `obj == None` replaced with the idiomatic
    `obj is None`, and the typo "irreverisble" in the user-visible
    confirmation message corrected.
    """

    treemodel_property_name = "behaviours"
    treemodel_class_type = InSituBehaviour
    columns = [ ("Mixture name", "c_name") ]
    delete_msg = "Deleting a mixture is irreversible!\nAre you sure you want to continue?"
    # Map every concrete behaviour class to its edit view and controller.
    obj_type_map = [
        (cls, EditInSituBehaviourView, EditInSituBehaviourController)
        for name, cls in list(insitu_behaviours.__dict__.items()) if hasattr(cls, 'Meta') and cls.Meta.concrete
    ]

    def get_behaviours_tree_model(self, *args):
        """Return the tree model holding the behaviours."""
        return self.treemodel

    # ------------------------------------------------------------
    #      GTK Signal handlers
    # ------------------------------------------------------------
    def on_load_object_clicked(self, event):
        pass # cannot load behaviours

    def on_save_object_clicked(self, event):
        pass # cannot save behaviours

    def get_new_edit_view(self, obj):
        """
            Gets a new 'edit object' view for the given obj, view and parent
            view.
        """
        if obj is None:
            return self.view.none_view
        else:
            for obj_tp, view_tp, ctrl_tp in self.obj_type_map: # @UnusedVariable
                if isinstance(obj, obj_tp):
                    return view_tp(obj.Meta, parent=self.view)
            raise NotImplementedError("Unsupported object type; subclasses of"
                " TreeControllerMixin need to define an obj_type_map attribute!")

    def create_new_object_proxy(self):
        """Show the 'add behaviour' dialog; the new object is added via callback."""
        def on_accept(behaviour_type):
            if behaviour_type is not None:
                self.add_object(behaviour_type(parent=self.model))

        # TODO re-use this and reset the COMBO etc.
        self.add_model = Model()
        self.add_view = AddInSituBehaviourView(parent=self.view)
        self.add_ctrl = AddInSituBehaviourController(
            model=self.add_model, view=self.add_view, parent=self,
            callback=on_accept
        )
        self.add_view.present()
        return None

    @contextmanager
    def _multi_operation_context(self):
        # Hold the data_changed signal so a batch of edits emits only once.
        with self.model.data_changed.hold():
            yield

    pass # end of class
import time
import random
def timeit(func, *args):
    """Call ``func(*args)``, print the elapsed wall-clock time in
    milliseconds, and return the call's result.

    Fix: the original computed ``ret`` but discarded it; returning it makes
    the helper usable as a transparent wrapper (callers that ignored the
    previous ``None`` return are unaffected).
    """
    t1 = time.time()
    ret = func(*args)
    cost_time = (time.time() - t1) * 1000
    print("cost time: %sms" % cost_time)
    return ret
# Prefer the stdlib randint; MicroPython's ``random`` module lacks it, so
# fall back to a getrandbits-based emulation there.
# Fixes: catch only AttributeError (a bare ``except:`` also swallowed
# KeyboardInterrupt/SystemExit), and make the fallback inclusive of ``b``
# like random.randint (``% (b - a)`` excluded ``b`` and raised
# ZeroDivisionError when a == b).
try:
    randint_wrap = random.randint
except AttributeError:
    # micropython: emulate randint's *inclusive* range [a, b]
    def randint_wrap(a, b):
        return a + random.getrandbits(32) % (b - a + 1)
def rand_str(length):
    """Return a random string of ``length`` uppercase letters (A-Z)."""
    lo, hi = ord('A'), ord('Z')
    return "".join(chr(randint_wrap(lo, hi)) for _ in range(length))
def main(n):
    """Benchmark body: generate ``n`` random 5-character strings."""
    print("n=%d" % n)
    for _ in range(n):
        rand_str(5)
# Time the generation of 100k random 5-character strings.
timeit(main, 100000)
|
import bz2
import json
import logging
import os.path
import pathlib
import sys
import click
@click.group()
def main():
    """Command line tools"""
    # Root click group; subcommands register themselves via @main.command().
    pass
@main.command()
@click.argument("src")
@click.argument("dst")
@click.option("-f", "continue_on_error", is_flag=True)
@click.option("--inventory", multiple=True)
def mddocs(src, dst, continue_on_error, inventory=()):
    """Render a subset of sphinx commands to markdown"""
    from chmp.tools.mddocs import transform_directories

    if continue_on_error:
        print("continue on errors", file=sys.stderr)

    resolved_inventory = open_inventory(inventory)

    print("translate", src, "->", dst, file=sys.stderr)
    transform_directories(
        src, dst, continue_on_error=continue_on_error, inventory=resolved_inventory
    )
    print("done", file=sys.stderr)
def open_inventory(inventory, cache_file=".inventory.json.bz2"):
    """Load the sphinx inventories in ``inventory``, using a bz2 JSON cache."""
    from chmp.tools.mddocs import load_inventory

    if not inventory:
        return {}

    # Reuse the on-disk cache only when it was built from the same URI list.
    if os.path.exists(cache_file):
        with bz2.open(cache_file, "rt") as fobj:
            cached = json.load(fobj)
        print("use cached inventory from", cache_file, file=sys.stderr)
        if cached["uris"] == list(inventory):
            return cached["inventory"]

    print("load inventory from", inventory, file=sys.stderr)
    loaded = load_inventory(inventory)

    print("write inventory cache", cache_file, file=sys.stderr)
    with bz2.open(cache_file, "wt") as fobj:
        json.dump(loaded, fobj, indent=2, sort_keys=True)

    return loaded["inventory"]
@main.command()
@click.argument("source-path", type=click.Path(exists=True))
@click.argument("target-path", type=click.Path(exists=True))
@click.option("-y", "yes", is_flag=True, default=False)
def paper(source_path, target_path, yes=False):
    """Sort papers into a common normalized directory structure.

    Arxiv papers are sorted from ``source_path`` directly; non-arxiv papers
    are expected under ``source_path/PapersToSort``.  Asks for confirmation
    unless ``-y`` was passed.

    Fix: dropped the ``f`` prefix from the first message, which contained no
    placeholders (flake8 F541).
    """
    from chmp.tools.papers import sort_arxiv_papers, sort_non_arxiv_papers

    non_arxiv_source_path = pathlib.Path(source_path) / "PapersToSort"

    print(":: this script will perform the following actions:")
    print(f":: - sort arxiv papers from {source_path!s} to {target_path!s}")
    print(
        f":: - sort non arxiv papers from {non_arxiv_source_path!s} to {target_path!s}"
    )
    if not yes and input(":: continue? [yN] ") != "y":
        return

    print()
    print(":: sort arxiv papers")
    sort_arxiv_papers(source_path, target_path)
    print(":: sort non arxiv papers")
    sort_non_arxiv_papers(non_arxiv_source_path, target_path)
@main.command()
@click.argument("source-path", type=click.Path(exists=True))
@click.argument("target-path", type=click.Path(), required=False)
def blog(source_path, target_path=None):
    # Delegate to the blog builder; alias avoids shadowing this module's
    # `main` group inside the function body.
    from chmp.tools.blog import main as blog_main
    blog_main(source_path, target_path)
if __name__ == "__main__":
    # Configure root logging before handing control to the click group.
    logging.basicConfig(level=logging.INFO)
    main()
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from cryptography import utils
from cryptography.exceptions import InvalidTag, UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.ciphers import modes
@utils.register_interface(ciphers.CipherContext)
@utils.register_interface(ciphers.AEADCipherContext)
@utils.register_interface(ciphers.AEADEncryptionContext)
@utils.register_interface(ciphers.AEADDecryptionContext)
class _CipherContext(object):
    """Symmetric cipher context backed by an OpenSSL ``EVP_CIPHER_CTX``.

    Implements both the plain and the AEAD cipher-context interfaces;
    ``operation`` selects between encryption and decryption.
    """
    # Values passed as the `enc` argument of EVP_CipherInit_ex.
    _ENCRYPT = 1
    _DECRYPT = 0
    # Largest chunk fed to a single EVP_CipherUpdate call (fits in a C int).
    _MAX_CHUNK_SIZE = 2 ** 30 - 1
    def __init__(self, backend, cipher, mode, operation):
        self._backend = backend
        self._cipher = cipher
        self._mode = mode
        self._operation = operation
        # Auth tag: set after encrypt-finalize, or up front for GCM decrypt.
        self._tag = None
        if isinstance(self._cipher, ciphers.BlockCipherAlgorithm):
            self._block_size_bytes = self._cipher.block_size // 8
        else:
            # Stream ciphers have no block structure; treat as 1 byte.
            self._block_size_bytes = 1
        ctx = self._backend._lib.EVP_CIPHER_CTX_new()
        # Tie the context's lifetime to the cffi handle so it is always freed.
        ctx = self._backend._ffi.gc(
            ctx, self._backend._lib.EVP_CIPHER_CTX_free
        )
        registry = self._backend._cipher_registry
        try:
            adapter = registry[type(cipher), type(mode)]
        except KeyError:
            raise UnsupportedAlgorithm(
                "cipher {} in {} mode is not supported "
                "by this backend.".format(
                    cipher.name, mode.name if mode else mode
                ),
                _Reasons.UNSUPPORTED_CIPHER,
            )
        evp_cipher = adapter(self._backend, cipher, mode)
        if evp_cipher == self._backend._ffi.NULL:
            msg = "cipher {0.name} ".format(cipher)
            if mode is not None:
                msg += "in {0.name} mode ".format(mode)
            msg += (
                "is not supported by this backend (Your version of OpenSSL "
                "may be too old. Current version: {}.)"
            ).format(self._backend.openssl_version_text())
            raise UnsupportedAlgorithm(msg, _Reasons.UNSUPPORTED_CIPHER)
        # Pick the IV/nonce/tweak bytes appropriate to the mode (or read the
        # nonce off the cipher itself for nonce-carrying algorithms).
        if isinstance(mode, modes.ModeWithInitializationVector):
            iv_nonce = self._backend._ffi.from_buffer(
                mode.initialization_vector
            )
        elif isinstance(mode, modes.ModeWithTweak):
            iv_nonce = self._backend._ffi.from_buffer(mode.tweak)
        elif isinstance(mode, modes.ModeWithNonce):
            iv_nonce = self._backend._ffi.from_buffer(mode.nonce)
        elif isinstance(cipher, modes.ModeWithNonce):
            iv_nonce = self._backend._ffi.from_buffer(cipher.nonce)
        else:
            iv_nonce = self._backend._ffi.NULL
        # begin init with cipher and operation type
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx,
            evp_cipher,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            operation,
        )
        self._backend.openssl_assert(res != 0)
        # set the key length to handle variable key ciphers
        res = self._backend._lib.EVP_CIPHER_CTX_set_key_length(
            ctx, len(cipher.key)
        )
        self._backend.openssl_assert(res != 0)
        if isinstance(mode, modes.GCM):
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                ctx,
                self._backend._lib.EVP_CTRL_AEAD_SET_IVLEN,
                len(iv_nonce),
                self._backend._ffi.NULL,
            )
            self._backend.openssl_assert(res != 0)
            if mode.tag is not None:
                # Tag supplied up front (decryption): register it so
                # EVP_CipherFinal_ex can verify it.
                res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                    ctx,
                    self._backend._lib.EVP_CTRL_AEAD_SET_TAG,
                    len(mode.tag),
                    mode.tag,
                )
                self._backend.openssl_assert(res != 0)
                self._tag = mode.tag
        # pass key/iv
        res = self._backend._lib.EVP_CipherInit_ex(
            ctx,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
            self._backend._ffi.from_buffer(cipher.key),
            iv_nonce,
            operation,
        )
        # Check for XTS mode duplicate keys error
        errors = self._backend._consume_errors()
        lib = self._backend._lib
        if res == 0 and (
            (
                lib.CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER
                and errors[0]._lib_reason_match(
                    lib.ERR_LIB_EVP, lib.EVP_R_XTS_DUPLICATED_KEYS
                )
            )
            or (
                lib.Cryptography_HAS_PROVIDERS
                and errors[0]._lib_reason_match(
                    lib.ERR_LIB_PROV, lib.PROV_R_XTS_DUPLICATED_KEYS
                )
            )
        ):
            raise ValueError("In XTS mode duplicated keys are not allowed")
        self._backend.openssl_assert(res != 0, errors=errors)
        # We purposely disable padding here as it's handled higher up in the
        # API.
        self._backend._lib.EVP_CIPHER_CTX_set_padding(ctx, 0)
        self._ctx = ctx
    def update(self, data: bytes) -> bytes:
        """Process ``data`` and return the resulting output bytes."""
        buf = bytearray(len(data) + self._block_size_bytes - 1)
        n = self.update_into(data, buf)
        return bytes(buf[:n])
    def update_into(self, data: bytes, buf) -> int:
        """Process ``data`` into writable buffer ``buf``; return bytes written.

        ``buf`` must have room for ``len(data)`` plus one block minus one byte.
        """
        total_data_len = len(data)
        if len(buf) < (total_data_len + self._block_size_bytes - 1):
            raise ValueError(
                "buffer must be at least {} bytes for this "
                "payload".format(len(data) + self._block_size_bytes - 1)
            )
        data_processed = 0
        total_out = 0
        outlen = self._backend._ffi.new("int *")
        baseoutbuf = self._backend._ffi.from_buffer(buf)
        baseinbuf = self._backend._ffi.from_buffer(data)
        # Feed the data in chunks no larger than _MAX_CHUNK_SIZE so the
        # lengths always fit in the C int that OpenSSL expects.
        while data_processed != total_data_len:
            outbuf = baseoutbuf + total_out
            inbuf = baseinbuf + data_processed
            inlen = min(self._MAX_CHUNK_SIZE, total_data_len - data_processed)
            res = self._backend._lib.EVP_CipherUpdate(
                self._ctx, outbuf, outlen, inbuf, inlen
            )
            if res == 0 and isinstance(self._mode, modes.XTS):
                self._backend._consume_errors()
                raise ValueError(
                    "In XTS mode you must supply at least a full block in the "
                    "first update call. For AES this is 16 bytes."
                )
            else:
                self._backend.openssl_assert(res != 0)
            data_processed += inlen
            total_out += outlen[0]
        return total_out
    def finalize(self) -> bytes:
        """Finish the operation and return any remaining output.

        Raises InvalidTag for a failed GCM decryption and ValueError when
        the input was not block-aligned (or a required tag is missing).
        """
        if (
            self._operation == self._DECRYPT
            and isinstance(self._mode, modes.ModeWithAuthenticationTag)
            and self.tag is None
        ):
            raise ValueError(
                "Authentication tag must be provided when decrypting."
            )
        buf = self._backend._ffi.new("unsigned char[]", self._block_size_bytes)
        outlen = self._backend._ffi.new("int *")
        res = self._backend._lib.EVP_CipherFinal_ex(self._ctx, buf, outlen)
        if res == 0:
            errors = self._backend._consume_errors()
            if not errors and isinstance(self._mode, modes.GCM):
                # GCM failure with no queued error means the tag mismatched.
                raise InvalidTag
            lib = self._backend._lib
            self._backend.openssl_assert(
                errors[0]._lib_reason_match(
                    lib.ERR_LIB_EVP,
                    lib.EVP_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH,
                )
                or (
                    lib.Cryptography_HAS_PROVIDERS
                    and errors[0]._lib_reason_match(
                        lib.ERR_LIB_PROV,
                        lib.PROV_R_WRONG_FINAL_BLOCK_LENGTH,
                    )
                )
                or (
                    lib.CRYPTOGRAPHY_IS_BORINGSSL
                    and errors[0].reason
                    == lib.CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH
                ),
                errors=errors,
            )
            raise ValueError(
                "The length of the provided data is not a multiple of "
                "the block length."
            )
        if (
            isinstance(self._mode, modes.GCM)
            and self._operation == self._ENCRYPT
        ):
            # Fetch the authentication tag produced by the encryption.
            tag_buf = self._backend._ffi.new(
                "unsigned char[]", self._block_size_bytes
            )
            res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
                self._ctx,
                self._backend._lib.EVP_CTRL_AEAD_GET_TAG,
                self._block_size_bytes,
                tag_buf,
            )
            self._backend.openssl_assert(res != 0)
            self._tag = self._backend._ffi.buffer(tag_buf)[:]
        res = self._backend._lib.EVP_CIPHER_CTX_reset(self._ctx)
        self._backend.openssl_assert(res == 1)
        return self._backend._ffi.buffer(buf)[: outlen[0]]
    def finalize_with_tag(self, tag: bytes) -> bytes:
        """Register the expected AEAD ``tag``, then finalize (decryption)."""
        tag_len = len(tag)
        if tag_len < self._mode._min_tag_length:
            raise ValueError(
                "Authentication tag must be {} bytes or longer.".format(
                    self._mode._min_tag_length
                )
            )
        elif tag_len > self._block_size_bytes:
            raise ValueError(
                "Authentication tag cannot be more than {} bytes.".format(
                    self._block_size_bytes
                )
            )
        res = self._backend._lib.EVP_CIPHER_CTX_ctrl(
            self._ctx, self._backend._lib.EVP_CTRL_AEAD_SET_TAG, len(tag), tag
        )
        self._backend.openssl_assert(res != 0)
        self._tag = tag
        return self.finalize()
    def authenticate_additional_data(self, data: bytes) -> None:
        """Feed additional authenticated data (AAD) for AEAD modes."""
        outlen = self._backend._ffi.new("int *")
        # A NULL output buffer tells OpenSSL this is AAD, not payload.
        res = self._backend._lib.EVP_CipherUpdate(
            self._ctx,
            self._backend._ffi.NULL,
            outlen,
            self._backend._ffi.from_buffer(data),
            len(data),
        )
        self._backend.openssl_assert(res != 0)
    # Read-only public accessor for the authentication tag.
    tag = utils.read_only_property("_tag")
|
# Rename the "SEQ" field on the "Riffle_channel" table to "Seq".
# arcpy cannot rename a field in place, so the values are staged through a
# temporary field: copy SEQ -> Seq_1, drop SEQ, add Seq, copy Seq_1 -> Seq,
# then drop Seq_1.
arcpy.AddField_management(in_table="Riffle_channel",
                          field_name="Seq_1",
                          field_type="LONG",
                          field_is_nullable="NULLABLE",
                          field_is_required="NON_REQUIRED")
# Copy the original SEQ values into the temporary field.
arcpy.CalculateField_management(in_table="Riffle_channel",
                                field="Seq_1",
                                expression="!SEQ!",
                                expression_type="PYTHON")
arcpy.DeleteField_management(in_table="Riffle_channel",
                             drop_field="SEQ")
# Recreate the field under its new name (with a field alias this time).
arcpy.AddField_management(in_table="Riffle_channel",
                          field_name="Seq",
                          field_type="LONG",
                          field_alias="Seq",
                          field_is_nullable="NULLABLE",
                          field_is_required="NON_REQUIRED")
# Copy the staged values back and drop the temporary field.
arcpy.CalculateField_management(in_table="Riffle_channel",
                                field="Seq",
                                expression="!Seq_1!",
                                expression_type="PYTHON")
arcpy.DeleteField_management(in_table="Riffle_channel",
                             drop_field="Seq_1")
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
# Public API of this nbdev-generated module.
__all__ = ['tweet_text', 'check_sig', 'reconfig', 'run_server', 'fastwebhook_install_service']
# Cell
import json,tweepy,hmac,hashlib,traceback,shutil,time,fcntl
from fastcore.imports import *
from fastcore.foundation import *
from fastcore.utils import *
from fastcore.script import *
from fastcore.meta import *
from fastcore.test import *
from configparser import ConfigParser
from ipaddress import ip_address,ip_network
from socketserver import ThreadingTCPServer
from fastcgi.http import MinimalHTTPHandler
from fastcgi import ReuseThreadingServer
from textwrap import dedent
# Cell
def tweet_text(payload):
    "Send a tweet announcing release based on `payload`"
    release = payload['release']
    # Extract owner/repo from the GitHub API URL of the release.
    owner, repo = re.findall(r'https://api.github.com/repos/([^/]+)/([^/]+)/', release['url'])[0]
    text = "New #{repo} release: v{tag_name}. {html_url}\n\n{body}".format(
        repo=repo, tag_name=release['tag_name'],
        html_url=release['html_url'], body=release['body'])
    # Trim to Twitter's 280-char limit, marking truncation with an ellipsis.
    return text if len(text) <= 280 else text[:279] + "…"
# Cell
def check_sig(content, headers, secret):
    """Verify the GitHub `X-Hub-Signature` HMAC-SHA1 of `content`.

    Raises AssertionError (as before) when the signature is missing or
    wrong.  Fixes: uses `hmac.compare_digest` for a constant-time
    comparison (the plain `==` leaked timing information), and defaults the
    missing header to '' so the comparison never receives None.
    """
    digest = hmac.new(secret, content, hashlib.sha1).hexdigest()
    assert hmac.compare_digest(f'sha1={digest}', headers.get('X-Hub-Signature', ''))
# Cell
class _RequestHandler(MinimalHTTPHandler):
    """Handle GitHub webhook POSTs: on a 'released' event, send a tweet."""
    def _post(self):
        assert self.command == 'POST'
        if self.server.check_ip:
            # Prefer the first X-Forwarded-For hop (proxy setups), falling
            # back to the socket's peer address.
            src_ip = re.split(', *', self.headers.get('X-Forwarded-For', ''))[0] or self.client_address[0]
            src_ip = ip_address(src_ip)
            # Reject sources outside GitHub's published webhook networks.
            assert any((src_ip in wl) for wl in self.server.whitelist)
        self.send_response(200)
        self.end_headers()
        length = self.headers.get('content-length')
        if not length: return
        content = self.rfile.read(int(length))
        if self.server.debug:
            # Debug mode: dump the request and skip all actions.
            print(self.headers, content)
            return
        payload = json.loads(content.decode())
        if payload.get('action',None)=='released':
            # Verify the webhook HMAC signature before acting on it.
            check_sig(content, self.headers, self.server.gh_secret)
            tweet = tweet_text(payload)
            stat = self.server.api.update_status(tweet)
            print(stat.id)
        self.wfile.write(b'ok')
    def handle(self):
        # Any failure is logged to stderr; the handler itself never raises.
        try: self._post()
        except Exception as e: sys.stderr.write(traceback.format_exc())
    def log_message(self, fmt, *args): sys.stderr.write(fmt%args)
# Cell
def reconfig(s):
    """Put stream `s` into line-buffered / synchronous mode (best effort)."""
    # Python 3.7+ text streams support reconfigure() directly.
    if hasattr(s, 'reconfigure'):
        return s.reconfigure(line_buffering=True)
    try:
        flags = fcntl.fcntl(s.fileno(), fcntl.F_GETFL) | os.O_SYNC
        fcntl.fcntl(s.fileno(), fcntl.F_SETFL, flags)
    except io.UnsupportedOperation:
        # Stream has no underlying file descriptor (e.g. StringIO).
        pass
# Cell
@call_parse
def run_server(hostname: Param("Host name or IP", str)='localhost',
               port: Param("Port to listen on", int)=8000,
               debug: Param("If True, do not trigger actions, just print", bool_arg)=False,
               inifile: Param("Path to settings ini file", str)='twitter.ini',
               check_ip: Param("Check source IP against GitHub list", bool_arg)=True,
               single_request: Param("Handle one request", bool_arg)=False):
    "Run a GitHub webhook server that tweets about new releases"
    assert os.path.exists(inifile), f"{inifile} not found"
    parser = ConfigParser(interpolation=None)
    parser.read([inifile])
    settings = parser['DEFAULT']

    # Twitter credentials come from the ini file's DEFAULT section.
    auth = tweepy.OAuthHandler(settings['consumer_key'], settings['consumer_secret'])
    auth.set_access_token(settings['access_token'], settings['access_token_secret'])

    os.environ['PYTHONUNBUFFERED'] = '1'
    print(f"Listening on {(hostname,port)}")
    with ReuseThreadingServer((hostname, port), _RequestHandler) as httpd:
        httpd.gh_secret = bytes(settings['gh_secret'], 'utf-8')
        httpd.api = tweepy.API(auth)
        # Allow-list of GitHub's published webhook source networks.
        httpd.whitelist = L(urljson('https://api.github.com/meta')['hooks']).map(ip_network)
        httpd.check_ip, httpd.debug = check_ip, debug
        if single_request:
            httpd.handle_request()
        else:
            try:
                httpd.serve_forever()
            except KeyboardInterrupt:
                print("Closing")
# Cell
@call_parse
def fastwebhook_install_service(hostname: Param("Host name or IP", str)='0.0.0.0',
                port: Param("Port to listen on", int)=8000,
                inifile: Param("Path to settings ini file", str)='twitter.ini',
                check_ip: Param("Check source IP against GitHub list", bool_arg)=True,
                service_path: Param("Directory to write service file to", str)="/etc/systemd/system/"):
    "Install fastwebhook as a service"
    # Locate the installed console script; assumes `fastwebhook` is on PATH.
    script_loc = shutil.which('fastwebhook')
    inifile = Path(inifile).absolute()
    # systemd unit definition running the webhook server with these settings.
    _unitfile = dedent(f"""
        [Unit]
        Description=fastwebhook
        Wants=network-online.target
        After=network-online.target
        [Service]
        ExecStart={script_loc} --inifile {inifile} --check_ip {check_ip} --hostname {hostname} --port {port}
        Restart=always
        [Install]
        WantedBy=multi-user.target""")
    # Write the unit file locally, then copy it into the systemd unit
    # directory (requires sudo).
    Path("fastwebhook.service").write_text(_unitfile)
    run(f"sudo cp fastwebhook.service {service_path}")
# -*- coding: utf-8 -*-
from wtforms import *
from wtforms.validators import DataRequired, Length, Email, Required, EqualTo
from flask_wtf import FlaskForm
class RegistrationsForm(FlaskForm):
    """Account registration form (labels and messages are in Russian)."""
    # Full name: required, 5-60 characters.
    username = StringField('Имя', validators=[
        DataRequired(message='Обязательное поле'),
        Length(min=5, max=60, message='ФИО должен быть не менее 5 и не более 60 символов')
    ])
    # E-mail: must be a valid address, 3-30 characters.
    email = StringField('E-mail', validators=[
        Email(message='Обязательное поле'),
        Length(min=3, max=30, message='Email должен быть не менее 3 и не более 30 символов')
    ])
    # Password: required, 6-16 characters, must match confirm_password.
    password = PasswordField('Пароль', validators=[
        DataRequired(message='Обязательное поле'),
        Length(min=6, max=16, message='Пароль должен быть не менее 6 и не более 16 символов'),
        EqualTo('confirm_password', message='Пароли не совпадают')
    ])
    # Confirmation field; validated via the EqualTo rule on `password`.
    confirm_password = PasswordField('Повторите пароль')
    # "I have read the system rules" checkbox.
    check = BooleanField('Ознакомился с правилами системы')
import os
import bs4
import requests
import re
import time
import random
import sys
import pandas as pd
# CLI with 3 arguments (script name, number of protocols, party selected)
while True:
    try:
        if len(sys.argv) != 3:
            print('Input could not be interpreted. Please insert two arguments:')
            print('1) the number of protocols requested')
            print('2) the name of the party (afd / fdp / cdu / spd / grüne / linke) or "all"')
            print('For example: "python scraper_cli.py 10 spd"')
        else:
            # Validate the requested protocol count (at most 171 available);
            # keep prompting until the user supplies a valid number.
            if int(sys.argv[1]) <= 171:
                amount = int(sys.argv[1])
            else:
                inp2 = input('Please insert the number of protocols requested. (max. 171 available): ')
                while int(inp2) > 171:
                    inp2 = input('Please insert the number of protocols requested (max. 171 available): ')
                amount = int(inp2)
            # Validate the party name; keep prompting until it is recognised.
            if sys.argv[2] in ['afd', 'fdp', 'cdu', 'spd', 'grüne', 'linke', 'all']:
                party = sys.argv[2]
            else:
                inp3 = input('Please insert the name of the party selected. (afd / fdp / cdu / spd / grüne / linke) or "all": ')
                while inp3 not in ['afd', 'fdp', 'cdu', 'spd', 'grüne', 'linke', 'all']:
                    inp3 = input('Please insert the name of the party selected. (afd / fdp / cdu / spd / grüne / linke) or "all": ')
                party = inp3
            print(f'This script will download the last {amount} protocols.')
            print(f'In the csv-file you will find data from the following party: {party}.')
            # SCRIPT 1
            # get the dynamic opendata pages
            opendata_page = 'https://www.bundestag.de/ajax/filterlist/de/services/opendata/543410-543410?limit=10&noFilterSet=true&offset='
            number_pages = amount
            numbers = list(range(0,number_pages,10))
            pages = []
            for number in numbers:
                pages.append(opendata_page + str(number))
            # request the pages to get xml links
            links_to_xml = []
            for page in pages:
                page_request = requests.get(page)
                regex_link_to_xml = r'/resource/blob/\d+/[a-z0-9]+/\d+-data.xml'
                link_ends = re.findall(regex_link_to_xml, page_request.text)
                link_start = 'https://www.bundestag.de'
                for link_end in link_ends:
                    links_to_xml.append(link_start + link_end)
            # make a random sleeper to request irregularly
            def sleeping():
                sec = random.randint(1, 3)
                split_sec = sec/5
                time.sleep(split_sec)
            # request xmls and save to folder
            foldername = './Wahlperiode19_data'
            os.mkdir(foldername)
            for link in links_to_xml:
                if number_pages > 0:
                    sleeping()
                    xml_request = requests.get(link)
                    filename = str(number_pages)+ '.xml'
                    file = open(foldername + '/' + filename, 'w', encoding='UTF-8')
                    file.write(xml_request.text)
                    file.close()
                    number_pages-=1
            # SCRIPT 2
            folder_source = foldername
            folder_target = foldername
            filenames = os.listdir(folder_source)
            for filename in filenames:
                if filename != '.DS_Store':
                    # open file and select p klasse redner
                    with open(folder_source + '/' + filename, encoding='UTF-8') as file:
                        content = file.read()
                    soup = bs4.BeautifulSoup(content, 'xml') # or html.parser
                    ps = soup.find_all('p', attrs={'klasse' : 'redner'}) # grab tags with a specific attribute
                    names = soup.find_all('name')
                    # collect next siblings of p redner
                    for p in ps:
                        redner_agg = []
                        for sibling in p.next_siblings:
                            if isinstance(sibling, bs4.element.Tag) and sibling.get('klasse') == 'redner':
                                break
                            else:
                                redner_agg.append(sibling)
                        # define new tag "abschnitt" and wrap p in it
                        abschnitt = soup.new_tag('abschnitt')
                        p.wrap(abschnitt)
                        # add siblings to "abschnitt"
                        for tag in redner_agg:
                            abschnitt.append(tag)
                    # collect next siblings of name
                    for name in names:
                        name_agg = []
                        if name.find('vorname'):
                            continue
                        else:
                            for nsibling in name.next_siblings:
                                name_agg.append(nsibling)
                        # define new tag "name_abschnitt" and wrap name in it
                        name_abschnitt = soup.new_tag('name_abschnitt')
                        name.wrap(name_abschnitt)
                        # add siblings to "name_abschnitt"
                        for tag in name_agg:
                            name_abschnitt.append(tag)
                    # save new files with "abschnitt"
                    with open(folder_target + '/preprocessed' + filename, 'w', encoding='UTF-8') as new_file:
                        new_file.write(str(soup))
            # SCRIPT 3
            # functions with beautifulsoup
            def get_redner_info(child):
                # Extract the speaker's name and party from a <p klasse="redner"> tag.
                vorname = child.find('vorname')
                if vorname != None:
                    vorname = vorname.text
                nachname = child.find('nachname')
                if nachname != None:
                    nachname = nachname.text
                name = str(vorname) + ' ' + str(nachname)
                fraktion = child.find('fraktion')
                if fraktion != None:
                    fraktion = fraktion.text
                return name, fraktion
            def get_sitzungsnummer_and_datum(soup):
                # Session number and date of the parsed protocol.
                sitzungsnummer = soup.find('sitzungsnr').text
                datum = soup.find('datum').get('date')
                return sitzungsnummer, datum
            def is_redner(child):
                # True for speaker-announcement paragraphs.
                if child.name == 'p' and child.get('klasse', []) == 'redner':
                    return True
            def is_redner_p_tag(child):
                # True for speech paragraphs that are not part of a name block.
                if child.name == 'p' and (child.get('klasse', []) in ['J_1', 'J', 'O']) and child.parent.name != 'name_abschnitt':
                    return True
            path = foldername
            filenames = os.listdir(path)
            # make lists of same length
            namen = []
            fraktionen = []
            p_tags = []
            sitzungsnummern = []
            datums = []
            abschnittsnummern = []
            for filename in filenames:
                if filename != '.DS_Store':
                    with open(path + '/' + filename, encoding='UTF-8') as file:
                        content = file.read()
                    soup = bs4.BeautifulSoup(content, 'xml')
                    # NOTE(review): `reden` is collected but never used.
                    reden = soup.find_all('rede')
                    abschnitte = soup.find_all('abschnitt')
                    sitzungsnummer, datum = get_sitzungsnummer_and_datum(soup)
                    abschnittsnummer = 0
                    for abschnitt in abschnitte:
                        children = abschnitt.findChildren()
                        for child in children:
                            if is_redner(child):
                                name, fraktion = get_redner_info(child)
                                abschnittsnummer += 1
                            if is_redner_p_tag(child):
                                p_tags.append(child.text)
                                namen.append(name)
                                fraktionen.append(fraktion)
                                sitzungsnummern.append(sitzungsnummer)
                                datums.append(datum)
                                abschnittsnummern.append(abschnittsnummer)
            # create dataframe
            df_dict = {'datum':datums, 'sitzung':sitzungsnummern, 'abschnitt':abschnittsnummern, 'name':namen, 'fraktion':fraktionen, 'p_tag':p_tags}
            df = pd.DataFrame(df_dict)
            df.sort_values(by=['datum', 'sitzung', 'abschnitt'], inplace=True)
            # clean p_tags
            def clean_text(p_tag):
                p_tag_clean1 = p_tag.replace('\xa0', ' ') # encoding problem in spite of utf-8
                p_tag_clean2 = re.sub(r'(\d+)(\w+)', r'\1 \2', p_tag_clean1) # separate numbers and words
                p_tag_clean3 = re.sub(r'(\d+)(\s)(\d+)', r'\1\3', p_tag_clean2) # connect two following numbers
                return p_tag_clean3
            df['p_tag'] = df['p_tag'].apply(clean_text)
            # group by abschnitte and merge with original dataframe
            abschnitte_grouped = df.groupby(['datum', 'sitzung', 'abschnitt'])['p_tag'].transform(lambda x: ' '.join(x)).drop_duplicates()
            abschnitte_df = pd.DataFrame(abschnitte_grouped)
            abschnitte_df.rename(columns={'p_tag':'p_tag_abschnitt'}, inplace=True)
            merged_df = df.merge(abschnitte_df, how='inner', left_index=True, right_index=True).drop('p_tag', axis=1)
            # filter for a party
            merged_df['fraktion'] = merged_df['fraktion'].map({'BÜNDNIS 90/DIE GRÜNEN':'grüne', 'SPD':'spd', 'FDP':'fdp', 'AfD':'afd',
                                                               'DIE LINKE':'linke', 'CDU/CSU':'cdu', 'BÜNDNIS\xa090/DIE GRÜNEN':'grüne',
                                                               'Fraktionslos':'fraktionslos', 'BÜNDNIS 90/DIE GRÜNE N':'grüne', 'Bündnis 90/Die Grünen':'grüne'})
            merged_df_filtered = merged_df.loc[merged_df['fraktion']==party]
            # save to csv
            merged_df_filtered.to_csv(path + '/data.csv', encoding='UTF-8')
            print('New folder Wahlperiode19_data created.')
            print('Documents generated: original xml-files, preprocessed xml-files, csv-file storing structured data.')
            break
    except ValueError:
        print('Input could not be interpreted. Please insert two arguments:')
        print('1) the number of protocols requested')
        print('2) the name of the party (afd / fdp / cdu / spd / grüne / linke) or "all"')
        print('For example: "python scraper_cli.py 10 spd"')
        break
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* create by afterloe
* MIT License
* version is 1.0
"""
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering and configure pins 5 and 6 as outputs.
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT)
GPIO.setup(6, GPIO.OUT)
# Blink the two outputs alternately, 15 cycles of 0.5 s each.
for i in range(0, 15):
    GPIO.output(5, GPIO.HIGH)
    time.sleep(0.5)
    GPIO.output(5, GPIO.LOW)
    GPIO.output(6, GPIO.HIGH)
    time.sleep(0.5)
    GPIO.output(6, GPIO.LOW)
# Release all GPIO resources claimed by this script.
GPIO.cleanup()
|
# Copyright (C) Daniel Carter 2019
# Licensed under the 2-clause BSD licence
# Selection of helper functions for graphviz-based graphs
import datetime
import json
import os
import re
import pygraphviz as pgv
# Edge colour used to mark device-specific (vendor-added) vulnerabilities.
_DEVICE_SPECIFIC_COLOR = 'red'
# Edge style used to mark vulnerabilities that have been patched.
_PATCHED_VULNERABILITY_STYLE = 'dashed'
def strictify(graph, directed=True):
    '''Make a non-strict graph into a strict graph (with no duplicate edges)

    Call .close() on original graph if not to be reused and ensure .close()
    is called on the returned graph.
    '''
    # Building a strict AGraph de-duplicates edges on insertion.
    strict_graph = pgv.AGraph(strict=True, directed=directed)
    strict_graph.add_edges_from(graph.edges())
    return strict_graph
def dfs(graph, start, end):
    '''Run depth-first search on graph from start to end'''
    seen = set()
    stack = [start]
    while stack:
        node = stack.pop()
        if node == end:
            # Reached the target: a path exists.
            return True
        seen.add(node)
        stack.extend(n for n in graph.out_neighbors(node) if n not in seen)
    return False
def dates_to_today(start_year):
    '''Generate a list of dates from the beginning of start_year to the current date'''
    today = datetime.date.today()
    result = []
    for year in range(start_year, today.year + 1):
        for month in range(1, 13):
            first_of_month = datetime.date(year, month, 1)
            # Stop as soon as we pass the current date.
            if first_of_month > today:
                return result
            result.append(first_of_month)
    return result
def count_edge(edge):
    '''Count the number of vulnerabilities represented by a given edge'''
    label = edge.attr['label']
    if label == '':
        return 0
    # A label like "... (3 vulnerabilities)" carries an explicit count.
    match = re.search(r'(?<=\()[0-9]*(?=\ vulnerabilities\))', label)
    # A label without a count stands for exactly one vulnerability.
    return int(match.group()) if match is not None else 1
def load_version_list(path):
    '''Load the list of Android versions from the path given'''
    if not os.path.isfile(path):
        # Missing file means no known versions.
        return []
    with open(path, 'r') as f:
        return list(json.load(f).keys())
def load_version_dates(path):
    '''Load the release dates of Android versions from the path given

    Returns a dict mapping version string to datetime.date.  Versions with
    an empty or too-incomplete release date are skipped; a missing day is
    assumed to be the first of the month.

    Fix: removed the dead `or len(sdate) == 0` condition — empty dates are
    already skipped by the preceding `continue`.
    '''
    output = dict()
    if os.path.isfile(path):
        with open(path, 'r') as f:
            rjson = json.load(f)
            for version, sdateref in rjson.items():
                sdate = sdateref[0]
                if len(sdate) == 0:
                    # Ignore releases with no release date
                    continue
                if '?' in sdate:
                    # Date is incomplete
                    if '?' in sdate[:8]:
                        # If more than just the day is missing, ignore this release
                        continue
                    # If the only problem is a lack of day, then just assume first day of month
                    sdate = sdate[:8] + '01'
                output[version] = datetime.datetime.strptime(sdate, '%Y-%m-%d').date()
    return output
def import_graph(path, device_specific=False):
    '''Import a GraphViz file

    You must call .close() on resulting AGraph when you are finished with it
    '''
    if not os.path.isfile(path):
        return None
    graph = pgv.AGraph(path)
    if not device_specific:
        # Drop red edges, which mark device-specific vulnerabilities.
        red_edges = [edge for edge in graph.edges()
                     if edge.attr['color'] == _DEVICE_SPECIFIC_COLOR]
        graph.delete_edges_from(red_edges)
    return graph
def add_backwards_edges(graph):
    '''Add edges which lower privileges (e.g. an attacker with root access can access user mode'''
    hierarchy = ['remote', 'proximity', 'network', 'user', 'access-to-data', 'modify-apps',
                 'control-hardware', 'system', 'unlock-bootloader', 'tee', 'root', 'kernel']
    # Connect every privilege level to all levels preceding it in the list.
    for higher_index, higher in enumerate(hierarchy):
        for lower in hierarchy[:higher_index]:
            graph.add_edge(higher, lower)
def remove_patched(graph):
    '''Returns a copy of the graph with only unpatched vulnerabilities'''
    unpatched = graph.copy()
    # Patched vulnerabilities are drawn with the dashed edge style.
    patched_edges = [edge for edge in unpatched.edges()
                     if edge.attr['style'] == _PATCHED_VULNERABILITY_STYLE]
    unpatched.delete_edges_from(patched_edges)
    return unpatched
def get_score(graph):
    '''Gives a score based on the exploits possible for a particular graph'''
    # Sequence of odd numbered 'priorities' as they are unambiguously between even numbered limits below
    checks = [
        ('remote', 'kernel', 17),
        ('remote', 'system', 15),
        ('remote', 'user', 13),
        ('network', 'kernel', 11),
        ('network', 'system', 9),
        ('network', 'user', 7),
        ('user', 'kernel', 5),
        ('user', 'system', 3),
    ]
    # Return the score of the most severe reachable escalation.
    for start, end, score in checks:
        if dfs(graph, start, end):
            return score
    return 1
|
from . import CustomChart
import plotly.graph_objects as go
class ChoroplethMap(CustomChart): # noqa: H601
    """Choropleth map: colour GeoJSON regions by a value column."""
    def create_figure(self, df, **kwargs):
        """Return the plotly figure for this chart.

        Args:
            df: pandas dataframe containing the columns named by the
                `location` and `z` kwargs
            kwargs: must provide `location` and `z` column names, plus
                either `geojson_map` (pre-loaded mapping) or `geojson`
                (URL or file path)

        Returns:
            go.Figure: configured Choroplethmapbox figure
        """
        def get_geojson():
            # Resolve the GeoJSON either from the in-memory `geojson_map`
            # kwarg or by fetching/opening the `geojson` URL or path.
            import json
            from urllib.request import urlopen
            geojson_map = kwargs.get('geojson_map', {})
            if not geojson_map:
                geojson_path = kwargs.get('geojson', None)
                assert geojson_path, 'no geojson resource provided'
                try:
                    f = urlopen(geojson_path)
                except ValueError: # invalid URL
                    import os
                    # Not a URL: treat it as a filesystem path (relative
                    # paths resolve against the context working directory).
                    f = open(geojson_path if os.path.isabs(geojson_path) else os.path.join(self.context.workdir, geojson_path), 'r')
                try:
                    geojson_map = json.load(f)
                finally:
                    f.close()
            return geojson_map
        location_column = kwargs.get('location')
        z_column = kwargs.get('z')
        # NOTE(review): featureidkey is hard-coded to the NL_NAME_1 property;
        # confirm it matches the GeoJSON actually used.
        fig = go.Figure(go.Choroplethmapbox(
            featureidkey="properties.NL_NAME_1",
            geojson=get_geojson(),
            locations=df[location_column],
            z=df[z_column],
            zauto=True,
            colorscale='viridis',
            reversescale=False,
            showscale=True,
        ))
        fig.update_layout(
            mapbox_style="open-street-map",
            mapbox_zoom=3,
            mapbox_center={"lat": 37.110573, "lon": 106.493924},
            height=800
        )
        return fig
|
"""Module for managing the system properties for the fm_server."""
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_allclose, assert_,
assert_raises)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
# NOTE(review): per the table below, float16 maps to float32, not float64 --
# the comment above is slightly out of date.
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
             np.complex128]
# Expected output dtype for the entry at the same position in dtypes_in.
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
              np.complex128]
# test complex256 as well if it is available
try:
    dtypes_in += [np.complex256, ]
    dtypes_out += [np.complex128, ]
except AttributeError:
    # np.complex256 does not exist on this platform/build; skip it.
    pass
def test_dwt_idwt_basic():
    """dwt produces the expected 'db2' coefficients and idwt round-trips."""
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    cA, cD = pywt.dwt(x, 'db2')
    cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
    cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
                 1.22474487]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)
    # mismatched dtypes OK
    x_roundtrip2 = pywt.idwt(cA.astype(np.float64), cD.astype(np.float32),
                             'db2')
    assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
    assert_(x_roundtrip.dtype == np.float64)
def test_dwt_idwt_dtypes():
    """dwt/idwt must map each input dtype to its expected output dtype."""
    wavelet = pywt.Wavelet('haar')
    for dtype_in, dtype_expected in zip(dtypes_in, dtypes_out):
        data = np.ones(4, dtype=dtype_in)
        errmsg = "wrong dtype returned for {0} input".format(dtype_in)
        approx, detail = pywt.dwt(data, wavelet)
        assert_(approx.dtype == detail.dtype == dtype_expected, "dwt: " + errmsg)
        reconstructed = pywt.idwt(approx, detail, wavelet)
        assert_(reconstructed.dtype == dtype_expected, "idwt: " + errmsg)
def test_dwt_idwt_basic_complex():
    """Complex input: dwt acts independently on real and imaginary parts."""
    x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
    x = x + 0.5j*x
    cA, cD = pywt.dwt(x, 'db2')
    # Expected coefficients are the real-case values scaled into the same
    # complex ray (imag = 0.5 * real), mirroring the construction of x.
    cA_expect = np.asarray([5.65685425, 7.39923721, 0.22414387, 3.33677403,
                            7.77817459])
    cA_expect = cA_expect + 0.5j*cA_expect
    cD_expect = np.asarray([-2.44948974, -1.60368225, -4.44140056, -0.41361256,
                            1.22474487])
    cD_expect = cD_expect + 0.5j*cD_expect
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_idwt_partial_complex():
    """idwt with one coefficient array set to None reconstructs that band only,
    and the two partial reconstructions sum back to the input."""
    x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
    x = x + 0.5j*x
    cA, cD = pywt.dwt(x, 'haar')
    cA_rec_expect = np.array([5.0+2.5j, 5.0+2.5j, 1.0+0.5j, 1.0+0.5j,
                              1.5+0.75j, 1.5+0.75j, 5.0+2.5j, 5.0+2.5j])
    cA_rec = pywt.idwt(cA, None, 'haar')
    assert_allclose(cA_rec, cA_rec_expect)
    cD_rec_expect = np.array([-2.0-1.0j, 2.0+1.0j, 0.0+0.0j, 0.0+0.0j,
                              -3.5-1.75j, 3.5+1.75j, -1.0-0.5j, 1.0+0.5j])
    cD_rec = pywt.idwt(None, cD, 'haar')
    assert_allclose(cD_rec, cD_rec_expect)
    # Linearity: approximation + detail reconstructions equal the original.
    assert_allclose(cA_rec + cD_rec, x)
def test_dwt_wavelet_kwd():
    """dwt accepts the wavelet as a keyword Wavelet object plus a mode kwarg."""
    x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    w = pywt.Wavelet('sym3')
    cA, cD = pywt.dwt(x, wavelet=w, mode='constant')
    cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
                 7.81994027]
    cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
                 -0.09722957, -0.07045258]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
    """dwt_coeff_len accepts the filter length as an int or a Wavelet."""
    x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    w = pywt.Wavelet('sym3')
    # Every mode yields length 6 except periodization, which yields 4.
    expected_result = [6, ] * len(pywt.Modes.modes)
    expected_result[pywt.Modes.modes.index('periodization')] = 4
    # Filter length supplied as a plain int.
    ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode)
                for mode in pywt.Modes.modes]
    assert_allclose(ln_modes, expected_result)
    # Filter length supplied as a Wavelet object.
    ln_modes = [pywt.dwt_coeff_len(len(x), w, mode)
                for mode in pywt.Modes.modes]
    assert_allclose(ln_modes, expected_result)
def test_idwt_none_input():
    """A None coefficient array behaves like an all-zero array of that size."""
    coeffs = [1, 2, 0, 1]
    zeros = [0, 0, 0, 0]
    assert_allclose(pywt.idwt(coeffs, None, 'db2', 'symmetric'),
                    pywt.idwt(coeffs, zeros, 'db2', 'symmetric'),
                    rtol=1e-15, atol=1e-15)
    assert_allclose(pywt.idwt(None, coeffs, 'db2', 'symmetric'),
                    pywt.idwt(zeros, coeffs, 'db2', 'symmetric'),
                    rtol=1e-15, atol=1e-15)
    # Only one argument at a time can be None
    assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'symmetric')
def test_idwt_invalid_input():
    """idwt rejects coefficient arrays shorter than the filter (min 4 for 'db4')."""
    too_short_cA = [1, 2, 4]
    too_short_cD = [4, 1, 3]
    assert_raises(ValueError, pywt.idwt, too_short_cA, too_short_cD, 'db4', 'symmetric')
def test_dwt_single_axis():
    """A 2D dwt along the last axis equals per-row 1D transforms."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]
    cA, cD = pywt.dwt(x, 'db2', axis=-1)
    for row, (cA_row, cD_row) in enumerate(zip(cA, cD)):
        cA_ref, cD_ref = pywt.dwt(x[row], 'db2')
        assert_allclose(cA_row, cA_ref)
        assert_allclose(cD_row, cD_ref)
def test_idwt_single_axis():
    """A 2D idwt along the last axis equals per-row 1D inverse transforms."""
    x = np.asarray([[3, 7, 1, 1],
                    [-2, 5, 4, 6]])
    x = x + 1j*x  # test with complex data
    cA, cD = pywt.dwt(x, 'db2', axis=-1)
    for row in range(len(x)):
        reconstructed = pywt.idwt(cA[row], cD[row], 'db2', axis=-1)
        assert_allclose(x[row], reconstructed)
def test_dwt_axis_arg():
    """For 2D input, axis=1 and axis=-1 denote the same axis in dwt."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]
    cA_neg, cD_neg = pywt.dwt(x, 'db2', axis=-1)
    cA_pos, cD_pos = pywt.dwt(x, 'db2', axis=1)
    assert_allclose(cA_neg, cA_pos)
    assert_allclose(cD_neg, cD_pos)
def test_idwt_axis_arg():
    """For 2D coefficients, axis=1 and axis=-1 denote the same axis in idwt."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]
    cA, cD = pywt.dwt(x, 'db2', axis=1)
    rec_neg = pywt.idwt(cA, cD, 'db2', axis=-1)
    rec_pos = pywt.idwt(cA, cD, 'db2', axis=1)
    assert_allclose(rec_neg, rec_pos)
def test_dwt_idwt_axis_excess():
    """Transforming over a nonexistent axis must raise ValueError."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]
    # x only has axes 0 and 1; axis=2 (dwt) and axis=1 on 1D input (idwt)
    # are both out of range.
    assert_raises(ValueError,
                  pywt.dwt, x, 'db2', 'symmetric', axis=2)
    assert_raises(ValueError,
                  pywt.idwt, [1, 2, 4], [4, 1, 3], 'db2', 'symmetric', axis=1)
def test_error_on_continuous_wavelet():
    """dwt/idwt accept only discrete wavelets; continuous ones raise."""
    # A ValueError is raised if a Continuous wavelet is selected
    data = np.ones((32, ))
    for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
        assert_raises(ValueError, pywt.dwt, data, cwave)
        # Valid coefficients from a discrete wavelet still may not be
        # inverted with a continuous one.
        cA, cD = pywt.dwt(data, 'db1')
        assert_raises(ValueError, pywt.idwt, cA, cD, cwave)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    run_module_suite()
|
def lag(orig: str, num_lags: int = 1) -> str:
    """Append a LaTeX-style lag subscript (e.g. ``_{t - 1}``) to a name."""
    suffix = f'_{{t - {num_lags}}}'
    return orig + suffix
|
# -*- coding: utf-8 -*-
from django.db import models
class Portrait(models.Model):
    """Minimal portrait record.

    NOTE(review): a BooleanField named ``user`` is surprising — presumably a
    placeholder or flag; confirm the intended schema before relying on it.
    """
    # Single boolean flag; semantics not evident from this file.
    user = models.BooleanField()
|
import mpylib
import opcode
class AttrDict:
    """Attribute-style read access over a plain dict."""

    def __init__(self, d):
        # Keep a reference to the backing mapping; lookups delegate to it.
        self.d = d

    def __getattr__(self, k):
        # Invoked only for names not found via normal attribute lookup.
        # NOTE: raises KeyError (not AttributeError) for unknown keys,
        # matching the original behavior.
        return self.d[k]
# Attribute-style access to the CPython opcode map (op.LOAD_NAME etc.).
op = AttrDict(opcode.opmap)

with open("testout.mpy", "wb") as f:
    mpy = mpylib.MPYOutput(f)
    # Header: bytecode version 3, feature flags, 31 small-int bits.
    mpy.write_header(3, mpylib.MICROPY_PY_BUILTINS_STR_UNICODE | mpylib.MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE, 31)
    # Hand-build a module-level code object equivalent to: print(-65, "string")
    co = mpylib.CodeType()
    co.mpy_stacksize = 2
    co.mpy_excstacksize = 0
    co.co_flags = 0
    co.co_argcount = 0
    co.co_kwonlyargcount = 0
    co.mpy_def_pos_args = 0
    co.co_lnotab = b'\x00\x00'
    co.co_cellvars = ()
    # co.co_code = b'\x1b\xc9\x00\x00\x81d\x012\x11['
    # Assemble the bytecode for the module body.
    bc = mpylib.Bytecode()
    bc.init_bc()
    bc.add(op.LOAD_NAME, "print")
    bc.add(op.LOAD_CONST_SMALL_INT, -65)
    bc.add(op.LOAD_CONST_OBJ, "string")
    bc.add(op.CALL_FUNCTION, 2, 0)
    bc.add(op.POP_TOP)
    bc.add(op.LOAD_CONST_NONE)
    bc.add(op.RETURN_VALUE)
    co.co_code = bc.get_bc()
    co.co_names = bc.co_names
    co.mpy_consts = bc.co_consts
    co.co_name = "<module>"
    co.co_filename = "testmpy.py"
    co.mpy_codeobjs = ()
    mpy.write_code(co)
|
import structlog
from eth_typing import ChecksumAddress
from eth_utils import decode_hex, event_abi_to_log_topic, to_checksum_address
from gevent.lock import Semaphore
from web3 import Web3
from web3.utils.abi import filter_by_type
from web3.utils.events import get_event_data
from web3.utils.filters import construct_event_filter_params
from raiden.constants import GENESIS_BLOCK_NUMBER
from raiden.utils.typing import (
ABI,
Any,
BlockchainEvent,
BlockNumber,
BlockSpecification,
ChannelID,
Dict,
List,
Optional,
TokenNetworkAddress,
)
from raiden_contracts.constants import CONTRACT_TOKEN_NETWORK, ChannelEvent
from raiden_contracts.contract_manager import ContractManager
log = structlog.get_logger(__name__)
# The maximum block range to query in a single filter query
# Helps against timeout errors that occur if you query a filter for
# the mainnet from Genesis to latest head as we see in:
# https://github.com/raiden-network/raiden/issues/3558
FILTER_MAX_BLOCK_RANGE = 100_000
def get_filter_args_for_specific_event_from_channel(
    token_network_address: TokenNetworkAddress,
    channel_identifier: ChannelID,
    event_name: str,
    contract_manager: ContractManager,
    from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
    to_block: BlockSpecification = "latest",
) -> Dict[str, Any]:
    """ Return the filter params for a specific event of a given channel.

    Args:
        token_network_address: address of the token network contract.
        channel_identifier: channel the event filter is scoped to.
        event_name: name of the TokenNetwork event to filter for.
        contract_manager: used to look up the event ABI.
        from_block / to_block: block range of the filter.

    Returns:
        Filter params dict suitable for eth_getLogs / eth_newFilter.
    """
    event_abi = contract_manager.get_event_abi(CONTRACT_TOKEN_NETWORK, event_name)

    # Here the topics for a specific event are created
    # The first entry of the topics list is the event name, then the first parameter is encoded,
    # in the case of a token network, the first parameter is always the channel identifier
    _, event_filter_params = construct_event_filter_params(
        event_abi=event_abi,
        contract_address=to_checksum_address(token_network_address),
        argument_filters={"channel_identifier": channel_identifier},
        fromBlock=from_block,
        toBlock=to_block,
    )
    return event_filter_params
def get_filter_args_for_all_events_from_channel(
    token_network_address: TokenNetworkAddress,
    channel_identifier: ChannelID,
    contract_manager: ContractManager,
    from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
    to_block: BlockSpecification = "latest",
) -> Dict[str, Any]:
    """ Return the filter params for all events of a given channel.

    Builds the params for one concrete event (OPENED) and then widens the
    topic list so any event for the channel matches.
    """
    event_filter_params = get_filter_args_for_specific_event_from_channel(
        token_network_address=token_network_address,
        channel_identifier=channel_identifier,
        event_name=ChannelEvent.OPENED,
        contract_manager=contract_manager,
        from_block=from_block,
        to_block=to_block,
    )

    # As we want to get all events for a certain channel we remove the event specific code here
    # and filter just for the channel identifier
    # We also have to remove the trailing topics to get all filters
    event_filter_params["topics"] = [None, event_filter_params["topics"][1]]

    return event_filter_params
def decode_event(abi: ABI, log: BlockchainEvent) -> Dict[str, Any]:
    """ Helper function to unpack event data using a provided ABI

    Args:
        abi: The ABI of the contract, not the ABI of the event
        log: The raw event data

    Returns:
        The decoded event
    """
    # Normalise the first topic to bytes: some nodes deliver it as a hex
    # string, others as an integer.
    first_topic = log["topics"][0]
    if isinstance(first_topic, str):
        log["topics"][0] = decode_hex(first_topic)
    elif isinstance(first_topic, int):
        log["topics"][0] = decode_hex(hex(first_topic))
    event_id = log["topics"][0]
    # Map each event's log topic to its ABI entry and pick the matching one.
    topic_to_event_abi = {
        event_abi_to_log_topic(event_abi): event_abi
        for event_abi in filter_by_type("event", abi)
    }
    return get_event_data(topic_to_event_abi[event_id], log)
class StatelessFilter:
    """Log filter that queries via ``eth_getLogs`` instead of keeping a
    node-side filter object, tracking progress in ``last_block`` locally.
    """

    def __init__(
        self,
        web3: Web3,
        from_block: BlockNumber,
        contract_address: ChecksumAddress,
        topics: Optional[List[Optional[str]]],
    ) -> None:
        # fix off-by-one: last_block stores the last *processed* block, so
        # from_block_number() starts exactly at ``from_block``.
        last_block = from_block - 1
        self.web3 = web3
        self.contract_address = contract_address
        self.topics = topics
        self.last_block = last_block  # highest block already queried
        self._lock = Semaphore()  # serializes concurrent get_new_entries calls

    def from_block_number(self) -> BlockNumber:
        """Return the next block to be queried."""
        return BlockNumber(self.last_block + 1)

    def get_new_entries(self, target_block_number: BlockNumber) -> List[BlockchainEvent]:
        """Return the new log entries matching the filter, starting from the
        last run of `get_new_entries`.
        """
        with self._lock:
            result: List[BlockchainEvent] = []
            from_block_number = self.from_block_number()

            # Batch the filter queries in ranges of FILTER_MAX_BLOCK_RANGE
            # to avoid timeout problems
            # NOTE(review): the inclusive from..to range below actually spans
            # FILTER_MAX_BLOCK_RANGE + 1 blocks; harmless, but worth confirming.
            while from_block_number <= target_block_number:
                to_block = BlockNumber(
                    min(from_block_number + FILTER_MAX_BLOCK_RANGE, target_block_number)
                )
                filter_params = {
                    "fromBlock": from_block_number,
                    "toBlock": to_block,
                    "address": self.contract_address,
                    "topics": self.topics,
                }
                log.debug("StatelessFilter: querying new entries", filter_params=filter_params)
                result.extend(self.web3.eth.getLogs(filter_params))
                # `getLogs` is inclusive, the next loop should start from the
                # next block.
                from_block_number = BlockNumber(to_block + 1)
                # Persist progress so a later call resumes after this batch.
                self.last_block = to_block

            return result
|
from itertools import combinations
import sys
import collections
class Adapter:
    """Tree node used to count distinct adapter chains (subtree leaves)."""

    def __init__(self):
        self.children = []
        self.number_of_children = 0
        # Cached leaf count; 0 means "not yet frozen" (see freeze_node).
        self.number_of_leaves = 0

    def add_child(self, child):
        self.children.append(child)
        self.number_of_children = len(self.children)

    def get_number_of_children(self):
        return self.number_of_children

    def get_number_of_leaves(self):
        # Serve from the cache once freeze_node() has run.
        if self.number_of_leaves > 0:
            return self.number_of_leaves
        # A node without children is itself a leaf.
        if self.number_of_children == 0:
            return 1
        return sum(child.get_number_of_leaves() for child in self.children)

    def freeze_node(self):
        # Cache the subtree's leaf count so later queries are O(1).
        self.number_of_leaves = self.get_number_of_leaves()
def get_input_data_as_list(file_name):
    """
    Reads the given file and returns its lines as a list of ints,
    one integer per line.

    NOTE(review): the previous docstring claimed whitespace-trimmed strings;
    the code actually parses each line with int().
    """
    with open(file_name) as input_file:
        data_list = list(map(int, input_file.readlines()))
    return data_list
def plug_in_outlet(bunch_of_adapters):
    """
    Add the wall outlet's effective joltage (0) to the adapter list in place.
    """
    bunch_of_adapters.extend([0])
def plug_in_device(gains):
    """
    Add the device's built-in adapter gain (always 3 jolts) to the list in place.
    """
    gains.extend([3])
def plug_adapters_together(bunch_of_adapters):
    """
    Order the adapters ascending in place so each one can feed the next.
    """
    bunch_of_adapters[:] = sorted(bunch_of_adapters)
def measure_individual_gains(bunch_of_adapters):
    """
    Return the joltage gain (difference) between each adapter and its predecessor.
    """
    consecutive_pairs = zip(bunch_of_adapters[1:], bunch_of_adapters)
    return [higher - lower for higher, lower in consecutive_pairs]
def calc_combinations(adapters):
    """
    Count the distinct adapter chains by building a tree bottom-up:

    1) Walk the adapters from largest to smallest (reverses *adapters* in place).
    2) For each adapter, its children are the (at most three) already-built
       nodes for larger adapters within 3 jolts.
    3) Freeze each node so its subtree's leaf count is cached exactly once,
       giving linear overall complexity.
    4) The leaf count of the final (root) node is the number of combinations.
    """
    adapters.reverse()
    nodes = []
    for idx, joltage in enumerate(adapters):
        node = Adapter()
        # Only the previous three list entries can possibly be within range.
        window_start = max(0, idx - 3)
        for prev_idx in range(window_start, idx):
            if adapters[prev_idx] - joltage <= 3:
                node.add_child(nodes[prev_idx])
        node.freeze_node()
        nodes.append(node)
    return nodes[-1].get_number_of_leaves()
def measure_total_gain(bunch_of_adapters):
    """
    Return (#1-jolt gains) * (#3-jolt gains) for the full adapter chain.

    Mutates *bunch_of_adapters*: appends the outlet (0) and sorts it.
    """
    plug_in_outlet(bunch_of_adapters)
    plug_adapters_together(bunch_of_adapters)
    gains = measure_individual_gains(bunch_of_adapters)
    plug_in_device(gains)
    count_gains = collections.Counter(gains)
    # Use a default of 0: Counter.get(x) returns None for absent keys, and
    # the original ``get(1) * get(3)`` raised TypeError when a gain size
    # never occurred.
    return count_gains.get(1, 0) * count_gains.get(3, 0)
# Script entry point: the puzzle input file is given as the first CLI argument.
bunch_of_adapters = get_input_data_as_list(sys.argv[1])
# NOTE: measure_total_gain mutates the list (adds outlet, sorts) before
# calc_combinations consumes it, which matches what calc_combinations expects.
print(f"The result for 1st star puzzle is: {measure_total_gain(bunch_of_adapters)}")
print(f"The result for 2nd star puzzle is: {calc_combinations(bunch_of_adapters)}")
|
import os
import typing
import jk_typing
import jk_logging
import jk_simpleobjpersistency
import jk_utils
from jk_appmonitoring import RDirectory, RFileSystem, RFileSystemCollection
import jk_mounting
from thaniya_common import AppRuntimeBase
from thaniya_common.cfg import AbstractAppCfg
from thaniya_common.utils import IOContext
from thaniya_server.utils import SecureRandomDataGenerator
from thaniya_server.utils import SecureRandomIDGenerator
from thaniya_server.utils import Scheduler
from thaniya_server.session import SessionIDGenerator
from thaniya_server.session import MemorySessionManager
from thaniya_server.usermgr import BackupUserManager
from thaniya_server_sudo import SudoScriptRunner
from thaniya_server_archive.volumes import BackupVolumeManager
from thaniya_server_archive.ArchiveHttpdCfg import ArchiveHttpdCfg
from thaniya_server_upload.UploadHttpdCfg import UploadHttpdCfg
from thaniya_server.utils.ProcessFilter import ProcessFilter
from thaniya_server.utils.ProcessNotifier import ProcessNotifier
from thaniya_server_upload.slots import UploadSlotManager
class AppRuntimeServerCtrl(object):
    """Runtime context of the Thaniya server control application: loads the
    configuration files, wires up the user/slot/volume managers and exposes
    them as read-only properties.

    Fixes in this revision:
    - removed the duplicated ``processFilter_thaniyaArchiveHttpd`` property,
    - corrected the ``uploadSlotMgr`` return annotation (was ``str``),
    - fixed the "bolume" typo in the volume-manager log message.
    """

    ################################################################################################################################
    ## Constructor Method
    ################################################################################################################################

    #
    # @param jk_logging.AbstractLogger log A log object for informational and error output
    #
    @jk_typing.checkFunctionSignature()
    def __init__(self, mainFilePath:str, log:jk_logging.AbstractLogger):
        mainFilePath = os.path.abspath(mainFilePath)
        self.__appBaseDirPath = os.path.dirname(mainFilePath)
        self.__ioCtx = IOContext()
        self.__log = log
        self.__uploadHttpCfg = UploadHttpdCfg.load()
        self.__archiveHttpCfg = ArchiveHttpdCfg.load()
        self.__secureIDGen = SecureRandomIDGenerator()
        self.__secureRNG = SecureRandomDataGenerator()
        self.__sudoScriptRunner = SudoScriptRunner(self.__archiveHttpCfg.sudo["scriptsDir"])
        self.__userMgr = BackupUserManager(self.__archiveHttpCfg.userMgr["usersDir"], True, log)
        self.__fileSystemCollection = RFileSystemCollection()
        # Disabled mount scanning kept for reference (see BUG note inside).
        """
        for mountInfo in jk_mounting.Mounter().getMountInfos2(isRegularDevice=True):
            assert isinstance(mountInfo, jk_mounting.MountInfo)
            try:
                statvfs = os.statvfs(mountInfo.mountPoint)
            except:
                # BUG: if file paths contain spaces the output of 'mount' is not parsed correctly by jk_mounting. current hotfix: skip these file systems.
                continue
            if not self.__fileSystemCollection.hasMountPoint(mountInfo.mountPoint):
                self.__fileSystemCollection.registerFileSystem(RFileSystem(mountInfo.mountPoint, mountInfo.mountPoint, mountInfo.device, False))
        """

        # ----

        with log.descend("Initializing slot manager ...") as log2:
            self.__uploadSlotMgr = UploadSlotManager(
                ioContext = self.ioCtx,
                backupUserManager = self.__userMgr,
                sudoScriptRunner = self.__sudoScriptRunner,
                slotStatesDirPath = self.__uploadHttpCfg.slotMgr["stateDir"],
                uploadResultDirPath = self.__uploadHttpCfg.slotMgr["resultDir"],
                nSecuritySpaceMarginBytes = int(self.__uploadHttpCfg.slotMgr["securitySpaceMarginBytes"]),
                log = log2,
            )

        with log.descend("Initializing volume manager ...") as log2:
            self.__backupVolumeMgr = BackupVolumeManager(
                staticVolumeCfgDirPath = self.__archiveHttpCfg.volumeMgr["staticVolumeCfgDir"],
                stateDirPath = self.__archiveHttpCfg.volumeMgr["stateDir"],
                log = log2,
                bCreateMissingDirectories = False,
            )

        # Disabled scheduler wiring kept for reference.
        """
        self.__scheduler = Scheduler()
        #self.__scheduler.scheduleRepeat(15, self.uploadSlotMgr.cleanup, [ log ])
        self.__scheduler.startBackgroundThread()
        #self.__cached_usageInfos = jk_utils.VolatileValue(self.__getUsageInfos, 15)
        """

        self.__processFilter_thaniyaArchiveHttpd = ProcessFilter(cmdMatcher= lambda x: x.find("python") > 0, argsMatcher= lambda x: x and (x.find("thaniya-archive-httpd.py") >= 0))
        # NOTE(review): this matcher is always False — a user name equal to
        # "thaniya" can never also start with "thaniya_". Possibly ``or`` was
        # intended; left unchanged to preserve current behavior. TODO confirm.
        self.__processFilter_thaniyaSFTPSessions = ProcessFilter(userNameMatcher= lambda x: (x == "thaniya") and x.startswith("thaniya_"))
        self.__processNotifier = ProcessNotifier(self.__processFilter_thaniyaArchiveHttpd)

        # register file systems
        self.updateFileSystemCollection()
    #

    ################################################################################################################################
    ## Public Properties
    ################################################################################################################################

    @property
    def uploadSlotMgr(self) -> UploadSlotManager:
        # (annotation fixed: this returns the slot manager, not a str)
        return self.__uploadSlotMgr
    #

    @property
    def processFilter_thaniyaSFTPSessions(self) -> ProcessFilter:
        return self.__processFilter_thaniyaSFTPSessions
    #

    @property
    def processFilter_thaniyaArchiveHttpd(self) -> ProcessFilter:
        return self.__processFilter_thaniyaArchiveHttpd
    #

    @property
    def ioCtx(self) -> IOContext:
        return self.__ioCtx
    #

    @property
    def appBaseDirPath(self) -> str:
        return self.__appBaseDirPath
    #

    @property
    def archiveHttpCfg(self) -> AbstractAppCfg:
        return self.__archiveHttpCfg
    #

    @property
    def log(self) -> jk_logging.AbstractLogger:
        return self.__log
    #

    @property
    def secureIDGen(self) -> SecureRandomIDGenerator:
        return self.__secureIDGen
    #

    @property
    def secureRNG(self) -> SecureRandomDataGenerator:
        return self.__secureRNG
    #

    @property
    def userMgr(self) -> BackupUserManager:
        return self.__userMgr
    #

    @property
    def backupVolumeMgr(self) -> BackupVolumeManager:
        return self.__backupVolumeMgr
    #

    @property
    def fileSystemCollection(self) -> RFileSystemCollection:
        return self.__fileSystemCollection
    #

    ################################################################################################################################
    ## Helper Methods
    ################################################################################################################################

    def _loadConfigurationCallback(self, log:jk_logging.AbstractLogger) -> AbstractAppCfg:
        # Reload hook used by the AppRuntimeBase machinery.
        return ArchiveHttpdCfg.load()
    #

    ################################################################################################################################
    ## Public Methods
    ################################################################################################################################

    def updateFileSystemCollection(self):
        # Rebuild the set of monitored file systems and directories:
        # the application's own mount point, /etc/thaniya, and every
        # active backup volume.
        self.__fileSystemCollection.clear()

        rootAppFSPath = jk_utils.fsutils.findMountPoint(self.appBaseDirPath)
        self.__fileSystemCollection.registerFileSystem(RFileSystem(rootAppFSPath, rootAppFSPath, None, False))
        self.__fileSystemCollection.registerDirectory(RDirectory("ThaniyaApp", self.appBaseDirPath, 60*5))

        rootEtcFSPath = jk_utils.fsutils.findMountPoint("/etc")
        if not self.__fileSystemCollection.hasMountPoint(rootEtcFSPath):
            self.__fileSystemCollection.registerFileSystem(RFileSystem(rootEtcFSPath, rootEtcFSPath, None, False))
        self.__fileSystemCollection.registerDirectory(RDirectory("ThaniyaCfg", "/etc/thaniya", 60*5))

        for bvi in self.__backupVolumeMgr.listVolumes(self.log):
            if bvi.isActive:
                if not self.__fileSystemCollection.hasMountPoint(bvi.device.mountPoint):
                    self.__fileSystemCollection.registerFileSystem(RFileSystem(bvi.device.mountPoint, bvi.device.mountPoint, bvi.device.devPath, False))
                self.__fileSystemCollection.registerDirectory(RDirectory("VOL:" + bvi.backupVolumeID.hexData, bvi.backupBaseDirPath, 60))
    #

    def notifyOtherThaniyaProcesses(self) -> int:
        # Send SIGUSR1 to all matching archive-httpd processes; returns the
        # number of processes notified.
        return self.__processNotifier.notifySIGUSR1()
    #

#
|
# -*- coding: utf-8 -*-
from flask import render_template,url_for,request,redirect,flash,abort
from taobao import app,db,bcrypt
from taobao.models import Customer,Crew,User,CustomerDetail,Supplier,Product,Order,OrderDetail,OrderAddress
from taobao.forms import *
from flask_login import current_user,logout_user,login_user,login_required
from flask import render_template_string
from datetime import datetime
import os
import yaml
@app.route('/')
@app.route("/home")
def home():
    """Landing page: list all products, newest first."""
    newest_first = Product.query.all()
    newest_first.reverse()
    return render_template("home.html", products=newest_first)
@app.route('/search',methods=["GET","POST"])
def add():
    """'Search' endpoint that executes the submitted text as a shell command.

    SECURITY: ``os.popen`` on raw form input is an OS command injection hole.
    This looks intentional for a vulnerable demo application — flagged here,
    not changed; do NOT deploy as-is.
    """
    if request.method =="GET":
        return render_template("home.html")
    if request.method == "POST":
        url = request.form['search']
        # Runs the user-supplied string in a shell and captures its output.
        msg = os.popen(url).read()
        if not msg == '':
            return render_template("search.html", msg=msg)
        else:
            return render_template("search.html", msg="Error.Check your command.")
@app.route('/upload', methods=['POST', 'GET'])
def upload():
    """File upload endpoint; .yml uploads are parsed and their content shown.

    SECURITY (review notes, flagged not changed — appears to be an
    intentionally vulnerable demo):
    - ``f.filename`` is used unsanitised in the target path (path traversal).
    - ``yaml.load`` without SafeLoader can execute arbitrary code from a
      crafted YAML file.
    """
    if request.method == 'POST':
        f = request.files['file']
        basepath = os.path.dirname(__file__)  # directory of this source file
        upload_path = os.path.join(basepath,'static/uploads',f.filename)
        f.save(upload_path)
        if (os.path.splitext(f.filename)[1][1:] == 'yml'):
            load_file = os.path.abspath(upload_path)
            with open(load_file,"r") as data:
                # Unsafe deserialization — see docstring.
                msg=yaml.load(data.read())
            return render_template('upload.html',msg=msg)
        print ("OK, file uploaded successfully!")
        return redirect(url_for('upload'))
    return render_template('upload.html')
@app.route("/crew_market")
@login_required
def crew_market():
    """List unemployed crews, hiding those with an unfilled profile.

    Customers may not view the market (403).
    """
    if current_user.table_name == "Customer":
        abort(403)
    crews = Crew.query.filter_by(is_employ=0).all()
    # Build a new filtered list instead of removing items while iterating:
    # ``crews.remove(...)`` inside ``for crew in crews`` skips the element
    # that follows each removal, so some placeholder profiles leaked through.
    crews = [
        crew for crew in crews
        if crew.massage != "这个人很懒,什么也没有写。" and crew.crew_name != "尚未填写"
    ]
    return render_template("crew_market.html", crews=crews)
@app.route("/request_crew/<int:crew_id>")
@login_required
def request_crew(crew_id):
    """Let the logged-in supplier hire crew *crew_id*; suppliers only."""
    if current_user.table_name != "Supplier":
        abort(403)
    crew = Crew.query.filter_by(id=crew_id).first()
    crew.is_employ = 1  # mark the employment relation as established
    crew.supplier_id = current_user.table_id
    db.session.add(crew)
    db.session.commit()
    flash("雇员添加成功!","success")
    return redirect(url_for("crew_market"))
@app.route("/login",methods=["GET",'POST'])
def login():
    """Log a user in under one of three roles (customer/supplier/crew).

    Credentials are checked against the role-specific table, then the shared
    User row (table_name + table_id) is used for the session so permissions
    follow the role.
    NOTE(review): if form.role.data were ever outside "1"-"3", ``table`` would
    be unbound below — presumably the form restricts the choices; confirm.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        if form.role.data == "1":  # customer / buyer
            table = Customer
            table_name="Customer"
        elif form.role.data == "2":  # supplier
            table =Supplier
            table_name = "Supplier"
        elif form.role.data == "3":  # crew / employee
            table = Crew
            table_name = "Crew"
        user = table.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            table_id = user.id
            # Log in via the User table: it carries the role and permissions.
            user_user = User.query.filter_by(table_id=table_id,table_name=table_name).first()
            login_user(user_user,remember=form.remember.data)
            next_page = request.args.get("next")
            if next_page:
                flash('登录成功!', 'success')
                return redirect(next_page)
            else:
                return redirect(url_for('home'))
        else:
            flash('登录失败,请检查你的角色名,邮箱和密码!', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route("/register",methods=["GET",'POST'])
def register():
    """Create an account in the chosen role table plus a shared User row.

    The role-specific row is created first; its id is then mirrored into the
    User table (table_name + table_id) used for authentication.
    NOTE(review): as in login(), a role value outside "1"-"3" would leave
    ``role``/``table`` unbound — presumably the form restricts this; confirm.
    """
    form = RegistrationForm()
    table_name=""
    if form.validate_on_submit():
        if form.role.data == "1":
            role = Customer()
            table_name = "Customer"
            table = Customer
        elif form.role.data == "2":
            role = Supplier()
            table_name = "Supplier"
            table = Supplier
        elif form.role.data == "3":
            role = Crew()
            table_name = "Crew"
            table = Crew
        hashed_password = bcrypt.generate_password_hash(password=form.password.data).decode("utf-8")
        role.username = form.username.data
        role.email = form.email.data
        role.password = hashed_password
        db.session.add(role)
        db.session.commit()
        # Fetch the freshly assigned primary key to link the User row.
        table_id = table.query.filter_by(email=form.email.data).first().id
        user = User()
        user.table_name = table_name
        user.table_id = table_id
        user.username=form.username.data
        user.email=form.email.data
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created,now you can login in!', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route("/logout")
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
@app.route("/customer/<string:username>/account/")
@login_required
def customer_account(username):
    """Customer account page; customers only (403 otherwise)."""
    if current_user.table_name != "Customer":
        abort(403)
    return render_template("customer_account.html",username=username)
@app.route("/customer/shopping_car/")
@login_required
def shopping_car():
    """Show the customer's cart and keep its total price up to date.

    The cart is the customer's Order with status 0 (not yet submitted); it is
    created on first visit. Submitting turns it into a real order and a new
    empty cart is created later. (Translated from the original Chinese notes.)
    """
    if current_user.table_name != "Customer":
        abort(403)
    # status 0 means "order not yet submitted", i.e. the cart
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status=0).first()
    customer_detail_default = CustomerDetail.query.filter_by(customer_id=current_user.table_id,is_default=1).first()
    if shopping_car is not None:
        orderdetails = OrderDetail.query.filter_by(order_id=shopping_car.id).all()
        # Recompute the total from the current line items and persist it.
        price = 0
        for i in orderdetails:
            product = Product.query.filter_by(id=i.product_id).first()
            price = price+i.product_count*product.price
        shopping_car.total_price = price
        db.session.add(shopping_car)
        db.session.commit()
        return render_template("shopping_car.html", orderdetails=orderdetails,shopping_car=shopping_car,customer_detail_default=customer_detail_default)
    else:
        # No cart yet: create an empty unsubmitted order to act as one.
        shopping_car = Order(customer_id=current_user.table_id)
        db.session.add(shopping_car)
        db.session.commit()
        orderdetails = None
        return render_template("shopping_car.html", orderdetails=orderdetails, shopping_car=shopping_car,customer_detail_default=customer_detail_default)
@app.route("/customer/add_product/<int:id>/to_shoppingcar/",methods=["POST","GET"])
@login_required
def add_product_shopping_car(id):
    """Add product *id* to the customer's cart (the unsubmitted order).

    Ensures the cart exists, then adds a single-unit line item unless the
    product is already in the cart. The original duplicated the add/flash
    logic in both branches; it is now written once after ensuring the cart.
    """
    if current_user.table_name != "Customer":
        abort(403)
    # The cart is the customer's order with status 0; create it if missing.
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id,status=0).first()
    if shopping_car is None:
        shopping_car = Order(customer_id=current_user.table_id)
        db.session.add(shopping_car)
        db.session.commit()
    if OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first() is None:
        order_detail = OrderDetail(product_count=1, order_id=shopping_car.id, product_id=id)
        db.session.add(order_detail)
        db.session.commit()
        flash("此产品已经成功添加到购物车!", "success")
    else:
        flash("此产品已经到购物车了,无需重复添加!", "warning")
    return redirect(url_for("home"))
@app.route("/customer/delete_product/<int:id>/from_shoppingcar/",methods=["POST","GET"])
@login_required
def delete_product_from_shopping_car(id):
    """Remove product *id* from the customer's cart.

    NOTE(review): assumes both the cart and the line item exist; a direct
    request without them would raise AttributeError — confirm callers.
    """
    if current_user.table_name != "Customer":
        abort(403)
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status=0).first()
    orderdetail = OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first()
    db.session.delete(orderdetail)
    db.session.commit()
    flash("此商品已经成功从购物车删除!", "success")
    return redirect(url_for("shopping_car"))
@app.route("/customer/add_num_by_1/<int:id>",methods=["POST","GET"])
@login_required
def add_by_1(id):
    """Increase the cart quantity of product *id* by 1, bounded by stock.

    Idiom fix: the original used ``while condition: ...; break`` — a loop
    that runs at most once — which is simply an ``if``.
    """
    if current_user.table_name != "Customer":
        abort(403)
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status="0").first()
    orderdetail = OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first()
    stock = Product.query.filter_by(id=id).first().product_count
    # Allowed only while the new quantity stays within stock and the current
    # quantity is positive.
    if orderdetail.product_count+1 <= stock and orderdetail.product_count > 0:
        orderdetail.product_count = orderdetail.product_count+1
        db.session.add(orderdetail)
        db.session.commit()
        flash("成功增加 1 个!!!", "success")
    return redirect(url_for("shopping_car"))
@app.route("/customer/add_num_by_10/<int:id>",methods=["POST","GET"])
@login_required
def add_by_10(id):
    """Increase the cart quantity of product *id* by 10, bounded by stock.

    Idiom fix: the original ``while condition: ...; break`` runs at most
    once and is simply an ``if``.
    """
    if current_user.table_name != "Customer":
        abort(403)
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status="0").first()
    orderdetail = OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first()
    stock = Product.query.filter_by(id=id).first().product_count
    if orderdetail.product_count+10 <= stock and orderdetail.product_count > 0:
        orderdetail.product_count = orderdetail.product_count+10
        db.session.add(orderdetail)
        db.session.commit()
        flash("成功增加 10 个!!!", "success")
    return redirect(url_for("shopping_car"))
@app.route("/customer/delete_num_by_1/<int:id>",methods=["POST","GET"])
@login_required
def delete_by_1(id):
    """Decrease the cart quantity of product `id` by 1, never below 1."""
    if current_user.table_name != "Customer":
        abort(403)
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status="0").first()
    orderdetail = OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first()
    stock = Product.query.filter_by(id=id).first().product_count
    # BUG FIX (readability): `while cond: ...; break` replaced by `if`.
    if orderdetail.product_count <= stock and orderdetail.product_count - 1 > 0:
        orderdetail.product_count = orderdetail.product_count - 1
        db.session.add(orderdetail)
        db.session.commit()
        flash("成功减少 1 个!!!", "success")
    return redirect(url_for("shopping_car"))
@app.route("/customer/delete_num_by_10/<int:id>",methods=["POST","GET"])
@login_required
def delete_by_10(id):
    """Decrease the cart quantity of product `id` by 10, never to zero or below."""
    if current_user.table_name != "Customer":
        abort(403)
    shopping_car = Order.query.filter_by(customer_id=current_user.table_id, status="0").first()
    orderdetail = OrderDetail.query.filter_by(order_id=shopping_car.id, product_id=id).first()
    stock = Product.query.filter_by(id=id).first().product_count
    # BUG FIX (readability): `while cond: ...; break` replaced by `if`.
    if orderdetail.product_count <= stock and orderdetail.product_count - 10 > 0:
        orderdetail.product_count = orderdetail.product_count - 10
        db.session.add(orderdetail)
        db.session.commit()
        flash("成功减少 10 个!!!", "success")
    return redirect(url_for("shopping_car"))
@app.route("/customer/confirm_order/<int:id>",methods=["POST","GET"])
@login_required
def confirm_order(id):
    """Check out order `id`: verify stock per line item, decrement inventory,
    snapshot the default address onto the order, and mark it paid (status 1).

    If any line item exceeds current stock, the order is corrected (count
    reset to 1) or the line is dropped (stock exhausted) and the customer is
    sent back to the cart to review.
    """
    if current_user.table_name != "Customer" or \
        Order.query.filter_by(id=id).first().customer_id!=current_user.table_id:
        abort(403)
    # A default shipping address is required before checkout.
    address = CustomerDetail.query.filter_by(customer_id=current_user.table_id,is_default=1).first()
    if address is None:
        flash("你还没有设置默认地址,请你去设置默认地址!!!","warning")
        return redirect(url_for("customer_detail_manager",username=current_user.username))
    shopping_car = Order.query.filter_by(id=id).first()
    for detail in shopping_car.orderdetails:
        product = Product.query.filter_by(id=detail.product_id).first()
        if detail.product_count > product.product_count:
            if product.product_count > 0:
                # Stock shrank since the item was added: clamp to 1 and ask
                # the customer to update the order.
                flash("不好意思,刚刚你的货被人给抢走了,没那么多了,请更新你的订单!","warning")
                detail.product_count = 1
                db.session.add(detail)
                db.session.commit()
                return redirect(url_for("shopping_car"))
            else:
                # Stock fully sold out: drop this line item entirely.
                flash("不好意思,刚刚你的货被人给抢走光了,没那么多了,下次早点来吧!", "warning")
                row=detail
                db.session.delete(row)
                db.session.commit()
                return redirect(url_for("shopping_car"))
        else:
            # Enough stock: reserve it by decrementing inventory immediately.
            product.product_count = product.product_count-detail.product_count
            db.session.add(product)
            db.session.commit()
    # status 1 == payment completed
    # Snapshot the current default address as the order's shipping address.
    default = CustomerDetail.query.filter_by(customer_id=current_user.table_id,is_default=1).first()
    order_address = OrderAddress()
    order_address.address = default.address
    order_address.telephone = default.telephone
    order_address.consignee = default.consignee
    order_address.order_id = shopping_car.id
    db.session.add(order_address)
    shopping_car.status = 1
    shopping_car.start_time = datetime.now()
    print(shopping_car.start_time)  # NOTE(review): leftover debug print
    db.session.add(shopping_car)
    db.session.commit()
    return redirect(url_for("shopping_car"))
@app.route("/show_order_details/<int:id>")
@login_required
def show_order_details(id):
    """Render the shipping address and line items for order `id`.

    Customers may only view their own orders; other roles are unrestricted.
    """
    if current_user.table_name == "Customer":
        owner_id = Order.query.filter_by(id=id).first().customer_id
        if owner_id != current_user.table_id:
            abort(403)
    order = Order.query.filter_by(id=id).first()
    order_address = OrderAddress.query.filter_by(order_id=id).first()
    return render_template(
        "show_order_details.html",
        order_address=order_address,
        orderdetails=order.orderdetails,
        order=order,
    )
@app.route("/customer/<string:username>/detail/")
@login_required
def customer_detail_manager(username):
    """Address-book management page; only the owner may view it."""
    owner = Customer.query.filter_by(username=username).first_or_404()
    if owner.username != current_user.username:
        abort(403)
    return render_template("coustomer_detail_manager.html")
@app.route("/customer/order_manager")
@login_required
def customer_order_manager():
    """Landing page for the customer's order management.

    SECURITY FIX: every sibling customer route verifies the role; this one
    originally rendered the customer page for any logged-in role.
    """
    if current_user.table_name != "Customer":
        abort(403)
    return render_template("customer_order_manager.html")
@app.route("/customer/order_manager/order_waitting")
@login_required
def waitting_orders():
    """List the customer's paid-but-not-yet-shipped orders (status 1)."""
    if current_user.table_name != "Customer":
        abort(403)
    # Historical note (translated): status was once a Chinese string and a
    # stray trailing space ("完成付款 " vs "完成付款") silently broke the
    # equality filter; the column was then changed to an integer code.
    # Lesson learned: prefer named constants over literal strings.
    # 1 == paid, awaiting shipment.
    orders = Order.query.filter_by(customer_id=current_user.table_id,status=1).all()
    return render_template("waitting_orders.html",orders=orders)
@app.route("/customer/order_manager/cancel_order/<int:id>")
@login_required
def cancel_orders(id):
    """Cancel paid order `id`: return stock to inventory, then delete the
    order's address record and the order itself (acts as the refund)."""
    order =Order.query.filter_by(id=id).first()
    if current_user.table_name != "Customer" or order.customer_id != current_user.table_id:
        abort(403)
    if order.status !=1:# only paid-but-unshipped (status 1) orders may be cancelled
        flash("订单未被取消!订单已经发货了!","danger")
        return redirect(url_for("waitting_orders"))
    # Return the stock, delete the order address, delete the order.
    else:
        details = order.orderdetails
        for detail in details:
            product = Product.query.filter_by(id=detail.product_id).first()
            product.product_count = product.product_count+detail.product_count
            db.session.add(product)
            db.session.commit()
            db.session.delete(detail)
            db.session.commit()
        order_address = OrderAddress.query.filter_by(order_id=id).first()
        db.session.delete(order_address)
        db.session.commit()
        db.session.delete(order)
        db.session.commit()
        flash("您的订单已经取消,退款成功!","success")
        return redirect(url_for("waitting_orders"))
@app.route("/customer/order_manager/order_traning")
@login_required
def traning_orders():
    """List the customer's shipped orders (status 2 = dispatched)."""
    if current_user.table_name != "Customer":
        abort(403)
    shipped = Order.query.filter_by(
        customer_id=current_user.table_id, status=2).all()
    return render_template("traning_orders.html", orders=shipped)
@app.route("/customer/order_manager/confirm_order_traning/<int:id>")
@login_required
def confirm_traning_orders(id):
    """Customer confirms receipt of order `id` (status 3 = completed)."""
    # first_or_404 avoids a 500 on unknown ids.
    order = Order.query.filter_by(id=id).first_or_404()
    # SECURITY FIX: also verify ownership, mirroring cancel_orders — the
    # original let any customer confirm any order by guessing ids.
    if current_user.table_name != "Customer" or order.customer_id != current_user.table_id:
        abort(403)
    order.status = 3  # 3 = goods received, transaction complete
    order.end_time = datetime.now()
    db.session.add(order)
    db.session.commit()
    flash("你应经成功收货,欢迎下次继续光临!", "success")
    return redirect(url_for("traning_orders"))
@app.route("/customer/order_manager/order_completed")
@login_required
def completed_orders():
    """List the customer's completed orders (status 3 = goods received)."""
    if current_user.table_name != "Customer":
        abort(403)
    finished = Order.query.filter_by(
        customer_id=current_user.table_id, status=3).all()
    return render_template("completed_orders.html", orders=finished)
@app.route("/customer/detail/new/post/", methods=["GET","POST"])
@login_required
def new_customer_detail():
    """Add a new shipping address for the current customer."""
    if current_user.table_name != "Customer":
        abort(403)
    form = CustomerDetailForm()
    if form.validate_on_submit():
        # BUG FIX: removed leftover debug print of current_user.table_id.
        detail = CustomerDetail(customer_id=current_user.table_id,
                                consignee=form.consignee.data,
                                telephone=form.telephone.data,
                                address=form.address.data)
        db.session.add(detail)
        db.session.commit()
        flash("添加收获地址成功!", "success")
        return redirect(url_for("customer_detail_manager", username=current_user.username))
    return render_template("new_customer_detail.html", form=form)
@app.route("/customer/detail/show_all")
@login_required
def show_customer_detail():
    """Show every saved shipping address of the current customer."""
    if current_user.table_name != "Customer":
        abort(403)
    customer = Customer.query.filter_by(id=current_user.table_id).first_or_404()
    return render_template("show_customer_detail.html", details=customer.detail)
@app.route("/customer/detail/update/<int:id>",methods=["GET","POST"])
@login_required
def update_customer_detail(id):
    """Edit shipping address `id`; only its owner may do so."""
    if current_user.table_name !="Customer":
        abort(403)
    detail = CustomerDetail.query.filter_by(id=id).first_or_404()
    # SECURITY FIX: ownership check, mirroring set_customer_detail_default —
    # originally any customer could edit any address by guessing ids.
    if detail.customer_id != current_user.table_id:
        abort(403)
    form = UpdateCustomerDetailForm()
    if form.validate_on_submit():
        detail.consignee = form.consignee.data
        detail.address = form.address.data
        detail.telephone = form.telephone.data
        db.session.commit()
        flash("收获地址更新成功!","success")
        return redirect(url_for("show_customer_detail"))
    elif request.method =="GET":
        # Pre-populate the form with the stored values.
        form.consignee.data = detail.consignee
        form.address.data = detail.address
        form.telephone.data = detail.telephone
    return render_template("update_customer_detail.html",form=form)
@app.route("/customer/detail/delete/<int:id>")
@login_required
def delete_customer_detail(id):
    """Delete shipping address `id`; only its owner may do so."""
    if current_user.table_name !="Customer":
        abort(403)
    detail = CustomerDetail.query.filter_by(id=id).first_or_404()
    # SECURITY FIX: ownership check, mirroring set_customer_detail_default —
    # originally any customer could delete any address by guessing ids.
    if detail.customer_id != current_user.table_id:
        abort(403)
    db.session.delete(detail)
    db.session.commit()
    flash("删除收获地址成功","success")
    return redirect(url_for("show_customer_detail"))
@app.route("/customer/detail/set_default/<int:id>",methods=["GET","POST"])
@login_required
def set_customer_detail_default(id):
    """Make address `id` the customer's default shipping address."""
    target = CustomerDetail.query.filter_by(id=id).first_or_404()
    if current_user.table_name != "Customer" or target.customer_id != current_user.table_id:
        abort(403)
    # Clear the previous default, if any.
    current_default = CustomerDetail.query.filter_by(
        customer_id=current_user.table_id, is_default=1).first()
    if current_default:
        current_default.is_default = 0
        db.session.add(current_default)
        db.session.commit()
    target.is_default = 1
    db.session.add(target)
    db.session.commit()
    flash("已经更新默认地址!","success")
    return redirect(url_for("show_customer_detail"))
@app.route("/security_check",methods=["GET","POST"])
@login_required
def security_check():
    """Re-authenticate the current user (any role) before sensitive edits.

    On a correct password the user is sent to the info-update page.
    """
    # CONSISTENCY FIX: one implementation instead of three copy-pasted
    # branches that differed only in the queried model.
    role_models = {"Customer": Customer, "Supplier": Supplier, "Crew": Crew}
    model = role_models.get(current_user.table_name)
    if model is None:
        return None  # unknown role: original fell through returning None
    form = SecurityCheck()
    if form.validate_on_submit():
        user = model.query.filter_by(id=current_user.table_id).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            flash("身份验证成功", "success")
            return redirect(url_for("update_info"))
        else:
            flash("身份验证失败", "warning")
    return render_template("security_check.html", form=form)
@app.route("/update/info",methods=["GET","POST"])
@login_required
def update_info():
    """Update username/email on both the role row and the shared User row."""
    # CONSISTENCY FIX: one implementation instead of three copy-pasted
    # branches that differed only in the queried role model.
    role_models = {"Customer": Customer, "Crew": Crew, "Supplier": Supplier}
    model = role_models.get(current_user.table_name)
    if model is None:
        return None  # unknown role: original fell through returning None
    form = UpdateInfo()
    role = model.query.filter_by(id=current_user.table_id).first_or_404()
    if form.validate_on_submit():
        role.username = form.username.data
        role.email = form.email.data
        db.session.add(role)
        db.session.commit()
        # Keep the login table in sync with the role table.
        user = User.query.filter_by(id=current_user.id).first_or_404()
        user.username = form.username.data
        user.email = form.email.data
        db.session.add(user)
        db.session.commit()
        flash('你的邮箱和用户名已经更新了', 'success')
        return redirect(url_for("home"))
    if request.method == "GET":
        # Pre-populate the form with current values.
        form.username.data = role.username
        form.email.data = role.email
    return render_template("update_info.html", form=form)
@app.route("/update/password",methods=["GET","POST"])
@login_required
def update_password():
    """Change the password on both the role row and the shared User row.

    The same bcrypt hash is written to both rows so login stays consistent.
    """
    # CONSISTENCY FIX: one implementation instead of three copy-pasted
    # branches that differed only in the queried role model.
    role_models = {"Customer": Customer, "Crew": Crew, "Supplier": Supplier}
    model = role_models.get(current_user.table_name)
    if model is None:
        return None  # unknown role: original fell through returning None
    form = UpdatePasswordForm()
    role = model.query.filter_by(id=current_user.table_id).first_or_404()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(password=form.password.data).decode("utf-8")
        role.password = hashed_password
        role.confirm_password = form.confirm_password.data
        db.session.add(role)
        db.session.commit()
        user = User.query.filter_by(id=current_user.id).first_or_404()
        user.password = hashed_password
        user.confirm_password = form.confirm_password.data
        db.session.add(user)
        db.session.commit()
        flash('你的密码已经更新了', 'success')
        return redirect(url_for("home"))
    return render_template("update_password.html", form=form)
@app.route("/supplier/<string:username>/account")
@login_required
def supplier_account(username):
    """Supplier dashboard; redirects to profile editing until it is filled in."""
    if current_user.table_name != "Supplier":
        abort(403)
    profile = Supplier.query.filter_by(id=current_user.table_id).first_or_404()
    # "尚未填写" is the placeholder meaning "not yet filled in".
    if profile.supplier_name == "尚未填写":
        flash("请尽快完整你的“商家名称”等信息,完整之后将恢复正常,不用担心!","warning")
        return redirect(url_for("update_supplier_info"))
    return render_template("supplier_account.html", username=username)
@app.route("/supplier/update_info",methods=["GET","POST"])
@login_required
def update_supplier_info():
    """Edit the supplier profile (phone, address, shop name, mission)."""
    if current_user.table_name != "Supplier":
        abort(403)
    form = UpdateSupplierInfoForm()
    profile = Supplier.query.filter_by(id=current_user.table_id).first_or_404()
    editable = ("telephone", "address", "supplier_name", "mission")
    if form.validate_on_submit():
        # Copy every editable field from the form onto the profile row.
        for attr in editable:
            setattr(profile, attr, getattr(form, attr).data)
        db.session.add(profile)
        db.session.commit()
        flash("你的信息更新成功","success")
    elif request.method == "GET":
        # Pre-populate the form with the stored values.
        for attr in editable:
            getattr(form, attr).data = getattr(profile, attr)
    return render_template("update_supplier_info.html", form=form)
@app.route("/supplier/crew_manager")
@login_required
def supplier_crew_manager():
    """Landing page for supplier crew management."""
    if current_user.table_name == "Supplier":
        return render_template("supplier_crew_manager.html")
    abort(403)
@app.route("/supplier/product_manager")
@login_required
def supplier_product_manager():
    """Landing page for supplier product management."""
    if current_user.table_name == "Supplier":
        return render_template("supplier_product_manager.html")
    abort(403)
@app.route("/supplier/new_product",methods=["GET","POST"])
@login_required
def supplier_new_product():
    """Create a new product owned by the current supplier."""
    if current_user.table_name != "Supplier":
        abort(403)
    form = ProductForm()
    if not form.validate_on_submit():
        return render_template("supplier_new_product.html", form=form)
    owner = Supplier.query.filter_by(id=current_user.table_id).first_or_404()
    item = Product(
        name=form.name.data,
        sort=form.sort.data,
        price=form.price.data,
        detail=form.detail.data,
        product_count=form.start_count.data,
    )
    owner.products.append(item)
    db.session.add(owner)
    db.session.commit()
    flash("你的商品添加成功", "success")
    return redirect(url_for("show_supplier_product"))
@app.route("/supplier/update_product/<int:id>",methods=["GET","POST"])
@login_required
def supplier_update_product(id):
    """Edit name/sort/price/detail of product `id` (owning supplier only)."""
    if current_user.table_name != "Supplier" or \
        Product.query.filter_by(id=id).first().supplier.first().id != current_user.table_id:
        abort(403)
    form = UpdateProductForm()
    if form.validate_on_submit():
        product = Product.query.filter_by(id=id).first()
        product.name = form.name.data
        product.sort = form.sort.data
        product.price = form.price.data
        product.detail = form.detail.data
        db.session.add(product)
        db.session.commit()
        flash("你的商品更新成功", "success")
    elif request.method == "GET":
        product = Product.query.filter_by(id=id).first_or_404()
        form.name.data = product.name
        # BUG FIX: the original assigned form.detail.data twice; once suffices.
        form.detail.data = product.detail
        form.price.data = product.price
        form.sort.data = product.sort
    return render_template("supplier_update_product.html", id=id, form=form)
@app.route("/supplier/delete_product/<int:id>",methods=["POST","GET"])
@login_required
def supplier_delete_product(id):
    """Delete product `id`; only its owning supplier may do this."""
    owns_product = (
        current_user.table_name == "Supplier" and
        Product.query.filter_by(id=id).first().supplier.first().id == current_user.table_id
    )
    if not owns_product:
        abort(403)
    doomed = Product.query.filter_by(id=id).first()
    db.session.delete(doomed)
    db.session.commit()
    flash("你的商品删除成功!","success")
    return redirect(url_for("show_supplier_product"))
@app.route("/supplier/add_product_count/<int:id>",methods=["POST","GET"])
@login_required
def supplier_add_product_count(id):
    """Restock product `id` by the amount entered in the form (owner only)."""
    owns_product = (
        current_user.table_name == "Supplier" and
        Product.query.filter_by(id=id).first().supplier.first().id == current_user.table_id
    )
    if not owns_product:
        abort(403)
    stock_item = Product.query.filter_by(id=id).first()
    form = AddProductCountForm()
    if form.validate_on_submit():
        stock_item.product_count = stock_item.product_count + form.count.data
        db.session.add(stock_item)
        db.session.commit()
        flash("已经成功增加库存", "success")
        return redirect(url_for("show_supplier_product"))
    return render_template("supplier_add_product_count.html", form=form)
@app.route("/supplier/show_product/")
@login_required
def show_supplier_product():
    """List every product belonging to the current supplier."""
    if current_user.table_name != "Supplier":
        abort(403)
    me = Supplier.query.filter_by(id=current_user.table_id).first_or_404()
    return render_template("show_supplier_product.html", products=me.products.all())
@app.route("/supplier/delete_crew/<int:id>")
@login_required
def supplier_delete_crew(id):
    """Fire crew member `id` from the current supplier's staff."""
    # BUG FIX: first_or_404 instead of first() — the original dereferenced
    # crew.supplier_id below, producing a 500 when the id was unknown.
    crew = Crew.query.filter_by(id=id).first_or_404()
    if current_user.table_name != "Supplier" or crew.supplier_id != current_user.table_id:
        abort(403)
    crew.is_employ = 0
    crew.supplier_id = -1  # sentinel: crew currently has no employer
    db.session.add(crew)
    db.session.commit()
    flash("雇员解雇成功!","success")
    return redirect(url_for("show_supplier_crews"))
@app.route("/supplier/show_crews/")
@login_required
def show_supplier_crews():
    """List every crew member employed by the current supplier."""
    if current_user.table_name != "Supplier":
        abort(403)
    me = Supplier.query.filter_by(id=current_user.table_id).first_or_404()
    return render_template("shou_supplier_crews.html", crews=me.crews)
@app.route("/crew/<string:username>/account/")
@login_required
def crew_account(username):
    """Crew dashboard; forces profile completion before showing the page."""
    if current_user.table_name != "Crew":
        abort(403)
    crew = Crew.query.filter_by(id=current_user.table_id).first_or_404()
    # Placeholder values mean the profile was never filled in.
    if crew.crew_name == "尚未填写" or crew.massage == "这个人很懒,什么也没有写。":
        flash("请你先尽快填好你的“正式名称和求职宣言”,更新之后将恢复正常,不用担心", "warning")
        return redirect(url_for("update_crew_info"))
    # BUG FIX: the original re-queried the same crew row a second time here.
    supplier = Supplier.query.filter_by(id=crew.supplier_id).first()
    return render_template("crew_account.html", username=username,supplier=supplier,crew=crew)
@app.route("/crew/order_manager")
@login_required
def crew_order_manager():
    """Landing page for crew order management."""
    if current_user.table_name == "Crew":
        return render_template("crew_order_manager.html")
    abort(403)
@app.route("/crew/order_manager/show_confirm_order_waitting")
@login_required
def show_confirm_waitting_orders():
    """Crew view: all paid orders (status 1) that await shipment."""
    if current_user.table_name != "Crew":
        abort(403)
    pending = Order.query.filter_by(status=1).all()
    return render_template("show_confirm_waitting_orders.html", orders=pending)
@app.route("/crew/order_manager/confirm_order_waitting/<int:id>")
@login_required
def confirm_waitting_orders(id):
    """Crew marks order `id` as shipped (status 2 = dispatched)."""
    if current_user.table_name != "Crew":
        abort(403)
    shipment = Order.query.filter_by(id=id).first()
    shipment.status = 2  # 2 = dispatched
    db.session.add(shipment)
    db.session.commit()
    flash("订单发货成功!","success")
    return redirect(url_for("show_confirm_waitting_orders"))
@app.route("/crew/update_info",methods=["GET","POST"])
@login_required
def update_crew_info():
    """Edit the crew profile (phone, address, formal name, message)."""
    if current_user.table_name != "Crew":
        abort(403)
    form = UpdateCrewInfoForm()
    profile = Crew.query.filter_by(id=current_user.table_id).first_or_404()
    editable = ("telephone", "address", "crew_name", "massage")
    if form.validate_on_submit():
        # Copy every editable field from the form onto the profile row.
        for attr in editable:
            setattr(profile, attr, getattr(form, attr).data)
        db.session.add(profile)
        db.session.commit()
        flash("你的信息更新成功","success")
    elif request.method == "GET":
        # Pre-populate the form with the stored values.
        for attr in editable:
            getattr(form, attr).data = getattr(profile, attr)
    return render_template("update_crew_info.html", form=form)
@app.route("/supplier/<int:id>/all_products")
@login_required
def customer_check_supplier_products(id):
    """Show every product sold by supplier `id`."""
    # BUG FIX: first_or_404 instead of first() — an unknown id now yields a
    # 404 instead of a 500 from dereferencing None.
    supplier = Supplier.query.filter_by(id=id).first_or_404()
    products = supplier.products.all()
    return render_template("customer_check_supplier_products.html",
                           products=products, supplier=supplier)
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 page.

    SECURITY FIX: the original %-interpolated request.url directly into the
    template source before rendering it with render_template_string, letting
    an attacker inject Jinja2 expressions through the URL (SSTI/XSS).  The
    URL is now passed as a template context variable, which Jinja2
    autoescapes.
    """
    template = '''
{% block body %}
    <div class="center-content error">
        <h1>哇哦,This page doesn't exist.</h1>
        <h3>{{ url }}</h3>
        <h3>这里什么都没有呢٩(๑❛ᴗ❛๑)۶</h3>
    </div>
{% endblock %}
'''
    return render_template_string(template, url=request.url), 404
|
import pythonneat.neat.Genome as Genome
import pythonneat.neat.utils.Parameters as Parameters
# Global innovation registry: innovation number -> connection gene.
genes = {}


def get_innovation(connection_gene):
    """Return the innovation number for `connection_gene`.

    If a connection with the same (in_id, out_id) pair was registered
    before, its existing innovation number is reused; otherwise the gene is
    stored under the next sequential number, which is returned.
    """
    for innov, gene in genes.items():
        if gene.in_id == connection_gene.in_id and gene.out_id == connection_gene.out_id:
            return innov
    innov = len(genes) + 1
    genes[innov] = connection_gene
    return innov
def compatibility_distance(i, j):
    """Return the NEAT compatibility distance between organisms i and j.

    Inputs:
    i - First organism. type: Genome
    j - Second organism. type: Genome

    Implements delta = c1*E/N + c2*D/N + c3*W from Stanley &
    Miikkulainen (2002), where E/D are excess/disjoint gene counts and W is
    the mean weight difference of matching genes.
    """
    # Normalize by the larger genome size, except for small genomes.
    N = max(len(i.connection_genes), len(j.connection_genes))
    if N < Parameters.DELTA_NORMALIZATION_THRESHOLD:
        N = 1
    # BUG FIX (readability): renamed from `genes`, which shadowed the
    # module-level innovation registry of the same name.
    matched = Genome.match_genes(i, j)
    E = len(matched[0])  # excess genes
    D = len(matched[1])  # disjoint genes
    W = 0
    # Average absolute weight difference over matching genes (guarded
    # against division by zero when there are no matches).
    for g in matched[2]:
        W += abs(i.connection_genes[g].weight - j.connection_genes[g].weight)
    W /= max(len(matched[2]), 1)
    delta = ((Parameters.EXCESS_IMPORTANCE*E)/N) + ((Parameters.DISJOINT_IMPORTANCE*D)/N) + (Parameters.WEIGHT_DIFFERENCE_IMPORTANCE*W)
    return delta
|
#!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelmenu.common import dialog
from fuelmenu.common.modulehelper import ModuleHelper
import fuelmenu.common.urwidwrapper as widget
from fuelmenu.settings import Settings
import logging
import re
import subprocess
import urwid
import urwid.raw_display
import urwid.web_display
log = logging.getLogger('fuelmenu.mirrors')
blank = urwid.Divider()
class ntpsetup(urwid.WidgetWrap):
    """Fuelmenu module that configures NTP time synchronization.

    Presents an enable/disable radio plus three NTP server fields, validates
    them (including a live ntpdate probe), and persists the result through
    the fuelmenu Settings machinery.
    """
    def __init__(self, parent):
        self.name = "Time Sync"
        self.priority = 60
        self.visible = True
        self.deployment = "pre"
        self.parent = parent
        #UI details
        self.header_content = ["NTP Setup", "Note: If you continue without "
                               "NTP, you may have issues with deployment "
                               "due to time synchronization issues. These "
                               "problems are exacerbated in virtualized "
                               "environments."]
        self.fields = ["ntpenabled", "NTP1", "NTP2", "NTP3"]
        self.defaults = \
            {
                "ntpenabled": {"label": "Enable NTP:",
                               "tooltip": "",
                               "value": "radio"},
                "NTP1": {"label": "NTP Server 1:",
                         "tooltip": "NTP Server for time synchronization",
                         "value": "time.nist.gov"},
                "NTP2": {"label": "NTP Server 2:",
                         "tooltip": "NTP Server for time synchronization",
                         "value": "time-a.nist.gov"},
                "NTP3": {"label": "NTP Server 3:",
                         "tooltip": "NTP Server for time synchronization",
                         "value": "time-b.nist.gov"},
            }
        #Load info
        self.gateway = self.get_default_gateway_linux()
        self.oldsettings = self.load()
        self.screen = None

    def check(self, args):
        """Validate that all fields have valid values and sanity checks.

        Returns the responses dict on success, or False on validation error.
        """
        self.parent.footer.set_text("Checking data...")
        self.parent.refreshScreen()
        # Gather field values from the UI widgets; self.edits is populated
        # by ModuleHelper.screenUI in the same order as self.fields.
        responses = dict()
        for index, fieldname in enumerate(self.fields):
            if fieldname == "blank":
                pass
            elif fieldname == "ntpenabled":
                rb_group = self.edits[index].rb_group
                if rb_group[0].state:
                    responses["ntpenabled"] = "Yes"
                else:
                    responses["ntpenabled"] = "No"
            else:
                responses[fieldname] = self.edits[index].get_edit_text()
        #Validate each field
        errors = []
        warnings = []
        if responses['ntpenabled'] == "No":
            #Disabled NTP means passing no NTP servers to save method
            responses = {
                'NTP1': "",
                'NTP2': "",
                'NTP3': ""}
            self.parent.footer.set_text("No errors found.")
            log.info("No errors found")
            return responses
        # NOTE(review): at this point 'ntpenabled' is always "Yes" (length
        # 3 > 0), so this all-empty check can never be true; kept as-is.
        if all(map(lambda f: (len(responses[f]) == 0), self.fields)):
            pass
            #We will allow empty if user doesn't need external networking
            #and present a strongly worded warning
            #msg = "If you continue without NTP, you may have issues with "\
            #      + "deployment due to time synchronization issues. These "\
            #      + "problems are exacerbated in virtualized deployments."
            #dialog.display_dialog(
            #    self, widget.TextLabel(msg), "Empty NTP Warning")
        del responses['ntpenabled']
        # items() instead of the Python 2-only iteritems(); works on both.
        for ntpfield, ntpvalue in responses.items():
            #NTP must be under 255 chars
            if len(ntpvalue) >= 255:
                errors.append("%s must be under 255 chars." %
                              self.defaults[ntpfield]['label'])
            #NTP needs to have valid chars
            if re.search('[^a-zA-Z0-9-.]', ntpvalue):
                errors.append("%s contains illegal characters." %
                              self.defaults[ntpfield]['label'])
            #ensure external NTP is valid
            if len(ntpvalue) > 0:
                try:
                    #Try to test NTP via ntpdate
                    if not self.checkNTP(ntpvalue):
                        warnings.append("%s unable to perform NTP."
                                        % self.defaults[ntpfield]['label'])
                except Exception as e:
                    # BUG FIX: the format string has two placeholders but the
                    # original supplied only one argument, which raised
                    # TypeError here; the exception is now the second value.
                    warnings.append("%s unable to sync time with server.: %s"
                                    % (self.defaults[ntpfield]['label'], e))
        if len(errors) > 0:
            self.parent.footer.set_text(
                "Errors: %s First error: %s" % (len(errors), errors[0]))
            log.error("Errors: %s %s" % (len(errors), errors))
            return False
        else:
            if len(warnings) > 0:
                msg = ["NTP configuration has the following warnings:"]
                msg.extend(warnings)
                msg.append("You may see errors during provisioning and "
                           "in system logs. NTP errors are not fatal.")
                warning_msg = '\n'.join(str(line) for line in msg)
                dialog.display_dialog(self, widget.TextLabel(warning_msg),
                                      "NTP Warnings")
                log.warning(warning_msg)
            self.parent.footer.set_text("No errors found.")
            log.info("No errors found")
            return responses

    def apply(self, args):
        """Validate, persist, and immediately apply the NTP configuration."""
        responses = self.check(args)
        if responses is False:
            log.error("Check failed. Not applying")
            log.error("%s" % (responses))
            return False
        self.save(responses)
        #Apply NTP now
        if len(responses['NTP1']) > 0:
            # Stop ntpd, run ntpdate against the first server, restart ntpd.
            # The with-block closes the /dev/null handle the original leaked.
            with open('/dev/null', 'w') as noout:
                subprocess.call(["service", "ntpd", "stop"],
                                stdout=noout, stderr=noout)
                subprocess.call(["ntpdate", "-t5", responses['NTP1']],
                                stdout=noout, stderr=noout)
                subprocess.call(["service", "ntpd", "start"],
                                stdout=noout, stderr=noout)
        return True

    def cancel(self, button):
        """Revert the UI to the previously saved values."""
        ModuleHelper.cancel(self, button)

    def get_default_gateway_linux(self):
        """Return the default gateway IP, or None when there is none."""
        return ModuleHelper.get_default_gateway_linux()

    def load(self):
        """Read defaults overlaid with the user settings file, push the
        values into the UI defaults, and return the merged settings dict."""
        oldsettings = Settings().read(self.parent.defaultsettingsfile)
        oldsettings.update(Settings().read(self.parent.settingsfile))
        # BUG FIX: the original re-read the user settings file into
        # oldsettings here, discarding the defaults merge performed above.
        for setting in self.defaults.keys():
            try:
                if setting == "ntpenabled":
                    rb_group = \
                        self.edits[self.fields.index("ntpenabled")].rb_group
                    if oldsettings[setting]["value"] == "Yes":
                        rb_group[0].set_state(True)
                        rb_group[1].set_state(False)
                    else:
                        rb_group[0].set_state(False)
                        rb_group[1].set_state(True)
                elif "/" in setting:
                    part1, part2 = setting.split("/")
                    self.defaults[setting]["value"] = oldsettings[part1][part2]
                else:
                    self.defaults[setting]["value"] = oldsettings[setting]
            except Exception:
                log.warning("No setting named %s found." % setting)
                continue
        return oldsettings

    def save(self, responses):
        """Write `responses` to the settings file and refresh local state."""
        ## Generic settings start ##
        newsettings = dict()
        for setting in responses.keys():
            if "/" in setting:
                part1, part2 = setting.split("/")
                if part1 not in newsettings:
                    #We may not touch all settings, so copy oldsettings first
                    newsettings[part1] = self.oldsettings[part1]
                newsettings[part1][part2] = responses[setting]
            else:
                newsettings[setting] = responses[setting]
        ## Generic settings end ##
        Settings().write(newsettings,
                         defaultsfile=self.parent.defaultsettingsfile,
                         outfn=self.parent.settingsfile)
        #Set oldsettings to reflect new settings
        self.oldsettings = newsettings
        #Update defaults
        for index, fieldname in enumerate(self.fields):
            if fieldname != "blank" and fieldname in newsettings:
                self.defaults[fieldname]['value'] = newsettings[fieldname]

    def checkNTP(self, server):
        """Return True if `server` answers an ntpdate query."""
        #Note: Python's internal resolver caches negative answers.
        #Therefore, we should call dig externally to be sure.
        with open('/dev/null', 'w') as noout:
            ntp_works = subprocess.call(["ntpdate", "-q", "-t2", server],
                                        stdout=noout, stderr=noout)
        return (ntp_works == 0)

    def refresh(self):
        """Re-read the gateway; with no gateway, force the NTP radio to No."""
        self.gateway = self.get_default_gateway_linux()
        log.info("refresh. gateway is %s" % self.gateway)
        #If gateway is empty, disable NTP
        if self.gateway is None:
            for index, fieldname in enumerate(self.fields):
                if fieldname == "ntpenabled":
                    log.info("clearing ntp enabled")
                    log.info("fieldname: %s" % fieldname)
                    rb_group = self.edits[index].rb_group
                    rb_group[0].set_state(False)
                    rb_group[1].set_state(True)

    def radioSelect(self, current, state, user_data=None):
        """Update network details and display information."""
        # urwid hands us the *previous* radio object, whose state is stale;
        # scan the group for the other button that is actually True.
        for rb in current.group:
            if rb.get_label() == current.get_label():
                continue
            if rb.base_widget.state is True:
                self.extdhcp = (rb.base_widget.get_label() == "Yes")
                break

    def screenUI(self):
        """Build the urwid screen from the header, fields and defaults."""
        return ModuleHelper.screenUI(self, self.header_content, self.fields,
                                     self.defaults)
|
from utils import tune

# Hyper-parameter sweep for the GluonTS Prophet wrapper.
# For the full parameter list see
# https://github.com/facebook/prophet/blob/master/python/prophet/forecaster.py
# docker image gluonts:prophet
algo = {
    "name": "ProphetPredictor",
    "hyperparameters": {
        "forecaster_name": "gluonts.model.prophet.ProphetPredictor",
    },
    "changing_hyperparameters": {
        "prophet_params": [
            {"growth": "linear"},
            # {"growth": "logistic"},
            # {"growth": "linear", "seasonality_mode": "multiplicative"},
            # {"growth": "logistic", "seasonality_mode": "multiplicative"},
        ],
    },
}

tune(algo, datasets=["electricity"])
|
# coding: utf-8
"""
FeatureState.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class FeatureState(object):
    """State of a single premium feature on the storage array.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self):
        """
        FeatureState - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # All attributes are required parameters in the API definition.
        self.swagger_types = {
            'capability': 'str',
            'is_enabled': 'bool',
            'is_compliant': 'bool',
            'is_within_limits': 'bool',
            'feature_id': 'str',
            'feature_ref': 'str',
            'limit': 'int',
            'duration': 'int',
            'enabled_time': 'int',
            'supported_feature_bundle_id': 'int',
            'permanent_license_applied': 'bool'
        }

        # Maps the snake_case attribute name to the camelCase JSON key.
        self.attribute_map = {
            'capability': 'capability',
            'is_enabled': 'isEnabled',
            'is_compliant': 'isCompliant',
            'is_within_limits': 'isWithinLimits',
            'feature_id': 'featureId',
            'feature_ref': 'featureRef',
            'limit': 'limit',
            'duration': 'duration',
            'enabled_time': 'enabledTime',
            'supported_feature_bundle_id': 'supportedFeatureBundleId',
            'permanent_license_applied': 'permanentLicenseApplied'
        }

        self._capability = None
        self._is_enabled = None
        self._is_compliant = None
        self._is_within_limits = None
        self._feature_id = None
        self._feature_ref = None
        self._limit = None
        self._duration = None
        self._enabled_time = None
        self._supported_feature_bundle_id = None
        self._permanent_license_applied = None

    @property
    def capability(self):
        """
        Gets the capability of this FeatureState.
        This field will contain the value of the premium feature being described.

        :return: The capability of this FeatureState.
        :rtype: str
        :required/optional: required
        """
        return self._capability

    @capability.setter
    def capability(self, capability):
        """
        Sets the capability of this FeatureState.
        This field will contain the value of the premium feature being described.

        :param capability: The capability of this FeatureState.
        :type: str
        :raises ValueError: if *capability* is not an allowed value.
        """
        allowed_values = ["none", "sharedVolume", "storagePoolsTo4", "mixedRaidlevel", "autoCodeSync", "autoLunTransfer", "subLunsAllowed", "storagePoolsTo8", "storagePoolsTo2", "storagePoolsToMax", "storagePoolsTo64", "storagePoolsTo16", "snapshots", "remoteMirroring", "volumeCopy", "stagedDownload", "mixedDriveTypes", "goldKey", "driveTrayExpansion", "bundleMigration", "storagePoolsTo128", "storagePoolsTo256", "raid6", "performanceTier", "storagePoolsTo32", "storagePoolsTo96", "storagePoolsTo192", "storagePoolsTo512", "remoteMirrorsTo16", "remoteMirrorsTo32", "remoteMirrorsTo64", "remoteMirrorsTo128", "snapshotsPerVolTo4", "snapshotsPerVolTo8", "snapshotsPerVolTo16", "snapshotsPerVolTo2", "secureVolume", "protectionInformation", "ssdSupport", "driveSlotLimitTo112", "driveSlotLimitTo120", "driveSlotLimitTo256", "driveSlotLimitTo448", "driveSlotLimitTo480", "driveSlotLimitToMax", "driveSlotLimit", "driveSlotLimitTo12", "driveSlotLimitTo16", "driveSlotLimitTo24", "driveSlotLimitTo32", "driveSlotLimitTo48", "driveSlotLimitTo60", "driveSlotLimitTo64", "driveSlotLimitTo72", "driveSlotLimitTo96", "driveSlotLimitTo128", "driveSlotLimitTo136", "driveSlotLimitTo144", "driveSlotLimitTo180", "driveSlotLimitTo192", "driveSlotLimitTo272", "fdeProxyKeyManagement", "remoteMirrorsTo8", "driveSlotLimitTo384", "driveSlotLimitTo300", "driveSlotLimitTo360", "flashReadCache", "storagePoolsType2", "remoteMirroringType2", "totalNumberOfArvmMirrorsPerArray", "totalNumberOfPitsPerArray", "totalNumberOfThinVolumesPerArray", "driveSlotLimitTo240", "snapshotsType2", "targetPortLunMapping", "__UNDEFINED"]
        if capability not in allowed_values:
            raise ValueError(
                "Invalid value for `capability`, must be one of {0}"
                .format(allowed_values)
            )
        self._capability = capability

    @property
    def is_enabled(self):
        """
        Gets the is_enabled of this FeatureState.
        A true value in this field indicates that the feature is enabled (regardless of compliance)

        :return: The is_enabled of this FeatureState.
        :rtype: bool
        :required/optional: required
        """
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this FeatureState.

        :param is_enabled: The is_enabled of this FeatureState.
        :type: bool
        """
        self._is_enabled = is_enabled

    @property
    def is_compliant(self):
        """
        Gets the is_compliant of this FeatureState.
        A true value indicates that the feature has been purchased (in compliance). A false
        value indicates it has not been purchased; the user will receive warning messages
        until the feature is purchased or disabled.

        :return: The is_compliant of this FeatureState.
        :rtype: bool
        :required/optional: required
        """
        return self._is_compliant

    @is_compliant.setter
    def is_compliant(self, is_compliant):
        """
        Sets the is_compliant of this FeatureState.

        :param is_compliant: The is_compliant of this FeatureState.
        :type: bool
        """
        self._is_compliant = is_compliant

    @property
    def is_within_limits(self):
        """
        Gets the is_within_limits of this FeatureState.
        This field is deprecated. Use isCompliant field instead.

        :return: The is_within_limits of this FeatureState.
        :rtype: bool
        :required/optional: required
        """
        return self._is_within_limits

    @is_within_limits.setter
    def is_within_limits(self, is_within_limits):
        """
        Sets the is_within_limits of this FeatureState.
        This field is deprecated. Use isCompliant field instead.

        :param is_within_limits: The is_within_limits of this FeatureState.
        :type: bool
        """
        self._is_within_limits = is_within_limits

    @property
    def feature_id(self):
        """
        Gets the feature_id of this FeatureState.
        This field contains the value of the feature ID associated with the feature.

        :return: The feature_id of this FeatureState.
        :rtype: str
        :required/optional: required
        """
        return self._feature_id

    @feature_id.setter
    def feature_id(self, feature_id):
        """
        Sets the feature_id of this FeatureState.
        This field contains the value of the feature ID associated with the feature.

        :param feature_id: The feature_id of this FeatureState.
        :type: str
        :raises ValueError: if *feature_id* is not an allowed value.
        """
        allowed_values = ["volumesPerPartition", "totalNumberOfVolumes", "storagePartitions", "snapshot", "volumeCopy", "remoteMirroring", "driveTrayExpansion", "mixedDriveTypes", "mgmtApplication", "supportedDrives", "supportedDriveTrays", "performanceTier", "totalNumberOfSnapshots", "totalNumberOfVolCopies", "goldKey", "snapshotsPerVolume", "totalNumberOfMirrors", "raid6", "stateCapture", "sataStrLen", "secureVolume", "protectionInformation", "solidStateDisk", "driveSlotLimit", "fdeProxyKeyManagement", "supportedInterposer", "vendorSupportedDrives", "flashReadCache", "totalNumberOfAsyncMirrorGroups", "totalNumberOfAsyncMirrorsPerGroup", "totalNumberOfArvmMirrorsPerArray", "totalNumberOfPitsPerArray", "pitGroupsPerVolume", "totalNumberOfPitGroups", "pitsPerPitGroup", "memberVolsPerPitConsistencyGroup", "totalNumberOfPitConsistencyGroups", "totalNumberOfPitViews", "totalNumberOfThinVolumesPerArray", "nativeSataDriveSupport", "solidStateDiskLimit", "totalNumberOfRemoteMirrorsPerArray", "asup", "ectSelector", "embeddedSnmpOid", "asupOnDemand", "dacstoreCompatId", "samoaHicProtocol", "targetPortLunMapping", "hildaBaseboardProtocol", "denali2Protocol", "__UNDEFINED"]
        if feature_id not in allowed_values:
            raise ValueError(
                "Invalid value for `feature_id`, must be one of {0}"
                .format(allowed_values)
            )
        self._feature_id = feature_id

    @property
    def feature_ref(self):
        """
        Gets the feature_ref of this FeatureState.
        This field contains a reference to the feature.

        :return: The feature_ref of this FeatureState.
        :rtype: str
        :required/optional: required
        """
        return self._feature_ref

    @feature_ref.setter
    def feature_ref(self, feature_ref):
        """
        Sets the feature_ref of this FeatureState.

        :param feature_ref: The feature_ref of this FeatureState.
        :type: str
        """
        self._feature_ref = feature_ref

    @property
    def limit(self):
        """
        Gets the limit of this FeatureState.
        The level or tier for this feature. Zero represents a feature that does not
        support tiering; non-zero implies a tiering level.

        :return: The limit of this FeatureState.
        :rtype: int
        :required/optional: required
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """
        Sets the limit of this FeatureState.

        :param limit: The limit of this FeatureState.
        :type: int
        """
        self._limit = limit

    @property
    def duration(self):
        """
        Gets the duration of this FeatureState.
        Timeframe this feature will be available, in days. Zero means infinite duration.

        :return: The duration of this FeatureState.
        :rtype: int
        :required/optional: required
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """
        Sets the duration of this FeatureState.

        :param duration: The duration of this FeatureState.
        :type: int
        """
        self._duration = duration

    @property
    def enabled_time(self):
        """
        Gets the enabled_time of this FeatureState.
        Describes the time when this feature was enabled.

        :return: The enabled_time of this FeatureState.
        :rtype: int
        :required/optional: required
        """
        return self._enabled_time

    @enabled_time.setter
    def enabled_time(self, enabled_time):
        """
        Sets the enabled_time of this FeatureState.

        :param enabled_time: The enabled_time of this FeatureState.
        :type: int
        """
        self._enabled_time = enabled_time

    @property
    def supported_feature_bundle_id(self):
        """
        Gets the supported_feature_bundle_id of this FeatureState.
        The supported Feature Bundle ID. When not in use this value will be zero.

        :return: The supported_feature_bundle_id of this FeatureState.
        :rtype: int
        :required/optional: required
        """
        return self._supported_feature_bundle_id

    @supported_feature_bundle_id.setter
    def supported_feature_bundle_id(self, supported_feature_bundle_id):
        """
        Sets the supported_feature_bundle_id of this FeatureState.

        :param supported_feature_bundle_id: The supported_feature_bundle_id of this FeatureState.
        :type: int
        """
        self._supported_feature_bundle_id = supported_feature_bundle_id

    @property
    def permanent_license_applied(self):
        """
        Gets the permanent_license_applied of this FeatureState.
        TRUE if a permanent license for this feature has been applied. FALSE if the
        feature is being evaluated or is not enabled at all.

        :return: The permanent_license_applied of this FeatureState.
        :rtype: bool
        :required/optional: required
        """
        return self._permanent_license_applied

    @permanent_license_applied.setter
    def permanent_license_applied(self, permanent_license_applied):
        """
        Sets the permanent_license_applied of this FeatureState.

        :param permanent_license_applied: The permanent_license_applied of this FeatureState.
        :type: bool
        """
        self._permanent_license_applied = permanent_license_applied

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3, so six.iteritems
        # is not needed here.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        # The generated code guarded against `self is None`, which can
        # never be true for a bound method; the dead check was removed.
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # The generated code returned None when `other` was None and
        # raised AttributeError for other types; NotImplemented lets
        # Python fall back to its default comparison instead.
        if not isinstance(other, FeatureState):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
from register import *
import tkMessageBox
import sqlite3
from login_page import *
from request_pizza import *
from menu import *
def welcome():
    """Show the main welcome window with Order and Pizza-Menu buttons."""
    window = Tk()
    window.geometry("1600x800+0+0")
    window.title("Pizza Delivery system")
    window.configure(bg="white")

    # Header strip across the top, content frame below it.
    header = Frame(window, width=1600, height=50, bg="white")
    header.pack(side=TOP)
    content = Frame(window, width=800, height=700, bg="white")
    content.pack(side=TOP)

    title = Label(header, font=('arial', 50, 'bold'), text="PIZZA DELIVERY SYSTEM", fg="blue", bg="yellow", bd=10, anchor='w')
    title.grid(row=0, column=0)

    # Blank labels act as vertical/horizontal spacers in the grid.
    top_spacer = Label(content, text="\n\n\n\n\n\n\n\n", bg="white")
    top_spacer.grid(row=0, column=0)

    greeting = Label(content, text="WELCOME USER!!!", font=('arial', 15, 'bold'), fg="blue", bg="white")
    greeting.grid(row=1, column=1)

    mid_spacer = Label(content, text="\t\t\t", bg="white")
    mid_spacer.grid(row=2, column=1)

    order_button = Button(content, text="Order", width=20, height=4, bd=5, font=('arial', 10, 'bold'), fg="white", bg="blue", relief="raised", command=order)
    order_button.grid(row=3, column=1)

    right_spacer_1 = Label(content, text="\t\t\t", bg="white")
    right_spacer_1.grid(row=3, column=2)

    menu_button = Button(content, text="PIZZA MENU", width=20, height=4, bd=5, font=('arial', 10, 'bold'), fg="white", bg="blue", relief="raised", command=pizza_menu)
    menu_button.grid(row=5, column=1)

    right_spacer_2 = Label(content, text="\t\t\t", bg="white")
    right_spacer_2.grid(row=4, column=2)

    window.mainloop()
# Entry point: the login screen comes first; welcome() is reached from there.
if __name__=="__main__":
    login()
|
# Generated by Django 3.0.8 on 2020-09-30 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional image attachment field to the chat Message model."""

    # Must run after the previous chat migration.
    dependencies = [
        ('chat', '0016_messagephoto_name_of_file'),
    ]

    operations = [
        migrations.AddField(
            model_name='message',
            name='image_message',
            # blank/null so existing rows and text-only messages stay valid.
            field=models.ImageField(blank=True, null=True, upload_to='chat/pictures_of_messages', verbose_name='Picture of message'),
        ),
    ]
|
# 3. Speed information
# Read a speed (a real number) from the user and print its category:
#   <= 10 -> "slow"; (10, 50] -> "average"; (50, 150] -> "fast";
#   (150, 1000] -> "ultra fast"; > 1000 -> "extremely fast".


def classify_speed(speed):
    """Return the textual category for *speed*.

    Mirrors the original elif chain exactly: returns None only for a
    value matching no branch (e.g. NaN).
    """
    # Each elif already implies the previous bound was exceeded, so the
    # explicit lower-bound checks (e.g. `10 < speed`) are redundant.
    if speed <= 10:
        return 'slow'
    elif speed <= 50:
        return 'average'
    elif speed <= 150:
        return 'fast'
    elif speed <= 1000:
        return 'ultra fast'
    elif speed > 1000:
        return 'extremely fast'


if __name__ == '__main__':
    # The original also used pointless f-prefixes on constant strings
    # (print(f'slow')) and ran unconditionally at import time.
    label = classify_speed(float(input()))
    if label is not None:
        print(label)
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
公众号: 开源优测
Email: lymking@foxmail.com
"""
import os
import platform
import codecs
from flask import render_template, send_file, current_app
from flask_login import login_required, current_user, logout_user
from . import main
from ..utils.runner import run_process, debug_run
from ..utils.report import Report
@main.route('/', methods=['GET'])
def index():
    """Render the public landing page."""
    landing_page = 'index.html'
    return render_template(landing_page)
# @main.route must be outermost: decorators apply bottom-up, so with
# @login_required on top the blueprint registered the UNPROTECTED
# function and the login check never ran.
@main.route('/dashboard', methods=['GET'])
@login_required
def dashboard():
    """Render the dashboard for the signed-in user."""
    return render_template('dashboard.html', user=current_user)
# Route decorator outermost so login_required actually guards the view.
@main.route('/logout', methods=['GET'])
@login_required
def logout():
    """Log the current user out and show the landing page."""
    logout_user()
    return render_template('index.html')
# Route decorator outermost so login_required actually guards the view.
@main.route('/user', methods=['GET'])
@login_required
def user():
    """Render the profile page for the signed-in user."""
    return render_template('user.html', user=current_user)
# Route decorator outermost so login_required actually guards the view.
@main.route('/help', methods=['GET'])
@login_required
def help():
    """Render the help page."""
    return render_template('help.html')
# Route decorator outermost so login_required actually guards the view.
@main.route('/product', methods=['GET'])
@login_required
def product():
    """Render the product management page."""
    return render_template('product.html', user=current_user)
# Route decorator outermost so login_required actually guards the view.
@main.route('/project', methods=['GET'])
@login_required
def project():
    """Render the project management page."""
    return render_template('project.html', user=current_user)
# Route decorator outermost so login_required actually guards the view.
@main.route('/task/<id>', methods=['GET'])
@login_required
def task(id):
    """Render the detail page for task *id*."""
    return render_template('task.html', id=id)
# Route decorator outermost so login_required actually guards the view.
@main.route('/task_list', methods=['GET'])
@login_required
def task_list():
    """Render the task list page."""
    return render_template('task_list.html')
# Route decorator outermost so login_required actually guards the view.
@main.route('/manage/<category>/<id>', methods=['GET'])
@login_required
def manage(category, id):
    """Render the management page named after *category* for object *id*."""
    return render_template('%s.html' % category, id=id)
# Route decorator outermost so login_required actually guards the view.
@main.route('/test_run/<category>/<id>', methods=['GET'])
@login_required
def test_run(category, id):
    """Start a test run for the given category/id and return its status."""
    status = run_process(category, id)
    return status
@main.route('/debug/<id>', methods=['GET'])
def debug(id):
    """Start a debug run for *id* and render whatever its log contains so far."""
    project_id, build_no = debug_run(id)
    log_path = os.getcwd() + "/logs/%s/%s/debug.log" % (project_id, build_no)
    logs = "还没捕获到调试信息^_^"
    if os.path.exists(log_path):
        # `with` guarantees the handle is closed even if read() raises;
        # the original leaked the handle on a read error.
        with codecs.open(log_path, "r", "utf-8") as f:
            logs = f.read()
    return render_template('debug.html', logs=logs)
# Route decorator outermost so login_required actually guards the view.
@main.route('/report/<project_id>/<build_no>', methods=['GET'])
@login_required
def report(project_id, build_no):
    """Build and return the HTML report for the given project build."""
    r = Report(project_id, build_no)
    return r.build_report()
# Route decorator outermost so login_required actually guards the view.
@main.route('/run_logs/<project_id>/<build_no>', methods=['GET'])
@login_required
def run_logs(project_id, build_no):
    """Render the captured run log for the given project build."""
    log_path = os.getcwd() + "/logs/%s/%s/logs.log" % (project_id, build_no)
    log_path = log_path.replace("\\", "/")
    logs = "还没捕获到日志信息^_^"
    if os.path.exists(log_path):
        # Windows runners write the log in the local code page (cp936).
        encoding = "cp936" if "Windows" in platform.platform() else "utf-8"
        # `with` guarantees the handle is closed even if read() raises.
        with codecs.open(log_path, "r", encoding) as f:
            logs = f.read()
    return render_template('logs.html', logs=logs)
# Route decorator outermost so login_required actually guards the view.
@main.route('/detail/<project_id>/<build_no>', methods=['POST'])
@login_required
def detail(project_id, build_no):
    """Return the parsed report details for a build as a JSON string."""
    import json
    r = Report(project_id, build_no)
    return json.dumps(r.parser_detail_info())
# Route decorator outermost so login_required actually guards the view.
@main.route('/view_image/<project_id>/<build_no>/<filename>', methods=['GET'])
@login_required
def view_image(project_id, build_no, filename):
    """Serve a screenshot captured during the given build."""
    img_path = os.getcwd() + "/logs/%s/%s/images/%s" % (project_id, build_no, filename)
    # str.replace returns a new string; the original discarded the result,
    # so backslash paths were never actually normalized.
    img_path = img_path.replace("\\", "/")
    if os.path.exists(img_path):
        return send_file(img_path)
    return "截图失败,木有图片!!!"
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
from utils.utils import permute_and_flatten, decode
from utils.box_list import BoxList, cat_boxlist, boxlist_ml_nms, boxlist_iou
import pdb
def select_over_all_levels(cfg, box_list):
    """Run multi-class NMS per image, cap detections, optionally score-vote.

    :param cfg: config with nms_iou_thre, max_detections, test_score_voting,
        num_classes.
    :param box_list: per-image BoxLists of candidate detections
        (pre-NMS, all FPN levels already concatenated).
    :return: list of per-image BoxLists after NMS / capping / voting.
    """
    result_batch = []
    for i in range(len(box_list)):
        result = boxlist_ml_nms(box_list[i], cfg.nms_iou_thre)  # multi-class nms
        num_detections = result.box.shape[0]
        # Limit to max_per_image detections **over all classes**
        if num_detections > cfg.max_detections > 0:
            score = result.score
            # kth smallest score such that exactly max_detections survive.
            image_thre, _ = torch.kthvalue(score.cpu(), num_detections - cfg.max_detections + 1)
            keep = score >= image_thre.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        if cfg.test_score_voting:
            # Score voting: refine each surviving box as the score- and
            # IoU-weighted average of the pre-NMS boxes of the same class.
            boxes_al = box_list[i].box
            boxlist = box_list[i]
            labels = box_list[i].label
            scores = box_list[i].score
            sigma = 0.025  # controls how fast the IoU weight decays
            result_labels = result.label
            for j in range(1, cfg.num_classes):
                # Pre-NMS candidates of class j.
                inds = (labels == j).nonzero().reshape(-1)
                scores_j = scores[inds]
                boxes_j = boxes_al[inds, :].reshape(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.img_size, mode="x1y1x2y2")
                # Post-NMS survivors of class j.
                result_inds = (result_labels == j).nonzero().reshape(-1)
                boxlist_nmsed = result[result_inds]
                ious = boxlist_iou(boxlist_nmsed, boxlist_for_class)
                voted_boxes = []
                for bi in range(boxlist_nmsed.box.shape[0]):
                    cur_ious = ious[bi]
                    # Only candidates overlapping this survivor contribute.
                    pos_inds = (cur_ious > 0.01).nonzero().squeeze(1)
                    pos_ious = cur_ious[pos_inds]
                    pos_boxes = boxlist_for_class.box[pos_inds]
                    pos_scores = scores_j[pos_inds]
                    # Weight = exp(-(1-IoU)^2/sigma) * score, per candidate.
                    pis = (torch.exp(-(1 - pos_ious) ** 2 / sigma) * pos_scores).unsqueeze(1)
                    voted_box = torch.sum(pos_boxes * pis, dim=0) / torch.sum(pis, dim=0)
                    voted_boxes.append(voted_box.unsqueeze(0))
                if voted_boxes:
                    result.box[result_inds] = torch.cat(voted_boxes, dim=0)
        result_batch.append(result)
    return result_batch
def post_process(cfg, c_batch, box_batch, iou_batch, anchors, resized_size):
    """Turn raw per-FPN-level head outputs into final per-image detections.

    :param c_batch: per-level classification logits.
    :param box_batch: per-level box regression outputs.
    :param iou_batch: per-level IoU-prediction logits.
    :param anchors: per-level anchor BoxLists.
    :param resized_size: per-image input sizes used for clipping.
    :return: per-image BoxLists after cross-level merge and NMS.
    """
    total_boxes = []
    # One iteration per FPN level; each *_fpn tensor covers the whole batch.
    for c_fpn, box_fpn, iou_fpn, anchor_fpn in zip(c_batch, box_batch, iou_batch, anchors):
        N, _, H, W = c_fpn.shape
        A = box_fpn.size(1) // 4  # 'A' means num_anchors per location
        C = c_fpn.size(1) // A
        c_fpn = permute_and_flatten(c_fpn, N, A, C, H, W)  # shape: (n, num_anchor, 80)
        c_fpn = c_fpn.sigmoid()
        box_fpn = permute_and_flatten(box_fpn, N, A, 4, H, W)  # shape: (n, num_anchor, 4)
        box_fpn = box_fpn.reshape(N, -1, 4)
        iou_fpn = permute_and_flatten(iou_fpn, N, A, 1, H, W)
        iou_fpn = iou_fpn.reshape(N, -1).sigmoid()
        # multiply classification and IoU to get the score
        score_fpn = (c_fpn * iou_fpn[:, :, None]).sqrt()
        # use class score to do the pre-threshold
        candi_i_fpn = c_fpn > cfg.nms_score_thre  # TODO: if use score_fpn to do score threshold?
        # Per-image number of candidates passing the threshold, capped.
        nms_topk_fpn = candi_i_fpn.reshape(N, -1).sum(dim=1)
        nms_topk_fpn = nms_topk_fpn.clamp(max=cfg.nms_topk)
        results = []
        # Per-image processing within this level.
        for score, box, nms_topk, candi_i, size in zip(score_fpn, box_fpn, nms_topk_fpn, candi_i_fpn, resized_size):
            score = score[candi_i]  # TODO: too much thre is not elegant, too handcrafted
            score, topk_i = score.topk(nms_topk, sorted=False)  # use score to get the topk
            # candi_i columns: [anchor index, class index].
            candi_i = candi_i.nonzero()[topk_i, :]
            box_selected = box[candi_i[:, 0], :].reshape(-1, 4)
            anchor_selected = anchor_fpn.box[candi_i[:, 0], :].reshape(-1, 4)
            anchor_selected = anchor_selected.cuda()
            box_decoded = decode(box_selected, anchor_selected)
            # label is class index + 1 because 0 is background.
            boxlist = BoxList(box_decoded, size, mode='x1y1x2y2', label=candi_i[:, 1] + 1, score=score)
            boxlist.clip_to_image(remove_empty=False)
            boxlist.remove_small_box(min_size=0)
            results.append(boxlist)
        total_boxes.append(results)
    box_list_fpn_batch = list(zip(*total_boxes))  # bind together the fpn box_lists which belong to the same batch
    box_list_batch = [cat_boxlist(aa) for aa in box_list_fpn_batch]
    return select_over_all_levels(cfg, box_list_batch)
|
# Author: Mike 'Fuzzy' Partin
# Copyright: (c) 2016-2018
# Email: fuzzy@fumanchu.org
# License: See LICENSE.md for details
# Stdlib imports
import os
import json
# Internal imports
from mvm.term import *
from mvm.package import *
class MvmCmdList(object):
    """Mixin providing the `list` command: print installed or available
    packages with their versions."""

    def ListPackages(self, installed=True, output=True):
        """List packages.

        :param installed: when True, list installed packages (and populate
            self.instPkgs as a side effect); otherwise list available
            package specs.
        :param output: when True, print the listing to stdout.
        """
        # TODO: list available packages
        self.instPkgs = []
        if installed:
            if output:
                print('%s %s%s\n' % (OutputWord('installed'),
                                     OutputWord('packages'),
                                     cyan(':')))
            pkgs = os.listdir(self.config.dirs['instroot'])
            pkgs.sort()
            # Column width: longest package name plus padding.
            longest = 0
            for pkg in pkgs:
                if len(pkg) > longest:
                    longest = len(pkg)
            longest = longest+3
            # NOTE(review): pkgs is already sorted above and has not been
            # modified since; this second sort appears redundant.
            pkgs.sort()
            for pkg in pkgs:
                oput = '%s%s%s(' % (cyan(pkg[0].upper()),
                                    white(pkg[1:]),
                                    ' '*(longest - len(pkg)))
                pkgPath = '%s/%s/%s/%s' % (self.config.dirs['instroot'],
                                           pkg,
                                           self.sysInfo['os']['name'].lower(),
                                           self.sysInfo['os']['arch'])
                if os.path.isdir(pkgPath):
                    versions = os.listdir(pkgPath)
                    versions.sort()
                    for vers in versions:
                        tobj = Package(self.config, pkgPath+'/'+vers)
                        oput += tobj.Display()
                        self.instPkgs.append(tobj)
                        # Separator between version entries; the trailing
                        # ', ' on the last entry is trimmed below.
                        if pkg != pkgs[-1:][0]: oput += ', '
                if output:
                    if oput[-2:] == ', ':
                        print(oput[:-2]+')')
                    else:
                        print(oput+')')
            if output:
                print('\n  %s = Session, %s = Global' % (cyan('*'), green('*')))
        else:
            if output:
                print('%s %s%s\n' % (OutputWord('available'),
                                     OutputWord('packages'),
                                     cyan(':')))
            # NOTE(review): attribute-style access here vs. key access
            # (dirs['instroot']) above — confirm self.config.dirs supports
            # both.
            pkgs = os.listdir(self.config.dirs.pkgspecs)
            pkgs.sort()
            longest = 0
            for pkg in pkgs:
                if len(pkg) > longest:
                    longest = len(pkg)
            for pkg in pkgs:
                pdir = self.config.dirs.pkgspecs+'/'+pkg
                # Skip hidden entries and non-directories.
                if pkg[0] != '.' and os.path.isdir(pdir):
                    vers = []
                    for v in os.listdir(pdir):
                        # A version exists for every *.json spec file.
                        if os.path.isfile(pdir+'/'+v) and v[-5:] == '.json':
                            vers.append(v)
                    oput = '%s%s%s(' % (cyan(pkg[0].upper()),
                                        white(pkg[1:]),
                                        ' '*(longest - len(pkg)))
                    vers.sort()
                    for v in vers:
                        oput += blue(v.split('.jso')[0])
                        if v != vers[-1:][0]:
                            oput += ', '
                    if output: print(oput+')')
|
import pytest
from py_ecc.optimized_bls12_381 import (
G1,
G2,
multiply,
)
from py_ecc.bls.g2_primatives import (
G1_to_pubkey,
G2_to_signature,
)
from py_ecc.bls import G2Basic
# Tests taken from EIP 2333 https://eips.ethereum.org/EIPS/eip-2333
@pytest.mark.parametrize(
    'ikm,result_sk',
    # (input keying material, expected master secret key) — the four
    # official test vectors from EIP-2333.
    [
        (bytes.fromhex('c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04'), 12513733877922233913083619867448865075222526338446857121953625441395088009793),
        (bytes.fromhex('3141592653589793238462643383279502884197169399375105820974944592'), 46029459550803682895343812821003080589696405386150182061394330539196052371668),
        (bytes.fromhex('0099FF991111002299DD7744EE3355BBDD8844115566CC55663355668888CC00'), 45379166311535261329029945990467475187325618028073620882733843918126031931161),
        (bytes.fromhex('d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'), 31740500954810567003972734830331791822878290325762596213711963944729383643688),
    ]
)
def test_key_gen(ikm, result_sk):
    """KeyGen must derive the EIP-2333 master secret key from the IKM."""
    _, sk = G2Basic.KeyGen(ikm)
    assert sk == result_sk
@pytest.mark.parametrize(
    'pubkey,success',
    [
        (G2Basic.PrivToPub(42), True),
        (b'11' * 48, False),
    ]
)
def test_key_validate(pubkey, success):
    """A genuine pubkey validates; 48 arbitrary bytes must not."""
    is_valid = G2Basic.KeyValidate(pubkey)
    assert is_valid == success
@pytest.mark.parametrize(
    'privkey',
    [
        (1),
        (5),
        (124),
        (735),
        (127409812145),
        (90768492698215092512159),
        (0),
    ]
)
def test_sign_verify(privkey):
    """A signature from _CoreSign must verify under the matching pubkey."""
    message = str(privkey).encode('utf-8')
    pubkey = G2Basic.PrivToPub(privkey)
    signature = G2Basic._CoreSign(privkey, message, G2Basic.DST)
    assert G2Basic._CoreVerify(pubkey, message, signature, G2Basic.DST)
@pytest.mark.parametrize(
    'signature_points,result_point',
    [
        ([multiply(G2, 2), multiply(G2, 3)], multiply(G2, 2 + 3)),
        ([multiply(G2, 42), multiply(G2, 69)], multiply(G2, 42 + 69)),
    ]
)
def test_aggregate(signature_points, result_point):
    """Aggregating signatures equals the signature of the summed scalars."""
    expected = G2_to_signature(result_point)
    individual_sigs = [G2_to_signature(point) for point in signature_points]
    aggregated = G2Basic.Aggregate(individual_sigs)
    assert aggregated == expected
@pytest.mark.parametrize(
    'SKs,messages',
    [
        (range(5), range(5)),
    ]
)
def test_core_aggregate_verify(SKs, messages):
    """An aggregate of per-key signatures verifies against all (PK, msg) pairs."""
    public_keys = [G2Basic.PrivToPub(secret) for secret in SKs]
    # bytes(n) yields n zero bytes — distinct messages of distinct lengths.
    encoded_msgs = [bytes(message) for message in messages]
    signatures = [
        G2Basic._CoreSign(secret, message, G2Basic.DST)
        for secret, message in zip(SKs, encoded_msgs)
    ]
    aggregate_signature = G2Basic.Aggregate(signatures)
    assert G2Basic._CoreAggregateVerify(
        zip(public_keys, encoded_msgs), aggregate_signature, G2Basic.DST)
|
import unittest
from flask import Flask
from beamdice import dice, create_app
REPETITIONS = 100
class TestDice(unittest.TestCase):

    def test_roll(self):
        """
        Die rolls should be random. That is, over enough repetitions, the
        die roll should be something other than 6.
        """
        # Collect all rolls first so exactly REPETITIONS calls are made.
        rolls = [dice.roll() for _ in range(REPETITIONS)]
        saw_non_six = any(value != 6 for value in rolls)
        self.assertTrue(saw_non_six, "The roller should eventually roll something other than 6.")

    def test_roll_distribution(self):
        """
        Dice should be able to roll all values in the 1 through 6 range,
        but never any values outside that range.
        """
        # Face 0 exists only to prove it is never rolled.
        counts = {face: 0 for face in range(7)}
        for _ in range(REPETITIONS):
            value = dice.roll()
            self.assertGreaterEqual(value, 1)
            self.assertLessEqual(value, 6)
            counts[value] += 1
        for face in (1, 2, 3, 4, 5, 6):
            self.assertGreater(counts[face], 0, f"Never saw {face} (count={counts[face]})")

    def test_create_app(self):
        """
        For the sake of coverage, make sure that Flask app creation does
        the right thing.
        """
        self.assertIsInstance(create_app(), Flask)

    def test_roll_endpoint(self):
        """
        For the sake of coverage, make sure that GET requests to the dice
        endpoint return correct JSON.
        """
        application = create_app()
        application.testing = True
        response = application.test_client().get('/dice')
        body = str(response.data, 'utf-8')
        self.assertRegex(body, r'{"total":[1-6]}')
|
import math
import os
import struct
import unittest
import sycomore
from sycomore.units import *
class TestGRE(unittest.TestCase):
    """Gradient-echo simulations compared against pre-computed baselines.

    Baseline files live under $SYCOMORE_TEST_DATA/baseline as raw
    little-endian doubles, three components (x, y, z) per TR.
    """

    def setUp(self):
        # Tissue and sequence parameters shared by both tests.
        self.species = sycomore.Species(1000*ms, 100*ms, 0.89*um*um/ms)
        self.m0 = sycomore.Magnetization(0, 0, 1)
        self.flip_angle = 40*deg
        self.pulse_duration = 1*ms
        self.pulse_support_size = 101
        self.zero_crossings = 2
        self.TR = 500*ms
        self.slice_thickness = 1*mm
        self.TR_count = 10

    def test_ideal(self):
        """GRE with ideal (instantaneous) RF pulses matches the baseline."""
        model = sycomore.como.Model(
            self.species, self.m0,
            [["half_echo", sycomore.TimeInterval(self.TR/2.)]])
        magnetization = []
        for i in range(self.TR_count):
            # Pulse phase alternates by pi every TR (on top of a pi/3 offset).
            model.apply_pulse(
                sycomore.Pulse(self.flip_angle, (math.pi/3+(i%2)*math.pi)*rad))
            model.apply_time_interval("half_echo")
            # Sample the isochromat at the echo, mid-TR.
            magnetization.append(model.isochromat())
            model.apply_time_interval("half_echo")
        root = os.environ["SYCOMORE_TEST_DATA"]
        with open(os.path.join(root, "baseline", "GRE_ideal.dat"), "rb") as fd:
            contents = fd.read()
        # 8 bytes per double.
        baseline = struct.unpack((int(len(contents)/8))*"d", contents)
        self.assertEqual(len(baseline), 3*self.TR_count)
        for i in range(self.TR_count):
            m_test = magnetization[i]
            m_baseline = baseline[3*i:3*(i+1)]
            self.assertAlmostEqual(m_test[0], m_baseline[0])
            self.assertAlmostEqual(m_test[1], m_baseline[1])
            self.assertAlmostEqual(m_test[2], m_baseline[2])

    def test_real(self):
        """GRE with a hard-pulse-approximated sinc pulse matches the baseline."""
        t0 = self.pulse_duration/(2*self.zero_crossings)
        sinc_pulse = sycomore.HardPulseApproximation(
            sycomore.Pulse(self.flip_angle, 0*rad),
            sycomore.linspace(self.pulse_duration, self.pulse_support_size),
            sycomore.sinc_envelope(t0), 1/t0, self.slice_thickness, "rf")
        # Refocus half of the slice-selection gradient during the echo.
        half_echo = sycomore.TimeInterval(
            (self.TR-self.pulse_duration)/2.,
            -sinc_pulse.get_gradient_moment()/2)
        model = sycomore.como.Model(
            self.species, self.m0, [
                ["rf", sinc_pulse.get_time_interval()],
                ["half_echo", half_echo]])
        magnetization = []
        for i in range(self.TR_count):
            # Same phase alternation scheme as the ideal-pulse test.
            sinc_pulse.set_phase((math.pi/3+(i%2)*math.pi)*rad)
            model.apply_pulse(sinc_pulse)
            model.apply_time_interval("half_echo")
            magnetization.append(model.isochromat())
            model.apply_time_interval("half_echo")
        root = os.environ["SYCOMORE_TEST_DATA"]
        with open(os.path.join(root, "baseline", "GRE_real.dat"), "rb") as fd:
            contents = fd.read()
        baseline = struct.unpack((int(len(contents)/8))*"d", contents)
        self.assertEqual(len(baseline), 3*self.TR_count)
        for i in range(self.TR_count):
            m_test = magnetization[i]
            m_baseline = baseline[3*i:3*(i+1)]
            self.assertAlmostEqual(m_test[0], m_baseline[0])
            self.assertAlmostEqual(m_test[1], m_baseline[1])
            self.assertAlmostEqual(m_test[2], m_baseline[2])
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
# Copyright 2020 The Measurement System Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains local settings for building and deploying the codebase.
It is in .gitignore; please don't commit your local changes.
"""
# Settings for the repository where Docker images are stored.
# The image path is the container_registry, the prefix, and an
# image-specific suffix joined with slashes.
IMAGE_REPOSITORY_SETTINGS = struct(
    # URL of the container registry.
    container_registry = "",
    # Common prefix of all images.
    repository_prefix = "",
)
# Settings for deploying tests to Google Cloud.
# NOTE(review): empty strings are per-developer placeholders (this file is
# gitignored per the module docstring) — fill them in locally.
TEST_GOOGLE_CLOUD_SETTINGS = struct(
    # Spanner project and instance the deployed tests run against.
    spanner_project = "",
    spanner_instance = "",
    # Cloud Storage project and bucket the deployed tests use.
    cloud_storage_project = "",
    cloud_storage_bucket = "",
)
|
import http.client
import time
def readConn(addr, func):
    """Poll "/<addr>" on localhost:8080 forever, feeding each response to func.

    Each iteration issues a GET and passes the decoded response body to
    ``func``. If the server refuses the connection or drops it mid-request,
    a fresh connection is created and the loop retries after a 1 s pause.
    Never returns.
    """
    client = http.client.HTTPConnection("localhost:8080")
    while True:
        try:
            # NOTE(review): the "ready" request body with a GET is unusual —
            # confirm the server actually reads it.
            client.request("GET", "/" + addr, "ready")
            response = client.getresponse().read().decode()
            func(response)
        except (ConnectionRefusedError, http.client.RemoteDisconnected):
            print("Connection refused, maybe server isn't running.")
            print("Trying again in 1s ...")
            # The old connection object may be in a broken state; replace it.
            client = http.client.HTTPConnection("localhost:8080")
            time.sleep(1)
def writeConn(addr, func):
    """Push the value produced by func() to "/<addr>" on localhost:8080, forever.

    Fixes over the previous version:
    - reuses a single HTTPConnection instead of leaking a new socket per
      iteration;
    - drains the server response after each request (required before the
      connection can be reused);
    - handles a down/restarting server the same way readConn does, retrying
      after a 1 s pause instead of crashing or busy-looping.
    Never returns.
    """
    client = http.client.HTTPConnection("localhost:8080")
    while True:
        try:
            client.request("GET", "/" + addr, func())
            # Read and discard the response so the connection is reusable.
            client.getresponse().read()
        except (ConnectionRefusedError, http.client.RemoteDisconnected):
            print("Connection refused, maybe server isn't running.")
            print("Trying again in 1s ...")
            client = http.client.HTTPConnection("localhost:8080")
            time.sleep(1)
|
import asyncio
import functools
import uuid
import uvloop
from asyncio import Task
from random import randint
from typing import Dict, Any
import time
from tonga.services.coordinator.async_coordinator import async_coordinator
class FailToFindKey(KeyError):
    """Raised when a requested key is absent from the store."""
class DBIsLocked(Exception):
    """Raised when an operation hits a key that is currently locked."""
async def send_store_record(record: str):
    """Stand-in for publishing *record* to an external store; always acks."""
    return 'ack'
class MicroDB:
    # In-memory key/value store whose writes complete asynchronously.
    # A per-key lock flag is mirrored into an AsyncCoordinator queue so that
    # readers (get) can await completion of an in-flight write for that key.
    _db: Dict[str, bytes]            # backing storage
    _lock: Dict[str, bool]           # per-key "write in flight" flags
    _communicator: async_coordinator.AsyncCoordinator
    def __init__(self):
        self._db = dict()
        self._lock = dict()
        self._communicator = async_coordinator.AsyncCoordinator(maxsize=0, loop=asyncio.get_event_loop())
    def __lock_db(self, key: str) -> None:
        # Mark the key as busy both locally and on the coordinator queue.
        self._communicator.put_nowait(key, True)
        self._lock[key] = True
    def __unlock_db(self, key: str, f: Task) -> None:
        # Done-callback for write tasks (f is the finished Task).
        # Only the coordinator is updated; readers clear _lock themselves
        # in __ready once they observe the False value.
        self._communicator.put_nowait(key, False)
    async def get_lock(self) -> Dict[str, bool]:
        # Snapshot of the current lock flags (copy, safe for callers).
        return self._lock.copy()
    async def __store_set(self, key: str, value: bytes):
        # Actual deferred write.
        self._db[key] = value
    async def __store_del(self, key: str):
        # Actual deferred delete.
        del self._db[key]
    async def set(self, key: str, value: bytes) -> str:
        # Persist the mutation to the external store first, then apply it
        # locally as a background task; 'ack' is returned immediately.
        await send_store_record(key + '-' + value.decode('utf-8'))
        self.__lock_db(key)
        fu = asyncio.ensure_future(self.__store_set(key, value), loop=asyncio.get_event_loop())
        fu.add_done_callback(functools.partial(self.__unlock_db, key))
        return 'ack'
    async def delete(self, key: str) -> str:
        # Same pattern as set(): record externally, delete in the background.
        await send_store_record(key + '-' + 'del')
        self.__lock_db(key)
        fu = asyncio.ensure_future(self.__store_del(key), loop=asyncio.get_event_loop())
        fu.add_done_callback(functools.partial(self.__unlock_db, key))
        return 'ack'
    async def get(self, key: str) -> bytes:
        # Wait for any in-flight write on this key, then read.
        await self.__ready(key)
        try:
            return self._db[key]
        except KeyError:
            raise FailToFindKey
    async def __ready(self, key) -> None:
        # Block until the pending write for `key` has signalled completion
        # through the coordinator. Raises FailToFindKey for unknown keys.
        try:
            if self._lock[key]:
                self._lock[key] = await self._communicator.get(key)
        except KeyError:
            raise FailToFindKey
class StoreRecord:
    """A key/value pair exchanged with the store.

    Bug fix: the ``value`` property previously returned ``self._key``
    instead of ``self._value``, so the payload was unreachable through the
    public API (``to_dict`` was unaffected).
    """
    _key: str       # record key
    _value: bytes   # record payload
    def __init__(self, key: str, value: bytes):
        self._key = key
        self._value = value
    @property
    def key(self) -> str:
        """Record key."""
        return self._key
    @property
    def value(self) -> bytes:
        """Record payload (was wrongly returning the key)."""
        return self._value
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, e.g. for ``MicroDB.set(**record.to_dict())``."""
        return {'key': self._key, 'value': self._value}
    @classmethod
    def from_dict(cls, data):
        """Inverse of :meth:`to_dict`."""
        return cls(data['key'], data['value'])
async def main():
    """Benchmark MicroDB: write one million records, then read them back.

    Fix: ``time.clock()`` was removed in Python 3.8; ``time.perf_counter()``
    is the documented replacement for wall-clock benchmarking. Dead
    commented-out exploration code was dropped.
    """
    micro_db = MicroDB()
    tic = time.perf_counter()
    # Build the workload: unique keys, payloads tagged with their index.
    records = [
        StoreRecord(uuid.uuid4().hex, b'value-' + str(i).encode('utf-8'))
        for i in range(0, 1000000)
    ]
    # Fire all writes concurrently, then all reads.
    await asyncio.gather(
        *[asyncio.create_task(micro_db.set(**record.to_dict())) for record in records])
    await asyncio.gather(
        *[asyncio.create_task(micro_db.get(record.key)) for record in records])
    toc = time.perf_counter()
    print('Return for 1000000 record : ', toc - tic)
# Run the benchmark on a uvloop event loop.
# Fix: asyncio.run() always creates its own default event loop, so the
# uvloop loop created here was previously ignored. Drive the uvloop loop
# explicitly instead, and close it when done.
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(main())
finally:
    loop.close()
|
from typing import Mapping, Collection
def to_json(obj):
    """Recursively convert *obj* into JSON-friendly primitives.

    Preference order: an object's own ``to_json()`` method, then strings
    (passed through untouched), then mappings (converted to dicts with
    converted keys and values), then other collections (converted to
    lists). Anything else is returned as-is.

    NOTE(review): ``bytes`` falls into the Collection branch and becomes a
    list of ints — confirm callers expect that.
    """
    if hasattr(obj, 'to_json'):
        return obj.to_json()
    if isinstance(obj, str):
        return obj
    if isinstance(obj, Mapping):
        return {to_json(key): to_json(value) for key, value in obj.items()}
    if isinstance(obj, Collection):
        return [to_json(item) for item in obj]
    return obj
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Session and API call management for VMware ESX/VC server.
This module contains classes to invoke VIM APIs. It supports
automatic session re-establishment and retry of API invocations
in case of connection problems or server API call overload.
"""
import logging
from oslo_concurrency import lockutils
from oslo_context import context
from oslo_utils import excutils
from oslo_utils import reflection
import six
from oslo_vmware._i18n import _
from oslo_vmware.common import loopingcall
from oslo_vmware import exceptions
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
LOG = logging.getLogger(__name__)
def _trunc_id(session_id):
    """Returns truncated session id which is suitable for logging."""
    if session_id is None:
        return None
    return session_id[-5:]
# TODO(vbala) Move this class to excutils.py.
class RetryDecorator(object):
    """Decorator for retrying a function upon suggested exceptions.
    The decorated function is retried for the given number of times, and the
    sleep time between the retries is incremented until max sleep time is
    reached. If the max retry count is set to -1, then the decorated function
    is invoked indefinitely until an exception is thrown, and the caught
    exception is not in the list of suggested exceptions.
    """
    def __init__(self, max_retry_count=-1, inc_sleep_time=10,
                 max_sleep_time=60, exceptions=()):
        """Configure the retry object using the input params.
        :param max_retry_count: maximum number of times the given function must
                                be retried when one of the input 'exceptions'
                                is caught. When set to -1, it will be retried
                                indefinitely until an exception is thrown
                                and the caught exception is not in param
                                exceptions.
        :param inc_sleep_time: incremental time in seconds for sleep time
                               between retries
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param exceptions: suggested exceptions for which the function must be
                           retried
        """
        self._max_retry_count = max_retry_count
        self._inc_sleep_time = inc_sleep_time
        self._max_sleep_time = max_sleep_time
        self._exceptions = exceptions
        # Mutable state shared across invocations of _func below; tracks
        # progress of the retry loop for one decorated call.
        self._retry_count = 0
        self._sleep_time = 0
    def __call__(self, f):
        func_name = reflection.get_callable_name(f)
        def _func(*args, **kwargs):
            # One iteration of the retry loop. Returning a number schedules
            # the next iteration after that many seconds; raising
            # LoopingCallDone terminates the loop with a result.
            result = None
            try:
                if self._retry_count:
                    LOG.debug("Invoking %(func_name)s; retry count is "
                              "%(retry_count)d.",
                              {'func_name': func_name,
                               'retry_count': self._retry_count})
                result = f(*args, **kwargs)
            except self._exceptions:
                with excutils.save_and_reraise_exception() as ctxt:
                    LOG.warning("Exception which is in the suggested list "
                                "of exceptions occurred while invoking "
                                "function: %s.",
                                func_name,
                                exc_info=True)
                    if (self._max_retry_count != -1 and
                            self._retry_count >= self._max_retry_count):
                        LOG.error("Cannot retry upon suggested exception "
                                  "since retry count (%(retry_count)d) "
                                  "reached max retry count "
                                  "(%(max_retry_count)d).",
                                  {'retry_count': self._retry_count,
                                   'max_retry_count': self._max_retry_count})
                    else:
                        # Suppress the re-raise and retry after an
                        # incrementally longer sleep.
                        ctxt.reraise = False
                        self._retry_count += 1
                        self._sleep_time += self._inc_sleep_time
                        return self._sleep_time
            # Success (or exhausted retries with no re-raise): stop looping.
            raise loopingcall.LoopingCallDone(result)
        def func(*args, **kwargs):
            # Drive _func with a DynamicLoopingCall; the sleep between
            # iterations is whatever _func returned, capped at max_sleep_time.
            loop = loopingcall.DynamicLoopingCall(_func, *args, **kwargs)
            evt = loop.start(periodic_interval_max=self._max_sleep_time)
            return evt.wait()
        return func
class VMwareAPISession(object):
    """Setup a session with the server and handles all calls made to it.
    Example:
      api_session = VMwareAPISession('10.1.2.3', 'administrator',
                                     'password', 10, 0.1,
                                     create_session=False, port=443)
      result = api_session.invoke_api(vim_util, 'get_objects',
                                      api_session.vim, 'HostSystem', 100)
    """
    def __init__(self, host, server_username, server_password,
                 api_retry_count, task_poll_interval, scheme='https',
                 create_session=True, wsdl_loc=None, pbm_wsdl_loc=None,
                 port=443, cacert=None, insecure=True, pool_size=10,
                 connection_timeout=None, op_id_prefix='oslo.vmware'):
        """Initializes the API session with given parameters.
        :param host: ESX/VC server IP address or host name
        :param port: port for connection
        :param server_username: username of ESX/VC server admin user
        :param server_password: password for param server_username
        :param api_retry_count: number of times an API must be retried upon
                                session/connection related errors
        :param task_poll_interval: sleep time in seconds for polling an
                                   on-going async task as part of the API call
        :param scheme: protocol-- http or https
        :param create_session: whether to setup a connection at the time of
                               instance creation
        :param wsdl_loc: VIM API WSDL file location
        :param pbm_wsdl_loc: PBM service WSDL file location
        :param cacert: Specify a CA bundle file to use in verifying a
                       TLS (https) server certificate.
        :param insecure: Verify HTTPS connections using system certificates,
                         used only if cacert is not specified
        :param pool_size: Maximum number of connections in http
                          connection pool
        :param connection_timeout: Maximum time in seconds to wait for peer to
                                   respond.
        :param op_id_prefix: String prefix for the operation ID.
        :raises: VimException, VimFaultException, VimAttributeException,
                 VimSessionOverLoadException
        """
        self._host = host
        self._port = port
        self._server_username = server_username
        self._server_password = server_password
        self._api_retry_count = api_retry_count
        self._task_poll_interval = task_poll_interval
        self._scheme = scheme
        self._vim_wsdl_loc = wsdl_loc
        self._pbm_wsdl_loc = pbm_wsdl_loc
        self._session_id = None
        self._session_username = None
        # Both clients are created lazily via the `vim` / `pbm` properties.
        self._vim = None
        self._pbm = None
        self._cacert = cacert
        self._insecure = insecure
        self._pool_size = pool_size
        self._connection_timeout = connection_timeout
        self._op_id_prefix = op_id_prefix
        if create_session:
            self._create_session()
    def pbm_wsdl_loc_set(self, pbm_wsdl_loc):
        # Changing the WSDL location invalidates the cached PBM client; it
        # will be rebuilt on next access of the `pbm` property.
        self._pbm_wsdl_loc = pbm_wsdl_loc
        self._pbm = None
        LOG.info('PBM WSDL updated to %s', pbm_wsdl_loc)
    @property
    def vim(self):
        # Lazily-constructed VIM client; reused for the session's lifetime.
        if not self._vim:
            self._vim = vim.Vim(protocol=self._scheme,
                                host=self._host,
                                port=self._port,
                                wsdl_url=self._vim_wsdl_loc,
                                cacert=self._cacert,
                                insecure=self._insecure,
                                pool_maxsize=self._pool_size,
                                connection_timeout=self._connection_timeout,
                                op_id_prefix=self._op_id_prefix)
        return self._vim
    @property
    def pbm(self):
        # Lazily-constructed PBM client; only built if a PBM WSDL was given.
        if not self._pbm and self._pbm_wsdl_loc:
            self._pbm = pbm.Pbm(protocol=self._scheme,
                                host=self._host,
                                port=self._port,
                                wsdl_url=self._pbm_wsdl_loc,
                                cacert=self._cacert,
                                insecure=self._insecure,
                                pool_maxsize=self._pool_size,
                                connection_timeout=self._connection_timeout,
                                op_id_prefix=self._op_id_prefix)
            if self._session_id:
                # To handle the case where pbm property is accessed after
                # session creation. If pbm property is accessed before session
                # creation, we set the cookie in _create_session.
                self._pbm.set_soap_cookie(self._vim.get_http_cookie())
        return self._pbm
    @RetryDecorator(exceptions=(exceptions.VimConnectionException,))
    @lockutils.synchronized('oslo_vmware_api_lock')
    def _create_session(self):
        """Establish session with the server."""
        # Another thread might have created the session while the current one
        # was waiting for the lock.
        if self._session_id and self.is_current_session_active():
            LOG.debug("Current session: %s is active.",
                      _trunc_id(self._session_id))
            return
        session_manager = self.vim.service_content.sessionManager
        # Login and create new session with the server for making API calls.
        LOG.debug("Logging into host: %s.", self._host)
        session = self.vim.Login(session_manager,
                                 userName=self._server_username,
                                 password=self._server_password,
                                 locale='en')
        self._session_id = session.key
        # We need to save the username in the session since we may need it
        # later to check active session. The SessionIsActive method requires
        # the username parameter to be exactly same as that in the session
        # object. We can't use the username used for login since the Login
        # method ignores the case.
        self._session_username = session.userName
        LOG.info("Successfully established new session; session ID is "
                 "%s.",
                 _trunc_id(self._session_id))
        # Set PBM client cookie.
        if self._pbm is not None:
            self._pbm.set_soap_cookie(self._vim.get_http_cookie())
    def logout(self):
        """Log out and terminate the current session."""
        if self._session_id:
            LOG.info("Logging out and terminating the current session "
                     "with ID = %s.",
                     _trunc_id(self._session_id))
            try:
                self.vim.Logout(self.vim.service_content.sessionManager)
                self._session_id = None
            except Exception:
                # Best-effort logout: failure is logged but not propagated.
                LOG.exception("Error occurred while logging out and "
                              "terminating the current session with "
                              "ID = %s.",
                              _trunc_id(self._session_id))
        else:
            LOG.debug("No session exists to log out.")
    def invoke_api(self, module, method, *args, **kwargs):
        """Wrapper method for invoking APIs.
        The API call is retried in the event of exceptions due to session
        overload or connection problems.
        :param module: module corresponding to the VIM API call
        :param method: method in the module which corresponds to the
                       VIM API call
        :param args: arguments to the method
        :param kwargs: keyword arguments to the method
        :returns: response from the API call
        :raises: VimException, VimFaultException, VimAttributeException,
                 VimSessionOverLoadException, VimConnectionException
        """
        @RetryDecorator(max_retry_count=self._api_retry_count,
                        exceptions=(exceptions.VimSessionOverLoadException,
                                    exceptions.VimConnectionException))
        def _invoke_api(module, method, *args, **kwargs):
            try:
                api_method = getattr(module, method)
                return api_method(*args, **kwargs)
            except exceptions.VimFaultException as excep:
                # If this is due to an inactive session, we should re-create
                # the session and retry.
                if exceptions.NOT_AUTHENTICATED in excep.fault_list:
                    # The NotAuthenticated fault is set by the fault checker
                    # due to an empty response. An empty response could be a
                    # valid response; for e.g., response for the query to
                    # return the VMs in an ESX server which has no VMs in it.
                    # Also, the server responds with an empty response in the
                    # case of an inactive session. Therefore, we need a way to
                    # differentiate between these two cases.
                    if self.is_current_session_active():
                        LOG.debug("Returning empty response for "
                                  "%(module)s.%(method)s invocation.",
                                  {'module': module,
                                   'method': method})
                        return []
                    else:
                        # empty response is due to an inactive session
                        excep_msg = (
                            _("Current session: %(session)s is inactive; "
                              "re-creating the session while invoking "
                              "method %(module)s.%(method)s.") %
                            {'session': _trunc_id(self._session_id),
                             'module': module,
                             'method': method})
                        LOG.debug(excep_msg)
                        self._create_session()
                        # Raising VimConnectionException triggers the
                        # RetryDecorator above to re-run the call.
                        raise exceptions.VimConnectionException(excep_msg,
                                                                excep)
                else:
                    # no need to retry for other VIM faults like
                    # InvalidArgument
                    # Raise specific exceptions here if possible
                    if excep.fault_list:
                        LOG.debug("Fault list: %s", excep.fault_list)
                        fault = excep.fault_list[0]
                        clazz = exceptions.get_fault_class(fault)
                        if clazz:
                            raise clazz(six.text_type(excep),
                                        details=excep.details)
                    raise
            except exceptions.VimConnectionException:
                with excutils.save_and_reraise_exception():
                    # Re-create the session during connection exception only
                    # if the session has expired. Otherwise, it could be
                    # a transient issue.
                    if not self.is_current_session_active():
                        LOG.debug("Re-creating session due to connection "
                                  "problems while invoking method "
                                  "%(module)s.%(method)s.",
                                  {'module': module,
                                   'method': method})
                        self._create_session()
        return _invoke_api(module, method, *args, **kwargs)
    def is_current_session_active(self):
        """Check if current session is active.
        :returns: True if the session is active; False otherwise
        """
        LOG.debug("Checking if the current session: %s is active.",
                  _trunc_id(self._session_id))
        is_active = False
        try:
            is_active = self.vim.SessionIsActive(
                self.vim.service_content.sessionManager,
                sessionID=self._session_id,
                userName=self._session_username)
        except exceptions.VimException as ex:
            # Treat any error while checking as "not active"; caller will
            # typically re-create the session.
            LOG.debug("Error: %(error)s occurred while checking whether the "
                      "current session: %(session)s is active.",
                      {'error': six.text_type(ex),
                       'session': _trunc_id(self._session_id)})
        return is_active
    def wait_for_task(self, task):
        """Waits for the given task to complete and returns the result.
        The task is polled until it is done. The method returns the task
        information upon successful completion. In case of any error,
        appropriate exception is raised.
        :param task: managed object reference of the task
        :returns: task info upon successful completion of the task
        :raises: VimException, VimFaultException, VimAttributeException,
                 VimSessionOverLoadException, VimConnectionException
        """
        # Capture the request context so _poll_task can restore it in the
        # looping-call thread.
        ctx = context.get_current()
        loop = loopingcall.FixedIntervalLoopingCall(self._poll_task, task, ctx)
        evt = loop.start(self._task_poll_interval)
        LOG.debug("Waiting for the task: %s to complete.", task)
        return evt.wait()
    def _poll_task(self, task, ctx):
        """Poll the given task until completion.
        If the task completes successfully, the method returns the task info
        using the input event (param done). In case of any error, appropriate
        exception is set in the event.
        :param task: managed object reference of the task
        :param ctx: request context for the corresponding task
        """
        if ctx is not None:
            ctx.update_store()
        try:
            # we poll tasks too often, so skip logging the opID as it generates
            # too much noise in the logs
            task_info = self.invoke_api(vim_util,
                                        'get_object_property',
                                        self.vim,
                                        task,
                                        'info',
                                        skip_op_id=True)
        except exceptions.VimException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Error occurred while reading info of "
                              "task: %s.",
                              task)
        else:
            task_detail = {'id': task.value}
            # some internal tasks do not have 'name' set
            if getattr(task_info, 'name', None):
                task_detail['name'] = task_info.name
            if task_info.state in ['queued', 'running']:
                # Still in progress: log and let the looping call poll again.
                if hasattr(task_info, 'progress'):
                    LOG.debug("Task: %(task)s progress is %(progress)s%%.",
                              {'task': task_detail,
                               'progress': task_info.progress})
            elif task_info.state == 'success':
                def get_completed_task():
                    complete_time = getattr(task_info, 'completeTime', None)
                    if complete_time:
                        duration = complete_time - task_info.queueTime
                        task_detail['duration_secs'] = duration.total_seconds()
                    return task_detail
                LOG.debug("Task: %s completed successfully.",
                          get_completed_task())
                # LoopingCallDone ends the poll loop and carries the result.
                raise loopingcall.LoopingCallDone(task_info)
            else:
                # Task failed: map the server fault to a specific exception
                # class when one is registered.
                error_msg = six.text_type(task_info.error.localizedMessage)
                error = task_info.error
                name = error.fault.__class__.__name__
                fault_class = exceptions.get_fault_class(name)
                if fault_class:
                    task_ex = fault_class(error_msg)
                else:
                    task_ex = exceptions.VimFaultException([name],
                                                           error_msg)
                raise task_ex
    def wait_for_lease_ready(self, lease):
        """Waits for the given lease to be ready.
        This method return when the lease is ready. In case of any error,
        appropriate exception is raised.
        :param lease: lease to be checked for
        :raises: VimException, VimFaultException, VimAttributeException,
                 VimSessionOverLoadException, VimConnectionException
        """
        loop = loopingcall.FixedIntervalLoopingCall(self._poll_lease, lease)
        evt = loop.start(self._task_poll_interval)
        LOG.debug("Waiting for the lease: %s to be ready.", lease)
        evt.wait()
    def _poll_lease(self, lease):
        """Poll the state of the given lease.
        When the lease is ready, the event (param done) is notified. In case
        of any error, appropriate exception is set in the event.
        :param lease: lease whose state is to be polled
        """
        try:
            state = self.invoke_api(vim_util,
                                    'get_object_property',
                                    self.vim,
                                    lease,
                                    'state',
                                    skip_op_id=True)
        except exceptions.VimException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Error occurred while checking "
                              "state of lease: %s.",
                              lease)
        else:
            if state == 'ready':
                LOG.debug("Lease: %s is ready.", lease)
                # Ends the poll loop.
                raise loopingcall.LoopingCallDone()
            elif state == 'initializing':
                LOG.debug("Lease: %s is initializing.", lease)
            elif state == 'error':
                LOG.debug("Invoking VIM API to read lease: %s error.",
                          lease)
                error_msg = self._get_error_message(lease)
                excep_msg = _("Lease: %(lease)s is in error state. Details: "
                              "%(error_msg)s.") % {'lease': lease,
                                                   'error_msg': error_msg}
                LOG.error(excep_msg)
                raise exceptions.VimException(excep_msg)
            else:
                # unknown state
                excep_msg = _("Unknown state: %(state)s for lease: "
                              "%(lease)s.") % {'state': state,
                                               'lease': lease}
                LOG.error(excep_msg)
                raise exceptions.VimException(excep_msg)
    def _get_error_message(self, lease):
        """Get error message associated with the given lease."""
        try:
            return self.invoke_api(vim_util,
                                   'get_object_property',
                                   self.vim,
                                   lease,
                                   'error')
        except exceptions.VimException:
            LOG.warning("Error occurred while reading error message for "
                        "lease: %s.",
                        lease,
                        exc_info=True)
            return "Unknown"
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to bind function signatures to params."""
import inspect
# Groups of parameter kinds: kinds that map onto `Params` entries, and
# variadic kinds (*args / **kwargs) that are simply skipped.
DEFINABLE_PARAMETER_KINDS = (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                             inspect.Parameter.KEYWORD_ONLY)
IGNORABLE_PARAMETER_KINDS = (inspect.Parameter.VAR_POSITIONAL,
                             inspect.Parameter.VAR_KEYWORD)


def _IsDefinableParameter(parameter):
  """Checks if the parameter can be defined in `Params`.

  Args:
    parameter: inspect.Parameter to be checked.

  Returns:
    True if the parameter's kind is definable in `Params`
    (POSITIONAL_OR_KEYWORD or KEYWORD_ONLY), False if it is a variadic
    kind (VAR_POSITIONAL or VAR_KEYWORD) that is ignorable.

  Raises:
    ValueError: The parameter has any other kind, which is possibly
      unsupported, e.g. POSITIONAL_ONLY parameters.
  """
  kind = parameter.kind
  if kind in DEFINABLE_PARAMETER_KINDS:
    return True
  if kind in IGNORABLE_PARAMETER_KINDS:
    return False
  raise ValueError('Unsupported parameter signature `%s` with kind `%s`.' %
                   (parameter.name, parameter.kind))
def _ExtractParameters(func, ignore, bound):
  """Extracts parameters of func which can be defined in Params.

  Args:
    func: A callable to be analysed.
    ignore: A collection of parameter names in `func` to be ignored.
    bound: Whether the `func` is used as a bound function (an object method
      or a class method) or not. If True, the first parameter of the `func`
      will be ignored.

  Returns:
    A generator of `inspect.Parameter` representing definable parameters.
  """
  ignored_names = set() if ignore is None else set(ignore)
  signature_params = tuple(inspect.signature(func).parameters.values())
  if bound:
    # Drop the bound parameter: typically `self` or `cls`.
    signature_params = signature_params[1:]
  definable = filter(_IsDefinableParameter, signature_params)
  return (p for p in definable if p.name not in ignored_names)
def DefineParams(func, params, ignore=None, bound=False):
  """Defines params for each parameter of given callable.

  This saves you from typing the Define statements yourself: every explicit
  parameter of `func` gets a corresponding entry in `params`, with the
  function's default value copied over when one exists.

  To analyse a class instantiation, usually pass the class type as `func`.
  If that does not work correctly, pass the class's `__init__` method with
  `bound=True` instead.

  Args:
    func: A callable to be analysed. Parameters of this function will be
      defined in `params`. `func` must maintain an explicit signature;
      parameters hidden in `*args` or `**kwargs` cannot be analysed.
    params: A `Params` object to be updated with new definitions.
    ignore: A collection of parameter names in `func` for which no entries
      should be defined in `params`.
    bound: Whether `func` will be used as a bound function (an object method
      or a class method) or not. If True, the first parameter of `func`
      (typically `self` or `cls`) will be ignored.
  """
  for parameter in _ExtractParameters(func, ignore, bound):
    if parameter.default is inspect.Parameter.empty:
      # TODO(oday): If Params supported required fields, change this behavior
      # to set the "required" flag.
      value = None
    else:
      value = parameter.default
    params.Define(parameter.name, value, 'Function parameter.')
def _MakeArgs(func, params, **kwargs):
  """Generates an argument list to call func.

  Args:
    func: A callable to be called.
    params: A Params object containing arguments for `func`.
    **kwargs: Argument/value pairs that should override params.

  Returns:
    A dict containing function parameters to be used as `**kwargs` of `func`.
  """
  collected = {}
  # bound=False: `func` is expected to already be a bound function here.
  for parameter in _ExtractParameters(func, ignore=None, bound=False):
    name = parameter.name
    # Only args present in `kwargs` or `params` are collected; args that
    # appear in `func`'s signature but in neither are skipped.
    if name in kwargs:
      # Explicit kwargs always win over params.
      collected[name] = kwargs[name]
    elif name in params:
      candidate = params.Get(name)
      # Skip values equal to the function default so the signature itself
      # fills in that parameter.
      if candidate != parameter.default:
        collected[name] = candidate
  return collected
def CallWithParams(func, params, **kwargs):
  """Call a function or method with a Params object.

  Args:
    func: A callable to be called.
    params: A Params object containing parameters of `func`.
    **kwargs: Argument/value pairs that should override `params`.

  Returns:
    The return values from func.
  """
  call_kwargs = _MakeArgs(func, params, **kwargs)
  return func(**call_kwargs)
# TODO(oday): Remove this function and replace it with CallWithParams when the
# bug on the initializer of Keras layers has been resolved.
def ConstructWithParams(cls, params, **kwargs):
  """Constructs a class object with a Params object.

  Args:
    cls: A class type to be constructed.
    params: A Params object containing parameters of `cls.__init__`.
    **kwargs: Argument/value pairs that should override `params`.

  Returns:
    The constructed object.
  """
  init_kwargs = _MakeArgs(cls.__init__, params, **kwargs)
  return cls(**init_kwargs)
|
"""Github two-factor authentication requirement was disabled."""
from helpers.base import ghe_json_message
from stream_alert.rule_processor.rules_engine import StreamRules
# Alias StreamAlert's rule decorator for the registration below.
rule = StreamRules.rule
@rule(logs=['ghe:general'],
      matchers=['github_audit'],
      outputs=['aws-s3:sample-bucket',
               'pagerduty:sample-integration',
               'slack:sample-channel'])
def github_disable_org_two_factor_requirement(rec):
    """
    author: @mimeframe
    description: Two-factor authentication requirement was disabled.
    reference: https://help.github.com/
               articles/requiring-two-factor-authentication-in-your-organization/
    """
    # Unparseable/empty GHE records never match; otherwise match on the
    # audit-log action name.
    message_rec = ghe_json_message(rec)
    return bool(message_rec) and (
        message_rec.get('action') == 'org.disable_two_factor_requirement')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.