max_stars_repo_path
stringlengths 3
269
| max_stars_repo_name
stringlengths 4
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.05M
| score
float64 0.23
5.13
| int_score
int64 0
5
|
|---|---|---|---|---|---|---|
src/experiments_practical.py
|
rafcc/aaai-20.1534
| 1
|
12774251
|
<filename>src/experiments_practical.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import random
import time
from itertools import combinations
import yaml
import data
import model
import sampling
import subfunction
import trainer
def convert_params_to_string(
    dimension_simplex, dimension_space, degree, N, num_sample_train, sigma
):
    """Build the canonical file-name stem for a synthetic-experiment setting.

    Parameters
    ----------
    dimension_simplex : int
        dimension of the Bezier simplex (encoded as "M.")
    dimension_space : int
        dimension of the ambient space (encoded as "L.")
    degree : int
        max degree of the Bezier simplex fitting (encoded as "D.")
    N : int
        number of data points (encoded as "N.")
    num_sample_train : int
        NOTE(review): accepted but never encoded in the result; kept for
        backward compatibility with existing callers — confirm intent.
    sigma : float
        noise level (encoded as "sigma.")

    Returns
    -------
    str
        e.g. "synthetic_M.3_L.2_D.2_N.100_sigma.0.1"
    """
    return (
        f"synthetic_M.{dimension_simplex}"
        f"_L.{dimension_space}"
        f"_D.{degree}"
        f"_N.{N}"
        f"_sigma.{sigma}"
    )
def sampling_data_and_param(d, p, n, seed):
    """Draw `n` paired rows, uniformly without replacement, from `d` and `p`.

    The draw is made reproducible by seeding the module-level `random`
    generator with `seed`. Returns a tuple (d_rows, p_rows) where the same
    row indices are selected from both arrays.
    """
    random.seed(seed)
    candidate_rows = list(range(d.shape[0]))
    chosen = random.sample(candidate_rows, n)
    return (d[chosen, :], p[chosen, :])
def experiments_practical_instances(
    data_dir,
    trn_data,
    test_data,
    n,
    solution_type,
    dim_simplex,
    degree,
    method,
    seed,
    results_dir,
    opt_flag=1,
):
    """Conduct a fitting experiment on practical datasets and dump results to YAML.

    Parameters
    ----------
    data_dir : str
        the name of the directory which includes the datasets
    trn_data : str
        dataname to be trained
    test_data : str
        dataname for test
    n : int
        number of samples to be trained
    solution_type : str
        selector ("x"/"f") used in the dataset file names
    dim_simplex : int
        dimension of the Bezier simplex
    degree : int
        max degree of the Bezier simplex fitting
    method : str
        "borges" or "inductive"
    seed : int
        random seed used when subsampling training data
    results_dir : str
        where to output results
    opt_flag : int, 0 or 1 (default 1)
        0: optimal sampling strategy for inductive skeleton fitting
        1: nonoptimal sampling strategy for inductive skeleton fitting
        "borges" does not care about this parameter.

    Raises
    ------
    ValueError
        if `method` is neither "borges" nor "inductive"
    """
    # --- data preparation ---------------------------------------------------
    objective_function_indices_list = list(range(dim_simplex))
    subproblem_indices_list = []
    for i in range(1, len(objective_function_indices_list) + 1):
        for c in combinations(objective_function_indices_list, i):
            subproblem_indices_list.append(c)
    monomial_degree_list = list(
        subfunction.BezierIndex(dim=dim_simplex, deg=degree)
    )
    data_all = {}
    param_all = {}
    for e in subproblem_indices_list:
        if len(e) <= degree or len(e) == dim_simplex:
            string = "_".join(str(i + 1) for i in e)
            tmp = data.Dataset(
                data_dir + "/" + trn_data + "," + solution_type + "_" + string
            )
            data_all[e] = tmp.values
            tmp = data.Dataset(data_dir + "/" + trn_data + ",w_" + string)
            param_all[e] = tmp.values
    # BUG FIX: the full-problem key was hard-coded as (0, 1, 2), which only
    # works when dim_simplex == 3; derive it from dim_simplex instead.
    dim_space = data_all[tuple(range(dim_simplex))].shape[1]
    # --- train ----------------------------------------------------------------
    if method == "borges":
        param_trn = {}
        data_trn = {}
        e = tuple(range(dim_simplex))
        data_trn[e], param_trn[e] = sampling_data_and_param(
            d=data_all[e], p=param_all[e], n=n, seed=seed
        )
        borges_pastva_trainer = trainer.BorgesPastvaTrainer(
            dim_space=dim_space, dim_simplex=dim_simplex, degree=degree
        )
        control_point = borges_pastva_trainer.update_control_point(
            t_mat=param_trn[e],
            data=data_trn[e],
            c={},
            indices_all=monomial_degree_list,
            indices_fix=[],
        )
    elif method == "inductive":
        # calculate sample size of each skeleton
        calc_sample_size = sampling.CalcSampleSize(
            degree=degree, dim_simplex=dim_simplex
        )
        train_sample_size_list = calc_sample_size.get_sample_size_list(
            n=n, opt_flag=opt_flag
        )
        # sampling (each skeleton gets its own derived seed)
        data_trn = {}
        param_trn = {}
        for e in data_all:
            if len(e) <= degree:
                data_trn[e], param_trn[e] = sampling_data_and_param(
                    d=data_all[e],
                    p=param_all[e],
                    n=train_sample_size_list[len(e) - 1],
                    seed=seed + sum(e),
                )
        inductive_skeleton_trainer = trainer.InductiveSkeletonTrainer(
            dim_space=dim_space, dim_simplex=dim_simplex, degree=degree
        )
        control_point = inductive_skeleton_trainer.update_control_point(
            t_dict=param_trn,
            data_dict=data_trn,
            c={},
            indices_all=monomial_degree_list,
            indices_fix=[],
        )
    else:
        # BUG FIX: the original silently passed here, leaving `control_point`
        # undefined and crashing later with a NameError; fail fast instead.
        raise ValueError("unknown method: %r (expected 'borges' or 'inductive')" % method)
    # --- evaluate empirical l2 risk --------------------------------------------
    e = tuple(range(dim_simplex))
    data_tst = data.Dataset(
        data_dir
        + "/"
        + test_data
        + ","
        + solution_type
        + "_"
        + "_".join(str(i + 1) for i in e)
    ).values
    param_tst = data.Dataset(
        data_dir + "/" + test_data + ",w_" + "_".join(str(i + 1) for i in e)
    ).values
    bezier_simplex = model.BezierSimplex(
        dim_space=dim_space, dim_simplex=dim_simplex, degree=degree
    )
    data_pred = bezier_simplex.generate_points(c=control_point, tt=param_tst)
    l2_risk = subfunction.calculate_l2_expected_error(true=data_tst, pred=data_pred)
    # --- output result ----------------------------------------------------------
    settings = {}
    settings["trn_data"] = trn_data
    # BUG FIX: key was misspelled "tset_data"; consumers of the YAML output
    # should read "test_data".
    settings["test_data"] = test_data
    settings["solution_type"] = solution_type
    settings["n"] = n
    settings["degree"] = degree
    settings["dim_space"] = dim_space
    settings["dim_simplex"] = dim_simplex
    settings["method"] = method
    settings["seed"] = seed
    settings["opt_flag"] = opt_flag
    results = {}
    results["l2_risk"] = "{:5E}".format(l2_risk)
    o = {}
    # BUG FIX: top-level key was misspelled "reults".
    o["results"] = results
    o["settings"] = settings
    # NOTE(review): there is no "/" or "_" between trn_data and
    # "solution_type." below; preserved as-is — confirm intended layout.
    ymlfilename = results_dir + "/" + trn_data + "solution_type." + solution_type + "/"
    subfunction.create_directory(dir_name=ymlfilename)
    for key in ["degree", "n", "method", "opt_flag", "seed"]:
        ymlfilename = ymlfilename + key + "." + str(settings[key]) + "_"
    ymlfilename = ymlfilename + ".yml"
    # context manager guarantees the handle is closed even if yaml.dump raises
    with open(ymlfilename, "w") as wf:
        wf.write(yaml.dump(o, default_flow_style=False))
if __name__ == "__main__":
    # Output directory for all practical-instance experiment results.
    results_dir = "../results_practical"
    subfunction.create_directory(dir_name=results_dir)
    # Experimental grid: solution representation, fitting degree,
    # training-sample budget, and 20 repetition seeds.
    solution_type_list = ["x", "f"]
    degree_list = [2, 3]
    n_list = [250, 500, 1000, 2000]
    start = time.time()
    seed_list = [i + 1 for i in range(20)]
    # --- Phase 1: MED datasets, inductive vs borges ---
    for degree in degree_list:
        for solution_type in solution_type_list:
            for n in n_list:
                for seed in seed_list:
                    start_lap = time.time()
                    print("(D,solution_type,n,seed)", degree, solution_type, n, seed)
                    experiments_practical_instances(
                        data_dir="../data",
                        trn_data="MED,d_100,o_3,c_2e+00,e_1e-01,n_10000,s_42",
                        test_data="MED,d_100,o_3,c_2e+00,e_1e-01,n_10000,s_43",
                        solution_type=solution_type,
                        n=n,
                        degree=degree,
                        dim_simplex=3,
                        seed=seed,
                        method="inductive",
                        opt_flag=1,
                        results_dir=results_dir,
                    )
                    experiments_practical_instances(
                        data_dir="../data",
                        trn_data="MED,d_100,o_3,c_2e+00,e_1e-01,n_10000,s_42",
                        test_data="MED,d_100,o_3,c_2e+00,e_1e-01,n_10000,s_43",
                        solution_type=solution_type,
                        n=n,
                        degree=degree,
                        dim_simplex=3,
                        seed=seed,
                        method="borges",
                        opt_flag=1,
                        results_dir=results_dir,
                    )
                    lap_time = time.time() - start_lap
                    elapsed_time = time.time() - start
                    print("laptime:", lap_time)
                    print("current:", elapsed_time)
    # Timer is reset here, so "current:" below reports Phase 2 time only.
    start = time.time()
    # --- Phase 2: Birthwt datasets ---
    for degree in degree_list:
        for solution_type in solution_type_list:
            for n in n_list:
                for seed in seed_list:
                    start_lap = time.time()
                    print("(D,solution_type,n,seed)", degree, solution_type, n, seed)
                    experiments_practical_instances(
                        data_dir="../data",
                        trn_data="Birthwt6.csv,n_10000,r_1e+00,e_1e-01,m_0e+00,s_42,l_1e+00,t_1e-07,i_10000",
                        test_data="Birthwt6.csv,n_10000,r_1e+00,e_1e-01,m_0e+00,s_43,l_1e+00,t_1e-07,i_10000",
                        solution_type=solution_type,
                        n=n,
                        degree=degree,
                        dim_simplex=3,
                        seed=seed,
                        method="inductive",
                        opt_flag=1,
                        results_dir=results_dir,
                    )
                    # NOTE(review): this second call also uses method="inductive",
                    # unlike Phase 1 where the second call uses "borges" — it runs
                    # the identical experiment twice. Likely a copy-paste slip;
                    # confirm whether "borges" was intended here.
                    experiments_practical_instances(
                        data_dir="../data",
                        trn_data="Birthwt6.csv,n_10000,r_1e+00,e_1e-01,m_0e+00,s_42,l_1e+00,t_1e-07,i_10000",
                        test_data="Birthwt6.csv,n_10000,r_1e+00,e_1e-01,m_0e+00,s_43,l_1e+00,t_1e-07,i_10000",
                        solution_type=solution_type,
                        n=n,
                        degree=degree,
                        dim_simplex=3,
                        seed=seed,
                        method="inductive",
                        opt_flag=1,
                        results_dir=results_dir,
                    )
                    lap_time = time.time() - start_lap
                    elapsed_time = time.time() - start
                    print("laptime:", lap_time)
                    print("current:", elapsed_time)
| 2.546875
| 3
|
src/utils.py
|
Filco306/ds-project-template
| 0
|
12774252
|
<filename>src/utils.py<gh_stars>0
import os
def fix_filename(filename):
    """Prefix `filename` with the "config" directory unless its first six
    characters are already "config" (e.g. "config/app.yml" is left alone)."""
    if filename[:6] == "config":
        return filename
    return os.path.join("config", filename)
| 2.203125
| 2
|
detectVideoMod.py
|
tasanuma714/Raspberry-Pi-Security-Camera-using-Google-Coral-USB-Accelerator
| 2
|
12774253
|
"""
<NAME>
7-19-2019
Version 1.0
https://github.com/tasanuma714/Raspberry-Pi-Security-Camera-using-Google-Coral-USB-Accelerator
***Big Credit to Adrian at PyImageSearch for the base code of this file.
https://www.pyimagesearch.com/2019/04/22/getting-started-with-google-corals-tpu-usb-accelerator/
https://www.pyimagesearch.com/2019/05/13/object-detection-and-image-classification-with-google-coral-usb-accelerator/
File Description: This is the main file which executes the object detection.
Boxes will be drawn around the recognized objects listed in coco_labels.txt
with a confidence level of over 0.3. Every second, the detected objects and
the timestamp are written to surveillance.txt. If a pre-set object
is detected, a text message is sent via Pushetta and record.py
is called to record and upload the video to Google Drive. Every hour,
log.py is called to upload the contents of surveillance.txt to Google
Drive.
"""
# imports
from edgetpu.detection.engine import DetectionEngine
from imutils.video import VideoStream
from PIL import Image
import argparse
import imutils
import cv2
import time
import datetime
from datetime import datetime
from time import strftime
from pushetta import Pushetta
from subprocess import call
import sys
# NOTE(review): indentation was reconstructed during review; verify block
# structure (especially the `if target:` placement) against the repository.
# starting time (used to trigger the hourly log upload below)
start_time = time.time()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
    help="path to TensorFlow Lite object detection model")
ap.add_argument("-l", "--labels", required=True,
    help="path to labels file")
ap.add_argument("-c", "--confidence", type=float, default=0.3,
    help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# initialize the labels dictionary
print("[INFO] parsing class labels...")
labels = {}
# loop over the class labels file
# NOTE(review): this file handle is never closed; prefer `with open(...)`.
for row in open(args["labels"]):
    # unpack the row and update the labels dictionary
    (classID, label) = row.strip().split(maxsplit=1)
    labels[int(classID)] = label.strip()
# load the Google Coral object detection model
print("[INFO] loading Coral model...")
model = DetectionEngine(args["model"])
# initialize the video stream and allow the camera sensor to warmup
print("[INFO] starting video stream...")
# vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# initializing base variables
sec = -1  # last logged wall-clock second ("%S"); -1 forces the first log
prevLabel = []  # NOTE(review): assigned but never used below
API_KEY="<KEY>"
CHANNEL_NAME="RaspiSecurityCamera"
p=Pushetta(API_KEY)
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 500 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1000)
    orig = frame.copy()
    # prepare the frame for object detection by converting (1) it
    # from BGR to RGB channel ordering and then (2) from a NumPy
    # array to PIL image format
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = Image.fromarray(frame)
    # make predictions on the input frame
    start = time.time()
    results = model.DetectWithImage(frame, threshold=args["confidence"],
        keep_aspect_ratio=True, relative_coord=False)
    end = time.time()
    # loop over the results
    for r in results:
        # extract the bounding box and box and predicted class label
        box = r.bounding_box.flatten().astype("int")
        (startX, startY, endX, endY) = box
        label = labels[r.label_id]
        # draw the bounding box and label on the image
        cv2.rectangle(orig, (startX, startY), (endX, endY),
            (0, 255, 0), 2)
        y = startY - 15 if startY - 15 > 15 else startY + 15
        text = "{}: {:.2f}%".format(label, r.score * 100)
        cv2.putText(orig, text, (startX, y),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    # appending to surveillance.txt
    # NOTE(review): the file is reopened on every frame, and the sys.exit()
    # path below exits without closing it; consider `with open(...)`.
    file1 = open("surveillance.txt", "a")
    # checking if one seccond passed
    if sec != time.strftime("%S"):
        target = False;
        # print timestamp
        print(time.ctime())
        file1.write(time.ctime() + "\n")
        # prints every element in results list
        for r in results:
            print(labels[r.label_id], end=", ")
            file1.write(labels[r.label_id] + ", ")
            # sets pre-set target element
            if labels[r.label_id] == "cup":
                target = True
        print("/")
        file1.write("/" + "\n")
        # resets the second to compare
        sec = time.strftime("%S")
        # checks for target element
        if target:
            p.pushMessage(CHANNEL_NAME, "The cup has been detected")
            print("recording")
            file1.write("recording")
            vs.stop()
            call(["python3", "record.py"])
            sys.exit()
    file1.close()
    # checks elapsed time to upload the contents of surveillance.txt
    elapsed_time = time.time() - start_time
    if(elapsed_time > 3600):
        start_time = time.time()
        print("logging")
        call(["python3", "log.py"])
    # show the output frame and wait for a key press
    cv2.imshow("Frame", orig)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
| 2.984375
| 3
|
singl/utils.py
|
mpff/hpa-single-cell-classification
| 0
|
12774254
|
import os
import numpy
import pandas
from skimage import io
def read_ids_from_csv(csv_file):
    """Read the column named 'ID' from `csv_file` and return it as a Series.

    This function was created to make sure basic I/O works in unit testing.
    """
    table = pandas.read_csv(csv_file)
    return table.ID
def read_hpa_image(image_id, root_dir):
    """Read a four channel HPA cell image given by `image_id` from `root_dir`
    and return it as a (H x W x 4) numpy array.

    Channel order is red, blue, yellow, green, matching the per-channel
    "<image_id>_<color>.png" files on disk.
    """
    prefix = os.path.join(root_dir, image_id)
    suffixes = ("_red.png", "_blue.png", "_yellow.png", "_green.png")
    channels = [io.imread(prefix + suffix) for suffix in suffixes]
    return numpy.dstack(channels)
| 3
| 3
|
convert.py
|
Brandiep/Web-Design-Challenge
| 0
|
12774255
|
import pandas as pd

# Load the cities dataset and export it as an HTML table (no row index)
# for embedding in the web page.
cities_df = pd.read_csv("Resources/cities.csv")
cities_df.to_html('Resources/cities.html', index=False)
| 2.453125
| 2
|
unittests/test_TFSingleOrigin.py
|
maptube/UMaaS
| 0
|
12774256
|
<gh_stars>0
"""
This is a test for working functionality of TFSingleDest.
The TensorFlow version is compared against the regular python version to verify that CBar, Oi, Dj etc
are all equal when calculated using the different platforms.
SingleDest.py is taken as the gold standard.
"""
import os.path
import math
import numpy as np
from globals import *
from utils import loadMatrix, resizeMatrix
from models.SingleOrigin import SingleOrigin
from models.TFSingleOrigin import TFSingleOrigin
# Absolute tolerance used by the assertEqual* helpers below.
# NOTE(review): 2.0 is a very loose tolerance (the comment says it was
# increased from 0.1); confirm it is still appropriate.
epsilon = 2.0 #increased this from 0.1
###############################################################################
def assertEqualFloatsMsg(val1, val2, msg):
    """Compare two floats within the module-level `epsilon` tolerance.

    Returns `msg` formatted with status ('OK'/'FAILED'), both values and
    their absolute difference.
    """
    delta = abs(val1 - val2)
    verdict = 'OK' if delta < epsilon else 'FAILED'
    return msg.format(status=verdict, val1=val1, val2=val2, diff=delta)
###############################################################################
def assertEqualVectorsMsg(v1, v2, msg):
    """Element-wise comparison of two vectors within `epsilon`.

    Lengths must match, and every v1 element must equal the corresponding
    v2 element within epsilon. Returns `msg` formatted with the first
    failing pair, or with the first pair as a representative sample on
    success.
    """
    if len(v1) != len(v2):
        return msg.format(status="FAILED LENGTH")
    for idx in range(len(v1)):
        delta = abs(v1[idx] - v2[idx])
        if delta >= epsilon:
            return msg.format(status='FAILED i=' + str(idx),
                              val1=v1[idx], val2=v2[idx], diff=delta)
    # All elements matched; report the first pair as a sample value.
    return msg.format(status='OK', val1=v1[0], val2=v2[0],
                      diff=abs(v1[0] - v2[0]))
###############################################################################
def assertEqualMatricesMsg(mat1, mat2, msg):
    """Element-wise comparison of two matrices.

    Dimensions must be equal. For each cell, a discrepancy is printed when
    the absolute difference exceeds 1 AND the relative difference exceeds
    10% of mat1's value. Always returns an 'OK (<count>)' message when the
    dimensions match (mirroring the original behavior of printing, not
    returning, per-cell failures).
    """
    (m1, n1) = np.shape(mat1)
    (m2, n2) = np.shape(mat2)
    if m1 != m2 or n1 != n2:
        return msg.format(status="FAILED DIMENSIONS")
    count = 0
    # BUG FIX: rows were iterated with range(n1) (the column count), which
    # raised IndexError or skipped rows for non-square matrices; iterate
    # rows with range(m1) and columns with range(n1).
    for i in range(0, m1):
        for j in range(0, n1):
            count += 1
            diff = abs(mat1[i, j] - mat2[i, j])
            # NOTE(review): divides by mat1[i, j]; a zero entry yields
            # inf/nan here (numpy scalar division) — confirm acceptable.
            pctdiff = diff / mat1[i, j] * 100.0
            if diff > 1 and pctdiff > 10.0:  # if (diff >= epsilon):
                text = msg.format(status='FAILED i=' + str(i) + ' j=' + str(j),
                                  val1=mat1[i, j], val2=mat2[i, j], diff=diff)
                print(text)
    ###########
    # OK should be enough; include the [0,0] pair as a sample value.
    text = msg.format(status='OK (' + str(count) + ')', val1=mat1[0, 0],
                      val2=mat2[0, 0], diff=abs(mat1[0, 0] - mat2[0, 0]))
    return text
###############################################################################
"""
This is an equivalence test for the SingleOrigin giving the same results as the TFSingleOrigin i.e. the TensorFlow code
produces the same results.
NOTE: final matrix equivalence test commented out as it is proving problematic to implement - need better test.
"""
def testTFSingleOrigin():
    """Equivalence test: SingleOrigin (pure python) vs TFSingleOrigin (TensorFlow).

    Loads observed trip matrices (TObs*) and cost matrices (Cij*) from
    modelRunsDir (names come from `globals`), then compares CBar, Oi, Dj
    and the predicted matrices produced by both implementations, printing
    an OK/FAILED line per quantity.
    """
    # TensorFlow tests - load testing matrices
    TObs1 = loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
    TObs2 = loadMatrix(os.path.join(modelRunsDir,TObs32Filename))
    TObs3 = loadMatrix(os.path.join(modelRunsDir,TObs33Filename))
    Cij1 = loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
    Cij2 = loadMatrix(os.path.join(modelRunsDir,CijBusMinFilename))
    Cij3 = loadMatrix(os.path.join(modelRunsDir,CijRailMinFilename))
    # NOTE(review): TObs/Cij lists are only used by the commented-out
    # full-run test below.
    TObs = [TObs1,TObs2,TObs3]
    Cij = [Cij1,Cij2,Cij3]
    # now set up the two models for comparison
    testModel = SingleOrigin()
    testTFModel = TFSingleOrigin(7201)  # NOTE(review): 7201 zone count is hard-coded
    # CBar Test
    CBar = testModel.calculateCBar(TObs1,Cij1)
    TFCBar = testTFModel.calculateCBar(TObs1,Cij1)
    print(assertEqualFloatsMsg(CBar,TFCBar,'{status} CBar test: CBar={val1} TFCBar={val2} diff={diff}'))
    # Oi Test
    Oi = testModel.calculateOi(TObs1)
    TFOi = testTFModel.calculateOi(TObs1)
    print(assertEqualVectorsMsg(Oi,TFOi,'{status} Oi test: Oi={val1} TFOi={val2} diff={diff}'))
    # Dj Test
    Dj = testModel.calculateDj(TObs1)
    TFDj = testTFModel.calculateDj(TObs1)
    print(assertEqualVectorsMsg(Dj,TFDj,'{status} Dj test: Dj={val1} TFDj={val2} diff={diff}'))
    # Calibrate test - this gives us three predicted matrices and three beta values
    #testModel.TObs=TObs
    #testModel.Cij=Cij
    #testModel.isUsingConstraints=False
    #testModel.run() #version 1 - this is the main model run code
    #TPred=testModel.TPred
    #Beta=testModel.Beta
    (TPred, secs) = testModel.benchmarkRun(1,TObs1,Cij1,1.0) #version 2 - this is the optimised benchmark code
    #
    #This is an alternative equivalence test built into the TFModel class as debug code
    #NOTE: external calculation of Oi and Dj, so not suitable for a speed test
    #Oi = testTFModel.calculateOi(TObs1)
    #Dj = testTFModel.calculateDj(TObs1)
    #TFDebugTPred = testTFModel.debugRunModel(Oi,Dj,TObs1,Cij1,1.0)
    #print("TFDebugTPred[0,0]",TFDebugTPred[0,0])
    #
    #This is the real GPU code
    TFTPred=testTFModel.runModel(TObs1,Cij1,1.0) #TODO: need number of runs passed in here too, just like the cpu one
    #and compare them...
    print("Comparing TPred and TFTPred matrices for equivalence")
    #print(assertEqualMatricesMsg(TFDebugTPred,TFTPred,'{status} TFDebugTPred test: TPred={val1} TFTPred={val2} diff={diff}'))
    #todo: need better way of doing this - need mse report of whole thing and printed discrepancies
    print(assertEqualMatricesMsg(TPred,TFTPred,'{status} TFDebugTPred test: TPred={val1} TFTPred={val2} diff={diff}'))
###############################################################################
###############################################################################
#
#if __name__ == '__main__':
# main()
| 2.328125
| 2
|
uncertainty/learning/base_self.py
|
sangdon/intern2020_cocal
| 0
|
12774257
|
<reponame>sangdon/intern2020_cocal
import os, sys
import time
import numpy as np
import tensorflow as tf
import model
from learning import LearnerCls, LearnerDACls, LearnerConfPred
from learning import TempScalingCls as CalibratorCls
class BaseLearnerSelf:
    # Base class for self-training: a student model is trained from a
    # teacher's pseudo-labels, and the two swap roles every epoch.
    def __init__(self, params, params_base, model_s, model_t, params_conf=None, params_init=None, model_name_postfix=''):
        """Initialize the basic self-training class.

        params: required parameters for self-training (e.g., learning rate)
        params_base: parameters for base model training
        model_s: keras student model
        model_t: keras teacher model
        params_conf: parameters for a confidence classifier
        params_init: parameters for initial model training
        model_name_postfix: a model name postfix, which is used in saved model files
        """
        self.params = params
        self.params_base = params_base
        self.params_init = params_init
        self.params_conf = params_conf
        self.model_s = model_s
        self.model_t = model_t
        # `self.model` always aliases the current student's base model.
        self.model = self.model_s.model_base
        # Loss functions are expected to be assigned by subclasses/callers.
        self.loss_fn_train = None
        self.loss_fn_val = None
        self.loss_fn_test = None
        self.model_name_postfix = model_name_postfix
        # NOTE(review): if params lacks 'save_root', model_fn_best/final are
        # never set and _load_best/_load_final would raise AttributeError.
        if hasattr(self.params, 'save_root'):
            self.model_fn_best = os.path.join(self.params.save_root, 'model_params%s_best'%(self.model_name_postfix))
            self.model_fn_final = os.path.join(self.params.save_root, 'model_params%s_final'%(self.model_name_postfix))

    def _load_best(self):
        """Load the best keras model."""
        self.model.load_weights(self.model_fn_best)
        print("[load] the best model is loaded")

    def _check_best(self):
        """Check whether there exist files for the best keras model."""
        return os.path.exists(self.model_fn_best + '.index')

    def _load_final(self):
        """Load the final keras model."""
        self.model.load_weights(self.model_fn_final)
        print("[load] the final model is loaded")

    def _check_final(self):
        """Check whether there exist files for the final keras model."""
        return os.path.exists(self.model_fn_final + '.index')

    def _train_begin(self, ds_src, ds_tar, ds_dom):
        """A function called at the beginning of the training process. It trains an initial model for self-training.

        ds_src: source dataset loader, which contains loaders for train/val/test
        ds_tar: target dataset loader, which contains loaders for train/val/test
        ds_dom: domain dataset loader, which contains loaders for train/val/test
        """
        ## initialize a base model
        if self.params.init_advtr:
            print("## init the teacher model with adversarial training")
            model_base = self.model
            model.set_trainable(model_base, True)
            ## init an adversarial model (class name taken from params)
            mdl_adv = getattr(model, self.params_init.model_advtr)(n_in=model_base.dim_feat)
            ## init a learner
            learner = LearnerDACls(self.params_init, model.DAN(model_base, mdl_adv), model_name_postfix='_advtrinit')
            ## train the model
            learner.train([ds_src.train, ds_dom.train], None, ds_tar.test)
            ## test the model
            learner.test(ds_tar.test, ld_name='tar', verbose=True)
            print()
        else:
            print("## init the teacher model with sourceonly training")
            model_base = self.model
            model.set_trainable(model_base, True)
            ## init a learner
            learner = LearnerCls(self.params_init, model_base, model_name_postfix='_sourceonlyinit')
            ## train the model
            learner.train(ds_src.train, ds_src.val, ds_tar.test)
            ## test the model
            learner.test(ds_tar.test, ld_name='tar', verbose=True)
            print()
        print('## initialize a pseudo-labeling function')
        model_base = self.model
        model_conf = self.model_s.train.model_conf
        ## init a learner
        learner = LearnerConfPred(self.params_conf, model_conf, model_base, None, model_name_postfix='_confpredinit')
        ## set a constant confidence threshold
        model_conf.T = tf.Variable(1.0 - self.params_conf.eps)
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        print()

    def _train_end(self):
        """A function called at the end of the training process. It saves the final model and loads the final/best model, depending on an option."""
        ## save the final model
        model_fn = os.path.join(self.params.save_root, 'model_params%s_final'%(self.model_name_postfix))
        self.model.save_weights(model_fn)
        print("[final model] saved at %s"%(model_fn))
        ## load the best model
        if self.params.load_final:
            self._load_final()
        else:
            self._load_best()

    def _train_epoch_begin(self, i_epoch):
        """A function called at the beginning of each epoch. It measures epoch duration, switches student and teacher, and saves the initial student.

        i_epoch: the current epoch
        """
        self.t_epoch_begin = time.time()
        self.i_epoch = i_epoch
        ## switch student and teacher
        print("## switch: student <-> teacher")
        self.model_t, self.model_s = self.model_s, self.model_t
        self.model = self.model_s.model_base # initialize the student model with the teacher model
        # ## student copy instead of switching
        # tmp_mdl_fn = os.path.join(self.params.save_root, 'model_params_tmp')
        # self.model_t.model_base.save_weights(tmp_mdl_fn)
        # self.model.load_weights(tmp_mdl_fn)
        print()
        ## save the initial model
        if i_epoch == 1:
            ## save the initial model (also initializes self.error_val)
            self.error_val, *_ = self.validate(self.model_t.val)
            model_fn = os.path.join(self.params.save_root, 'model_params%s_best'%(self.model_name_postfix))
            self.model.save_weights(model_fn)
            print("[initial model] saved at %s"%(model_fn))
            print()

    def _train_epoch_end(self, i_epoch, ld_val, ld_te=None):
        """A function called at the end of each epoch. It prints logs and validates the current model.

        i_epoch: current epoch id
        ld_val: validation dataset loader
        ld_te: test dataset loader
        """
        ## print progress
        msg = "[epoch: %d/%d, %.2f sec]"%(i_epoch, self.params.n_epochs, time.time() - self.t_epoch_begin)
        ## print losses (any attribute named like a loss value, not a loss fn)
        for k, v, in self.__dict__.items():
            if 'loss' in k and '_fn' not in k:
                msg += '%s = %.4f, '%(k, v)
        ## validate the current model
        if i_epoch % self.params.val_period == 0:
            if ld_te is not None:
                # NOTE(review): self.test defined below returns 2 values, but
                # 3+ are unpacked here — this relies on a subclass overriding
                # test() to return (error, ece, ece_oc, ...); confirm.
                error_te, ece_te, ece_oc_te, *_ = self.test(ld_te)
                msg += 'error_test = %.4f, ECE_test = %.2f%% (%.2f%%), '%(error_te, ece_te*100.0, ece_oc_te*100.0)
            error_val, *_ = self.validate(ld_val)
            msg += 'error_val = %.4f (error_val_best = %.4f)'%(error_val, self.error_val)
            ## save the best model
            if self.error_val >= error_val:
                self.error_val = error_val
                model_fn = os.path.join(self.params.save_root, 'model_params%s_best'%(self.model_name_postfix))
                self.model.save_weights(model_fn)
                msg += ', saved!'
        print(msg)

    def _train_epoch(self, ds_src, ds_tar, ds_dom):
        """A function called at each epoch. It sets up a pseudo-labeling function, and learns a student model using a teacher.

        ds_src: source dataset loader, which contains loaders for train/val/test
        ds_tar: target dataset loader, which contains loaders for train/val/test
        ds_dom: domain dataset loader, which contains loaders for train/val/test
        """
        ## pick a confidence threshold
        model_base = self.model_t.train.model_base
        model_conf = self.model_t.train.model_conf
        ## init a learner
        print('## init a pseudo-labeling function')
        learner = LearnerConfPred(self.params_conf, model_conf, model_base, None, model_name_postfix='_confpred_epoch_%d'%(self.i_epoch))
        ## set a constant confidence threshold
        model_conf.T = tf.Variable(1.0 - self.params_conf.eps)
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)
        print()
        ## learn the student model using the teacher
        print('## learn the student model')
        model_s = self.model
        model.set_trainable(model_s, True)
        ## init a learner
        learner = LearnerCls(self.params_base, model_s, model_name_postfix='_base_epoch_%d'%(self.i_epoch))
        learner.loss_fn_train = self.loss_fn_train ## use the same training loss
        ## train the model
        learner.train(self.model_t.train, self.model_t.val, ds_tar.test)
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        print()

    def train(self, ds_src, ds_tar, ds_dom, ds_src_init, ds_tar_init, ds_dom_init):
        """The main training function.

        ds_src: source dataset loader, which contains loaders for train/val/test
        ds_tar: target dataset loader, which contains loaders for train/val/test
        ds_dom: domain dataset loader, which contains loaders for train/val/test
        ds_src_init: source dataset loader to train an initial model, which contains loaders for train/val/test
        ds_tar_init: target dataset loader to train an initial model, which contains loaders for train/val/test
        ds_dom_init: domain dataset loader to train an initial model, which contains loaders for train/val/test
        """
        # Resume path: reuse a previously trained model unless rerun is forced.
        if not self.params.rerun and self._check_final():
            if self.params.load_final:
                self._load_final()
            else:
                self._load_best()
            return
        self._train_begin(ds_src_init, ds_tar_init, ds_dom_init)
        for i in range(1, self.params.n_epochs+1):
            self._train_epoch_begin(i)
            self._train_epoch(ds_src, ds_tar, ds_dom)
            self._train_epoch_end(i, self.model_t.val, ds_tar.test)
        self._train_end()

    def validate(self, ld_val):
        """This function validates the current model using a validation dataset loader.

        ld_val: validation dataset loader
        """
        return self.test(ld_val, loss_fn=self.loss_fn_val)

    def test(self, ld_te, loss_fn=None, ld_name='', verbose=False):
        """This function tests the current model using the given dataloader.

        ld_te: test dataset loader in general, but can be anything
        loss_fn: a desired loss function to test
        ld_name: display name of the loader (currently unused here)
        verbose: True if you want to see logs (currently unused here)
        """
        loss_fn = self.loss_fn_test if loss_fn is None else loss_fn
        loss_vec = []
        for x, y in ld_te:
            loss_dict = loss_fn(x, y,
                model=lambda x: self.model(x, training=False),
                reduce='none')
            loss_vec.append(loss_dict['loss'])
        loss_vec = tf.concat(loss_vec, 0)
        loss = tf.math.reduce_mean(loss_vec)
        return loss, loss_vec
| 2.765625
| 3
|
topCoder/srms/300s/srm373/div2/the_equation.py
|
gauravsingh58/algo
| 1
|
12774258
|
class TheEquation:
    def leastSum(self, X, Y, P):
        """Return the minimum i + j over 1 <= i, j <= P such that
        (X*i + Y*j) % P == 0, or 2*P if no such pair exists.

        Brute force over all O(P^2) pairs.
        """
        # BUG FIX: `xrange` is Python 2 only; `range` behaves identically here
        # and works on both Python 2 and 3.
        best = 2 * P
        for i in range(1, P + 1):
            for j in range(1, P + 1):
                if (X * i + Y * j) % P == 0:
                    best = min(best, i + j)
        return best
| 3.125
| 3
|
drift/core/extensions/debughelpers.py
|
dgnorth/drift
| 6
|
12774259
|
<reponame>dgnorth/drift
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from flask import g
log = logging.getLogger(__name__)
def before_request():
    """Start each request with an empty list of client debug messages on `g`."""
    g.client_debug_messages = []
def after_request(response):
    """If any client debug messages were collected during the request, join
    them into the Drift-Debug-Message response header.

    The separator is a literal backslash-n sequence ("\\n" in source), not a
    real newline — HTTP headers cannot contain raw newlines.
    """
    messages = getattr(g, "client_debug_messages", [])
    if len(messages) > 0:
        response.headers["Drift-Debug-Message"] = "\\n".join(messages)
    return response
def drift_init_extension(app, **kwargs):
    """Register the debug-message request hooks on `app` and, when the app
    runs in debug mode, try to install the Flask DebugToolbar."""
    app.before_request(before_request)
    app.after_request(after_request)
    # Install DebugToolbar if applicable
    if not app.debug:
        return
    log.info("Flask server is running in DEBUG mode.")
    try:
        from flask_debugtoolbar import DebugToolbarExtension
        DebugToolbarExtension(app)
    except ImportError:
        log.info("Flask DebugToolbar not available: Do 'pip install "
                 "flask-debugtoolbar' to enable.")
| 1.984375
| 2
|
Leetcode/125. Valid Palindrome/solution2.py
|
asanoviskhak/Outtalent
| 51
|
12774260
|
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if `s` reads the same forwards and backwards,
        considering only ASCII alphanumeric characters and ignoring case.
        """
        # BUG FIX: `re` is used but never imported anywhere in this file;
        # a function-scope import keeps the fix self-contained.
        import re
        cleaned = re.sub('[^a-zA-Z0-9]', '', s).lower()
        return cleaned == cleaned[::-1]
| 3.3125
| 3
|
ydk/ydk-example-1.py
|
sambyers/devnet_learning
| 0
|
12774261
|
<filename>ydk/ydk-example-1.py
from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.models.openconfig import openconfig_bgp
import json
def config_native(native):
    """Add a Loopback0 interface configuration to the `native` model object.

    NOTE(review): this helper is defined but never called in this script.
    """
    lb = native.interface.Loopback()
    lb.name = 0
    lb.description = "PRIMARY ROUTER LOOPBACK"
    lb.ip.address.primary.address = "172.16.255.1"
    lb.ip.address.primary.mask = "255.255.255.255"
    native.interface.loopback.append(lb)
if __name__ == "__main__":
    # Read the first host entry from the inventory file.
    with open('hosts.json', 'r') as fh:
        hosts = json.loads(fh.read())
    host = hosts[0]
    # BUG FIX: the password argument contained anonymizer residue
    # ("<PASSWORD>('password')"), a syntax error; restored the obvious
    # host.get('password') lookup matching the other credential fields.
    provider = NetconfServiceProvider(address=host.get('host'),
                                      port=int(host.get('netconf_port')),
                                      username=host.get('username'),
                                      password=host.get('password'),
                                      protocol='ssh')
    crud = CRUDService()
    # Build an openconfig BGP config (AS 65001, fixed router id) and push it.
    bgp = openconfig_bgp.Bgp()
    bgp.global_.config.as_ = 65001
    bgp.global_.config.router_id = '10.0.0.1'
    result = crud.create(provider, bgp)
| 2.5625
| 3
|
sparse/_compressed/__init__.py
|
pettni/sparse
| 0
|
12774262
|
<filename>sparse/_compressed/__init__.py
# Re-export the GXCS compressed-array class at package level.
from .compressed import GXCS
| 1.164063
| 1
|
enn/losses/prior_losses.py
|
MaxGhenis/enn
| 130
|
12774263
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prior losses are losses that regulate towards the prior.
These might take the form of weight regularization, or sampling "fake data".
These prior_losses are used in e.g. supervised/prior_experiment.py.
"""
from absl import logging
import dataclasses
from enn import base
from enn import utils
import haiku as hk
import jax
import jax.numpy as jnp
import typing_extensions
class FakeInputGenerator(typing_extensions.Protocol):
    # Structural interface: any callable matching __call__ below qualifies.

    def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array:
        """Generates a fake batch of input=x for use in prior regularization."""
@dataclasses.dataclass
class MatchingGaussianData(FakeInputGenerator):
  """Generates a fake batch of input=x for use in prior regularization."""

  scale: float = 1.  # std-dev multiplier applied to the unit Gaussian draws

  def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array:
    """Returns Gaussian noise with the same shape as batch.x, times scale."""
    return jax.random.normal(key, batch.x.shape) * self.scale
def make_gaussian_dataset(batch_size: int,
                          input_dim: int,
                          seed: int = 0) -> base.BatchIterator:
  """Returns a batch iterator over random Gaussian data."""
  # The lambda's argument is a PRNG key drawn from the sequence below,
  # not a data tensor; jit caches the compiled sampler across iterations.
  sample_fn = jax.jit(lambda x: jax.random.normal(x, [batch_size, input_dim]))
  def batch_iterator():
    rng = hk.PRNGSequence(seed)
    while True:
      x = sample_fn(next(rng))  # x: [batch_size, input_dim]
      # Labels are constant ones; callers only use x for prior regularization.
      yield base.Batch(x, y=jnp.ones([x.shape[0], 1]))
  return batch_iterator()
def variance_kl(var: base.Array, pred_log_var: base.Array) -> base.Array:
  """Compute the KL divergence between Gaussian variance with matched means.

  KL(N(m, var) || N(m, pred_var)) with pred_var = exp(pred_log_var).
  """
  pred_var = jnp.exp(pred_log_var)
  log_ratio = pred_log_var - jnp.log(var)
  return 0.5 * (log_ratio + var / pred_var - 1)
def generate_batched_forward_at_data(
    num_index_sample: int,
    x: base.Array,
    enn: base.EpistemicNetwork,
    params: hk.Params,
    key: base.RngKey) -> base.Output:
  """Generate enn output for batch of data with indices based on random key."""
  batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_sample)
  # vmap only over the sampled indices; params and data x are shared.
  batched_forward = jax.vmap(enn.apply, in_axes=[None, None, 0])
  batched_out = batched_forward(params, x, batched_indexer(key))
  return batched_out
def l2_training_penalty(batched_out: base.Output):
  """Penalize the L2 magnitude of the training network."""
  if not isinstance(batched_out, base.OutputWithPrior):
    # Without an explicit train/prior split there is nothing to penalize.
    logging.warning('L2 weight penalty only works for OutputWithPrior.')
    return 0.
  return 0.5 * jnp.mean(jnp.square(batched_out.train))
def distill_mean_regression(batched_out: base.Output,
                            distill_out: base.Output) -> base.Array:
  """Train the mean of the regression to the distill network."""
  # Average the ENN predictions over the sampled-index axis, then match
  # the distill network's output to it with a squared error.
  mean_over_indices = jnp.mean(utils.parse_net_output(batched_out), axis=0)
  target = jnp.squeeze(utils.parse_net_output(distill_out))
  err = target - mean_over_indices
  return jnp.mean(jnp.square(err))
def distill_mean_classification(batched_out: base.Output,
                                distill_out: base.Output) -> base.Array:
  """Train the mean of the classification to the distill network."""
  # Mean predictive distribution over the sampled-index axis.
  probs_per_index = jax.nn.softmax(utils.parse_net_output(batched_out), axis=-1)
  mean_probs = jnp.mean(probs_per_index, axis=0)
  distill_probs = jax.nn.softmax(utils.parse_net_output(distill_out), axis=-1)
  # KL(mean_probs || distill_probs), summed over classes, averaged over batch.
  kl_per_example = jnp.sum(
      mean_probs * jnp.log(mean_probs / distill_probs), axis=1)
  return jnp.mean(kl_per_example)
def distill_var_regression(batched_out: base.Output,
                           distill_out: base.Output) -> base.Array:
  """Train the variance of the regression to the distill network."""
  assert isinstance(distill_out, base.OutputWithPrior)
  # Empirical variance across the sampled-index axis, matched against the
  # distill network's predicted log-variance via a Gaussian KL.
  observed_var = jnp.var(utils.parse_net_output(batched_out), axis=0)
  return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
def distill_var_classification(batched_out: base.Output,
                               distill_out: base.Output) -> base.Array:
  """Train the variance of the classification to the distill network."""
  assert isinstance(distill_out, base.OutputWithPrior)
  batched_logits = utils.parse_net_output(batched_out)
  # NOTE(review): unlike distill_var_regression this jnp.var has no axis=0,
  # so it reduces over ALL axes to a scalar, which then broadcasts against
  # the per-element 'log_var'. Possibly intentional, but confirm it should
  # not be jnp.var(..., axis=0) like the regression sibling.
  observed_var = jnp.var(jax.nn.softmax(batched_logits, axis=-1))
  return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
@dataclasses.dataclass
class RegressionPriorLoss(base.LossFn):
  """Regress fake data back to prior, and distill mean/var to mean_index.

  Attributes:
    num_index_sample: number of index samples used to estimate the ENN's
      output statistics at the fake inputs.
    input_generator: callable producing fake inputs shaped like the batch.
    scale: multiplier on the L2 prior-regularization term.
    distill_index: if True, also distill mean/var to the indexer's mean_index.
  """
  num_index_sample: int
  # BUG FIX: the previous class-level default `MatchingGaussianData()` was a
  # single mutable instance shared by every loss object; default_factory
  # gives each instance its own generator.
  input_generator: FakeInputGenerator = dataclasses.field(
      default_factory=MatchingGaussianData)
  scale: float = 1.
  distill_index: bool = False

  def __call__(self, enn: base.EpistemicNetwork, params: hk.Params,
               batch: base.Batch, key: base.RngKey) -> base.Array:
    """Returns (loss, metrics) for prior regularization on fake inputs."""
    index_key, data_key = jax.random.split(key)
    fake_x = self.input_generator(batch, data_key)
    # TODO(author2): Complete prior loss refactor --> MultilossExperiment
    batched_out = generate_batched_forward_at_data(
        self.num_index_sample, fake_x, enn, params, index_key)
    # Regularize towards prior output
    loss = self.scale * l2_training_penalty(batched_out)
    # Distill aggregate stats to the "mean_index"
    if hasattr(enn.indexer, 'mean_index') and self.distill_index:
      distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
      loss += distill_mean_regression(batched_out, distill_out)
      loss += distill_var_regression(batched_out, distill_out)
    return loss, {}
@dataclasses.dataclass
class ClassificationPriorLoss(base.LossFn):
  """Penalize fake data back to prior, and distill mean/var to mean_index.

  Attributes:
    num_index_sample: number of index samples used to estimate the ENN's
      output statistics at the fake inputs.
    input_generator: callable producing fake inputs shaped like the batch.
    scale: multiplier on the L2 prior-regularization term.
    distill_index: if True, also distill mean/var to the indexer's mean_index.
  """
  num_index_sample: int
  # BUG FIX: the previous class-level default `MatchingGaussianData()` was a
  # single mutable instance shared by every loss object; default_factory
  # gives each instance its own generator.
  input_generator: FakeInputGenerator = dataclasses.field(
      default_factory=MatchingGaussianData)
  scale: float = 1.
  distill_index: bool = False

  def __call__(self, enn: base.EpistemicNetwork, params: hk.Params,
               batch: base.Batch, key: base.RngKey) -> base.Array:
    """Returns (loss, metrics) for prior regularization on fake inputs."""
    index_key, data_key = jax.random.split(key)
    fake_x = self.input_generator(batch, data_key)
    # TODO(author2): Complete prior loss refactor --> MultilossExperiment
    batched_out = generate_batched_forward_at_data(
        self.num_index_sample, fake_x, enn, params, index_key)
    # Regularize towards prior output
    loss = self.scale * l2_training_penalty(batched_out)
    # Distill aggregate stats to the "mean_index"
    if hasattr(enn.indexer, 'mean_index') and self.distill_index:
      distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
      loss += distill_mean_classification(batched_out, distill_out)
      loss += distill_var_classification(batched_out, distill_out)
    return loss, {}
| 2.421875
| 2
|
application.py
|
jbzdarkid/witness-puzzles
| 24
|
12774264
|
import os
from base64 import b64decode
from flask import render_template, request
from io import BytesIO
from json import dumps as to_json_string
from traceback import format_exc
from flask_wtf.csrf import CSRFError
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.exceptions import HTTPException
from application_database import *
from application_utils import *
from application_login import *
# Home page
host_redirect('/pages/browse.html', '/')
host_redirect('/pages/browse.html', '/index.html')
# Short name redirects
host_redirect('/pages/browse.html', '/browse.html')
host_redirect('/pages/editor.html', '/editor.html')
host_redirect('/pages/telemetry.html', '/telemetry.html')
host_redirect('/pages/triangles.html', '/triangles.html')
host_redirect('/pages/validate.html', '/validate.html')
host_redirect('/pages/webcrow.html', '/webcrow.html')
# Large blocks of data
host_statically('data')
host_statically('engine')
host_statically('sourcemaps')
# Actual page sources
host_statically('pages/browse.js')
host_statically('pages/editor.html')
host_statically('pages/editor.js')
host_statically('pages/telemetry.js')
host_statically('pages/triangles.html')
host_statically('pages/triangles.js')
host_statically('pages/validate.html')
host_statically('pages/webcrow.html')
host_statically('pages/webcrow.js')
# Development-only pages: registered only when the app runs in debug mode.
if application.debug:
    host_redirect('/pages/test.html', '/test.html')
    host_redirect('/pages/verify_puzzles.html', '/verify_puzzles.html')
    host_redirect('/pages/editor_test.html', '/editor_test.html')
    host_redirect('/pages/challenge.html', '/challenge.html')
    host_statically('pages/test.html')
    host_statically('pages/test.js')
    host_statically('pages/editor_test.html')
    host_statically('pages/editor_test.js')
    host_statically('pages/verify_puzzles.html')
    host_statically('pages/verify_puzzles.js')
    host_statically('pages/challenge.html')
    host_statically('pages/challenge.js')
    host_statically('pages/_UTM.html')
    host_statically('pages/_UTM.js')
    host_statically('pages/_UTM2.js')
    host_statically('pages/left_door.html')
    host_statically('pages/left_door.js')
    host_statically('images')
def page_not_found(error=None):
    """Generic 404 handler; `error` is supplied by Flask but unused."""
    return render_template('404_generic.html'), 404
# Route unknown URLs and CSRF failures to the same generic 404 page.
application.register_error_handler(404, page_not_found)
application.register_error_handler(CSRFError, page_not_found)
# In case of a database error, cancel any active transactions to prevent the database getting stuck.
def handle_database_error(exc):
    """Roll back any open transaction on SQLAlchemy errors, then return 500."""
    if db.session.is_active: # db imported from application_database.py
        db.session.rollback()
    return '', 500
application.register_error_handler(SQLAlchemyError, handle_database_error)
# We do not actually want to handle HTTP exceptions (e.g. 405), we want to just return them to the caller.
# https://flask.palletsprojects.com/en/2.0.x/errorhandling/#generic-exception-handlers
# The identity lambda makes Flask propagate the HTTPException unchanged.
application.register_error_handler(HTTPException, lambda exc: exc)
def handle_exception(exc):
    """Last-resort handler: record the full traceback as feedback, return 500."""
    message = f'Caught a {type(exc).__name__}: {format_exc()}'
    add_feedback(message)
    return '', 500
# Catch-all for anything the more specific handlers above did not claim.
application.register_error_handler(Exception, handle_exception)
# Publishing puzzles
@csrf.exempt
def publish():
    """Validate a submitted solution, capture its screenshot, store the puzzle.

    Expects form field 'solution' (JSON). Returns the new puzzle's display
    hash on success, or the validation error message with HTTP 400.
    """
    solution_json = request.form['solution']
    data = validate_and_capture_image(solution_json)
    if 'error' in data:
        return data['error'], 400
    title = data.get('title', 'Unnamed Puzzle')
    # [22:] to remove the "data:image/png;base64," prefix
    image = BytesIO(b64decode(data['screenshot'][22:]))
    puzzle_json = data['puzzle_json']
    display_hash = create_puzzle(title, puzzle_json, solution_json, image)
    return display_hash, 200
# GETs to /publish get the generic 404 rather than a 405 method error.
application.add_url_rule('/publish', 'publish', publish, methods=['POST'])
application.add_url_rule('/publish', 'publish_get', page_not_found, methods=['GET'])
# Playing published puzzles
def play(display_hash):
    """Render the play page for a published puzzle, or a puzzle-specific 404."""
    puzzle = get_puzzle(display_hash)
    if not puzzle or not puzzle.puzzle_json:
        return render_template('404_puzzle.html', display_hash=display_hash)
    context = {
        'puzzle': puzzle.puzzle_json,
        'display_hash': display_hash,
        'title': puzzle.title,
        'image': puzzle.url,
    }
    return render_template('play_template.html', **context)
application.add_url_rule('/play/<display_hash>', 'play', play)
# Getting puzzles from the DB to show on the browse page
def browse():
    """Return up to `limit` published puzzles as JSON for the browse page.

    Query args:
      sort_type: column to sort by (default 'date')
      order: 'asc' or 'desc' (default: backend default)
      offset, limit: paging window (defaults 0 / 100)

    Returns 204 with an empty body when there are no matching puzzles,
    400 when offset/limit are not integers.
    """
    sort_type = request.args.get('sort_type', 'date')  # date
    order = request.args.get('order', '')  # asc, desc
    # BUG FIX: query-string values arrive as strings while the defaults are
    # ints; normalise both paths to int so get_puzzles always sees integers.
    try:
        offset = int(request.args.get('offset', 0))
        limit = int(request.args.get('limit', 100))
    except ValueError:
        return 'offset and limit must be integers', 400
    puzzles = get_puzzles(sort_type, order, offset, limit)
    output = [{
        'display_hash': puzzle.display_hash,
        'url': puzzle.url,
        'title': puzzle.title,
    } for puzzle in puzzles]
    if not output:
        return '', 204
    return to_json_string(output), 200
# Defaults to GET only; the browse page polls this endpoint.
application.add_url_rule('/browse', 'browse', browse)
@csrf.exempt
def telemetry():
    """Record a client telemetry event (feedback, error, puzzle start/solve).

    The event payload comes from POSTed form fields; the puzzle hash is
    inferred from the HTTP referer when the event came from a play page.
    """
    kwargs = {
        'session_id': request.form['session_id'],
        'event_type': request.form['event_type'],
        'server_version': '%version%',
        'client_version': request.form['version'],
        'page': request.environ.get('HTTP_REFERER', ''),
    }
    if kwargs['page']:
        # NOTE(review): page_parts[-2] raises IndexError for referers with a
        # single path segment; today that falls through to handle_exception's
        # 500 — confirm whether that is acceptable.
        page_parts = kwargs['page'].split('/')
        if page_parts[-2] == 'play':
            kwargs['puzzle'] = page_parts[-1]
    if kwargs['event_type'] in ['feedback', 'error']: # Users providing feedback and javascript errors
        add_telemetry(**kwargs, data=request.form['data'])
    elif kwargs['event_type'] == 'puzzle_start': # Page load on play_template
        add_puzzle_start(**kwargs)
    elif kwargs['event_type'] == 'puzzle_solve': # Successful solve on play_template
        add_puzzle_solve(**kwargs)
    else:
        print('Unknown event type: ' + kwargs['event_type'])
    return '', 200
# GETs to /telemetry get the generic 404 rather than a 405 method error.
application.add_url_rule('/telemetry', 'telemetry', telemetry, methods=['POST'])
application.add_url_rule('/telemetry', 'telemetry_get', page_not_found, methods=['GET'])
# Verifying that puzzles are valid
if application.debug:
    def verify_puzzles():
        """Debug-only page that renders the contents of puzzles.tsv."""
        import csv
        with open('puzzles.tsv', newline='') as csvfile:
            puzzles = [row for row in csv.reader(csvfile, delimiter='\t')]
        return render_template('verify_puzzles.html', puzzles=puzzles)
    application.add_url_rule('/pages/verify_puzzles.html', 'verify_puzzles', verify_puzzles)
if __name__ == '__main__':
    # Collect every project file (except images and git internals) so the
    # development server reloads whenever one of them changes.
    extra_files = []
    for root, dirs, files in os.walk('.'):
        if 'images' in root or '.git' in root:
            continue
        extra_files.extend(root + os.sep + file for file in files)
    # To make this server visible from the local network, add host='0.0.0.0'
    application.run(extra_files=extra_files)
| 1.867188
| 2
|
homeassistant/components/melcloud/const.py
|
mengwangk/home-assistant
| 4
|
12774265
|
<filename>homeassistant/components/melcloud/const.py
"""Constants for the MELCloud Climate integration."""
import pymelcloud.ata_device as ata_device
from pymelcloud.const import UNIT_TEMP_CELSIUS, UNIT_TEMP_FAHRENHEIT
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
)
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
# Home Assistant integration domain.
DOMAIN = "melcloud"
# Map pymelcloud ATA operation modes onto Home Assistant HVAC modes.
HVAC_MODE_LOOKUP = {
    ata_device.OPERATION_MODE_HEAT: HVAC_MODE_HEAT,
    ata_device.OPERATION_MODE_DRY: HVAC_MODE_DRY,
    ata_device.OPERATION_MODE_COOL: HVAC_MODE_COOL,
    ata_device.OPERATION_MODE_FAN_ONLY: HVAC_MODE_FAN_ONLY,
    ata_device.OPERATION_MODE_HEAT_COOL: HVAC_MODE_HEAT_COOL,
}
# Inverse map: Home Assistant HVAC mode -> pymelcloud operation mode.
HVAC_MODE_REVERSE_LOOKUP = {v: k for k, v in HVAC_MODE_LOOKUP.items()}
# Map pymelcloud temperature units onto Home Assistant temperature units.
TEMP_UNIT_LOOKUP = {
    UNIT_TEMP_CELSIUS: TEMP_CELSIUS,
    UNIT_TEMP_FAHRENHEIT: TEMP_FAHRENHEIT,
}
# Inverse map: Home Assistant unit -> pymelcloud unit.
TEMP_UNIT_REVERSE_LOOKUP = {v: k for k, v in TEMP_UNIT_LOOKUP.items()}
| 1.921875
| 2
|
test/SearchSpaceAdv/ShapeClassification/shape_class.py
|
schroeder-dewitt/polyomino-self-assembly
| 0
|
12774266
|
import math
# Fixed defects in the original script: Python-2 print statements, the
# builtin name `list` shadowed, every label copy-pasted as "SingleBlock",
# integer division in the first stanza, and a duplicated final stanza.
def shape_stats(cells):
    """Return ((grav_x, grav_y), (d_sum_x, d_sum_y)) for a list of (x, y) cells.

    The gravity centre is the mean coordinate; d_sum_* are the summed
    squared deviations from that centre along each axis.
    """
    n = float(len(cells))
    grav_x = sum(c[0] for c in cells) / n
    grav_y = sum(c[1] for c in cells) / n
    d_x = sum((c[0] - grav_x) ** 2 for c in cells)
    d_y = sum((c[1] - grav_y) ** 2 for c in cells)
    return (grav_x, grav_y), (d_x, d_y)


# Shape name -> occupied unit cells.
SHAPES = [
    ("SingleBlock", [(0, 0)]),
    ("QuadroBlock", [(0, 0), (1, 1), (1, 0), (0, 1)]),
    ("Cath1", [(0, 0), (1, 1), (1, 0), (0, 1), (-1, 1), (1, 2), (0, -1), (2, 0)]),
    ("Hollow", [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1)]),
]

for name, cells in SHAPES:
    (grav_x, grav_y), (d_sum_x, d_sum_y) = shape_stats(cells)
    print(name + ": Grav. (", grav_x, ", ", grav_y, ")")
    print(name + ": ", len(cells), ":(", d_sum_x, ", ", d_sum_y, ")")
| 3.296875
| 3
|
autotest/test_016.py
|
pygsflow/pygsflow
| 17
|
12774267
|
# test sfr renumbering schemes and other random utilities
import gsflow
import os
from gsflow.utils import SfrRenumber
# Directory containing this test file; used to locate the example data.
ws = os.path.abspath(os.path.dirname(__file__))
def test_sfr_renumber():
    # simple test to ensure no crashes in the renumbering schemes
    # expand this later to test LAK, AG, and GAGE
    local_ws = os.path.join(ws, "..", "examples", "data", "sagehen", "gsflow")
    control_file = "saghen_new_cont.control"
    gsf = gsflow.GsflowModel.load_from_file(os.path.join(local_ws, control_file))
    ml = gsf.mf
    # Exercise every renumbering scheme: topology (the default), dis, and sfr.
    for extra_kwargs in ({}, {"scheme": "dis"}, {"scheme": "sfr"}):
        renumberer = SfrRenumber(model=ml, **extra_kwargs)
        renumberer.renumber_sfr()
        renumberer.renumber_all()
# Allow running this test directly, outside of pytest.
if __name__ == "__main__":
    test_sfr_renumber()
| 2.078125
| 2
|
excel.py
|
Zuoxiaoxian/Excel_Oracle_conf_log
| 0
|
12774268
|
<gh_stars>0
# -*- coding: utf-8 -*-
# 作者 :xiaoxianzuo.zuo
# QQ :1980179070
# 文件名 : excel_01.py
# 新建时间 :2018/4/12/012 18:20
import os
import openpyxl
import re
#example.xlsx需要位于当前工作目录中才能使用它,不是就要绝对路径!
# 默认行高、列宽
# default_row_h = 20
# default_col_w = 10
class ParseSheetZxx(object):
    """Parses every .xlsx workbook under a directory into ordered cell lists.

    For the first sheet of each workbook it separates merged cell ranges from
    plain cells, reads values, sizes and border styles for both, and returns
    one list per workbook of
    [position, value, rstart, rend, erow, ecol, width, height, isedit, bold]
    entries sorted by row then column.

    NOTE(review): end_parse_merger_lists stashes running totals on self.width
    and self.height and patches placeholder entries afterwards, so the method
    call order in get_merger_and_all is load-bearing; code left untouched.
    """
    def __init__(self, excel_path, default_row_h=20, default_col_w=10):
        # excel_path: directory scanned recursively for workbooks.
        # default_row_h / default_col_w: fallback cell sizes when the sheet
        # does not define explicit dimensions.
        self.excel_path = excel_path
        self.default_row_h = default_row_h
        self.default_col_w = default_col_w
    '''# 解析指定文件夹下的Excel表!'''
    # Walk excel_path and load every file as a workbook; errors are printed
    # and swallowed so one bad file does not abort the whole scan.
    def get_sheets(self):
        '''
        :return: [(),]
        '''
        self._wb_sheetnames = []
        walks = os.walk(self.excel_path)
        #<generator object walk at 0x000002458B6198E0>
        try:
            for current_path, subfolders, filesnames in walks:
                for filename in filesnames:
                    file_name = os.path.join(current_path, filename)
                    # file_name = F:\Python_Projects\py_excel\excel_workerbook\heart_rember.xlsx
                    _wb = openpyxl.load_workbook(file_name)
                    _sheetnames = _wb.sheetnames
                    n = _wb, _sheetnames
                    self._wb_sheetnames.append(n)
        except Exception as e:
            print("错误!", e)
        wb_sheetnames = self._wb_sheetnames
        return wb_sheetnames
    '''正则解析, 将数字与字母分开'''
    # Split a cell reference like "J16" into its column letters and row
    # digits; with `end` given, does the same for both ends of a range.
    def re_parse(self, start, end=None):
        pattern = re.compile(r'[A-Z]+')
        if end == None:
            result = pattern.match(start)
            result = result.group()
            result_index = start.index(result)
            start_str, start_num = result, start[result_index + 1:]
            start = start_str, start_num
            return start
        else:
            result = pattern.match(start)
            result = result.group()
            result_index = start.index(result)
            start_str, start_num = result, start[result_index + 1:]
            start = (start_str, start_num)
            result_end = pattern.match(end)
            result_end = result_end.group()
            result_index = end.index(result_end)
            end_str, end_num = result_end, end[result_index + 1:]
            end = (end_str, end_num)
            return start, end
    '''解析合并。。。得到:[[('J', 'L', '1', '1'),('A', 'A', '2', '2'), ], [('J', 'L', '1', '1'), ('A', 'A', '2', '2'), ]]'''
    # Convert each sheet's merged ranges ("J16:L16") into tuples of
    # (start_col, end_col, start_row, end_row), deduplicated per sheet.
    def parse_merger(self, mergers_lists):
        '''
        :param mergers_lists:合并项,[<MultiCellRange [J16:L16 A20:A24],<MultiCellRange [J16:L16 A20:A24]>]
        :return:[[('J', '1', 'L', '1'),('A', '2', 'A', '2'), ], [('J', '1', 'L', '1'), ('A', '2', 'A', '2'), ]]
        '''
        # print(mergers_lists)
        start_end_list_all = []
        for merger_list in mergers_lists:
            start_end_list = []
            for merger in merger_list:
                merger = str(merger).split(":")
                start, end = merger[0], merger[1]
                # 正则解析,将字母与数字分开!
                start, end = self.re_parse(start, end)
                start_end = (start[0], end[0], start[1], end[1])
                start_end_list.append(start_end)
            start_end_list = list(set(start_end_list))
            start_end_list_all.append(start_end_list)
        # print("start_end_list_all", len(start_end_list_all), start_end_list_all)
        return start_end_list_all
    '''解析全部。。。得到:[[('A', '1'), ('A', '2')], [('A', '1'), ('A', '2')]]'''
    # Enumerate every (column_letter, row_number) position of each sheet
    # from its max_row/max_column. NOTE(review): chr(c + 65) assumes at
    # most 26 columns ('A'..'Z') — confirm wider sheets are out of scope.
    def parse_all(self, max_row_max_column):
        start_end_all_lists = []
        for row_column in max_row_max_column:
            start_end_list = []
            row, column = row_column[0], row_column[1]
            for c in range(column):
                column_chr = chr(c + 65)
                for r in range(row):
                    start_end = (column_chr, str(r + 1))
                    start_end_list.append(start_end)
            start_end_all_lists.append(start_end_list)
        return start_end_all_lists
    '''根据全部与和合并项,得到没有合并的项!'''
    # Remove every position covered by a merged range from the full
    # position list, leaving only stand-alone (unmerged) cells.
    def unmerger(self, start_end_list_all, start_end_all_lists):
        # print("解析合并-位置", start_end_list_all)
        # print("解析全部-位置", start_end_all_lists)
        for i in range(len(start_end_all_lists)):
            for start_end_merger in start_end_list_all[i]:
                # print("合并-位置: ", start_end_merger)
                # print("全部-位置: ", start_end_all)
                merger_weizhi0 = start_end_merger[0]
                merger_weizhi1 = start_end_merger[1]
                # 移除合并行
                if merger_weizhi0 == merger_weizhi1:
                    merger_weizhi2 = start_end_merger[2]
                    merger_weizhi3 = start_end_merger[3]
                    for start_end_all in start_end_all_lists[i]:
                        if start_end_all[0] == merger_weizhi0:
                            cha_zhi = int(merger_weizhi3) - int(merger_weizhi2)
                            sha_chu = (start_end_all[0], merger_weizhi2)
                            if sha_chu in start_end_all_lists[i]:
                                mergen_index = start_end_all_lists[i].index(sha_chu)
                                # print("mergen_index", mergen_index)
                                for k in range(cha_zhi + 1):
                                    start_end_all_lists[i].remove(start_end_all_lists[i][mergen_index])
                # 移除合并列
                else:
                    merger_weizhi2 = start_end_merger[2]
                    cha_zhi = ord(start_end_merger[1]) - ord(start_end_merger[0])
                    for l in range(cha_zhi + 1):
                        shan_chu = (chr(ord(merger_weizhi0) + l), merger_weizhi2)
                        # print("删除的合并列: ", shan_chu)
                        if shan_chu in start_end_all_lists[i]:
                            # print("删除!")
                            start_end_all_lists[i].remove(shan_chu)
            # print(" 没有合并的列:", start_end_all_lists[i])
        # print("最终: ", start_end_all_lists[i])
        # print("移除合并行-移除合并列", start_end_all_lists)
        return start_end_all_lists
    '''解析表格的边框样式, 主要是是否有边框!'''
    # Encode the four border sides (top, right, bottom, left) as a string
    # of '1'/'0' flags: '1' for a thin border, '0' for none.
    def is_or_thin(self, *args):
        is_or_thin = []
        bold_value = ''
        for thin_none in args:
            if thin_none == None:
                thin_none = '0'
            elif thin_none == "thin":
                thin_none = '1'
            is_or_thin.append(thin_none)
        for i in is_or_thin:
            bold_value += i
        return bold_value
    '''最终解析没有合并的,得到指定格式!
    得到: 如,[(('A', '24'), '均布8点 \n跳动值', 6, 15, 10, 0, 10, 200, 0, '1101'),]
    wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit, bold_value
    [[((wei_zhi), alue, rstart, rend, erow, ecol, width, height, isedit, bold_value), (),()]]
    '''
    # Build the final record for each unmerged cell: value (or 'null' with
    # isedit=1 when empty), default sizes, and the encoded border flags.
    def end_parse_unmerger_lists(self, unmerger_lists):
        end_parse_unmerger_lists = []
        # print("len(unmerger_lists)", len(unmerger_lists), unmerger_lists)
        for i in range(len(unmerger_lists)):
            end_parse_unmerger_list = []
            unmerger_list = unmerger_lists[i]
            sheet = self.sheet_list[i]
            for end_parse in unmerger_list:
                wei_zhi = end_parse
                inner = wei_zhi[0] + wei_zhi[1]
                value = sheet[inner].value
                if value == None:
                    value = 'null'
                    isedit = 1
                else:
                    isedit = 0
                rend = rstart = int(wei_zhi[1])
                erow = ecol = 1
                width, height = self.default_col_w, self.default_row_h
                border = sheet[inner].border
                inner_top, inner_right, inner_bottom, inner_left = border.top.style, border.right.style, border.bottom.style, border.left.style
                bold_value = self.is_or_thin(inner_top, inner_right, inner_bottom, inner_left)
                # print("!!!!!!!!!!!!!!!:", wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit, bold_value)
                end_parse_unmerger_list.append([wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit, bold_value])
            end_parse_unmerger_lists.append(end_parse_unmerger_list)
        return end_parse_unmerger_lists
    '''最终解析 合并了的项!wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit, bold_value'''
    # Build the final record for each merged range. Vertical merges sum row
    # heights (patched in via the 'height' placeholder); horizontal merges
    # sum column widths (patched in via the 'width' placeholder).
    def end_parse_merger_lists(self, start_end_list_all):
        end_parse_merger_lists = []
        for i in range(len(start_end_list_all)):
            end_parse_merger_list = []
            merger_list = start_end_list_all[i]
            sheet = self.sheet_list[i]
            for merger in merger_list:
                wei_zhi = (merger[0], merger[2])
                inner = merger[0] + merger[2]
                value = sheet[inner].value
                if value == None:
                    value = 'null'
                    isedit = 1
                else:
                    isedit = 0
                rstart = int(merger[2])
                rend = int(merger[3])
                # 解析合并项的 宽高!行高:sheet.row_dimensions[6].height,列宽:sheet.column_dimensions['A'].width
                if merger[0] == merger[1]:
                    erow = rend - rstart
                    ecol = 1
                    width = int(sheet.column_dimensions[merger[0]].width)
                    height = 0
                    for r in range(rstart, rend + 1):
                        height += sheet.row_dimensions[r].height
                        self.height = int(height)
                        inner = merger[0] + str(r)
                        # print("=====", inner) #A7
                        # 表格边框!
                        border = sheet[inner].border
                        inner_top, inner_right, inner_bottom, inner_left = border.top.style, border.right.style, border.bottom.style, border.left.style
                        bold_value = self.is_or_thin(inner_top, inner_right, inner_bottom, inner_left)
                    end_parse_merger_list.append([wei_zhi, value, rstart, rend, erow, ecol, width, 'height', isedit, bold_value])
                    # print("height", height)
                    # print("end_parse_merger_list", end_parse_merger_list)
                else:
                    erow = 1
                    ecol = ord(merger[1]) - ord(merger[0]) + 1
                    height = int(sheet.row_dimensions[rstart].height)
                    width_ = 0
                    for e in range(ecol):
                        e = chr(ord(merger[0]) + e)
                        width = sheet.column_dimensions[e].width
                        if width == None:
                            width = self.default_col_w
                        width_ += width
                        inner = e + merger[2]
                        # print("=====", inner)
                        # 表格边框!
                        border = sheet[inner].border
                        inner_top, inner_right, inner_bottom, inner_left = border.top.style, border.right.style, border.bottom.style, border.left.style
                        bold_value = self.is_or_thin(inner_top, inner_right, inner_bottom, inner_left)
                    end_parse_merger_list.append([wei_zhi, value, rstart, rend, erow, ecol, 'width', height, isedit,bold_value])
                    width = int(width_)
                    self.width = width
                    # print("end_parse_merger_list", len(end_parse_merger_list))
                    # end_parse_merger_list[6] = width
            # Replace the placeholders with the last accumulated totals.
            for i in end_parse_merger_list:
                if i[6] == 'width':
                    i[6] = self.width
                if i[7] == 'height':
                    i[7] = self.height
            end_parse_merger_lists.append(end_parse_merger_list)
            # print("合并:wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit", wei_zhi, value, rstart, rend, erow, ecol, width, height, isedit)
        # print("最终解析的,合并: ", end_parse_merger_lists)
        return end_parse_merger_lists
    '''定义排序规则!'''
    # Sort key helpers: by column letter, and by numeric row.
    def order_list_lie(self, key):
        # print(key[0], '>>>>>', key[1])
        return key[0][0]
    def order_list_row(self, key):
        return int(key[0][1])
    ''' 排序:将数据进一步排序! '''
    # Final in-place sort of every sheet's records by (row, column).
    def dict_orser(self, zong_lists):
        # 排序:将数据进一步排序!
        for zong_list in zong_lists:
            zong_list.sort(
                key=lambda x: (int(x[0][1]), x[0][0])
            )
        # print("zong_lists", zong_lists)
        return zong_lists
        # zong_list2 = []
        # for zong in zong_list:
        #     zong_list2.append([zong[0][0], zong[0][1], zong[1: ]])
        # print("zong_list2", zong_list2)
        # zong_list2.sort(
        #     key=lambda x: (int(x[1]), x[0])
        # )
        # zong_lists2.append(zong_list2)
        #
        # # zong_lists2.append(zong_list2)
        # print("zong_lists2", zong_lists2)
        # zong_lists = zong_lists2
        # return zong_lists2
    ''' 排序:将合并的与没有合并的进行,排序 '''
    # Concatenate merged and unmerged records per sheet, pre-sorted by row.
    def merger_unmerger_order(self, end_parse_unmerger_lists, end_parse_merger_lists):
        zong_lists = []
        for i in range(len(end_parse_unmerger_lists)):
            end_parse_unmerger_list = end_parse_unmerger_lists[i]
            end_parse_merger_list = end_parse_merger_lists[i]
            merger_unmerger_list = end_parse_unmerger_list + end_parse_merger_list
            # 排序。。。。。。
            merger_unmerger_list.sort(key=self.order_list_row)
            # merger_unmerger_list.sort(key=self.order_list_lie)
            # for i in merger_unmerger_list:
            #     print(i)
            zong_lists.append(merger_unmerger_list)
        return zong_lists
    '''得到sheet表单[(总的行数、列数)],[合并项]'''
    # Orchestrates the whole pipeline for the first sheet of each workbook;
    # the call order below is required (see class docstring).
    def get_merger_and_all(self, wb_sheetnames):
        # (zuo_biao, value, rstart, rend, erow, ecol, width, height, isedit, bold)
        mergers_lists = []
        max_row_max_column = []
        wb_sheetnames = wb_sheetnames
        self.sheet_list = []
        for wb_sheetname in wb_sheetnames:
            wb, sheetnames = wb_sheetname
            # 第一个sheet
            sheet = wb[sheetnames[0]]
            self.sheet_list.append(sheet)
            # 和并项的坐标!
            mergers = sheet.merged_cells
            mergers_lists.append(mergers)
            # 表单的总行数、列数!
            max_row, max_column = sheet.max_row, sheet.max_column
            max_row_max_column.append((max_row, max_column))
            # return max_row_max_column, mergers_lists
        # 解析合并-位置!
        start_end_list_all = self.parse_merger(mergers_lists)
        # 解析全部-位置!
        start_end_all_lists = self.parse_all(max_row_max_column)
        # 根据合并的与全部的,得到没有合并的!
        unmerger_lists = self.unmerger(start_end_list_all, start_end_all_lists)
        # 最终解析没有合并的项!
        end_parse_unmerger_lists = self.end_parse_unmerger_lists(unmerger_lists)
        # for i in end_parse_unmerger_lists:
        #     print("end_parse_unmerger_lists", end_parse_unmerger_lists)
        # input("******************************")
        # 最终解析合并的项!
        end_parse_merger_lists = self.end_parse_merger_lists(start_end_list_all)
        # list 排序:将合并的与没有合并的进行,排序
        zong_lists = self.merger_unmerger_order(end_parse_unmerger_lists, end_parse_merger_lists)
        # list 生成 dict 排序:将数据进一步排序!
        zong_lists = self.dict_orser(zong_lists)
        # print("一步排序 后:", zong_lists)
        return zong_lists
'''主函数'''
def excel_01(excel_path):
    """Entry point: parse every workbook under excel_path into ordered cell lists."""
    sheet_parser = ParseSheetZxx(excel_path)
    workbook_sheets = sheet_parser.get_sheets()
    ordered_cell_lists = sheet_parser.get_merger_and_all(workbook_sheets)
    print("一步排序 后:", ordered_cell_lists)
    return ordered_cell_lists
if __name__ == '__main__':
    # Manual experimentation only; the real entry point is excel_01().
    pass
    # parse_sheet = ParseSheet()
    # wb_sheetnames = parse_sheet.get_sheets()
    # max_row_max_column, mergers_lists = parse_sheet.get_merger_and_all(wb_sheetnames)
    # print(max_row_max_column, mergers_lists)
    # cwd_path = os.getcwd()
    # parert_path = os.path.abspath(os.path.dirname(cwd_path) + os.path.sep + '.')
    # excel_path = os.path.join(parert_path, 'excel_workerbook')
    # print("excel_path", excel_path)
    # excel_01(excel_path)
| 2.796875
| 3
|
pyLMS7002Soapy/LMS7002_DCCAL.py
|
Surfndez/pyLMS7002Soapy
| 46
|
12774269
|
<filename>pyLMS7002Soapy/LMS7002_DCCAL.py
#***************************************************************
#* Name: LMS7002_DCCAL.py
#* Purpose: Class implementing LMS7002 DCCAL functions
#* Author: <NAME> ()
#* Created: 2017-02-10
#* Copyright: <NAME> (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
class LMS7002_DCCAL(LMS7002_base):
__slots__ = [] # Used to generate error on typos
def __init__(self, chip):
self.chip = chip
self.channel = None
self.prefix = "DCCAL_"
#
# DCCAL_CFG (0x05C0)
#
# DCMODE
@property
def DCMODE(self):
"""
Get the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'DCMODE')
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
@DCMODE.setter
def DCMODE(self, value):
"""
Set the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1, 'MANUAL', 'AUTO']:
raise ValueError("Value must be [0,1,'MANUAL','AUTO']")
if value==0 or value=='MANUAL':
val = 0
else:
val = 1
self._writeReg('CFG', 'DCMODE', val)
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXB
@property
def PD_DCDAC_RXB(self):
"""
Get the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXB')
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXB.setter
def PD_DCDAC_RXB(self, value):
"""
Set the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXA
@property
def PD_DCDAC_RXA(self):
"""
Get the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXA')
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXA.setter
def PD_DCDAC_RXA(self, value):
"""
Set the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXB
@property
def PD_DCDAC_TXB(self):
"""
Get the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXB')
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXB.setter
def PD_DCDAC_TXB(self, value):
"""
Set the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXA
@property
def PD_DCDAC_TXA(self):
"""
Get the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXA')
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXA.setter
def PD_DCDAC_TXA(self, value):
"""
Set the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXB
@property
def PD_DCCMP_RXB(self):
"""
Get the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXB')
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXB.setter
def PD_DCCMP_RXB(self, value):
"""
Set the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXA
@property
def PD_DCCMP_RXA(self):
"""
Get the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXA')
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXA.setter
def PD_DCCMP_RXA(self, value):
"""
Set the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXB
@property
def PD_DCCMP_TXB(self):
"""
Get the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXB')
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXB.setter
def PD_DCCMP_TXB(self, value):
"""
Set the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXA
@property
def PD_DCCMP_TXA(self):
"""
Get the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXA')
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXA.setter
def PD_DCCMP_TXA(self, value):
"""
Set the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_STAT (0x05C1)
#
# DCCAL_CALSTATUS<7:0>
@property
def DCCAL_CALSTATUS(self):
"""
Get the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CALSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CALSTATUS.setter
def DCCAL_CALSTATUS(self, value):
"""
Set the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CALSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_CMPSTATUS<7:0>
@property
def DCCAL_CMPSTATUS(self):
"""
Get the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CMPSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPSTATUS.setter
def DCCAL_CMPSTATUS(self, value):
"""
Set the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CMPSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_CFG2 (0x05C2)
#
# DCCAL_CMPCFG<7:0>
@property
def DCCAL_CMPCFG(self):
"""
Get the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_CMPCFG<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPCFG.setter
def DCCAL_CMPCFG(self, value):
"""
Set the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_CMPCFG<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_START<7:0>
@property
def DCCAL_START(self):
"""
Get the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_START<7:0>')
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_START.setter
def DCCAL_START(self, value):
"""
Set the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_START<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
def startRXBQ(self):
"""
Starts RXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<7
self.DCCAL_START = 0
def startRXBI(self):
"""
Starts RXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<6
self.DCCAL_START = 0
def startRXAQ(self):
"""
Starts RXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<5
self.DCCAL_START = 0
def startRXAI(self):
"""
Starts RXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<4
self.DCCAL_START = 0
def startTXBQ(self):
"""
Starts TXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<3
self.DCCAL_START = 0
def startTXBI(self):
"""
Starts TXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<2
self.DCCAL_START = 0
def startTXAQ(self):
"""
Starts TXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<1
self.DCCAL_START = 0
def startTXAI(self):
"""
Starts TXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1
self.DCCAL_START = 0
#
# DCCAL_TXAI (0x05C3)
#
@property
def DC_TXAI(self):
"""
Get the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAI', 'DCRD_TXAI', 0)
self._writeReg('TXAI', 'DCRD_TXAI', 1)
self._writeReg('TXAI', 'DCRD_TXAI', 0)
val = self._readReg('TXAI', 'DC_TXAI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAI is not supported on chip version "+str(self.chip.chipID))
@DC_TXAI.setter
def DC_TXAI(self, value):
"""
Set the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAI', 'DC_TXAI<10:0>', val)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
self._writeReg('TXAI', 'DCWR_TXAI', 1)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
else:
raise ValueError("Bitfield TXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXAQ (0x05C4)
#
@property
def DC_TXAQ(self):
"""
Get the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
self._writeReg('TXAQ', 'DCRD_TXAQ', 1)
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
val = self._readReg('TXAQ', 'DC_TXAQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXAQ.setter
def DC_TXAQ(self, value):
"""
Set the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAQ', 'DC_TXAQ<10:0>', val)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
self._writeReg('TXAQ', 'DCWR_TXAQ', 1)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
else:
raise ValueError("Bitfield TXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBI (0x05C5)
#
@property
def DC_TXBI(self):
"""
Get the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBI', 'DCRD_TXBI', 0)
self._writeReg('TXBI', 'DCRD_TXBI', 1)
self._writeReg('TXBI', 'DCRD_TXBI', 0)
val = self._readReg('TXBI', 'DC_TXBI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBI is not supported on chip version "+str(self.chip.chipID))
@DC_TXBI.setter
def DC_TXBI(self, value):
"""
Set the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBI', 'DC_TXBI<10:0>', val)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
self._writeReg('TXBI', 'DCWR_TXBI', 1)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
else:
raise ValueError("Bitfield TXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBQ (0x05C6)
#
@property
def DC_TXBQ(self):
"""
Get the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
self._writeReg('TXBQ', 'DCRD_TXBQ', 1)
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
val = self._readReg('TXBQ', 'DC_TXBQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXBQ.setter
def DC_TXBQ(self, value):
"""
Set the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBQ', 'DC_TXBQ<10:0>', val)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
self._writeReg('TXBQ', 'DCWR_TXBQ', 1)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
else:
raise ValueError("Bitfield TXBQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAI (0x05C7)
#
@property
def DC_RXAI(self):
"""
Get the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAI', 'DCRD_RXAI', 0)
self._writeReg('RXAI', 'DCRD_RXAI', 1)
self._writeReg('RXAI', 'DCRD_RXAI', 0)
val = self._readReg('RXAI', 'DC_RXAI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAI is not supported on chip version "+str(self.chip.chipID))
@DC_RXAI.setter
def DC_RXAI(self, value):
"""
Set the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAI', 'DC_RXAI<6:0>', val)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
self._writeReg('RXAI', 'DCWR_RXAI', 1)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
else:
raise ValueError("Bitfield RXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAQ (0x05C8)
#
@property
def DC_RXAQ(self):
"""
Get the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
self._writeReg('RXAQ', 'DCRD_RXAQ', 1)
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
val = self._readReg('RXAQ', 'DC_RXAQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXAQ.setter
def DC_RXAQ(self, value):
"""
Set the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAQ', 'DC_RXAQ<6:0>', val)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
self._writeReg('RXAQ', 'DCWR_RXAQ', 1)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
else:
raise ValueError("Bitfield RXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBI (0x05C9)
#
@property
def DC_RXBI(self):
"""
Get the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBI', 'DCRD_RXBI', 0)
self._writeReg('RXBI', 'DCRD_RXBI', 1)
self._writeReg('RXBI', 'DCRD_RXBI', 0)
val = self._readReg('RXBI', 'DC_RXBI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBI is not supported on chip version "+str(self.chip.chipID))
@DC_RXBI.setter
def DC_RXBI(self, value):
"""
Set the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBI', 'DC_RXBI<6:0>', val)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
self._writeReg('RXBI', 'DCWR_RXBI', 1)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
else:
raise ValueError("Bitfield RXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBQ (0x05CA)
#
@property
def DC_RXBQ(self):
"""
Get the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBQ', 'DCRD_RXBQ', 0)
self._writeReg('RXBQ', 'DCRD_RXBQ', 1)
self._writeReg('RXBQ', 'DCRD_RXBQ', 0)
val = self._readReg('RXBQ', 'DC_RXBQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXBQ.setter
def DC_RXBQ(self, value):
"""
Set the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBQ', 'DC_RXBQ<6:0>', val)
self._writeReg('RXBQ', 'DCWR_RXBQ', 0)
self._writeReg('RXBQ', 'DCWR_RXBQ', 1)
self._writeReg('RXBQ', 'DCWR_RXBQ', 0)
else:
raise ValueError("Bitfield RXBQ is not supported on chip version "+str(self.chip.chipID))
# DC_RXCDIV<7:0>
@property
def DC_RXCDIV(self):
"""
Get the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_RXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_RXCDIV.setter
def DC_RXCDIV(self, value):
"""
Set the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_RXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# DC_TXCDIV<7:0>
@property
def DC_TXCDIV(self):
"""
Get the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_TXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_TXCDIV.setter
def DC_TXCDIV(self, value):
"""
Set the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_TXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXB<2:0>
@property
def HYSCMP_RXB(self):
"""
Get the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXB.setter
def HYSCMP_RXB(self, value):
"""
Set the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXA<2:0>
@property
def HYSCMP_RXA(self):
"""
Get the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXA.setter
def HYSCMP_RXA(self, value):
"""
Set the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXB<2:0>
@property
def HYSCMP_TXB(self):
"""
Get the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXB.setter
def HYSCMP_TXB(self, value):
"""
Set the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXA<2:0>
@property
def HYSCMP_TXA(self):
"""
Get the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXA.setter
def HYSCMP_TXA(self, value):
"""
Set the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
| 2.4375
| 2
|
salts/migrations/0030_auto_20160712_1511.py
|
sputnik-load/salts
| 1
|
12774270
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the ``test_id`` field to ``session_id`` on both the
    TestResult and Shooting models (schema-only column rename)."""

    dependencies = [
        ('salts', '0029_shooting_ticket_id'),
    ]
    operations = [
        migrations.RenameField('TestResult', 'test_id', 'session_id'),
        migrations.RenameField('Shooting', 'test_id', 'session_id')
    ]
| 1.578125
| 2
|
UnityEngine/ParticleSystemRingBufferMode/__init__.py
|
Grim-es/udon-pie-auto-completion
| 0
|
12774271
|
<gh_stars>0
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class ParticleSystemRingBufferMode:
    """Auto-completion stub mirroring UnityEngine.ParticleSystemRingBufferMode."""

    def __new__(cls, arg1=None):
        """Stub constructor; body intentionally empty (returns None).

        :returns: ParticleSystemRingBufferMode
        :rtype: UnityEngine.ParticleSystemRingBufferMode
        """
        return None
| 1.929688
| 2
|
3/sender.py
|
MrRezoo/rabbitmq-python
| 1
|
12774272
|
import pika

# Connect to a RabbitMQ broker running on the local machine.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
ch = connection.channel()
# Fanout exchange: every queue bound to 'logs' receives each message.
ch.exchange_declare(exchange='logs', exchange_type='fanout')
# routing_key is ignored by fanout exchanges, hence the empty string.
ch.basic_publish(exchange='logs', routing_key='', body='this is testing fanout')
print('message sent')
connection.close()
| 2.140625
| 2
|
nameko/dependency_providers.py
|
vlcinsky/nameko
| 3,425
|
12774273
|
""" Nameko built-in dependencies.
"""
from nameko.extensions import DependencyProvider
class Config(DependencyProvider):
    """ Dependency provider for accessing configuration values.
    """
    def get_dependency(self, worker_ctx):
        # Return a shallow copy so a service worker cannot mutate the
        # container's shared configuration.
        return self.container.config.copy()
| 1.6875
| 2
|
ornl/MasterNode-and-ModelNode-Agents/ModelNode/modelnode/agent.py
|
ChargePoint/volttron-applications
| 0
|
12774274
|
<filename>ornl/MasterNode-and-ModelNode-Agents/ModelNode/modelnode/agent.py
# Copyright (c) 2014 Oak Ridge National Laboratory Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sellcopies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
import sys
import numpy
import time
import json
import random
import uuid
import gevent
from zmq.utils import jsonapi
from volttron.platform.vip.agent import *
from volttron.platform.agent.base_historian import BaseHistorian
from volttron.platform.agent import utils
from volttron.platform.messaging import topics, headers as headers_mod
utils.setup_logging()
Log = logging.getLogger(__name__)
def enum(**enums):
    """Build a simple enum-like class whose attributes are the given keyword names/values."""
    members = dict(enums)
    return type('Enum', (), members)
class ModelNode(Agent):
    """VOLTTRON agent modeling one thermostat-controlled zone.

    Registers itself with the Master Node over pubsub, publishes its
    channel data periodically, and applies cooling/off commands received
    on the ``masternode/command`` topic.
    """

    def __init__(self, config_path, **kwargs):
        super(ModelNode, self).__init__(**kwargs)
        self.Config = utils.load_config(config_path)
        # Control actions encoded as signed stage magnitudes (degrees of effort).
        self.AgentStatesEnum = enum(
            OFF = 0,
            HEATING_STAGE_ONE = 6,
            HEATING_STAGE_TWO = 3,
            COOLING_STAGE_ONE = -3,
            COOLING_STAGE_TWO = -6
        )
        self.initTimeStamp = time.time()

    @Core.receiver('onsetup')
    def setup(self, sender, **kwargs):
        """Read config, draw an initial room temperature, and pick the initial state."""
        self.agentID = str(uuid.uuid4())
        self.setPoint = self.Config["setPoint"]
        self.modelNodePlatform = self.Config["modelnodeplatform"]
        # Initial room temperature near the setpoint.
        # NOTE(review): randn() is a standard-normal draw, so the offset is
        # NOT bounded to +-2 degrees as the original comment claimed.
        self.x0 = self.setPoint + numpy.random.randn()
        # Set initial state.
        # Note that for the purposes of this experiment, only the cooling condition is used.
        if self.x0 > self.setPoint:
            self.u0 = -3
            self.SetCool(self.AgentStatesEnum.COOLING_STAGE_ONE)
        else:
            self.u0 = 0
            self.SetOff()

    @Core.receiver('onstart')
    def startup(self, sender, **kwargs):
        self.RegisterWithMasterNode()

    def RegisterWithMasterNode(self):
        """Announce this agent (ID, setpoint, initial temp, platform) to the Master Node."""
        msg = {}
        msg['ID'] = self.agentID
        msg['xref'] = self.setPoint
        msg['x0'] = self.x0
        msg['platform'] = self.modelNodePlatform
        headers = {headers_mod.FROM: self.agentID}
        self.vip.pubsub.publish(
            'pubsub', topic='modelnode/register', headers=headers, message=msg)

    def SetOff(self):
        self.agentState = self.AgentStatesEnum.OFF

    def SetCool(self, stage):
        """Enter the requested cooling stage; log an error on any other argument."""
        if stage == self.AgentStatesEnum.COOLING_STAGE_ONE:
            self.agentState = self.AgentStatesEnum.COOLING_STAGE_ONE
        elif stage == self.AgentStatesEnum.COOLING_STAGE_TWO:
            self.agentState = self.AgentStatesEnum.COOLING_STAGE_TWO
        else:
            # Fixed: was Log.error(self, "..."), which passed the agent object
            # as the log format string and silently dropped the message.
            Log.error("Invalid cooling command/argument")

    def SetHeat(self, stage):
        """Enter the requested heating stage; log an error on any other argument."""
        if stage == self.AgentStatesEnum.HEATING_STAGE_ONE:
            self.agentState = self.AgentStatesEnum.HEATING_STAGE_ONE
        elif stage == self.AgentStatesEnum.HEATING_STAGE_TWO:
            self.agentState = self.AgentStatesEnum.HEATING_STAGE_TWO
        else:
            # Fixed: was Log.error(self, "...") (see SetCool).
            Log.error("Invalid heating command/argument")

    # every 2 minutes??
    @Core.periodic(120)
    def HeartBeat(self):
        """Periodically publish this agent's channel data to the Master Node."""
        msg = {}
        msg['ID'] = self.agentID
        msg['xref'] = self.setPoint
        # Ideally, this will contain current temperature of the bldg/zone
        # In this experiment, the Master Node calculates this solving the ODE
        headers = {headers_mod.FROM: self.agentID}
        self.vip.pubsub.publish(
            'pubsub', topic='modelnode/channels', headers=headers, message=msg)

    @PubSub.subscribe('pubsub', "masternode/command")
    def ProcessIncomingMessage(self, peer, sender, bus, topic, headers, message):
        """Apply a Master Node command if it is addressed to this agent."""
        msg = message
        if msg['ID'] == self.agentID:
            value = msg['action']
            if value == 0:
                self.SetOff()
                Log.info("OFF")
            elif value == -3:
                self.SetCool(self.AgentStatesEnum.COOLING_STAGE_ONE)
                Log.info("COOL STAGE 1")
            elif value == -6:
                self.SetCool(self.AgentStatesEnum.COOLING_STAGE_TWO)
                Log.info("COOL STAGE 2")
            else:
                Log.error("Invalid command received")
            # Note that the heating condition is not considered here
def main(argv=sys.argv):
    """Run the ModelNode agent via the VOLTTRON VIP entry point."""
    try:
        utils.vip_main(ModelNode)
    except Exception:
        # Fixed: dropped the unused "as e" binding; Log.exception already
        # records the full traceback.
        Log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 1.875
| 2
|
get_autologin/tests.py
|
mazlumagar/django-get-autologin
| 1
|
12774275
|
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase, RequestFactory, Client
from django.contrib.auth import get_user_model
from django.urls.base import reverse
from django.conf import settings
from .models import Token
from .views import user_auth
UserModel = get_user_model()
class GetAutoAuthTestCase(TestCase):
    """Tests for the token-based auto-login view."""

    def setUp(self):
        # One user plus a login Token bound to that user.
        self.client = Client()
        self.factory = RequestFactory()
        self.user = UserModel.objects.create(username="jacob", email="<EMAIL>", password="<PASSWORD>")
        self.url_token = Token.objects.create(user=self.user)

    def test_auth(self):
        """An anonymous GET with a valid token redirects to LOGIN_REDIRECT_URL."""
        auth_url = "{0}?token={1}".format(reverse('get_autologin:auth'), self.url_token.token)
        request = self.factory.get(auth_url)
        request.user = AnonymousUser()
        # RequestFactory requests carry no session; borrow the test client's.
        request.session = self.client.session
        response = user_auth(request)
        # assertRedirects needs a client on the response to follow the redirect.
        response.client = self.client
        self.assertRedirects(response, settings.LOGIN_REDIRECT_URL, status_code=302, target_status_code=200)
| 2.375
| 2
|
examples/rl_dqgnn/plot_test_curve.py
|
Sirui-Xu/Arena
| 1
|
12774276
|
from matplotlib import pyplot as plt
import pickle
import numpy as np
import os,sys
'''
results = []
for i in range(10):
with open(f'/home/yiran/pc_mapping/arena-v2/examples/bc_saved_models/refactor_success_max_mine/run{i}/test_result.npy', 'rb') as f:
result_i = pickle.load(f)
result_number = [v for (k,v) in result_i.items()]
results.append(result_number)
results = np.array(results)
result_mean = results.mean(axis=0)
result_std = results.std(axis=0)
print(result_mean, result_std)
exit()
'''
x = [1,3,5,7,9,11,13,15,17,19]
y_ub = np.arange(1,21,2)
y_heuristic = [1.0, 3.0, 4.99, 6.83, 8.53, 9.46, 10.89, 12.3, 13.69, 13.64]
y_DDQN = [0.99, 2.88, 4.78, 3.82, 2.37, 2.14, 1.35, 1.01, 0.91,1.09]
y_refactor_max = [0.98, 2.86, 4.64, 5.67, 5.81, 5.82, 5.35, 5.07, 3.34, 3.11]
y_refactor_success_max = [0.99, 3.0, 4.94, 6.55, 7.74, 8.47, 8.48, 7.72, 7.29, 5.85]
y_refactor_purify_10of10_max = [1.0, 2.97, 4.85, 6.76, 8.05, 8.42, 8.66, 8.03, 7.58, 5.65]
y_refactor_purify_9of10_max = [1.0, 3.0, 4.94, 6.69, 8.27, 9.27, 9.3, 8.88, 8.87, 7.94]
y_refactor_purify_8of10_max = [1.0, 3.0, 4.95, 6.76, 7.68, 8.14, 8.11, 8.18, 6.99, 5.09]
y_refactor_purify_7of10_max = [1.0, 3.0, 4.93, 6.91, 8.32, 9.46, 10.64, 11.7, 11.81, 10.86]
y_refactor_purify_6of10_max = [1.0, 2.97, 4.93, 6.78, 8.35, 9.87, 10.78, 11.29, 12.0, 11.09]
y_refactor_purify_5of10_max = [1.0, 2.94, 5.0, 6.59, 8.28, 8.96, 10.22, 10.34, 10.93, 10.56]
y_refactor_purify_4of10_max = [1.0, 2.97, 5.0, 6.79, 8.16, 9.27, 8.16, 7.82, 7.47, 6.02]
y_refactor_purify_3of10_max = [1.0, 2.95, 4.96, 6.56, 7.96, 9.14, 8.64, 7.64, 7.36, 4.54]
y_refactor_purify_2of10_max = [1.0, 3.0, 4.95, 6.75, 8.32, 9.49, 9.55, 9.73, 9.75, 8.04]
y_refactor_purify_1of10_max = [1.0, 2.97, 4.96, 6.75, 7.92, 8.09, 7.92, 6.62, 5.85, 4.7]
plt.xlabel('number of coins')
plt.ylabel('collected coins (mean of 100 runs)')
plt.xlim(0, 19)
plt.xticks(np.arange(1,21,2))
plt.ylim(0, 19)
plt.yticks(np.arange(1,21,2))
plt.plot(x, y_ub, label='max score')
plt.plot(x, y_heuristic, label='IL heuristic')
plt.plot(x, y_refactor_purify_6of10_max, label='IL purify')
plt.plot(x, y_refactor_success_max, label='IL successful traj')
plt.plot(x, y_refactor_max, label='IL all traj')
plt.plot(x, y_DDQN, label='DoubleDQN')
plt.legend(loc='upper left')
plt.show()
| 2.328125
| 2
|
src/mandelbrot.py
|
poseen/pyMandelbrot
| 0
|
12774277
|
#!/usr/bin/python
"""
This is the main python file to run.
"""
import sys
from application import Application
# -- Functions ---------------------------------------------------------------
def main():
    """Create the Application from the command-line arguments and run it."""
    Application(sys.argv).run()
# -- Main entry point --------------------------------------------------------
if __name__ == "__main__":
main()
| 2.9375
| 3
|
python_developer_tools/cv/detection/CenterNet2/__init__.py
|
carlsummer/python_developer_tools
| 32
|
12774278
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:5/18/2021 11:16 AM
# @File:__init__.py
| 1.09375
| 1
|
bomber_monkey/features/display/score_display_system.py
|
MonkeyPatchIo/bomber-monkey
| 0
|
12774279
|
<reponame>MonkeyPatchIo/bomber-monkey<gh_stars>0
import pygame as pg
from bomber_monkey.features.player.player import Player
from bomber_monkey.game_config import GameConfig, GAME_FONT
from bomber_monkey.utils.vector import Vector
from python_ecs.ecs import System, Simulator
FONT_SIZE = 35
MARGIN = 5
class PlayerScoreDisplaySystem(System):
    """ECS system that draws each player's score in the screen corner
    nearest that player's starting board position."""

    def __init__(self, screen):
        super().__init__([Player])
        self.screen = screen
        self.font = pg.font.Font(GAME_FONT, FONT_SIZE)

    def update(self, sim: Simulator, dt: float, player: Player) -> None:
        conf: GameConfig = sim.context.conf
        text = self.font.render(str(sim.context.scores.scores[player.player_id]), 1, player.color)
        board_pos = player.slot.start_pos
        # Fixed: removed dead "pos_x = Vector.create(0, 0)" -- it was
        # unconditionally overwritten below (and was a Vector, not an int).
        # Negative start x means the right screen edge.
        if board_pos.x < 0:
            pos_x = conf.pixel_size.x - FONT_SIZE - MARGIN
        else:
            pos_x = MARGIN
        # Negative start y means just above the playground area.
        if board_pos.y < 0:
            pos_y = conf.playground_offset.y - FONT_SIZE - MARGIN
        else:
            pos_y = MARGIN
        self.screen.blit(text, (pos_x, pos_y))
| 2.59375
| 3
|
py/bitbox02/bitbox02/util.py
|
conte91/bitbox02-firmware
| 0
|
12774280
|
<reponame>conte91/bitbox02-firmware<filename>py/bitbox02/bitbox02/util.py
# Copyright 2019 Shift Cryptosecurity AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful functions"""
from typing import Optional, List, Tuple
from pathlib import Path
import json
import os
import binascii
import base58
from .bitbox02 import common
from .communication import bitbox_api_protocol
def parse_xpub(xpub: str) -> common.XPub:
    """
    Parse an xpub to a protobuf XPub.
    The version is stripped, so the xpub can be any format (xpub, ypub, etc.).
    """
    # Base58Check-decode, then drop the 4-byte version prefix.
    decoded = base58.b58decode_check(xpub)
    decoded = decoded[4:]
    # Consume the remaining BIP-32 fields in order:
    # 1-byte depth, 4-byte parent fingerprint, 4-byte child number,
    # 32-byte chain code, 33-byte compressed public key.
    depth, decoded = decoded[:1], decoded[1:]
    parent_fp, decoded = decoded[:4], decoded[4:]
    child_num, decoded = decoded[:4], decoded[4:]
    chain_code, decoded = decoded[:32], decoded[32:]
    pubkey, decoded = decoded[:33], decoded[33:]
    # Everything must have been consumed; extra bytes mean a malformed xpub.
    assert len(decoded) == 0
    return common.XPub(
        depth=depth,
        parent_fingerprint=parent_fp,
        child_num=int.from_bytes(child_num, "big"),
        chain_code=chain_code,
        public_key=pubkey,
    )
class UserCache:
    """Noise pairing key material: the app's static private key and the
    device static public keys this app has paired with."""
    # pylint: disable=too-few-public-methods

    def __init__(self, raw_cache: Optional[str] = None):
        """Create an empty cache, or load one from its serialized JSON form."""
        if raw_cache is None:
            self.app_static_privkey = None
            self.device_static_pubkeys: List[bytes] = []
            return
        (privkey, pubkeys) = UserCache.deserialize(raw_cache)
        self.app_static_privkey = privkey
        self.device_static_pubkeys = pubkeys

    def serialize(self) -> str:
        """Serialize struct to a JSON string (keys hex-encoded)."""
        pubkeys = [binascii.hexlify(x).decode("utf-8") for x in self.device_static_pubkeys]
        privkey = None
        if self.app_static_privkey is not None:
            privkey = binascii.hexlify(self.app_static_privkey).decode("utf-8")
        return json.dumps({"device_static_pubkeys": pubkeys, "app_static_privkey": privkey})

    @staticmethod
    def deserialize(raw: str) -> Tuple[Optional[bytes], List[bytes]]:
        """Deserialize a JSON cache string to (privkey, pubkeys).

        Malformed input (invalid JSON or missing keys) yields an empty
        cache instead of raising, so a corrupt cache file self-heals.
        """
        try:
            data = json.loads(raw)
            privkey = None
            if data["app_static_privkey"] is not None:
                privkey = binascii.unhexlify(data["app_static_privkey"])
            pubkeys = [binascii.unhexlify(x) for x in data["device_static_pubkeys"]]
            return (privkey, pubkeys)
        except (json.JSONDecodeError, KeyError):
            # Merged the two identical except clauses into one tuple clause.
            return (None, [])
class NoiseConfigUserCache(bitbox_api_protocol.BitBoxNoiseConfig):
    """A noise config that stores the keys in a file in XDG_CACHE_HOME or ~/.cache"""
    def __init__(self, appid: str) -> None:
        """
        Args:
            appid: A string that uniqely identifies your application. It will be used as the name
                of the cache directory. Directory separators will create subdirectories, e.g.
                "shift/test1".
        """
        self._cache_file_path = NoiseConfigUserCache._find_cache_file(appid)
        super().__init__()
    @staticmethod
    def _find_cache_file(appid: str) -> Path:
        # Prefer $XDG_CACHE_HOME; fall back to ~/.cache per the XDG spec.
        cachedir_env = os.environ.get("XDG_CACHE_HOME", "")
        if cachedir_env == "":
            homedir = os.environ.get("HOME", "")
            if homedir == "":
                raise RuntimeError("Can't find cache dir")
            cachedir = Path(homedir) / ".cache"
        else:
            cachedir = Path(cachedir_env)
        return cachedir / appid / "bitbox02.dat"
    def _read_cache(self) -> UserCache:
        # A missing cache file is equivalent to an empty cache.
        try:
            with self._cache_file_path.open("r") as fileh:
                return UserCache(fileh.read())
        except FileNotFoundError:
            return UserCache()
    def _write_cache(self, data: UserCache) -> None:
        # Create parent directories on first write.
        self._cache_file_path.parent.mkdir(parents=True, exist_ok=True)
        with self._cache_file_path.open("w") as fileh:
            fileh.write(data.serialize())
    def contains_device_static_pubkey(self, pubkey: bytes) -> bool:
        data = self._read_cache()
        if pubkey in data.device_static_pubkeys:
            return True
        return False
    def add_device_static_pubkey(self, pubkey: bytes) -> None:
        # Read-modify-write; skips the write if the key is already cached.
        if not self.contains_device_static_pubkey(pubkey):
            data = self._read_cache()
            data.device_static_pubkeys.append(pubkey)
            self._write_cache(data)
    def get_app_static_privkey(self) -> Optional[bytes]:
        data = self._read_cache()
        return data.app_static_privkey
    def set_app_static_privkey(self, privkey: bytes) -> None:
        data = self._read_cache()
        data.app_static_privkey = privkey
        self._write_cache(data)
| 2.03125
| 2
|
project03/main.py
|
PatrickKalkman/pirplepython
| 0
|
12774281
|
<reponame>PatrickKalkman/pirplepython
"""
Python Is Easy course @P<EMAIL>
Project #3: Pick a Card Game!
<NAME> / <EMAIL>
Details:
Everyone has their favorite card game. What's yours? For this assignment,
choose a card game (other than Blackjack), and turn it into a Python program.
It doesn't matter if it's a 1-player game, or a 2 player game, or more!
That's totally up to you. A few requirements:
It's got to be a card game (no board games, etc)
When the game starts up, you should ask for the players' names.
And after they enter their names, your game should refer to them by name only.
("It's John's turn" instead of "It's player 1's turn).
At any point during the game, someone should be able to type "--help" to be
taken to a screen where they can read the rules of the game and instructions
for how to play. After they're done reading, they should be able to type
"--resume" to go back to the game and pick up where they left off.
Extra Credit:
Want to make this much much harder on yourself? Okay, you asked for it!
For extra credit, allow 2 players to play on two different computers that are
on the same network. Two people should be able to start identical versions of
your program, and enter the internal IP address of the user on the network who
they want to play against. The two applications should communicate with each
other, across the network using simple HTTP requests. Try this library to send
requests:
http://docs.python-requests.org/en/master/
http://docs.python-requests.org/en/master/user/quickstart/
And try Flask to receive them:
http://flask.pocoo.org/
The 2-player game should only start if one person has challenged the other
(by entering their internal IP address), and the 2nd person has accepted
the challenge. The exact flow of the challenge mechanism is up to you.
"""
from enum import Enum
from game import Deck, Player, Rules, Stack
class GameEngine:
    """Interactive engine for a bluffing card game: a player puts a card on the
    stack, announces which card they claim it was, and the next player may
    either play a card or call the previous player a cheater."""
    class States(Enum):
        # Game lifecycle: one-time setup -> active play -> someone won.
        INIT = 1
        PLAYING = 2
        FINISHED = 3
    def __init__(self):
        # Start in INIT; game_loop() performs setup on its first pass.
        self.state = self.States.INIT
        self.current_player_index = 0  # index into self.players of whose turn it is
        self.players = []
        self.deck = Deck()
        self.rules = Rules()
        self.stack = Stack()  # pile of cards played so far this round
        # The card the previous player *claimed* to have played; may differ
        # from the card actually placed on the stack (that's the bluff).
        self.cheat_card = ""
    def initialize_game(self):
        """Build and shuffle the deck and load the printable rules text."""
        self.deck.build()
        self.deck.shuffle()
        self.rules.read_rules()
    def create_players(self):
        """Interactively ask for the player count and each player's name."""
        number_players = int(input("Enter the number of players? "))
        for player_number in range(1, number_players + 1):
            player_question = f"Enter the name of player{player_number}? "
            name = input(player_question)
            self.players.append(Player(name))
    def current_player(self):
        """Return the Player whose turn it currently is."""
        return self.players[self.current_player_index]
    def deal_cards(self):
        """Deal the whole deck round-robin, so hands differ by at most one card."""
        player_index = 0
        for card in self.deck.cards:
            self.players[player_index].hand.append(card)
            player_index += 1
            if player_index >= len(self.players):
                player_index = 0
    def next_player(self):
        """Advance the turn, first declaring victory if the current player
        has emptied their hand."""
        if self.current_player().no_more_cards():
            print(f"{self.current_player().name} won the game!!!!!")
            self.state = self.States.FINISHED
        self.current_player_index += 1
        if self.current_player_index >= len(self.players):
            self.current_player_index = 0
    def previous_player(self):
        """Return the Player before the current one, wrapping around the table."""
        if self.current_player_index > 0:
            return self.players[self.current_player_index - 1]
        else:
            return self.players[len(self.players) - 1]
    def print_rules(self):
        """Print the rules text (shown for the "help" command)."""
        print(self.rules)
    def game_loop(self):
        """Main interactive loop: prompt the current player for a command
        ("help", "playcard" or "cheater") until the game is FINISHED."""
        while self.state != self.States.FINISHED:
            if self.state == self.States.INIT:
                # One-time setup on the first iteration.
                self.create_players()
                self.deal_cards()
                self.state = self.States.PLAYING
            print(f"{self.current_player().name} it is your turn")
            print(self.stack)
            print(self.current_player())
            command = input((f"What do you want to do?" " (help, playcard, cheater) "))
            if command == "help":
                self.print_rules()
            elif command == "playcard":
                call_card = input("Which card do you want to play? ")
                if self.current_player().has_card(call_card):
                    card = self.current_player().get_card(call_card)
                    self.stack.add_card(card)
                    # The announced card may be a lie; it is only checked if
                    # the next player chooses "cheater".
                    self.cheat_card = input(
                        ("What card do you " "want to say you " "played? ")
                    )
                    self.next_player()
                else:
                    print("You don't have that card")
            elif command == "cheater":
                # Compare the claim against the card actually on top of the stack.
                lastcard = self.stack.get_last_card()
                print(f"Last card was: {lastcard}")
                if self.cheat_card == str(lastcard):
                    # Wrong accusation: the accuser picks up the pile.
                    print(
                        (
                            f"No, {self.previous_player().name} did not cheat, "
                            "you will get all the played cards"
                        )
                    )
                    played_cards = self.stack.get_cards()
                    self.current_player().add(played_cards)
                    self.stack.clear()
                else:
                    # Correct accusation: the cheater picks up the pile.
                    print(
                        (
                            f"Yes, you are right {self.previous_player().name} "
                            f"cheated. {self.previous_player().name} will get "
                            "all played cards"
                        )
                    )
                    played_cards = self.stack.get_cards()
                    self.previous_player().add(played_cards)
                    self.stack.clear()
    def start_game(self):
        """Entry point: set up the deck and rules, then run the interactive loop."""
        self.initialize_game()
        self.game_loop()
# Start the game only when this file is executed as a script, so importing
# the module (e.g. from tests or the networking extra-credit client) does
# not launch the interactive loop as a side effect.
if __name__ == "__main__":
    game = GameEngine()
    game.start_game()
| 4.15625
| 4
|
django/contrib/formtools/wizard/storage/exceptions.py
|
pomarec/django
| 285
|
12774282
|
<reponame>pomarec/django
from django.core.exceptions import ImproperlyConfigured
class MissingStorage(ImproperlyConfigured):
    """Raised when no storage backend is configured for the form wizard."""
    pass
class NoFileStorageConfigured(ImproperlyConfigured):
    """Raised when the wizard needs a file storage (file uploads) but none
    is configured."""
    pass
| 1.609375
| 2
|
code/src.py
|
oShadow05/ftp_most_update_files
| 0
|
12774283
|
import subprocess
import ftplib
import os
import time
from hide_data import *
from datetime import datetime
# Creation of folders named after day, month, year, hour, minute, second.
def create_path_folder(init_path):
    """Build a timestamped backup-folder path from the current local time.

    Args:
        init_path: Base path string the timestamped name is appended to.

    Returns:
        init_path + "DD-MM-YY-OraHH-minutiMM-secondiSS", plus the suffix
        " Backup completo" when invoked at exactly 21:30:00 local time
        (that run is treated as the daily full backup).
    """
    # Snapshot the clock once so all fields describe the same instant;
    # the original called time.localtime() per field, which could straddle
    # a second/minute boundary and yield an inconsistent timestamp.
    now = time.localtime()
    day = time.strftime("%d", now)
    month = time.strftime("%m", now)
    year = time.strftime("%y", now)
    ora = time.strftime("%H", now)
    minuti = time.strftime("%M", now)
    secondi = time.strftime("%S", now)
    path_for_G = (
        init_path + day + "-" + month + "-" + year
        + "-Ora" + ora + "-minuti" + minuti + "-secondi" + secondi
    )
    if ora == "21" and minuti == "30" and secondi == "00":
        path_for_G += " Backup completo"
    return path_for_G
def check_if_connection_exist(net_path, password):
    """Ensure the G: network drive is mapped, mapping it via NET USE if needed.

    Args:
        net_path: UNC path of the network share.
        password: <PASSWORD> for the share.

    Side effects:
        On Windows, spawns two NET USE commands when G:\\ICT is not reachable;
        does nothing when it already is.
    """
    try:
        # If we can enter the mapped drive, the connection already exists.
        # (Narrowed from a bare `except`: os.chdir on a fixed path only
        # raises OSError subclasses.)
        os.chdir("G:\ICT")
    except OSError:
        # SECURITY NOTE: the password is interpolated into a shell command
        # line (visible in the process list), and shell=True allows command
        # injection if net_path/password come from untrusted input. Kept
        # as-is to preserve behavior; consider a list argv with shell=False.
        # The first NET USE call establishes a connection to the server...
        call_to_server2003_with_CMD = " NET USE " + net_path + " " + password
        subprocess.Popen(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell=True)
        # ...the second one maps it onto the virtual G: drive for reads/writes.
        call_to_server2003_with_CMD = " NET USE G: " + net_path + " " + password
        subprocess.Popen(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell=True)
    # --> END OF SERVER/DATA CONNECTION SETUP
# Date cache and file cache: files are re-downloaded only when modified.
# The two lists are parallel: date[i] is the cached timestamp of files[i].
date = [
    # CREATION/MODIFICATION DATES OF THE FILES
]
files = [
    # NAMES OF THE FILES
]
# --> End of cache
def download_files(ip, user, password, path_files, path_for_G):
    """Download via FTP the files listed in the module-level `files` cache
    that changed since the timestamps cached in `date`, saving them into
    path_for_G.

    Args:
        ip: FTP server address.
        user: FTP user name.
        password: <PASSWORD>.
        path_files: Remote directory containing the files.
        path_for_G: Local destination directory.

    Side effects:
        Updates the module-level `date` list in place with the new
        timestamps; removes path_for_G when it ends up empty (nothing
        was downloaded this run).
    """
    ftp = ftplib.FTP(ip)
    ftp.login(user, password)
    ftp.cwd(path_files)
    filenames = ftp.nlst()
    for filename in filenames:
        # MDTM replies "<code> YYYYMMDDHHMMSS"; skip the 4-char status
        # prefix and re-format so timestamps compare lexicographically.
        file_time = ftp.sendcmd("MDTM " + filename)
        file_convert_time = datetime.strptime(file_time[4:], "%Y%m%d%H%M%S").strftime("%Y-%m-%d %H.%M")
        for e in range(len(date)):
            if files[e] == filename:
                print(filename, "\n")
                print(date[e], "\n")
                if file_convert_time > date[e]:
                    local_filename = os.path.join(path_for_G, filename)
                    # `with` guarantees the handle is closed even when the
                    # transfer raises (the original leaked it on error).
                    with open(local_filename, 'wb') as file:
                        ftp.retrbinary('RETR ' + filename, file.write)
                    date[e] = file_convert_time
                    print("agg", "\n", date[e], "\n")
                else:
                    print("il file è nella versione più recente")
    try:
        time.sleep(3)
        # rmdir succeeds only for an empty directory, i.e. when no file
        # was downloaded this run. (Narrowed from a bare `except`.)
        os.rmdir(path_for_G)
    except OSError:
        print("file in folder")
    ftp.quit()
| 2.515625
| 3
|
mapscraper/google.py
|
Armadillomon/google-trips
| 0
|
12774284
|
<reponame>Armadillomon/google-trips
import re
import locale
import datetime
import time
import io
import os
from selenium import webdriver
from selenium.webdriver.support import expected_conditions
from PIL import Image
import mapscraper.metrics
from .captions import *
class GoogleDateParser:
    """Parses localized Google Timeline date headers of the form
    "<weekday>, <day> <month> <year>" into datetime.date objects."""
    # Captures: weekday name, day number, month name, year.
    PATTERN = r"(\w+),\s+(\d{1,2})\s+(\w+)\s+(\d+)"
    _locale = locale.getdefaultlocale()[0]
    # Make strftime emit localized day/month names for the tables below.
    locale.setlocale(locale.LC_TIME, _locale)
    # 1970-01-04 was a Sunday, so index 0 maps to the localized name of Sunday.
    WEEKDAYS = { time.strftime("%A", datetime.date(1970, 1, 4 + i).timetuple()).upper(): i for i in range(0, 7) }
    # Hardcoded Polish table: the names here (genitive forms used in dates,
    # e.g. "stycznia") differ from what strftime("%B") produces.
    if _locale == "pl_PL": MONTHS = { "STYCZNIA": 1, "LUTEGO": 2, "MARCA": 3, "KWIETNIA": 4, "MAJA": 5, "CZERWCA": 6, "LIPCA": 7, "SIERPNIA": 8, "WRZEŚNIA": 9, "PAŹDZIERNIKA": 10, "LISTOPADA": 11, "GRUDNIA": 12 }
    else: MONTHS = { time.strftime("%B", datetime.date(1970, 1 + i, 1).timetuple()).upper(): i+1 for i in range(0, 12) }
    @classmethod
    def parse(cls, string):
        """Return the datetime.date encoded in `string`, or None when it does
        not match PATTERN. Name lookups are case-insensitive (input is
        uppercased); an unknown weekday/month name raises KeyError."""
        result = re.match(cls.PATTERN, string.upper())
        if result:
            # Looking up the weekday validates the name; the value is unused.
            weekday = cls.WEEKDAYS[result[1]]
            day = int(result[2])
            month = cls.MONTHS[result[3]]
            year = int(result[4])
            return datetime.date(year, month, day)
        else: return None
class GoogleMap:
    """A captured map screenshot (PIL image) plus its date, with helpers to
    draw a date caption and annotations before saving."""

    def __init__(self):
        self.date = None   # datetime.date of the map; set by the caller
        self.image = None  # PIL.Image holding the screenshot

    @classmethod
    def from_bytes(self, png):
        """Build a GoogleMap from raw PNG bytes (e.g. a selenium screenshot)."""
        map = self()
        map.image = Image.open(io.BytesIO(png))
        map.size = map.image.size
        return map

    def add_date_caption(self, font, size, padding):
        """Draw the map's date onto the image. Must be called before
        add_annotation, which positions itself relative to this caption."""
        self._date_caption = DateCaption(self.date, font, size, padding)
        self._date_caption.add_to_img(self.image)

    def add_annotation(self, type: str, annotation: str or os.PathLike, **kwargs):
        """Add an annotation to the right of the date caption.

        Args:
            type: "text" (annotation is the caption string; requires `font`
                and `size` kwargs) or "icon" (annotation is an image path).

        Raises:
            ValueError: if `type` is neither "text" nor "icon".
        """
        if type == "text":
            font = kwargs["font"]
            size = kwargs["size"]
            padding = size
            caption = Caption(annotation, font, size, padding)
            caption.add_to_img(self.image, (self._date_caption.position[0] + self._date_caption.length, self._date_caption.position[1]))
        elif type == "icon":
            symbol = AnnotationSymbol(annotation)
            symbol.add_to_img(self.image, self._align_symbol(symbol))
        else:
            # BUG FIX: the original raised `ArgumentError`, an undefined name,
            # which itself produced a NameError instead of the intended error.
            raise ValueError("Invalid annotation type")

    def _align_symbol(self, symbol: AnnotationSymbol):
        # Place the symbol just right of the caption, vertically centered
        # within the caption's bounding box.
        return (self._date_caption.bbox[2], (self._date_caption.bbox[1] + self._date_caption.bbox[3] - symbol.size[1])//2)

    def save(self, filename):
        """Save the image; if `filename` is a directory, derive a
        YYYYMMDD.png name inside it from the map's date."""
        if os.path.isdir(filename):
            # BUG FIX: the original formatted `map.date` — the builtin `map`,
            # not this instance — raising AttributeError whenever a directory
            # was passed. It must read the instance's own date.
            filename = os.path.join(filename, f"{self.date.strftime('%Y%m%d')}.png")
        self.image.save(filename)

    def close(self):
        self.image.close()
class MapControls:
    """Wrapper around the zoom/layer buttons and the overlay chrome of the
    Google Timeline map page, located via hardcoded CSS selectors.
    NOTE(review): the selectors are tied to a specific page DOM and will
    break when Google changes the layout."""
    ZOOMIN_SELECTOR = "#map > div > div > div:nth-child(13) > div > div > div > button:nth-child(1)"
    ZOOMOUT_SELECTOR = "#map > div > div > div:nth-child(13) > div > div > div > button:nth-child(3)"
    MAP_SELECTOR = "#map > div > div > div:nth-child(16) > div:nth-child(2) > div:nth-child(1) > button"
    SATELLITE_SELECTOR = "#map > div > div > div:nth-child(16) > div:nth-child(2) > div:nth-child(2) > button"
    GOOGLE_ACCOUNT_SETTINGS = "#gb > div"
    GOOGLE_MAP_SETTINGS = "#map > div > div > div:nth-child(16) > div.map-controls-container"
    GOOGLE_ZOOM = "#map > div > div > div:nth-child(13) > div > div"
    GOOGLE_LAYER = "#map > div > div > div:nth-child(16) > div:nth-child(2)"
    def __init__(self, driver):
        # Every element lookup below waits up to 60s for the page to render.
        self._driver = driver
        self._wait = webdriver.support.wait.WebDriverWait(self._driver, 60)
        self.controls = {}   # logical name -> CSS selector of a clickable control
        self.overlay = []    # WebElements of UI chrome hidden during screenshots
        self._display = []   # original CSS `display` values, parallel to `overlay`
        self._initialize_controls(
            ("zoomin", self.ZOOMIN_SELECTOR),
            ("zoomout", self.ZOOMOUT_SELECTOR),
            ("toggle_map", self.MAP_SELECTOR),
            ("toggle_satellite", self.SATELLITE_SELECTOR)
        )
        self._initialize_overlay(
            self.GOOGLE_ACCOUNT_SETTINGS,
            self.GOOGLE_MAP_SETTINGS,
            self.GOOGLE_ZOOM,
            self.GOOGLE_LAYER
        )
    def _initialize_controls(self, *controls):
        """Wait until each (name, selector) control is visible, then register it."""
        for control in controls:
            self._wait.until(expected_conditions.visibility_of_element_located((webdriver.common.by.By.CSS_SELECTOR, control[1])))
            self.controls[control[0]] = control[1]
    def _initialize_overlay(self, *selectors):
        """Wait for each overlay element and remember its original display
        style so show() can restore it after hide()."""
        for selector in selectors:
            element = self._wait.until(expected_conditions.visibility_of_element_located((webdriver.common.by.By.CSS_SELECTOR, selector)))
            self.overlay.append(element)
            self._display.append(element.value_of_css_property("display"))
    def hide(self):
        """Set display:none on all overlay chrome so screenshots show only the map."""
        self._driver.execute_async_script("""
            var callback = arguments[arguments.length - 1];
            for (element of arguments[0]) element.style.display = "none";
            callback();
        """, self.overlay)
    def show(self):
        """Restore each overlay element to its original display value."""
        self._driver.execute_async_script("""
            var callback = arguments[arguments.length - 1];
            for (index in arguments[0]) arguments[0][index].style.display = arguments[1][index];
            callback();
        """, self.overlay, self._display)
    def zoom_in(self):
        # Re-resolve the button on every call: the DOM may have been re-rendered.
        button = self._wait.until(expected_conditions.element_to_be_clickable((webdriver.common.by.By.CSS_SELECTOR, self.controls["zoomin"])))
        button.click()
    def zoom_out(self):
        button = self._wait.until(expected_conditions.element_to_be_clickable((webdriver.common.by.By.CSS_SELECTOR, self.controls["zoomout"])))
        button.click()
    def toggle_map(self):
        button = self._wait.until(expected_conditions.element_to_be_clickable((webdriver.common.by.By.CSS_SELECTOR, self.controls["toggle_map"])))
        button.click()
    def toggle_satellite(self):
        button = self._wait.until(expected_conditions.element_to_be_clickable((webdriver.common.by.By.CSS_SELECTOR, self.controls["toggle_satellite"])))
        button.click()
class MapPage:
    """Drives the Google Timeline page in Chrome: opens a day's map, resizes
    the viewport so the map canvas has an exact pixel size, and captures
    screenshots as GoogleMap objects."""
    MAP_CLASS = "map-wrapper"
    MAP_SELECTOR = "div.map-wrapper"
    FRAME_SELECTOR = "div.map-page-content-wrapper"
    DATE_SELECTOR = "#map-page > div.map-page-content-wrapper > div > div > div > div.timeline-header > div > div.timeline-title"
    NEXT_SELECTOR = "i.timeline-header-button.next-date-range-button.material-icons-extended.material-icon-with-ripple.rtl-mirrored"
    def __init__(self, profile_dir):
        # Launch Chrome with an existing user profile — presumably to reuse
        # an authenticated Google session (TODO confirm).
        browser_options = webdriver.ChromeOptions()
        browser_options.add_argument(f"user-data-dir={profile_dir}")
        self._driver = webdriver.Chrome(options=browser_options)
    def open(self, url):
        """Navigate to `url` and wire up the on-page map controls."""
        self._driver.get(url)
        self._wait = webdriver.support.wait.WebDriverWait(self._driver, 60)
        self.controls = MapControls(self._driver)
    def close(self):
        self._driver.close()
    @property
    def frame_width(self):
        # Size of the timeline content panel next to the map canvas.
        return self._driver.find_element_by_css_selector(self.FRAME_SELECTOR).size["width"]
    @property
    def frame_height(self):
        return self._driver.find_element_by_css_selector(self.FRAME_SELECTOR).size["height"]
    def resize_window(self, map_width, map_height):
        """Resize the browser window so the map canvas measures exactly
        map_width x map_height pixels.

        Raises:
            RuntimeError: if the required window would exceed the screen size.
        """
        # Window chrome size = window size minus <body> size; add it back in.
        window_size = self._driver.get_window_size()
        body_size = self._driver.find_element_by_css_selector("body").size
        correction = [window_size["width"] - body_size["width"], window_size["height"] - body_size["height"]]
        screen = mapscraper.metrics.Screen()
        target_width = map_width + self.frame_width + correction[0]
        target_height = map_height + correction[1]
        if target_width > screen.width or target_height > screen.height: raise RuntimeError("Window cannot be larger than screen resolution")
        self._driver.set_window_size(target_width, target_height)
        # Second pass: correct any residual mismatch between the requested
        # and the actually rendered canvas size, then reload so the page
        # lays out for the final dimensions.
        canvas = self._driver.find_element_by_css_selector(self.MAP_SELECTOR)
        diff_width = map_width - canvas.size["width"]
        diff_height = map_height - canvas.size["height"]
        if diff_width or diff_height:
            window_size = self._driver.get_window_size()
            self._driver.set_window_size(window_size["width"] + diff_width, window_size["height"] + diff_height)
            self._driver.refresh()
            # Controls must be re-resolved after the refresh.
            self.controls = MapControls(self._driver)
    def resize_map(self, map_width, map_height):
        """Force the map canvas itself to the given CSS size (alternative to
        resizing the whole window)."""
        self._driver.execute_async_script("""
            var callback = arguments[arguments.length - 1];
            let canvas = document.getElementsByClassName(arguments[0])[0];
            canvas.style.width = arguments[1] + "px";
            canvas.style.height = arguments[2] + "px";
            callback();
        """, self.MAP_CLASS, str(map_width), str(map_height))
        self.controls = MapControls(self._driver)
    def next_map(self):
        """Click the "next date" arrow to advance the timeline by one day."""
        next_day = self._wait.until(expected_conditions.element_to_be_clickable((webdriver.common.by.By.CSS_SELECTOR, self.NEXT_SELECTOR)))
        next_day.click()
    @property
    def map(self):
        """Screenshot the map canvas and return it as a GoogleMap, with the
        date parsed from the timeline header."""
        canvas = self._driver.find_element_by_css_selector(self.MAP_SELECTOR)
        map = GoogleMap.from_bytes(canvas.screenshot_as_png)
        map.date = GoogleDateParser.parse(self._driver.find_element_by_css_selector(self.DATE_SELECTOR).text)
        return map
| 2.65625
| 3
|
neurokit2/complexity/complexity_hjorth.py
|
BelleJohn/neuropsychology-NeuroKit
| 1
|
12774285
|
import numpy as np
import pandas as pd
def complexity_hjorth(signal):
    """Compute Hjorth's Complexity (and related parameters) of a signal.

    Hjorth (1970) parameters are time-domain statistics: *activity* is the
    variance of the signal; *mobility* is the square root of the ratio of
    the first derivative's variance to the signal's variance; *complexity*
    is the ratio of the first derivative's mobility to the signal's
    mobility, approaching 1 for a pure sine wave.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        Contains the "Mobility" and "Activity" parameters used to compute
        the complexity.

    Examples
    ----------
    >>> import neurokit2 as nk
    >>>
    >>> signal = nk.signal_simulate(duration=2, frequency=5)
    >>>
    >>> complexity, info = nk.complexity_hjorth(signal)
    >>> complexity #doctest: +SKIP

    References
    ----------
    - https://github.com/raphaelvallat/antropy/blob/master/antropy
    """
    # Only one-dimensional inputs are supported.
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # First and second discrete derivatives of the signal.
    first_deriv = np.diff(signal)
    second_deriv = np.diff(first_deriv)
    # Activity is the signal variance; mobility is sqrt(var(dx)/var(x)).
    activity = np.var(signal)
    first_var = np.var(first_deriv)
    mobility = np.sqrt(first_var / activity)
    # Complexity = mobility of the derivative / mobility of the signal.
    complexity = np.sqrt(np.var(second_deriv) / first_var) / mobility
    return complexity, {"Mobility": mobility, "Activity": activity}
| 3.46875
| 3
|
sdk/python/pulumi_exoscale/security_group_rule.py
|
secustor/pulumi-exoscale
| 0
|
12774286
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SecurityGroupRuleArgs', 'SecurityGroupRule']
@pulumi.input_type
class SecurityGroupRuleArgs:
    """The set of arguments for constructing a SecurityGroupRule resource.

    NOTE: this class is machine-generated by the Pulumi Terraform Bridge
    (tfgen); see the warning at the top of the file before editing by hand.
    """
    def __init__(__self__, *,
                 type: pulumi.Input[str],
                 cidr: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 end_port: Optional[pulumi.Input[int]] = None,
                 icmp_code: Optional[pulumi.Input[int]] = None,
                 icmp_type: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 security_group: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 start_port: Optional[pulumi.Input[int]] = None,
                 user_security_group: Optional[pulumi.Input[str]] = None,
                 user_security_group_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a SecurityGroupRule resource.
        :param pulumi.Input[str] type: The traffic direction to match (`INGRESS` or `EGRESS`).
        :param pulumi.Input[str] cidr: A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        :param pulumi.Input[str] description: A free-form text describing the Security Group rule purpose.
               * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
               * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        :param pulumi.Input[str] protocol: The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        :param pulumi.Input[str] security_group: The Security Group name the rule applies to.
        :param pulumi.Input[str] security_group_id: The Security Group ID the rule applies to.
        :param pulumi.Input[str] user_security_group: A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        :param pulumi.Input[str] user_security_group_id: A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        pulumi.set(__self__, "type", type)
        if cidr is not None:
            pulumi.set(__self__, "cidr", cidr)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if end_port is not None:
            pulumi.set(__self__, "end_port", end_port)
        if icmp_code is not None:
            pulumi.set(__self__, "icmp_code", icmp_code)
        if icmp_type is not None:
            pulumi.set(__self__, "icmp_type", icmp_type)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if security_group is not None:
            pulumi.set(__self__, "security_group", security_group)
        if security_group_id is not None:
            pulumi.set(__self__, "security_group_id", security_group_id)
        if start_port is not None:
            pulumi.set(__self__, "start_port", start_port)
        if user_security_group is not None:
            pulumi.set(__self__, "user_security_group", user_security_group)
        if user_security_group_id is not None:
            pulumi.set(__self__, "user_security_group_id", user_security_group_id)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        The traffic direction to match (`INGRESS` or `EGRESS`).
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        """
        return pulumi.get(self, "cidr")

    @cidr.setter
    def cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cidr", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A free-form text describing the Security Group rule purpose.
        * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
        * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="endPort")
    def end_port(self) -> Optional[pulumi.Input[int]]:
        """
        The end of the `TCP`/`UDP` port range to match (used together with `start_port`).
        """
        return pulumi.get(self, "end_port")

    @end_port.setter
    def end_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "end_port", value)

    @property
    @pulumi.getter(name="icmpCode")
    def icmp_code(self) -> Optional[pulumi.Input[int]]:
        """
        The ICMP/ICMPv6 code to match (used together with `icmp_type`).
        """
        return pulumi.get(self, "icmp_code")

    @icmp_code.setter
    def icmp_code(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "icmp_code", value)

    @property
    @pulumi.getter(name="icmpType")
    def icmp_type(self) -> Optional[pulumi.Input[int]]:
        """
        The ICMP/ICMPv6 type to match (used together with `icmp_code`).
        """
        return pulumi.get(self, "icmp_type")

    @icmp_type.setter
    def icmp_type(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "icmp_type", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="securityGroup")
    def security_group(self) -> Optional[pulumi.Input[str]]:
        """
        The Security Group name the rule applies to.
        """
        return pulumi.get(self, "security_group")

    @security_group.setter
    def security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group", value)

    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Security Group ID the rule applies to.
        """
        return pulumi.get(self, "security_group_id")

    @security_group_id.setter
    def security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group_id", value)

    @property
    @pulumi.getter(name="startPort")
    def start_port(self) -> Optional[pulumi.Input[int]]:
        """
        The start of the `TCP`/`UDP` port range to match (used together with `end_port`).
        """
        return pulumi.get(self, "start_port")

    @start_port.setter
    def start_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "start_port", value)

    @property
    @pulumi.getter(name="userSecurityGroup")
    def user_security_group(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        """
        return pulumi.get(self, "user_security_group")

    @user_security_group.setter
    def user_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_security_group", value)

    @property
    @pulumi.getter(name="userSecurityGroupId")
    def user_security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        return pulumi.get(self, "user_security_group_id")

    @user_security_group_id.setter
    def user_security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_security_group_id", value)
@pulumi.input_type
class _SecurityGroupRuleState:
    """Input properties used for looking up and filtering SecurityGroupRule
    resources. All fields are optional, unlike SecurityGroupRuleArgs.

    NOTE: this class is machine-generated by the Pulumi Terraform Bridge
    (tfgen); see the warning at the top of the file before editing by hand.
    """
    def __init__(__self__, *,
                 cidr: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 end_port: Optional[pulumi.Input[int]] = None,
                 icmp_code: Optional[pulumi.Input[int]] = None,
                 icmp_type: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 security_group: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 start_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 user_security_group: Optional[pulumi.Input[str]] = None,
                 user_security_group_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering SecurityGroupRule resources.
        :param pulumi.Input[str] cidr: A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        :param pulumi.Input[str] description: A free-form text describing the Security Group rule purpose.
               * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
               * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        :param pulumi.Input[str] protocol: The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        :param pulumi.Input[str] security_group: The Security Group name the rule applies to.
        :param pulumi.Input[str] security_group_id: The Security Group ID the rule applies to.
        :param pulumi.Input[str] type: The traffic direction to match (`INGRESS` or `EGRESS`).
        :param pulumi.Input[str] user_security_group: A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        :param pulumi.Input[str] user_security_group_id: A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        if cidr is not None:
            pulumi.set(__self__, "cidr", cidr)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if end_port is not None:
            pulumi.set(__self__, "end_port", end_port)
        if icmp_code is not None:
            pulumi.set(__self__, "icmp_code", icmp_code)
        if icmp_type is not None:
            pulumi.set(__self__, "icmp_type", icmp_type)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if security_group is not None:
            pulumi.set(__self__, "security_group", security_group)
        if security_group_id is not None:
            pulumi.set(__self__, "security_group_id", security_group_id)
        if start_port is not None:
            pulumi.set(__self__, "start_port", start_port)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_security_group is not None:
            pulumi.set(__self__, "user_security_group", user_security_group)
        if user_security_group_id is not None:
            pulumi.set(__self__, "user_security_group_id", user_security_group_id)

    @property
    @pulumi.getter
    def cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        """
        return pulumi.get(self, "cidr")

    @cidr.setter
    def cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cidr", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A free-form text describing the Security Group rule purpose.
        * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
        * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="endPort")
    def end_port(self) -> Optional[pulumi.Input[int]]:
        """
        The end of the `TCP`/`UDP` port range to match (used together with `start_port`).
        """
        return pulumi.get(self, "end_port")

    @end_port.setter
    def end_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "end_port", value)

    @property
    @pulumi.getter(name="icmpCode")
    def icmp_code(self) -> Optional[pulumi.Input[int]]:
        """
        The ICMP/ICMPv6 code to match (used together with `icmp_type`).
        """
        return pulumi.get(self, "icmp_code")

    @icmp_code.setter
    def icmp_code(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "icmp_code", value)

    @property
    @pulumi.getter(name="icmpType")
    def icmp_type(self) -> Optional[pulumi.Input[int]]:
        """
        The ICMP/ICMPv6 type to match (used together with `icmp_code`).
        """
        return pulumi.get(self, "icmp_type")

    @icmp_type.setter
    def icmp_type(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "icmp_type", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="securityGroup")
    def security_group(self) -> Optional[pulumi.Input[str]]:
        """
        The Security Group name the rule applies to.
        """
        return pulumi.get(self, "security_group")

    @security_group.setter
    def security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group", value)

    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Security Group ID the rule applies to.
        """
        return pulumi.get(self, "security_group_id")

    @security_group_id.setter
    def security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_group_id", value)

    @property
    @pulumi.getter(name="startPort")
    def start_port(self) -> Optional[pulumi.Input[int]]:
        """
        The start of the `TCP`/`UDP` port range to match (used together with `end_port`).
        """
        return pulumi.get(self, "start_port")

    @start_port.setter
    def start_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "start_port", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The traffic direction to match (`INGRESS` or `EGRESS`).
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="userSecurityGroup")
    def user_security_group(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        """
        return pulumi.get(self, "user_security_group")

    @user_security_group.setter
    def user_security_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_security_group", value)

    @property
    @pulumi.getter(name="userSecurityGroupId")
    def user_security_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        return pulumi.get(self, "user_security_group_id")

    @user_security_group_id.setter
    def user_security_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "user_security_group_id", value)
class SecurityGroupRule(pulumi.CustomResource):
    # NOTE(review): this class follows the standard auto-generated Pulumi
    # provider-SDK pattern; avoid hand-editing behavior here.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cidr: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 end_port: Optional[pulumi.Input[int]] = None,
                 icmp_code: Optional[pulumi.Input[int]] = None,
                 icmp_type: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 security_group: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 start_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 user_security_group: Optional[pulumi.Input[str]] = None,
                 user_security_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides an Exoscale [Security Group][r-security_group] rule resource. This can be used to create and delete Security Group rules.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_exoscale as exoscale
        webservers = exoscale.SecurityGroup("webservers")
        # ...
        http = exoscale.SecurityGroupRule("http",
            security_group_id=webservers.id,
            type="INGRESS",
            protocol="TCP",
            cidr="0.0.0.0/0",
            start_port=80,
            end_port=80)
        ```
        ## Import
        An existing Security Group rule can be imported as a resource by `<SECURITY-GROUP-ID>/<SECURITY-GROUP-RULE-ID>`console
        ```sh
        $ pulumi import exoscale:index/securityGroupRule:SecurityGroupRule http eb556678-ec59-4be6-8c54-0406ae0f6da6/846831cb-a0fc-454b-9abd-cb526559fcf9
        ```
        [cidr]https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation [icmp]https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages [r-security_group]security_group.html
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cidr: A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        :param pulumi.Input[str] description: A free-form text describing the Security Group rule purpose.
               * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
               * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        :param pulumi.Input[str] protocol: The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        :param pulumi.Input[str] security_group: The Security Group name the rule applies to.
        :param pulumi.Input[str] security_group_id: The Security Group ID the rule applies to.
        :param pulumi.Input[str] type: The traffic direction to match (`INGRESS` or `EGRESS`).
        :param pulumi.Input[str] user_security_group: A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        :param pulumi.Input[str] user_security_group_id: A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SecurityGroupRuleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Exoscale [Security Group][r-security_group] rule resource. This can be used to create and delete Security Group rules.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_exoscale as exoscale
        webservers = exoscale.SecurityGroup("webservers")
        # ...
        http = exoscale.SecurityGroupRule("http",
            security_group_id=webservers.id,
            type="INGRESS",
            protocol="TCP",
            cidr="0.0.0.0/0",
            start_port=80,
            end_port=80)
        ```
        ## Import
        An existing Security Group rule can be imported as a resource by `<SECURITY-GROUP-ID>/<SECURITY-GROUP-RULE-ID>`console
        ```sh
        $ pulumi import exoscale:index/securityGroupRule:SecurityGroupRule http eb556678-ec59-4be6-8c54-0406ae0f6da6/846831cb-a0fc-454b-9abd-cb526559fcf9
        ```
        [cidr]https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation [icmp]https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages [r-security_group]security_group.html
        :param str resource_name: The name of the resource.
        :param SecurityGroupRuleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single args
        # object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(SecurityGroupRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    # Shared initializer behind both __init__ overloads (generated pattern).
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 cidr: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 end_port: Optional[pulumi.Input[int]] = None,
                 icmp_code: Optional[pulumi.Input[int]] = None,
                 icmp_type: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 security_group: Optional[pulumi.Input[str]] = None,
                 security_group_id: Optional[pulumi.Input[str]] = None,
                 start_port: Optional[pulumi.Input[int]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 user_security_group: Optional[pulumi.Input[str]] = None,
                 user_security_group_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ may only be supplied when rehydrating an existing resource
        # (i.e. together with opts.id); otherwise build it from the arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SecurityGroupRuleArgs.__new__(SecurityGroupRuleArgs)
            __props__.__dict__["cidr"] = cidr
            __props__.__dict__["description"] = description
            __props__.__dict__["end_port"] = end_port
            __props__.__dict__["icmp_code"] = icmp_code
            __props__.__dict__["icmp_type"] = icmp_type
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["security_group"] = security_group
            __props__.__dict__["security_group_id"] = security_group_id
            __props__.__dict__["start_port"] = start_port
            # 'type' is the only required input for this resource.
            if type is None and not opts.urn:
                raise TypeError("Missing required property 'type'")
            __props__.__dict__["type"] = type
            __props__.__dict__["user_security_group"] = user_security_group
            __props__.__dict__["user_security_group_id"] = user_security_group_id
        super(SecurityGroupRule, __self__).__init__(
            'exoscale:index/securityGroupRule:SecurityGroupRule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            cidr: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            end_port: Optional[pulumi.Input[int]] = None,
            icmp_code: Optional[pulumi.Input[int]] = None,
            icmp_type: Optional[pulumi.Input[int]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            security_group: Optional[pulumi.Input[str]] = None,
            security_group_id: Optional[pulumi.Input[str]] = None,
            start_port: Optional[pulumi.Input[int]] = None,
            type: Optional[pulumi.Input[str]] = None,
            user_security_group: Optional[pulumi.Input[str]] = None,
            user_security_group_id: Optional[pulumi.Input[str]] = None) -> 'SecurityGroupRule':
        """
        Get an existing SecurityGroupRule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] cidr: A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        :param pulumi.Input[str] description: A free-form text describing the Security Group rule purpose.
               * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
               * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        :param pulumi.Input[str] protocol: The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        :param pulumi.Input[str] security_group: The Security Group name the rule applies to.
        :param pulumi.Input[str] security_group_id: The Security Group ID the rule applies to.
        :param pulumi.Input[str] type: The traffic direction to match (`INGRESS` or `EGRESS`).
        :param pulumi.Input[str] user_security_group: A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        :param pulumi.Input[str] user_security_group_id: A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        # Rehydrate the resource from provider state (see _internal_init).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _SecurityGroupRuleState.__new__(_SecurityGroupRuleState)
        __props__.__dict__["cidr"] = cidr
        __props__.__dict__["description"] = description
        __props__.__dict__["end_port"] = end_port
        __props__.__dict__["icmp_code"] = icmp_code
        __props__.__dict__["icmp_type"] = icmp_type
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["security_group"] = security_group
        __props__.__dict__["security_group_id"] = security_group_id
        __props__.__dict__["start_port"] = start_port
        __props__.__dict__["type"] = type
        __props__.__dict__["user_security_group"] = user_security_group
        __props__.__dict__["user_security_group_id"] = user_security_group_id
        return SecurityGroupRule(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def cidr(self) -> pulumi.Output[Optional[str]]:
        """
        A source (for ingress)/destination (for egress) IP subnet (in [CIDR notation][cidr]) to match (conflicts with `user_security_group`/`security_group_id`).
        """
        return pulumi.get(self, "cidr")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A free-form text describing the Security Group rule purpose.
        * `start_port`/`end_port` - A `TCP`/`UDP` port range to match.
        * `icmp_type`/`icmp_code` - An ICMP/ICMPv6 [type/code][icmp] to match.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="endPort")
    def end_port(self) -> pulumi.Output[Optional[int]]:
        """
        The upper bound of the `TCP`/`UDP` port range to match (used together with `start_port`).
        """
        return pulumi.get(self, "end_port")
    @property
    @pulumi.getter(name="icmpCode")
    def icmp_code(self) -> pulumi.Output[Optional[int]]:
        """
        The ICMP/ICMPv6 code to match (used together with `icmp_type`).
        """
        return pulumi.get(self, "icmp_code")
    @property
    @pulumi.getter(name="icmpType")
    def icmp_type(self) -> pulumi.Output[Optional[int]]:
        """
        The ICMP/ICMPv6 type to match (used together with `icmp_code`).
        """
        return pulumi.get(self, "icmp_type")
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output[Optional[str]]:
        """
        The network protocol to match. Supported values are: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `AH`, `ESP`, `GRE`, `IPIP` and `ALL`.
        """
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="securityGroup")
    def security_group(self) -> pulumi.Output[str]:
        """
        The Security Group name the rule applies to.
        """
        return pulumi.get(self, "security_group")
    @property
    @pulumi.getter(name="securityGroupId")
    def security_group_id(self) -> pulumi.Output[str]:
        """
        The Security Group ID the rule applies to.
        """
        return pulumi.get(self, "security_group_id")
    @property
    @pulumi.getter(name="startPort")
    def start_port(self) -> pulumi.Output[Optional[int]]:
        """
        The lower bound of the `TCP`/`UDP` port range to match (used together with `end_port`).
        """
        return pulumi.get(self, "start_port")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The traffic direction to match (`INGRESS` or `EGRESS`).
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userSecurityGroup")
    def user_security_group(self) -> pulumi.Output[str]:
        """
        A source (for ingress)/destination (for egress) Security Group name to match (conflicts with `cidr`/`security_group_id`).
        """
        return pulumi.get(self, "user_security_group")
    @property
    @pulumi.getter(name="userSecurityGroupId")
    def user_security_group_id(self) -> pulumi.Output[Optional[str]]:
        """
        A source (for ingress)/destination (for egress) Security Group ID to match (conflicts with `cidr`/`security_group)`).
        """
        return pulumi.get(self, "user_security_group_id")
| 1.921875
| 2
|
stock.py
|
ak4stock/ths_tdx_stock_xueqiu_guoren
| 2
|
12774287
|
/**
和谐完全加密
通达信加密公式解密和谐
通达信超赢版和谐
涨停密码
股票程序化交易
股票自动交易
首板套利
一进二
二进三
三进四
妖股龙头
擒龙捉妖
A股股票量化交易
下单跟单服务器搭建
文华财经
通达信
同花顺
雪球跟单下单
聚宽跟单下单
果仁跟单下单
掘金跟单下单
海龟策略
均线策略
网格交易
马丁格尔
Python量化交易
东方财富量化交易
证券量化交易
策略代写合作
股票指标公式代写
选股指标公式代写
股票量化交易
万1免5
万一免五
万1.5免5
万1.5免五
开户
数据抓取爬虫
其它合作事宜
你的专职程序员
你的专属工程师
打板
程序化交易
智能交易
MT4外汇
外汇EA
股票交易接口
程序代写开发
爱建证券有限责任公司
安信证券股份有限公司
安信证券资产管理有限公司
北京高华证券有限责任公司
渤海汇金证券资产管理有限公司
渤海证券股份有限公司
财达证券股份有限公司
财通证券股份有限公司
财信证券有限责任公司
长城国瑞证券有限公司
长城证券股份有限公司
长江证券承销保荐有限公司
长江证券股份有限公司
川财证券有限责任公司
大和证券(中国)有限责任公司
大通证券股份有限公司
大同证券有限责任公司
德邦证券股份有限公司
德邦证券资产管理有限公司
第一创业证券承销保荐有限责任公司
第一创业证券股份有限公司
东北证券股份有限公司
东方财富证券股份有限公司
东方证券承销保荐有限公司
东方证券股份有限公司
东海证券股份有限公司
东吴证券股份有限公司
东兴证券股份有限公司
东亚前海证券有限责任公司
东莞证券股份有限公司
方正证券承销保荐有限责任公司
方正证券股份有限公司
高盛高华证券有限责任公司
光大证券股份有限公司
广发证券股份有限公司
广发证券资产管理(广东)有限公司
国都证券股份有限公司
国海证券股份有限公司
国金证券股份有限公司
国开证券股份有限公司
国联证券股份有限公司
国融证券股份有限公司
国盛证券有限责任公司
国泰君安证券股份有限公司
国信证券股份有限公司
国元证券股份有限公司
海通证券股份有限公司
恒泰长财证券有限责任公司
恒泰证券股份有限公司
宏信证券有限责任公司
红塔证券股份有限公司
华安证券股份有限公司
华宝证券股份有限公司
华创证券有限责任公司
华福证券有限责任公司
华金证券股份有限公司
华林证券股份有限公司
华龙证券股份有限公司
华融证券股份有限公司
华泰联合证券有限责任公司
华泰证券股份有限公司
华西证券股份有限公司
华兴证券有限公司
华英证券有限责任公司
华鑫证券有限责任公司
汇丰前海证券有限责任公司
江海证券有限公司
金通证券有限责任公司
金元证券股份有限公司
金圆统一证券有限公司
九州证券股份有限公司
开源证券股份有限公司
联储证券有限责任公司
民生证券股份有限公司
摩根大通证券(中国)有限公司
摩根士丹利华鑫证券有限责任公司
南京证券股份有限公司
平安证券股份有限公司
瑞信证券(中国)有限公司
瑞银证券有限责任公司
山西证券股份有限公司
上海东方证券资产管理有限公司
上海光大证券资产管理有限公司
上海海通证券资产管理有限公司
上海证券有限责任公司
上海甬兴证券资产管理有限公司
申港证券股份有限公司
申万宏源西部证券有限公司
申万宏源证券承销保荐有限责任公司
申万宏源证券有限公司
世纪证券有限责任公司
首创证券股份有限公司
太平洋证券股份有限公司
天风(上海)证券资产管理有限公司
天风证券股份有限公司
万和证券股份有限公司
万联证券股份有限公司
网信证券有限责任公司
五矿证券有限公司
西部证券股份有限公司
西南证券股份有限公司
湘财证券股份有限公司
新时代证券股份有限公司
信达证券股份有限公司
星展证券(中国)有限公司
兴业证券股份有限公司
野村东方国际证券有限公司
银泰证券有限责任公司
英大证券有限责任公司
粤开证券股份有限公司
招商证券股份有限公司
浙商证券股份有限公司
中德证券有限责任公司
中国国际金融股份有限公司
中国银河证券股份有限公司
中国中金财富证券有限公司
中航证券有限公司
中山证券有限责任公司
中泰证券股份有限公司
中天国富证券有限公司
中天证券股份有限公司
中信建投证券股份有限公司
中信证券(山东)有限责任公司
中信证券股份有限公司
中信证券华南股份有限公司
中银国际证券股份有限公司
中邮证券有限责任公司
中原证券股份有限公司
甬兴证券有限公司
主要行业
金融服务房地产有色金属
医药生物化工行业机械设备
交通运输农林牧渔电子行业
新能源建筑建材信息服务
汽车行业黑色金属采掘行业
家用电器餐饮旅游公用事业
商业贸易信息设备食品饮料
轻工制造纺织服装新能源汽车
高端装备制造其他行业
https://xueqiu.com/
https://www.joinquant.com/
https://guorn.com/
https://www.eastmoney.com/
http://www.10jqka.com.cn/
http://vip.stock.finance.sina.com.cn/mkt/
https://www.yuncaijing.com/
http://cwzx.shdjt.com/top500.asp
http://www.tetegu.com/
http://www.jiatoupai.com/aspx/gudongkeys.aspx
https://www.taoguba.com.cn/
https://www.tdx.com.cn/
https://www.myquant.cn/
http://www.hibor.com.cn/
*/
| 2.59375
| 3
|
model/RPNet.py
|
zha-hengfeng/EACNet
| 1
|
12774288
|
# ERFNet full model definition for Pytorch
# Sept 2017
# <NAME>
#######################
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class DownsamplerBlock(nn.Module):
    """ERFNet-style downsampler: a stride-2 conv concatenated with a 2x2
    max-pool, batch-normalized and ReLU-activated.

    Returns (activated output, pool indices, pooled features, projected
    pooled features, pre-BN concatenation).
    """
    def __init__(self, ninput, noutput):
        super().__init__()
        # Attribute names are kept identical so state_dict keys do not change.
        self.conv = nn.Conv2d(ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True)
        # Projects 16-channel pooled maps to 64 channels; only applied when
        # the pooled tensor actually has 16 channels (see forward).
        self.conv2 = nn.Conv2d(16, 64, (1, 1), stride=1, padding=0, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        self.bn = nn.BatchNorm2d(noutput, eps=1e-3)
    def forward(self, input):
        conv_path = self.conv(input)
        pooled, pool_indices = self.pool(input)
        merged = torch.cat([conv_path, pooled], 1)
        # Channel-match the pooled branch for the later skip connections.
        pooled_proj = self.conv2(pooled) if pooled.shape[1] == 16 else pooled
        normed = self.bn(merged)
        return F.relu(normed), pool_indices, pooled, pooled_proj, merged
class non_bottleneck_1d(nn.Module):
    """Factorized (3x1 then 1x3) residual block with optional dilation and
    dropout, as in ERFNet.

    Returns (ReLU(residual sum), pre-residual branch output).
    """
    def __init__(self, chann, dropprob, dilated):
        super().__init__()
        # Attribute names are kept identical so state_dict keys do not change.
        self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1, 0), bias=True)
        self.conv1x3_1 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 1), bias=True)
        self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)
        self.conv3x1_2 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1 * dilated, 0), bias=True,
                                   dilation=(dilated, 1))
        self.conv1x3_2 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 1 * dilated), bias=True,
                                   dilation=(1, dilated))
        self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)
        self.dropout = nn.Dropout2d(dropprob)
    def forward(self, input):
        # First factorized conv pair: 3x1 -> ReLU -> 1x3 -> BN -> ReLU.
        branch = self.conv1x3_1(F.relu(self.conv3x1_1(input)))
        branch = F.relu(self.bn1(branch))
        # Second (dilated) pair: 3x1 -> ReLU -> 1x3 -> BN.
        branch = self.conv1x3_2(F.relu(self.conv3x1_2(branch)))
        branch = self.bn2(branch)
        if self.dropout.p != 0:
            branch = self.dropout(branch)
        # Residual connection; also expose the pre-residual branch output.
        return F.relu(branch + input), branch
class RPNet(nn.Module):
    """RPNet segmentation network: an ERFNet-style encoder (stride-2
    downsamplers + factorized residual blocks) with 1x1-conv decoders that
    emit predictions at full, 1/2, 1/4 and 1/8 resolution.
    """
    def __init__(self, num_classes):
        super().__init__()
        # Encoder stage 1 (1/2 resolution, 16 channels).
        self.initial_block = DownsamplerBlock(3, 16)
        self.l0d1 = non_bottleneck_1d(16, 0.03, 1)
        # Encoder stage 2 (1/4 resolution, 64 channels).
        self.down0_25 = DownsamplerBlock(16, 64)
        self.l1d1 = non_bottleneck_1d(64, 0.03, 1)
        self.l1d2 = non_bottleneck_1d(64, 0.03, 1)
        self.l1d3 = non_bottleneck_1d(64, 0.03, 1)
        self.l1d4 = non_bottleneck_1d(64, 0.03, 1)
        # NOTE(review): l1d5 is constructed but never used in forward().
        self.l1d5 = non_bottleneck_1d(64, 0.03, 1)
        # Encoder stage 3 (1/8 resolution, 128 channels, growing dilations).
        self.down0_125 = DownsamplerBlock(64, 128)
        self.l2d1 = non_bottleneck_1d(128, 0.3, 2)
        self.l2d2 = non_bottleneck_1d(128, 0.3, 4)
        self.l2d3 = non_bottleneck_1d(128, 0.3, 8)
        self.l2d4 = non_bottleneck_1d(128, 0.3, 16)
        self.l3d1 = non_bottleneck_1d(128, 0.3, 2)
        self.l3d2 = non_bottleneck_1d(128, 0.3, 4)
        self.l3d3 = non_bottleneck_1d(128, 0.3, 8)
        self.l3d4 = non_bottleneck_1d(128, 0.3, 16)
        # Only in encoder mode:
        # 1x1 classifier heads at the different resolutions.
        self.conv2d1 = nn.Conv2d(
            128,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True)
        # 192 = 128 (upsampled encoder) + 64 (skip) input channels.
        self.conv2d2 = nn.Conv2d(
            192,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True)
        # NOTE(review): 36 = num_classes + 16 only when num_classes == 20;
        # this head is hard-coded for 20 classes -- confirm.
        self.conv2d3 = nn.Conv2d(
            36,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=True)
        self.conv2d4 = nn.Conv2d(
            16,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False)
        self.conv2d5 = nn.Conv2d(
            64,
            num_classes,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False)
        # Unpool the max-pooled branches back up for the skip connections.
        self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)
        self.main_unpool2 = nn.MaxUnpool2d(kernel_size=2)
    def forward(self, input, predict=False):
        # NOTE(review): the `predict` flag is never used in this body.
        # Each DownsamplerBlock returns:
        #   (activated output, pool indices, pooled, projected pooled, pre-BN concat)
        output, max_indices0_0, d, d_d, dd = self.initial_block(input)
        output, y = self.l0d1(output)
        output, max_indices1_0, d1, d1_d1, ddd = self.down0_25(output)
        # print(d1.shape,'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
        # Skip map at 1/2 resolution: unpooled 1/4 features minus the pre-BN
        # concat from the initial block.
        d2 = self.main_unpool1(d1, max_indices1_0)
        d_1 = d2 - dd
        output, y = self.l1d1(output)
        output, y = self.l1d2(output)
        output, y = self.l1d3(output)
        output, y = self.l1d4(output)
        cc_2 = self.conv2d4(d_1)
        output, max_indices2_0, d3, d3_d3, dddd = self.down0_125(output)
        # Skip map at 1/4 resolution, analogous to d_1.
        d4 = self.main_unpool2(d3, max_indices2_0)
        d_2 = d4 - d1_d1
        cc_4 = self.conv2d5(d_2)
        output, y = self.l2d1(output)
        output, y = self.l2d2(output)
        output, y = self.l2d3(output)
        output, y = self.l2d4(output)
        output, y = self.l3d1(output)
        output, y = self.l3d2(output)
        output, y = self.l3d3(output)
        output, y = self.l3d4(output)
        # Decoder: classify at 1/8, then progressively upsample, concatenate
        # the skip maps, classify again and add the skip classifications.
        x1_81 = output
        x1_8 = self.conv2d1(output)
        x1_8_2 = torch.nn.functional.interpolate(x1_81, scale_factor=2, mode='bilinear')
        out4 = torch.cat((x1_8_2, d_2), 1)
        x1_41 = self.conv2d2(out4)
        x1_4 = x1_41 + cc_4
        x1_4_2 = torch.nn.functional.interpolate(x1_4, scale_factor=2, mode='bilinear')
        out2 = torch.cat((x1_4_2, d_1), 1)
        x1_21 = self.conv2d3(out2)
        x1_2 = x1_21 + cc_2
        x1_1 = torch.nn.functional.interpolate(x1_2, scale_factor=2, mode='bilinear')
        # Predictions at full, 1/2, 1/4 and 1/8 resolution.
        return x1_1, x1_2, x1_4, x1_8
| 3
| 3
|
build/lib/nhps/distance/remove_base.py
|
Anirudh-Murali/neural-hawkes-particle-smoothing
| 37
|
12774289
|
import numpy as np
import warnings
def remove_base(seq, base, tolerance=1e-4):
    r"""
    Functionality: Remove x from (x \sqcup z)
    Since there might be some float errors, I allow for a mismatch of the time_stamps between
    two seqs no larger than a threshold.
    The threshold value: tolerance * max_time_stamp
    :param list seq: x \sqcup z -- list of dicts with keys
        ``type_event`` and ``time_since_start``.
    :param list base: x -- same token format.
    :param float tolerance: A rate (scaled by the last time stamp of seq).
    :rtype: list
    :return: z
    """
    # Note: raw docstring above fixes the invalid '\s' escape sequence.
    if len(seq) == 0:
        return seq
    # Absolute threshold, relative to the sequence length in time.
    tolerance = tolerance * seq[-1]['time_since_start']
    seq_types = np.array([t['type_event'] for t in seq], dtype=np.int64)
    seq_time_stamps = np.array([t['time_since_start'] for t in seq], dtype=np.float32)
    base_types = np.array([t['type_event'] for t in base], dtype=np.int64)
    base_time_stamps = np.array([t['time_since_start'] for t in base], dtype=np.float32)
    # Broadcast to shape [n_base, n_seq] (clearer and cheaper than
    # repeat + reshape): a seq token is removed when some base token has the
    # same type AND a time stamp within the tolerance.
    type_equal = base_types[:, None] == seq_types[None, :]
    time_equal = np.abs(base_time_stamps[:, None] - seq_time_stamps[None, :]) < tolerance
    to_remove = (type_equal & time_equal).any(axis=0)
    rst = [seq[token_idx] for token_idx in np.where(~to_remove)[0]]
    # Sanity check: exactly len(base) tokens should have been removed.
    if len(rst) + len(base) != len(seq):
        warnings.warn('Some base tokens are missing from the seq!')
    return rst
def remove_bases_for_test(all_particles, golds, bases):
    r"""
    Helper function for testing.
    Functionality: Remove observed tokens from proposed particles and gold seqs.
    :param list all_particles: x \sqcup z_m
    :param list golds: x \sqcup z
    :param list bases: x
    :rtype: list, list
    :return: particles (only z_m) and gold seqs (only z)
    """
    assert len(all_particles) == len(golds) == len(bases)
    rst_particles = []
    rst_golds = []
    for particles, gold, base in zip(all_particles, golds, bases):
        # Strip the observed base from every particle of this sequence.
        rst_particles.append([remove_base(particle, base) for particle in particles])
        rst_golds.append(remove_base(gold, base))
    return rst_particles, rst_golds
# Following codes are just for testing
if __name__ == '__main__':
    import pickle
    # Smoke test against a local dataset; requires data/pilottaxi/train.pkl.
    dataset = pickle.load(open('data/pilottaxi/train.pkl', 'rb'))
    seq = dataset['seqs'][0]
    # base = dataset['seqs_obs'][0]
    base = list()
    from pprint import pprint
    pprint('seq:')
    pprint(seq)
    pprint('base:')
    pprint(base)
    pprint('after removal:')
    pprint(remove_base(seq, base))
    # With an empty base, nothing should be removed.
    assert len(seq) == len(remove_base(seq, base))
| 2.421875
| 2
|
2015/day05/solve.py
|
greenbender/aoc2018
| 0
|
12774290
|
import sys
# Puzzle input: one candidate string per line on stdin, whitespace stripped.
strings = [l.strip() for l in sys.stdin]
def nice1(string):
    """Part-1 'nice string' test (AoC 2015 day 5).

    Nice means: at least three vowels, at least one doubled letter, and
    none of the forbidden substrings ab, cd, pq, xy.
    """
    if any(bad in string for bad in ('ab', 'cd', 'pq', 'xy')):
        return False
    has_double = any(left == right for left, right in zip(string, string[1:]))
    vowel_count = sum(ch in 'aeiou' for ch in string)
    return has_double and vowel_count >= 3
def nice2(string):
    """Part-2 'nice string' test (AoC 2015 day 5).

    Nice means: some two-letter pair appears at least twice without
    overlapping, and some letter repeats with exactly one letter between.
    """
    has_pair = any(string[j:j + 2] in string[j + 2:]
                   for j in range(len(string) - 1))
    has_sandwich = any(string[j] == string[j + 2]
                       for j in range(len(string) - 2))
    return has_sandwich and has_pair
def part1():
    # Count the part-1 nice strings.
    # Fix: `print len(filter(...))` is Python-2-only syntax and filter() is
    # lazy on Python 3; this form prints the same count on both versions.
    print(sum(1 for s in strings if nice1(s)))
def part2():
    # Count the part-2 nice strings (Python 2/3 compatible; the original
    # `print len(filter(...))` only works on Python 2).
    print(sum(1 for s in strings if nice2(s)))
part1()
part2()
| 3.609375
| 4
|
config/asgi.py
|
vendari12/django-ai-algotrade
| 0
|
12774291
|
"""
ASGI config for stockze project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/asgi/
"""
import os
import sys
from pathlib import Path
from django.core.asgi import get_asgi_application
# This allows easy placement of apps within the interior
# stockze directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "stockze"))
# If DJANGO_SETTINGS_MODULE is unset, default to the local settings
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any ASGI server configured to use this file.
django_application = get_asgi_application()
# Apply ASGI middleware here.
# from helloworld.asgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# Import websocket application here, so apps from django_application are loaded first
from config.websocket import websocket_application # noqa isort:skip
'''
import pypeln as pl
import asyncio
django_application = pl.task.map(django_application(scope, receive, send), workers=max)
django_application = list(django_application)
'''
async def application(scope, receive, send):
    """Top-level ASGI dispatcher: route HTTP to Django, websockets to the
    websocket app, and reject anything else."""
    kind = scope["type"]
    if kind == "http":
        handler = django_application
    elif kind == "websocket":
        handler = websocket_application
    else:
        raise NotImplementedError(f"Unknown scope type {scope['type']}")
    await handler(scope, receive, send)
import socketio
import engineio
# Wrap the plain ASGI dispatcher above in a Socket.IO server backed by Redis
# so events can be shared across workers.
# NOTE(review): ping_timeout/ping_interval are set to extremely large values;
# looks like a debugging leftover -- confirm before production use.
sio = socketio.AsyncServer(async_mode='asgi', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0'), logger=False, engineio_logger=True, ping_timeout=60000000, ping_interval= 6000000)
application = engineio.ASGIApp(sio, application)
'''
import pypeln as pl
application = pl.task.map(application, workers=max)
application = list(application)
'''
#import sys
#stage = pl.process.map(application, stage, workers=3, on_start=on_start, on_end=on_end)
#application = pl.sync.map(application, workers=1)
#application = list(application)
'''
async def application(scope, receive, send):
if scope["type"] == "http":
django_application(scope, receive, send)
await pl.task.each.django_application(scope, receive, send)
elif scope["type"] == "websocket":
websocket_application(scope, receive, send)
await pl.task.each.websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
def application(scope, receive, send):
if scope["type"] == "http":
async def django_application(scope, receive, send)
stage = pl.task.map(django_application, workers=max)
await stage
elif scope["type"] == "websocket":
async def websocket_application(scope, receive, send)
stage = pl.task.map(websocket_application, workers=max)
await stage
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
'''
'''
import socketio
import engineio
#import gevent
#import eventlet
sio = socketio.AsyncServer(async_mode='asgi', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0'))
#sio = socketio.AsyncServer(async_mode='gevent/eventlet', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #fails
#sio = socketio.AsyncServer(async_mode='aiohttp', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #works
#sio = socketio.AsyncServer(async_mode='tornado', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #works aiohttp
application = engineio.ASGIApp(sio, application)
#application = socketio.ASGIApp(sio, application) #?complicated
'''
#application = pl.sync.map(application, workers=max)
#application = list(application)
#import pypeln as pl
#application = pl.task.map(application)
#application = pl.process.map(application, stage, workers=3, on_start=on_start, on_end=on_end)
#sio = socketio.AsyncServer(async_mode='wsgi', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #reduced
#application = socketio.ASGIApp(sio, application) #?complicated
#application = pl.task.each(application)
#import asyncio
#application = asyncio.run(application()) #works 404
#application = asyncio.run(application.serve_forever()) #438
#application = asyncio.run(application.start_server()) #438
#application = asyncio.start_server(application()) #works 454
#asyncio.run_until_complete(application.serve_forever()) #works 604 - 603 - 547
#asyncio.run(_main_coroutine(application, functools.partial(asyncio.start_server), _do_nothing, container)) #342
#asyncio.run(_main_coroutine(application.serve_forever(), functools.partial(asyncio.start_server), _do_nothing, container)) #684 fast reset by peer - 553 - 720 - 435 timeout - 727
#asyncio.start_server(_main_coroutine(application.serve_forever(), functools.partial(asyncio.start_server), _do_nothing, container)) # 543 - 715 fast 559
#asyncio.start_server(_main_coroutine(application.serve_forever(), functools.partial(asyncio.run), _do_nothing, container)) #533 fast 439
#asyncio.run(_main_coroutine(application.serve_forever(), functools.partial(asyncio.run), _do_nothing, container)) # 856 fast - timeout - 522 - 438
#asyncio.run_until_complete(_main_coroutine(application.serve_forever(), functools.partial(asyncio.start_server), _do_nothing, container)) #564
#asyncio.run_until_complete(_main_coroutine(application.serve_forever(), functools.partial(asyncio.run_until_complete), _do_nothing, container)) #709 -539 -
| 2.453125
| 2
|
xml_text/xml_test.py
|
INSPIRE-5Gplus/i5p-wp3-netslice4ssla
| 0
|
12774292
|
<filename>xml_text/xml_test.py
#!/usr/local/bin/python3.4
# NOTE(review): only xml.etree.ElementTree is used below; the other imports
# appear unused in this demo script.
import os, sys, logging, json, argparse, time, datetime, requests, uuid
import xml.etree.ElementTree as ET
# XML content is parsed into a tree structure and its ROOT is retrieved
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
## READING XML DOCUMENTS
# one specific item attribute (second child of the first element)
print('Item #2 attribute:')
print(root[0][1].attrib)
# all item attributes
print('\nAll attributes:')
for elem in root:
    for subelem in elem:
        print(subelem.attrib)
# one specific item's data
print('\nItem #2 data:')
print(root[0][1].text)
# all items data
print('\nAll item data:')
for elem in root:
    for subelem in elem:
        print(subelem.text)
# count total amount of items
print('\nCounting the XML elements:')
print(len(root[0]))
## WRITING XML DOCUMENTS
# create the file structure
data = ET.Element('data')
items = ET.SubElement(data, 'items')
item1 = ET.SubElement(items, 'item')
item2 = ET.SubElement(items, 'item')
item1.set('name','item1')
item2.set('name','item2')
item1.text = 'item1abc'
item2.text = 'item2abc'
# create a new XML file with the results
# BUG FIX: ET.tostring() returns bytes by default; str(bytes) would write the
# literal "b'...'" repr into the file. Ask for a str and close the file.
mydata = ET.tostring(data, encoding='unicode')
with open("xml_text/items2.xml", "w") as myfile:
    myfile.write(mydata)
## FINDING XML ELEMENTS
# find the first 'item' object
print("\nFind the first 'item' object.")
for elem in root:
    print(elem.find('item').get('name'))
# find all "item" objects and print their "name" attribute
print("\nFind all the 'item' objects.")
for elem in root:
    for subelem in elem.findall('item'):
        # if we don't need to know the name of the attribute(s), get the dict
        print(subelem.attrib)
        # if we know the name of the attribute, access it directly
        print(subelem.get('name'))
## MODIFYING XML ELEMENTS
# re-parse so the modifications start from the pristine document
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
# changing a field text
for elem in root.iter('item'):
    elem.text = 'new text'
# modifying an attribute
for elem in root.iter('item'):
    elem.set('name', 'newitem')
# adding an attribute
for elem in root.iter('item'):
    elem.set('name2', 'newitem2')
tree.write('xml_text/newitems.xml')
##CREATING XML SUB-ELEMENTS
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
# adding an element to the root node
attrib = {}
element = root.makeelement('seconditems', attrib)
root.append(element)
# adding an element to the seconditems node
attrib = {'name2': 'secondname2'}
# FIX: the original also built an unused element via
# root[0][1].makeelement('seconditem', attrib); makeelement() does not attach
# the element, so that line was dead code -- ET.SubElement does the real work.
ET.SubElement(root[1], 'seconditem', attrib)
root[1][0].text = 'seconditemabc'
# create a new XML file with the new element
tree.write('xml_text/newitems2.xml')
##DELETING XML ELEMENTS
#### Removing an attribute
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
# removing an attribute (pop with default None: no error if it is absent)
root[0][0].attrib.pop('name', None)
# create a new XML file with the results
tree.write('xml_text/newitems3.xml')
#### Removing a sub-element
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
# removing one sub-element
root[0].remove(root[0][0])
# create a new XML file with the results
tree.write('xml_text/newitems4.xml')
#### Removing all sub-elements
tree = ET.parse('xml_text/items.xml')
root = tree.getroot()
# removing all sub-elements of an element (clear() also drops text/attributes)
root[0].clear()
# create a new XML file with the results
tree.write('xml_text/newitems5.xml')
| 3.59375
| 4
|
plots/plot_profiles.py
|
cebarbosa/hydraimf
| 0
|
12774293
|
<reponame>cebarbosa/hydraimf<filename>plots/plot_profiles.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on 24/04/2020
Author : <NAME>
"""
import os
import itertools
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.patches import Ellipse
import context
def plot_profiles(t, xfield, yfields, output=None, xfracs=None, yfracs=None,
                  xlim=None, return_axis=False):
    """Plot one vertically stacked panel per *yfield* against *xfield*.

    Parameters
    ----------
    t : table-like
        Must contain each field plus "<field>_lerr"/"<field>_uerr" columns.
    xfield : str
        Column used on the shared x-axis.
    yfields : list of str
        One stacked panel is drawn per entry.
    output : str, optional
        Basename of the saved figure; ".pdf" and ".png" are appended.
    xfracs, yfracs : list of float, optional
        Per-panel fractional (x, y) position of the 1-sigma correlation
        ellipse; both default to 0.2 for every panel.
    xlim : two-element sequence, optional
        Shared x-axis limits; [None, None] keeps matplotlib's defaults.
    return_axis : bool, optional
        If True, return the GridSpec instead of saving/closing the figure.
    """
    global labels
    # Ellipse parameters of previously fitted parameter-pair correlations.
    corr = Table.read(os.path.join(wdir, "fit_stats.fits"))
    fig = plt.figure(figsize=(context.fig_width, 2. * len(yfields)))
    xfracs = [0.2] * len(yfields) if xfracs is None else xfracs
    yfracs = [0.2] * len(yfields) if yfracs is None else yfracs
    xlim = [None, None] if xlim is None else xlim
    gs = gridspec.GridSpec(len(yfields), 1, figure=fig)
    gs.update(left=0.12, right=0.99, bottom=0.055, top=0.99, wspace=0.02,
              hspace=0.02)
    for i, yfield in enumerate(yfields):
        print(yfield)
        yerr = [t["{}_lerr".format(yfield)], t["{}_uerr".format(yfield)]]
        xerr = [t["{}_lerr".format(xfield)], t["{}_uerr".format(xfield)]]
        ax = plt.subplot(gs[i])
        # ax.set_xscale("log")
        ax.errorbar(t[xfield], t[yfield],
                    yerr=yerr, xerr=xerr, fmt="o", ecolor="0.8", mec="w",
                    mew=0.5, elinewidth=0.5)
        ax.set_xlim(xlim)
        plt.ylabel(labels[yfield])
        if i + 1 < len(yfields):
            ax.xaxis.set_ticklabels([])
        # plot parameter correlations
        idx = np.where((corr["param1"] == xfield) &
                       (corr["param2"] == yfield))[0]
        # BUG FIX: "if idx:" was False for a match at row 0 (np.array([0])
        # is falsy) and raises ValueError for more than one match; test the
        # length explicitly, as plot_imf_individual() already does.
        if len(idx) > 0:
            a = float(corr["a"][idx[0]])
            b = float(corr["b"][idx[0]])
            ang = float(corr["ang"][idx[0]])
            print(xfield, yfield, a, b, ang)
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()
            xel = xmin + xfracs[i] * (xmax - xmin)
            yel = ymin + yfracs[i] * (ymax - ymin)
            ellipse = Ellipse((xel, yel), a, b, ang,
                              facecolor="none", edgecolor="r", linestyle="--")
            ax.text(xel - 0.03 * (xmax - xmin),
                    yel - 0.02 * (ymax - ymin), "$1\sigma$", size=5.5,
                    c="r")
            ax.add_patch(ellipse)
    if return_axis:
        return gs
    plt.xlabel(labels[xfield])
    if output is not None:
        for fmt in ["pdf", "png"]:
            plt.savefig("{}.{}".format(output, fmt), dpi=250)
    plt.close()
def plot_single(t, xfield, yfield, return_ax=True, label=None,
                figsize=None, output=None):
    """Scatter *yfield* against *xfield* with asymmetric error bars.

    Returns the axis when *return_ax* is True (the default); otherwise saves
    the figure to *output*.pdf/.png (if given) and closes it.
    """
    global labels
    figsize = (context.fig_width, 2.8) if figsize is None else figsize
    # NOTE(review): `fig` is never used afterwards; plt.figure() is kept for
    # its side effect of opening a new current figure.
    fig = plt.figure(figsize=figsize)
    # Lower/upper error columns follow the "<field>_lerr"/"<field>_uerr"
    # naming convention used throughout this module.
    yerr = [t["{}_lerr".format(yfield)], t["{}_uerr".format(yfield)]]
    xerr = [t["{}_lerr".format(xfield)], t["{}_uerr".format(xfield)]]
    ax = plt.subplot(1, 1, 1)
    # ax.set_xscale("log")
    ax.errorbar(t[xfield], t[yfield],
                yerr=yerr, xerr=xerr, fmt="o", ecolor="C0", mec="w",
                mew=0.5, elinewidth=0.5, label=label)
    plt.ylabel(labels[yfield])
    plt.xlabel(labels[xfield])
    plt.subplots_adjust(left=0.11, right=0.985, top=0.98, bottom=0.12,
                        hspace=0.06)
    if return_ax:
        return ax
    if output is not None:
        for fmt in ["pdf", "png"]:
            plt.savefig("{}.{}".format(output, fmt), dpi=250)
    plt.close()
    return
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Build a new colormap from the [minval, maxval] slice of *cmap*.

    Samples *n* evenly spaced colors from the requested range and wraps
    them into a fresh LinearSegmentedColormap.
    """
    name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval,
                                               b=maxval)
    samples = cmap(np.linspace(minval, maxval, n))
    return matplotlib.colors.LinearSegmentedColormap.from_list(name, samples)
def plot_sigma_imf(t, figsize=(5,3)):
    # Producing plot similar to Spiniello+ 2014
    """Plot IMF slope vs. velocity dispersion, colored by radius,
    with literature relations and Kroupa/Chabrier/Salpeter reference lines."""
    global labels
    output = os.path.join(outdir, "sigma_imf")
    xfield = "sigma"
    yfield = "imf"
    # NOTE(review): `label` is defined but never passed to a plot call.
    label = "NGC 3311"
    fig = plt.figure(figsize=figsize)
    xs = t[xfield]
    ys = t[yfield]
    xerrs = np.array([t["{}_lerr".format(xfield)], t["{}_uerr".format(
        xfield)]]).T
    yerrs = np.array([t["{}_lerr".format(yfield)], t["{}_uerr".format(
        yfield)]]).T
    R = t["R"]
    # Map radius onto the darker half of the Blues_r colormap.
    norm = matplotlib.colors.Normalize(vmin=0, vmax=max(R),
                                       clip=True)
    cmap = plt.get_cmap('Blues_r')
    new_cmap = truncate_colormap(cmap, 0.0, 0.5)
    mapper = cm.ScalarMappable(norm=norm, cmap=new_cmap)
    colors = np.array([(mapper.to_rgba(v)) for v in R])
    ax = plt.subplot(1, 1, 1)
    # One errorbar call per point so each marker gets its own radius color.
    for x, y, xerr, yerr, c in zip(xs, ys, xerrs, yerrs, colors):
        ax.errorbar(x, y, yerr=np.atleast_2d(yerr).T,
                    xerr=np.atleast_2d(xerr).T, fmt="o",
                    ecolor=c, mec="w", color=c,
                    mew=0.5, elinewidth=0.5)
    plt.ylabel(labels[yfield])
    plt.xlabel(labels[xfield])
    # Monte-Carlo band for the Spiniello+2014 relation (mean and 16/84%).
    a = np.random.normal(2.3, 0.1, 1000)
    b = np.random.normal(2.1, 0.2, 1000)
    sigma = np.linspace(150, 310, 100)
    y = a * np.log10(sigma / 200)[:, np.newaxis] + b
    ax.plot(sigma, y.mean(axis=1), "-", c="C1", label="Spiniello et al. (2014)")
    ax.plot(sigma, np.percentile(y, 16, axis=1), "--", c="C1")
    ax.plot(sigma, np.percentile(y, 84, axis=1), "--", c="C1")
    # Plot other authors
    a = [4.87, 3.4]
    b = [2.33, 2.3]
    plabels = ["Ferreras et al. (2013)", "La Barbera et al. (2013)"]
    colors = ["C2", "C3"]
    for i in range(2):
        y = a[i] * np.log10(sigma / 200) + b[i]
        ax.plot(sigma, y, "-", c=colors[i], label=plabels[i])
    plt.legend(loc=4, frameon=False)
    ax.set_xlim(140, 350)
    # Horizontal reference lines for canonical IMF slopes.
    ax.axhline(y=1.35, c="k", ls="--", lw=0.8)
    ax.axhline(y=1.8, c="k", ls="--", lw=0.8)
    ax.axhline(y=2.35, c="k", ls="--", lw=0.8)
    ax.text(325, 1.375, "Kroupa")
    ax.text(325, 1.825, "Chabrier")
    ax.text(325, 2.375, "Salpeter")
    plt.subplots_adjust(left=0.08, right=0.98, top=0.99, bottom=0.105,
                        hspace=0.06)
    # Inset horizontal colorbar encoding radius.
    cbar_pos = [0.14, 0.18, 0.18, 0.05]
    cbaxes = fig.add_axes(cbar_pos)
    cbar = plt.colorbar(mapper, cax=cbaxes, orientation="horizontal")
    cbar.set_ticks([0, 2, 4, 6, 8])
    cbar.ax.xaxis.set_label_position('top')
    cbar.set_label("R (kpc)")
    for fmt in ["pdf", "png"]:
        plt.savefig("{}.{}".format(output, fmt), dpi=250)
    plt.close()
def plot_sarzi(t, figsize=(7.24, 2.5)):
    """Three-panel plot of IMF slope vs. [Z/H], [alpha/Fe] and sigma,
    colored by radius, with literature comparison lines per panel."""
    global labels
    output = os.path.join(outdir, "imf_Z-alphafe-sigma")
    R = t["R"]
    # Radius -> color mapping on the darker half of Blues_r.
    norm = matplotlib.colors.Normalize(vmin=0, vmax=max(R),
                                       clip=True)
    cmap = plt.get_cmap('Blues_r')
    new_cmap = truncate_colormap(cmap, 0.0, 0.5)
    mapper = cm.ScalarMappable(norm=norm, cmap=new_cmap)
    colors = np.array([(mapper.to_rgba(v)) for v in R])
    yfield = "imf"
    ys = t[yfield]
    yerrs = np.array([t["{}_lerr".format(yfield)], t["{}_uerr".format(
        yfield)]]).T
    xfields = ["Z", "alphaFe", "sigma"]
    fig = plt.figure(figsize=figsize)
    # Fourth (narrow) column holds the colorbar.
    widths = [1, 1, 1, 0.08]
    gs = gridspec.GridSpec(1, 4, figure=fig, width_ratios=widths)
    gs.update(left=0.06, right=0.955, bottom=0.13, top=0.98, wspace=0.02,
              hspace=0.00)
    xlims = [[-0.35, 0.42], [-0.02, 0.38], [170, 360]]
    for i, xfield in enumerate(xfields):
        xs = t[xfield]
        xerrs = np.array([t["{}_lerr".format(xfield)], t["{}_uerr".format(
            xfield)]]).T
        ax = plt.subplot(gs[i])
        # One errorbar call per point so each marker carries its radius color.
        for x, y, xerr, yerr, c in zip(xs, ys, xerrs, yerrs, colors):
            ax.errorbar(x, y, yerr=np.atleast_2d(yerr).T,
                        xerr=np.atleast_2d(xerr).T, fmt="o",
                        ecolor="0.8", mec="w", color=c,
                        mew=0.5, elinewidth=0.5)
        ax.set_xlabel(labels[xfield])
        if i == 0:
            ax.set_ylabel(labels[yfield])
        else:
            ax.yaxis.set_ticklabels([])
        # IMF lines
        ax.axhline(y=1.3, c="k", ls="--", lw=0.8)
        # ax.axhline(y=1.8, c="k", ls="--", lw=0.8)
        # ax.axhline(y=2.35, c="k", ls="--", lw=0.8)
        if i == 0:
            ax.text(-0.15, 1.32, "Kroupa", size=6)
            # ax.text(-0.32, 1.82, "Chabrier", size=5)
            # ax.text(-0.32, 2.37, "Salpeter", size=5)
        ax.set_xlim(xlims[i])
        ax.set_ylim(0.45, 3.7)
        # Specific details for each plot
        if xfield == "sigma":
            sigma = np.linspace(180, 320, 100)
            # Plot other authors
            a = [4.87, 3.4]
            b = [2.33, 2.3]
            plabels = ["Ferreras et al. (2013)", "La Barbera et al. (2013)"]
            colors = ["C2", "C3"]
            # NOTE(review): this inner loop rebinds the outer loop variable
            # `i` (and the `colors` array above); benign here because sigma
            # is the last panel, but fragile if panel order changes.
            for i in [0]:
                y = a[i] * np.log10(sigma / 200) + b[i]
                ax.plot(sigma, y, "-", c=colors[i], label=plabels[i])
            # Spiniello 2014
            # a = np.random.normal(2.3, 0.1, len(sigma))
            # b = np.random.normal(2.1-1, 0.2, len(sigma))
            # y = a * np.log10(sigma / 200)[:, np.newaxis] + b
            # ax.plot(sigma, y.mean(axis=1), "-", c="C1",
            # label="Spiniello et al. (2014)")
            # ax.plot(sigma, np.percentile(y, 16, axis=1), "--", c="C1")
            # ax.plot(sigma, np.percentile(y, 84, axis=1), "--", c="C1")
            # La Barbera 2013
            b = np.random.normal(2.4, 0.1, len(sigma))
            a = np.random.normal(5.4, 0.9, len(sigma))
            y = a * np.log10(sigma / 200.)[:, np.newaxis] + b
            ax.plot(sigma, y.mean(axis=1), "-", c="C1",
                    label="La Barbera et al. (2013)")
            ax.plot(sigma, np.percentile(y, 16, axis=1), "--", c="C1")
            ax.plot(sigma, np.percentile(y, 84, axis=1), "--", c="C1")
            plt.legend(loc=4, frameon=False, prop={'size': 6})
        if xfield == "Z":
            z = np.linspace(-0.4, 0.45, 50)
            # Martin-Navarro 2015
            a = np.random.normal(3.1, 0.5, len(z))
            b = np.random.normal(2.2, 0.1, len(z))
            y = a * z[:, np.newaxis] + b
            ax.plot(z, y.mean(axis=1), "--", c="C4",
                    label="Martín-Navarro et al.(2015)")
            ax.plot(z, np.percentile(y, 16, axis=1), "--", c="C4")
            ax.plot(z, np.percentile(y, 84, axis=1), "--", c="C4")
            plt.legend(loc=3, frameon=False, prop={'size': 6})
        if xfield == "alphaFe":
            ax.plot([0.29, 0.42], [2, 2.9], "-", c="C5",
                    label="Sarzi et al. (2018)")
            plt.legend(loc=4, frameon=False, prop={'size': 6})
    cax = fig.add_subplot(gs[3])
    cbar = fig.colorbar(mapper, cax=cax, orientation="vertical")
    cbar.set_label("R (kpc)")
    for fmt in ["pdf", "png"]:
        plt.savefig("{}.{}".format(output, fmt), dpi=300)
    plt.close()
def get_colors(R, cmapname="Blues_r"):
    """Map radii *R* to RGBA colors using a truncated colormap.

    Returns the ScalarMappable (for building a colorbar) and an array of
    one RGBA color per entry of *R*.
    """
    norm = matplotlib.colors.Normalize(vmin=0, vmax=np.ceil(np.max(R)),
                                       clip=True)
    cmap = plt.get_cmap(cmapname)
    # Use only the first 80% of the map to avoid near-white markers.
    new_cmap = truncate_colormap(cmap, 0.0, 0.8)
    mapper = cm.ScalarMappable(norm=norm, cmap=new_cmap)
    return mapper, np.array([(mapper.to_rgba(v)) for v in R])
def plot_imf_relations(t, figsize=(7.24, 4.5)):
    """Plot IMF slope and alpha (M/L excess) against stellar-population
    parameters, two figures of 2x3 panels, colored by radius.

    Each panel optionally overlays a 1-sigma correlation ellipse (from
    fit_stats.fits) and literature relations via add_literature_results().
    Figures are saved to "<wdir>/plots/imf_relations_<k>.pdf/.png".
    """
    global labels, wdir
    # Ellipse parameters of previously fitted parameter-pair correlations.
    corr = Table.read(os.path.join(wdir, "fit_stats.fits"))
    mapper, colors = get_colors(t["R"])
    xlim = {"T": [None, None], "Z": [-0.2, 0.25], "alphaFe": [0, 0.45],
            "NaFe": [None, 0.7], "sigma": [80, 380], "Re": [-0.1, 1.1]}
    # Fractional x-position of the correlation ellipse, per x-field.
    xelf1 = {"T": 0.8, "Z": 0.85, "alphaFe": 0.85, "NaFe": 0.75, "sigma": 0.85,
             "Re": 0.3}
    xelf2 = {"T": 0.8, "Z": 0.85, "alphaFe": 0.85, "NaFe": 0.75, "sigma": 0.85,
             "Re": 0.3}
    xelfs = {"alpha" : xelf2, "imf": xelf1}
    ylims = {"imf": (0.0, 3.5), "alpha": (0.5, 2.0)}
    # Two figures, three x-fields each.
    xfig = [["sigma", "Z", "alphaFe"], ["NaFe", "T", "Re"]]
    yfields = ["imf", "alpha"]
    for k, xfields in enumerate(xfig):
        fig = plt.figure(figsize=figsize)
        gs = gridspec.GridSpec(2, 3, figure=fig)
        gs.update(left=0.07, right=0.91, bottom=0.07, top=0.99, wspace=0.03,
                  hspace=0.05)
        for i, (yfield, xfield) in enumerate(itertools.product(yfields,
                                                               xfields)):
            xs = t[xfield]
            ys = t[yfield]
            xerrs = np.array([t["{}_lerr".format(xfield)], t["{}_uerr".format(
                xfield)]]).T
            yerrs = np.array([t["{}_lerr".format(yfield)], t["{}_uerr".format(
                yfield)]]).T
            xelf = xelfs[yfield]
            ax = plt.subplot(gs[i])
            # One errorbar call per point to give each marker its own color.
            for x, y, xerr, yerr, c in zip(xs, ys, xerrs, yerrs, colors):
                ax.errorbar(x, y, yerr=np.atleast_2d(yerr).T,
                            xerr=np.atleast_2d(xerr).T, fmt="o",
                            ecolor="0.8", mec="w", color=c,
                            mew=0.5, elinewidth=0.5)
            # Axis labels only on the left column / bottom row.
            if i in [0,3]:
                ax.set_ylabel(labels[yfield])
            else:
                ax.yaxis.set_ticklabels([])
            if i < 3:
                ax.xaxis.set_ticklabels([])
            else:
                ax.set_xlabel(labels[xfield])
            ax.set_xlim(xlim[xfield])
            ax.set_ylim(ylims[yfield])
            # plot parameter correlations
            idx = np.where((corr["param1"]==xfield) &
                           (corr["param2"]==yfield))[0]
            # BUG FIX: "if idx:" was False for a match at row 0
            # (np.array([0]) is falsy) and raises for multiple matches;
            # test the length, as plot_imf_individual() does.
            if len(idx) > 0:
                a = corr["a"][idx]
                b = corr["b"][idx]
                ang = corr["ang"][idx]
                xmin, xmax = ax.get_xlim()
                ymin, ymax = ax.get_ylim()
                xel = xmin + xelf[xfield] * (xmax - xmin)
                yel = ymin + 0.2 * (ymax - ymin)
                ellipse = Ellipse((xel, yel), a, b, ang,
                                  facecolor="none", edgecolor="r", linestyle="--")
                ax.text(xel - 0.03 * (xmax - xmin),
                        yel - 0.02 * (ymax - ymin), "$1\sigma$", size=5.5,
                        c="r")
                ax.add_patch(ellipse)
            ####################################################################
            # alpha relations for Kroupa and Salpeter
            xmin, xmax = ax.get_xlim()
            if yfield == "alpha":
                ax.axhline(y=1, c="k", ls="--", lw=0.5)
                ax.axhline(y=1.55, c="k", ls="--", lw=0.5)
                if i == 3:
                    ax.text(xmin + 0.04 * (xmax - xmin), 1.03, "Kroupa",
                            size=5.5, c="k")
                    ax.text(xmin + 0.04 * (xmax - xmin), 1.58, "Salpeter",
                            size=5.5, c="k")
            ####################################################################
            add_literature_results(ax, xfield, yfield)
            plt.legend(loc=3, frameon=False, prop={"size": 5})
        # Shared radius colorbar, anchored to the last (lowest) axis.
        cax = inset_axes(ax, # here using axis of the lowest plot
                         width="10%", # width = 5% of parent_bbox width
                         height="180%", # height : 340% good for a (4x4) Grid
                         loc='lower left',
                         bbox_to_anchor=(1.05, 0.25, 1, 1),
                         bbox_transform=ax.transAxes,
                         borderpad=0)
        cbar = fig.colorbar(mapper, cax=cax, orientation="vertical")
        cbar.set_label("R (kpc)")
        output = os.path.join(wdir, "plots/imf_relations")
        print(output)
        for fmt in ["pdf", "png"]:
            plt.savefig("{}_{}.{}".format(output, k+1, fmt), dpi=300)
        plt.close()
def plot_imf_individual(t, figsize=(3.54, 2.5)):
    """One single-panel figure per x-field plotting the IMF slope against it,
    colored by radius, with correlation ellipse and literature overlays.

    Figures are saved to "<wdir>/plots/imf_<xfield>.pdf/.png".
    """
    global labels, wdir
    # Ellipse parameters of previously fitted parameter-pair correlations.
    corr = Table.read(os.path.join(wdir, "fit_stats.fits"))
    mapper, colors = get_colors(t["R"])
    xlim = {"T": [None, None], "Z": [-0.2, 0.26], "alphaFe": [-.05, 0.48],
            "NaFe": [None, 0.7], "sigma": [80, 380], "Re": [-0.1, 1.1],
            "logSigma": [7.8, 11.2]}
    # Fractional x-position of the 1-sigma ellipse, keyed by x-field.
    xelf1 = {"T": 0.8, "Z": 0.85, "alphaFe": 0.85, "NaFe": 0.75, "sigma": 0.85,
             "Re": 0.3, "logSigma": 0.8}
    xelf2 = {"T": 0.8, "Z": 0.85, "alphaFe": 0.85, "NaFe": 0.75, "sigma": 0.85,
             "Re": 0.3, "logSigma": 0.85}
    xelfs = {"alpha" : xelf2, "imf": xelf1}
    ylims = {"imf": (0.5, 3.6), "alpha": (0.5, 2.2)}
    xfields= ["sigma", "Z", "alphaFe", "NaFe", "T", "Re", "logSigma"]
    yfields = ["imf"]
    # Colorbar inset position and legend location, per x-field.
    xbar = {"T": 0.15, "Z": 0.15, "alphaFe": 0.15, "NaFe": 0.75, "sigma": 0.4,
            "Re": 0.15, "logSigma": 0.15}
    ybar = {"T": 0.72, "Z": 0.65, "alphaFe": 0.2, "NaFe": 0.75, "sigma": 0.2,
            "Re": 0.2, "logSigma": 0.75}
    loc = {"T": 2, "Z": 2, "alphaFe": 2, "NaFe": 2, "sigma": 2,
           "Re": 1, "logSigma": 2}
    for k, xfield in enumerate(xfields):
        fig = plt.figure(figsize=figsize)
        gs = gridspec.GridSpec(len(yfields), 1, figure=fig)
        gs.update(left=0.10, right=0.99, bottom=0.125, top=0.99, wspace=0.03,
                  hspace=0.05)
        for i, yfield in enumerate(yfields):
            xs = t[xfield]
            ys = t[yfield]
            xerrs = np.array([t["{}_lerr".format(xfield)],
                              t["{}_uerr".format(xfield)]]).T
            yerrs = np.array([t["{}_lerr".format(yfield)],
                              t["{}_uerr".format(yfield)]]).T
            xelf = xelfs[yfield]
            ax = plt.subplot(gs[i])
            # One errorbar call per point for per-point radius colors.
            for x, y, xerr, yerr, c in zip(xs, ys, xerrs, yerrs, colors):
                ax.errorbar(x, y, yerr=np.atleast_2d(yerr).T,
                            xerr=np.atleast_2d(xerr).T, fmt="o",
                            ecolor="0.8", mec="w", color=c,
                            mew=0.5, elinewidth=0.5)
            ax.set_ylabel(labels[yfield])
            if i + 1 < len(yfields):
                ax.xaxis.set_ticklabels([])
            if i+ 1 == len(yfields):
                ax.set_xlabel(labels[xfield])
            ax.set_xlim(xlim[xfield])
            ax.set_ylim(ylims[yfield])
            # plot parameter correlations
            idx = np.where((corr["param1"] == xfield) &
                           (corr["param2"] == yfield))[0]
            if len(idx) > 0:
                a = corr["a"][idx]
                b = corr["b"][idx]
                ang = corr["ang"][idx]
                xmin, xmax = ax.get_xlim()
                ymin, ymax = ax.get_ylim()
                xel = xmin + xelf[xfield] * (xmax - xmin)
                yel = ymin + 0.2 * (ymax - ymin)
                ellipse = Ellipse((xel, yel), a, b, ang,
                                  facecolor="none", edgecolor="0.3",
                                  linestyle="--")
                ax.text(xel - 0.02 * (xmax - xmin),
                        yel - 0.02 * (ymax - ymin), "$1\sigma$", size=5.5,
                        c="0.3")
                ax.add_patch(ellipse)
            ####################################################################
            # alpha values for Kroupa and Salpeter
            xmin, xmax = ax.get_xlim()
            if yfield == "alpha":
                ax.axhline(y=1, c="k", ls="--", lw=0.5)
                ax.axhline(y=1.55, c="k", ls="--", lw=0.5)
                if i == 1:
                    ax.text(xmin + 0.04 * (xmax - xmin), 1.03, "Kroupa",
                            size=5.5, c="k")
                    ax.text(xmin + 0.04 * (xmax - xmin), 1.58, "Salpeter",
                            size=5.5, c="k")
            ####################################################################
            add_literature_results(ax, xfield, yfield)
            plt.legend(loc=loc[xfield], frameon=False, prop={"size": 4.8},
                       ncol=1)
        # Inset horizontal colorbar encoding radius.
        cbar_pos=[xbar[xfield], ybar[xfield], 0.25, 0.05]
        cbaxes = fig.add_axes(cbar_pos)
        cbar = plt.colorbar(mapper, cax=cbaxes, orientation="horizontal")
        cbar.set_ticks(np.linspace(0, 16, 5))
        # cbar.ax.tick_params(labelsize=labelsize-1)
        cbar.ax.xaxis.set_label_position('top')
        cbar.ax.xaxis.set_ticks_position('bottom')
        # cl = plt.getp(cbar.ax, 'ymajorticklabels')
        # plt.setp(cl, fontsize=labelsize+2)
        cbar.set_label("R (kpc)")
        output = os.path.join(wdir, "plots/imf_{}".format(xfield))
        for fmt in ["pdf", "png"]:
            plt.savefig("{}.{}".format(output, fmt), dpi=300)
        plt.close()
def add_literature_results(ax, xfield, yfield, posacki=False,
                           mcdermid=False, labarbera=True, ferreras=False):
    """Overlay literature relations on *ax* for the given (xfield, yfield).

    Dispatches on the field pair; the boolean flags toggle optional
    comparison samples. NOTE(review): `labarbera` and `ferreras` are
    accepted but never read in this body — confirm intent.
    """
    global labels
    xmin, xmax = ax.get_xlim()
    ####################################################################
    # Sarzi et al. 2018
    stable = os.path.join(context.home,
                          "tables/sarzi2017_{}_imf.csv".format(xfield))
    if os.path.exists(stable) and yfield == "imf":
        x, y = np.loadtxt(stable, delimiter=",", unpack=True)
        lsarzi = "M87 (Sarzi et al. 2018)" # if i==5 else None
        ax.plot(x, y, "-", c="r", label=None)
        ax.plot(x[0], y[0], "^-", c="r", label=lsarzi)
        ax.plot(x[0], y[0], "^-", c="r", label=None, mec="r")
    ####################################################################
    # Plot results from Parikh et al. (2018)
    xtable = os.path.join(context.home,
                          "tables/parikh2018_{}.txt".format(xfield))
    if os.path.exists(xtable) and yfield == "imf":
        ytable = os.path.join(context.home,
                              "tables/parikh2018_{}.txt".format(yfield))
        # Every other value is kept ([::2]); three mass bins of ten points.
        y = np.loadtxt(ytable).ravel()[::2].reshape(3, 10)
        x = np.loadtxt(xtable).ravel()[::2].reshape(3, 10)
        colors = ["lightgreen", "limegreen", "green"]
        masses = [[9.9,10.2], [10.2,10.5], [10.5,10.8]]
        for j in range(3):
            lparikh = "$\log M/M_\odot = {0[0]}-{0[1]}$ (Parikh et al. " \
                      "2018)".format(masses[j])
            ax.plot(x[j], y[j], "-", c=colors[j], label=None, mec="w")
            ax.plot(x[j][0], y[j][0], "s-", c=colors[j], label=lparikh,
                    mec=colors[j])
    ####################################################################
    if xfield == "Z" and yfield == "imf":
        z = np.linspace(-0.3, 0.2, 50)
        # Martin-Navarro 2015
        a = np.random.normal(3.1, 0.5, len(z))
        b = np.random.normal(2.2, 0.1, len(z))
        y = a * z[:, np.newaxis] + b
        ax.plot(z, y.mean(axis=1), "--", c="C4",
                label="Martín-Navarro et al.(2015)")
        # ax.plot(z, np.percentile(y, 16, axis=1), "--", c="C4")
        # ax.plot(z, np.percentile(y, 84, axis=1), "--", c="C4")
    # velocity dispersion relation
    if yfield == "imf" and xfield == "sigma":
        sigma = np.linspace(100, 300, 100)
        # Blank out 250-280 km/s so the line is broken in that range.
        idx = np.where((sigma > 250) & (sigma < 280))[0]
        sigma[idx] = np.nan
        gamma = 2.4 + 5.4 * np.log10(sigma/200)
        ax.plot(sigma, gamma, "--", c="violet",
                label="La Barbera et al. (2013)")
    ####################################################################
    # <NAME> 2016
    if yfield == "alpha" and xfield == "Re":
        re = np.linspace(0, 1, 100)
        alpha_vd = np.clip(2.48 - 3.6 * re, 1.1, np.infty)
        ax.plot(re, alpha_vd, "-", c="coral",
                label="van Dokkum et al. (2016)")
    ####################################################################
    # Posacki et al (2015)
    if yfield == "alpha" and xfield == "sigma" and posacki:
        # Monte-Carlo band (16th-84th percentiles) of the published relation.
        sigma = np.linspace(xmin, xmax, 100)
        p0 = np.random.normal(0.4, 0.15, 100)
        p1 = np.random.normal(0.49, 0.05, 100)
        p3 = np.random.normal(-0.07, 0.01, 100)
        s = np.log10(sigma / 200)
        loga = np.outer(p0, s ** 2) + np.outer(p1, s) + p3[:, np.newaxis]
        apos = np.power(10, loga)
        ax.fill_between(sigma, 1.55 * np.percentile(apos, 16, axis=0),
                        1.55 * np.percentile(apos, 84, axis=0),
                        color="0.8", label="Posacki et al. (2015)")
    ####################################################################
    # Barber et al (2019)
    if yfield == "alpha" and xfield == "alphaFe":
        x = np.array([-0.4, 0.2])
        y = np.array([2, 0.8])
        # offset = 0.18 # offset to make it work
        offset = 0
        ax.plot(x + offset, y, "-", c="gold",
                label="LoM - Barber et al. (2019)")
        x = np.array([0, 0.4])
        y = [1.1, 1.3]
        ax.plot(x + offset, y, "-", c="orange",
                label="HiM - Barber et al. (2019)")
    ############################################################################
    # McDermid et al. (2014)
    if yfield == "alpha" and xfield in ["alphaFe", "Z", "T"] and mcdermid:
        if xfield == "alphaFe":
            x = np.linspace(-0.05, 0.45, 100)
            a = -0.257
            b = 0.71
            eps = 0.07
            xp = x
        elif xfield == "T":
            x = np.linspace(0.3, 1.2, 100)
            a = -0.237
            b = 0.126
            eps = 0.069
            xp = np.power(10, x)
        elif xfield == "Z":
            x = np.linspace(-0.3, 0.3)
            a = -0.1181
            b = -0.13
            xp = x
            eps = 0.07
        ax.plot(xp, 1.55 * np.power(10, (a + b * x)), "-",
                c="olive", lw=0.7, label="McDermid et al. (2014)")
        ax.plot(xp, 1.55 * np.power(10, (a + b * x - eps)), "--",
                c="olive", lw=0.7)
        ax.plot(xp, 1.55 * np.power(10, (a + b * x + eps)), "--",
                c="olive", lw=0.7)
    ############################################################################
    if yfield == "alpha" and xfield == "sigma":
        sigma = np.linspace(150, 400, 100)
        aa = [1.31, 0.9, 1.05]
        bb = [-3.1, -2.2, -2.5]
        cc = ["lightblue", "turquoise", "goldenrod"]
        ll = ["Treu et al. (2010)", "Conroy et al. (2012)",
              "Spiniello et al. (2014)"]
        for a, b, l, c in zip(aa, bb, ll, cc):
            y = np.power(10, a * np.log10(sigma) + b) * 1.54
            ax.plot(sigma, y, "--", c=c, label=l)
    if xfield == "logSigma":
        x = np.linspace(-1, 1, 100)
        if yfield == "imf":
            y = 1.3 + 1.84 / (1 + np.exp(-x / 0.24))
        else:
            y = 1. + 0.98 / (1 + np.exp(-x / 0.24))
        ax.plot(x + 10, y, "-", c="brown", label="La Barbera et al. (2019)")
    return
if __name__ == "__main__":
    # Axis-label strings (LaTeX) shared by all plotting functions via globals.
    labels = {"R": "$R$ (kpc)", "sigma": r"$\sigma_*$ (km/s)",
              "V": "$V$ (km/s)", "imf": r"$\Gamma_b$", "Z": "[Z/H]",
              "T": "Age (Gyr)", "alphaFe": r"[$\alpha$/Fe]", "NaFe": "[Na/Fe]",
              "Re" : "$R / R_e$", "M2L": "$M_*/L_r$",
              "alpha": "$\\alpha=(M_*/L_r) / (M_*/L_r)_{\\rm MW}$",
              "logSigma": "$\\log \\Sigma$ (M$_\\odot$ / kpc$^2$)"}
    dataset = "MUSE"
    targetSN = 250
    # Working directory of the Voronoi-binned results at the target S/N.
    wdir = os.path.join(context.data_dir, dataset, "voronoi",
                        "sn{}".format(targetSN))
    outdir = os.path.join(wdir, "plots")
    ###########################################################################
    # Loading and preparing data
    tfile = os.path.join(wdir, "results.fits")
    t = Table.read(tfile)
    # Radius has no measurement uncertainty; zero errors keep the
    # "<field>_lerr"/"<field>_uerr" convention uniform across columns.
    t["R_uerr"] = 0
    t["R_lerr"] = 0
    # Radius in units of the effective radius (Re = 8.4 kpc assumed here —
    # NOTE(review): confirm this value against the adopted photometry).
    t["Re"] = t["R"] / 8.4
    t["Re_uerr"] = 0
    t["Re_lerr"] = 0
    ############################################################################
    # Toggle for the radial-profile figures (disabled by default).
    profiles = False
    if profiles:
        plot_profiles(t, "R", ["imf", "M2L", "alpha"],
                      output=os.path.join(outdir, "R_imf-M2L-alpha"))
        plot_profiles(t, "Re", ["imf", "M2L", "alpha"],
                      output=os.path.join(outdir, "Re_imf-M2L-alpha"),
                      xlim=[None, 1.1])
        xfracs = [0.75, 0.75, 0.25]
        yfracs = [0.2, 0.2, 0.7]
        plot_profiles(t, "logSigma", ["imf", "M2L", "alpha"],
                      output=os.path.join(outdir, "logSigma_imf-M2L-alpha"),
                      xfracs=xfracs, yfracs=yfracs)
        xfracs = [0.85, 0.85, 0.85]
        yfracs = [0.3, 0.3, 0.3]
        plot_profiles(t, "sigma", ["imf", "M2L", "alpha"],
                      output=os.path.join(outdir, "sigma_imf-M2L-alpha"),
                      xfracs=xfracs, yfracs=yfracs)
    ############################################################################
    # plot_imf_relations(t)
    plot_imf_individual(t)
    ############################################################################
| 1.9375
| 2
|
data-gen.py
|
SigmaX-ai/tidre-demo
| 1
|
12774294
|
import pyarrow as pa
import rstr
import random
# Each tuple specifies a type of string to generate. The first entry specifies
# how many unique strings to generate (rstr is pretty slow). The second
# specifies how often to insert a string from that pool of unique strings into
# the actual dataset compared to inserting strings from the other pools. The
# third is the regex that the strings for this pool should match.
generator_config = [
    (100, 1, r'.*[tT][eE][rR][aA][tT][iI][dD][eE][ \t\n]+[dD][iI][vV][iI][nN][gG][ \t\n]+([sS][uU][bB])+[sS][uU][rR][fF][aA][cC][eE].*'),
    (100, 3, r'.*[Tt][Aa][Xx][Ii].*'),
    (300, 20, r'.*.*.*'), # long random strings
    (500, 20, r'.*'), # short random strings
]
# Target size for the dataset. Generation stops when either limit is reached.
target_num_rows = 10_000_000
target_num_bytes = 1_000_000_000
# Construct pools of random strings abiding by the generator configuration.
random_strings = []
# Sum of all pool frequencies, used to normalize them into probabilities.
frequency_norm = 0
for _, frequency, _ in generator_config:
    frequency_norm += frequency
cumulative_frequency = 0.0
for num_unique, frequency, regex in generator_config:
    print('Creating random strings for /' + regex + '/...')
    # Pools are stored with their cumulative probability so a single uniform
    # draw below can pick a pool via a running-threshold comparison.
    cumulative_frequency += frequency / frequency_norm
    string_pool = [rstr.xeger(regex) for _ in range(num_unique)]
    random_strings.append((cumulative_frequency, string_pool))
# Construct the test data.
print('Constructing test data...')
data = []
total_len = 0
print()
while total_len < target_num_bytes and len(data) < target_num_rows:
    r = random.random()
    # First pool whose cumulative frequency exceeds the draw wins.
    for cumulative_frequency, string_pool in random_strings:
        if r <= cumulative_frequency:
            s = random.choice(string_pool)
            total_len += len(s)
            data.append(s)
            break
    # Progress line, refreshed every 1000 rows (ANSI codes rewrite the line).
    if len(data) % 1000 == 0:
        print('\033[A\033[K{:.1f}%...'.format(
            min(max(total_len / target_num_bytes, len(data) / target_num_rows) * 100, 100)))
# Write the generated data to a record batch.
print('Converting to record batch...')
field = pa.field('text', pa.utf8(), nullable=False)
schema = pa.schema([field])
arrays = [pa.array(data, pa.utf8())]
with pa.RecordBatchFileWriter('input.rb', schema) as writer:
    print('Writing file...')
    writer.write(pa.RecordBatch.from_arrays(arrays, schema=schema))
print('Done!')
| 2.75
| 3
|
src/VAC_GAN/models/Discriminator.py
|
duartegalvao/Image-Colorization-with-Deep-Learning
| 2
|
12774295
|
<gh_stars>1-10
import tensorflow as tf
class Discriminator:
    """Convolutional GAN discriminator built on the TensorFlow 1.x layers API."""
    def __init__(self, seed):
        """
        Architecture:
            [?, 32, 32, ch] => [?, 16, 16, 64]
            [?, 16, 16, 64] => [?, 8, 8, 128]
            [?, 8, 8, 128] => [?, 4, 4, 256]
            [?, 4, 4, 256] => [?, 4, 4, 512]
            [?, 4, 4, 512] => [?, 1, 1, 1]
        """
        self.name = 'Discriminator'
        # Seed for the Glorot initializer, kept for reproducibility.
        self.seed = seed
        self.initializer = tf.glorot_uniform_initializer(self.seed)
        self.is_training = True
        self.kernel_size = 4
        # (num_filters, strides)
        self.kernels = [
            (128, 2),
            (256, 2),
            (512, 1),
        ]
        # Filled by forward() with this scope's trainable variables.
        self.variables = []
    def forward(self, X, reuse_vars=None):
        """Run the discriminator on batch *X*; returns unactivated logits.

        reuse_vars is forwarded to tf.variable_scope so weights can be
        shared between multiple forward passes (e.g. real/fake branches).
        """
        with tf.variable_scope(self.name, reuse=reuse_vars):
            # First layer is fixed at 64 filters, stride 2.
            output = tf.layers.Conv2D(
                name='conv_1',
                filters=64,
                strides=2,
                kernel_size=self.kernel_size,
                padding='same',
                kernel_initializer=self.initializer)(X)
            output = tf.nn.leaky_relu(output, name='leaky_ReLu_1')
            for i, kernel in enumerate(self.kernels):
                output = tf.layers.Conv2D(
                    name='conv_'+str(i+2),
                    filters=kernel[0],
                    strides=kernel[1],
                    kernel_size=self.kernel_size,
                    padding='same',
                    kernel_initializer=self.initializer)(output)
                output = tf.nn.leaky_relu(output, name='leaky_ReLu'+str(i+2))
            # PAPER ONE:
            # Final 1-filter conv produces the logit map; no activation here
            # (loss is expected to apply the sigmoid).
            output = tf.layers.Conv2D(
                name='conv_' + str(i+3),
                filters=1,
                strides=1,
                kernel_size=self.kernel_size,
                padding='same',
                activation=None,
                kernel_initializer=self.initializer)(output)
            self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
            return output
| 2.59375
| 3
|
mini python projects/habbit_tracking/add_pixel.py
|
aliammarkhan/Mini_python_projects
| 0
|
12774296
|
<filename>mini python projects/habbit_tracking/add_pixel.py
import datetime
import requests
# docs https://docs.pixe.la/entry/post-pixel
USERNAME = "YOUR_USERNAME_GOES_HERE"
TOKEN = "YOUR_TOKEN_ID_GOES_HERE"
GRAPH_ID = "GRAPH_ID_GOES_HERE"
# graph endpoint where we want to store our data
endpoint = "https://pixe.la/v1/users/{}/graphs/{}".format(USERNAME,GRAPH_ID)
# get today's date in the yyyyMMdd format required by the Pixela API
today = datetime.datetime.now().strftime("%Y%m%d")
# body of the POST request; "quantity" is the value recorded for today's pixel
params = {"date":today,"quantity":"2.3"}
# header sent with the request (Pixela authenticates via this token header)
headers = {
    "X-USER-TOKEN" : TOKEN
}
# post the pixel and print the API's JSON response
response = requests.post(url = endpoint,json = params,headers=headers)
print(response.text)
| 3.203125
| 3
|
passgen/forms.py
|
diyajaiswal11/HackCorona
| 7
|
12774297
|
<gh_stars>1-10
from django import forms
from django.forms import ModelForm
from .models import PassModel
class PassForm(ModelForm):
    """Form for creating/editing a PassModel entry.

    System-managed fields are excluded so users cannot set them directly.
    """
    class Meta:
        model= PassModel
        fields='__all__'
        # issuedate/uniquenumber/checked are assigned by the application.
        exclude=['issuedate','uniquenumber','checked']
class DownloadForm(ModelForm):
    """Form that looks up a pass for download by Aadhar card number only."""
    class Meta:
        model=PassModel
        fields=['aadharcardnumber']
| 1.890625
| 2
|
littlecheck/__init__.py
|
faho/littlecheck
| 26
|
12774298
|
<reponame>faho/littlecheck
from .littlecheck import *
| 1.015625
| 1
|
training/training/doctype/associate_performance_monitoring_check_sheet/associate_performance_monitoring_check_sheet.py
|
vhrspvl/Minda-Training
| 0
|
12774299
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Ramya and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import json
from datetime import datetime, date
from frappe.utils import flt, getdate
class AssociatePerformanceMonitoringCheckSheet(Document):
    """Frappe document controller for the check-sheet doctype."""
    def autoname(self):
        # Name the document after the employee code when one is provided.
        if self.employee_code:
            self.name = self.employee_code
@frappe.whitelist()
def associate_monitoring(date,line,shift):
    """Return check-sheet associates on *line*/*shift* who were present on
    *date* (per Attendance) and have not yet been monitored that day.

    Only associates past their handover date are considered; *date* is an
    ISO "YYYY-MM-DD" string coming from the client.
    """
    associates = frappe.get_list('Associate Performance Monitoring Check Sheet',{'skip_monitoring':0,'line_name':line,'shift':shift},['name','associate','shift','line_name','handover_date'])
    as_list = []
    for asso in associates:
        # Only consider associates marked present on the requested date.
        if frappe.db.exists('Attendance',{'employee':asso.name,'attendance_date':date}):
            child = frappe.get_all('Monitoring Table', fields=['*'], filters={'parenttype': 'Associate Performance Monitoring Check Sheet', 'parent': asso.name})
            date_list = []
            c1 = datetime.strptime(date, "%Y-%m-%d")
            c2 = c1.date()
            if c2 >= asso.handover_date:
                # NOTE(review): debug prints left in production code path.
                frappe.errprint(c2)
                frappe.errprint(asso.handover_date)
                if(child):
                    # Collect dates already monitored for this associate.
                    for c in child:
                        c1 = datetime.strptime(date, "%Y-%m-%d")
                        c2 = c1.date()
                        date_list.append(c.monitoring_date)
                    if c2 not in date_list:
                        as_list.append(asso)
                else:
                    as_list.append(asso)
    return as_list
@frappe.whitelist()
def mark_monitoring(child,monitoring_date):
    """Append one Monitoring Table row per entry of *child* (a JSON list of
    dicts keyed by employee) to the matching check sheet and save it.

    NOTE(review): returns only the last processed row `t` — confirm the
    client actually relies on this value.
    """
    table = json.loads(child)
    for t in table:
        monitoring = frappe.get_doc("Associate Performance Monitoring Check Sheet",t["employee"])
        monitoring.append('monitoring_table', {
            'process_name':t["process_name"],
            'production': t["production"],
            'monitoring_date': monitoring_date,
            'defect':t["defect"]
        })
        monitoring.save()
    return t
@frappe.whitelist()
def skip_monitoring():
    """Flag check sheets with 15 or more monitoring entries so they are
    excluded from future monitoring rounds (skip_monitoring = 1)."""
    m_list = frappe.get_list('Associate Performance Monitoring Check Sheet',{'skip_monitoring':0})
    for m in m_list:
        child = frappe.get_all('Monitoring Table', fields=['*'], filters={'parenttype': 'Associate Performance Monitoring Check Sheet', 'parent': m.name})
        if child:
            # NOTE(review): debug print left in production code path.
            frappe.errprint(m.name)
            if len(child) >= 15:
                frappe.db.set_value('Associate Performance Monitoring Check Sheet',m.name,'skip_monitoring',1)
| 2.109375
| 2
|
xbmanIntegrated/Aclsm-master/jump/migrations/0001_initial.py
|
suntao789/Aclsm
| 0
|
12774300
|
<filename>xbmanIntegrated/Aclsm-master/jump/migrations/0001_initial.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-10 21:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the "jump" app (Django 1.11).
    # verbose_name values are \uXXXX-escaped Chinese labels; do not edit.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Machine group: a named set of machines (dev_list).
        migrations.CreateModel(
            name='Jump_group',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('groupname', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u7ec4\u540d\u79f0')),
                ('dev_list', models.CharField(blank=True, max_length=9999, null=True, verbose_name='\u673a\u5668')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '\u673a\u5668\u5206\u7ec4\u8868',
                'verbose_name_plural': '\u673a\u5668\u5206\u7ec4\u8868',
            },
        ),
        # Session log: per-user log file path and source IP.
        migrations.CreateModel(
            name='Jump_logs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP')),
                ('file_path', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u65e5\u5fd7\u6587\u4ef6\u8def\u5f84')),
                ('create_date', models.DateTimeField(auto_now=True)),
                ('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237\u540d')),
            ],
            options={
                'verbose_name': '\u65e5\u5fd7\u8868',
                'verbose_name_plural': '\u65e5\u5fd7\u8868',
            },
        ),
        # Notice board entries (name is a bytes literal in the original).
        migrations.CreateModel(
            name='Jump_Notice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name=b'\xe9\x80\x9a\xe7\x9f\xa5\xe5\x86\x85\xe5\xae\xb9')),
                ('status', models.IntegerField(verbose_name=b'\xe7\x8a\xb6\xe6\x80\x81')),
                ('create_date', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': '\u901a\u77e5\u8868',
                'verbose_name_plural': '\u901a\u77e5\u8868',
            },
        ),
        # Permission binding: Django user <-> machine group.
        migrations.CreateModel(
            name='Jump_prem',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='jump.Jump_group', verbose_name='\u7ed1\u5b9a\u7ec4')),
                ('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237\u540d')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u6743\u9650\u7ed1\u5b9a\u8868',
                'verbose_name_plural': '\u7528\u6237\u6743\u9650\u7ed1\u5b9a\u8868',
            },
        ),
        # Remote account with plain-text credentials and sudo permissions.
        migrations.CreateModel(
            name='Jump_user',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('username', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u8d26\u6237\u540d\u79f0')),
                ('password', models.CharField(blank=True, max_length=30, null=True, verbose_name='\<PASSWORD>')),
                ('permiss', models.TextField(verbose_name='sudo\u6743\u9650')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': '\u8fdc\u7a0b\u8d26\u6237\u8868',
                'verbose_name_plural': '\u8fdc\u7a0b\u8d26\u6237\u8868',
            },
        ),
        # Link each machine group to the remote account used on its hosts.
        migrations.AddField(
            model_name='jump_group',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='jump.Jump_user', verbose_name='\u7ed1\u5b9a\u8d26\u53f7'),
        ),
    ]
| 1.546875
| 2
|
src/PSTs.py
|
cosmobobak/Viridithas-Chess
| 0
|
12774301
|
<filename>src/PSTs.py
from dataclasses import dataclass
import chess
# import numpy as np
# Piece indices 0-11: lowercase = one colour's pieces, uppercase = the other's.
p, n, b, r, q, k, P, N, B, R, Q, K = range(12)
# Base material values per piece type [0]=midgame, [1]=endgame (P, N, B, R, Q, K).
piece_values = [[126, 781, 825, 1276, 2538, 0], [208, 854, 915, 1380, 2682, 0]]
@dataclass
class S:
    """A (midgame, endgame) score pair used throughout the piece-square tables."""
    midgame: int
    endgame: int
# Normalisation factor (1000 // 126 == 7) applied to the base piece values below.
PAWN_NORM: int = 1000 // 126
# 'Bonus' contains Piece-Square parameters.
# Scores are explicit for files A to D, implicitly mirrored for E to H.
Bonus: "list[list[list[S]]]" = [
[ ],
[ # Knight
[ S(-175, -96), S(-92,-65), S(-74,-49), S(-73,-21) ],
[ S( -77, -67), S(-41,-54), S(-27,-18), S(-15, 8) ],
[ S( -61, -40), S(-17,-27), S( 6, -8), S( 12, 29) ],
[ S( -35, -35), S( 8, -2), S( 40, 13), S( 49, 28) ],
[ S( -34, -45), S( 13,-16), S( 44, 9), S( 51, 39) ],
[ S( -9, -51), S( 22,-44), S( 58,-16), S( 53, 17) ],
[ S( -67, -69), S(-27,-50), S( 4,-51), S( 37, 12) ],
[ S(-201,-100), S(-83,-88), S(-56,-56), S(-26,-17) ]
],
[ # Bishop
[ S(-37,-40), S(-4 ,-21), S( -6,-26), S(-16, -8) ],
[ S(-11,-26), S( 6, -9), S( 13,-12), S( 3, 1) ],
[ S(-5 ,-11), S( 15, -1), S( -4, -1), S( 12, 7) ],
[ S(-4 ,-14), S( 8, -4), S( 18, 0), S( 27, 12) ],
[ S(-8 ,-12), S( 20, -1), S( 15,-10), S( 22, 11) ],
[ S(-11,-21), S( 4, 4), S( 1, 3), S( 8, 4) ],
[ S(-12,-22), S(-10,-14), S( 4, -1), S( 0, 1) ],
[ S(-34,-32), S( 1,-29), S(-10,-26), S(-16,-17) ]
],
[ # Rook
[ S(-31, -9), S(-20,-13), S(-14,-10), S(-5, -9) ],
[ S(-21,-12), S(-13, -9), S( -8, -1), S( 6, -2) ],
[ S(-25, 6), S(-11, -8), S( -1, -2), S( 3, -6) ],
[ S(-13, -6), S( -5, 1), S( -4, -9), S(-6, 7) ],
[ S(-27, -5), S(-15, 8), S( -4, 7), S( 3, -6) ],
[ S(-22, 6), S( -2, 1), S( 6, -7), S(12, 10) ],
[ S( -2, 4), S( 12, 5), S( 16, 20), S(18, -5) ],
[ S(-17, 18), S(-19, 0), S( -1, 19), S( 9, 13) ]
],
[ # Queen
[ S( 3,-69), S(-5,-57), S(-5,-47), S( 4,-26) ],
[ S(-3,-54), S( 5,-31), S( 8,-22), S(12, -4) ],
[ S(-3,-39), S( 6,-18), S(13, -9), S( 7, 3) ],
[ S( 4,-23), S( 5, -3), S( 9, 13), S( 8, 24) ],
[ S( 0,-29), S(14, -6), S(12, 9), S( 5, 21) ],
[ S(-4,-38), S(10,-18), S( 6,-11), S( 8, 1) ],
[ S(-5,-50), S( 6,-27), S(10,-24), S( 8, -8) ],
[ S(-2,-74), S(-2,-52), S( 1,-43), S(-2,-34) ]
],
[ # King
[ S(271, 1), S(327, 45), S(271, 85), S(198, 76) ],
[ S(278, 53), S(303,100), S(234,133), S(179,135) ],
[ S(195, 88), S(258,130), S(169,169), S(120,175) ],
[ S(164,103), S(190,156), S(138,172), S( 98,172) ],
[ S(154, 96), S(179,166), S(105,199), S( 70,199) ],
[ S(123, 92), S(145,172), S( 81,184), S( 31,191) ],
[ S( 88, 47), S(120,121), S( 65,116), S( 33,131) ],
[ S( 59, 11), S( 89, 59), S( 45, 73), S( -1, 78) ]
]
];
PBonus: "list[list[S]]" = [ # Pawn (asymmetric distribution)
[ ],
[ S( 2, -8), S( 4, -6), S( 11, 9), S( 18, 5), S( 16, 16), S( 21, 6), S( 9, -6), S( -3,-18) ],
[ S( -9, -9), S(-15, -7), S( 11,-10), S( 15, 5), S( 31, 2), S( 23, 3), S( 6, -8), S(-20, -5) ],
[ S( -3, 7), S(-20, 1), S( 8, -8), S( 19, -2), S( 39,-14), S( 17,-13), S( 2,-11), S( -5, -6) ],
[ S( 11, 12), S( -4, 6), S(-11, 2), S( 2, -6), S( 11, -5), S( 0, -4), S(-12, 14), S( 5, 9) ],
[ S( 3, 27), S(-11, 18), S( -6, 19), S( 22, 29), S( -8, 30), S( -5, 9), S(-14, 8), S(-11, 14) ],
[ S( -7, -1), S( 6,-14), S( -2, 13), S(-11, 22), S( 4, 24), S(-14, 17), S( 10, 7), S( -9, 7) ],
[]
]
# Final piece-square tables: [piece-with-colour][square] -> score.
# Indices 0-5 are one colour's P,N,B,R,Q,K; 6-11 are the mirrored pieces.
mg_pst = [[0.0 for _ in range(64)] for _ in range(12)]
eg_pst = [[0.0 for _ in range(64)] for _ in range(12)]
pieces = [W_PAWN, W_KNIGHT, W_BISHOP, W_ROOK, W_QUEEN, W_KING] = range(6)
for color in [chess.WHITE, chess.BLACK]:
    offset = 6 if color == chess.BLACK else 0
    for pc in pieces:
        mg_piece, eg_piece = piece_values[0][pc], piece_values[1][pc]
        for sq in chess.SQUARES:
            rank = chess.square_rank(sq)
            if color == chess.BLACK:
                rank = 7 - rank  # mirror the board vertically for black
            file = chess.square_file(sq)
            if pc == W_PAWN:
                if rank in (0, 7):
                    continue  # pawns never stand on the first or last rank
                mg_pst[pc + offset][sq] = PBonus[rank][file].midgame + mg_piece * PAWN_NORM
                eg_pst[pc + offset][sq] = PBonus[rank][file].endgame + eg_piece * PAWN_NORM
            else:
                # Bonus only stores files a-d; files e-h mirror them, so
                # min(file, 7 - file) folds the board in half (this replaces
                # a {4:3, 5:2, 6:1, 7:0} dict that was rebuilt every square).
                access_file = min(file, 7 - file)
                mg_pst[pc + offset][sq] = Bonus[pc][rank][access_file].midgame + mg_piece * PAWN_NORM
                eg_pst[pc + offset][sq] = Bonus[pc][rank][access_file].endgame + eg_piece * PAWN_NORM
# print(mg_pst[W_PAWN].reshape((8, 8)))
# print(mg_pst[W_PAWN + 6].reshape((8, 8)))
| 2.34375
| 2
|
model-optimizer/extensions/middle/PixelLinkReshape_test.py
|
apexxs/dldt
| 2
|
12774302
|
<filename>model-optimizer/extensions/middle/PixelLinkReshape_test.py
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.PixelLinkReshape import PixelLinkReshape
from mo.utils.unittest.graph import build_graph, compare_graphs
# Node templates shared by the test graphs built below; each entry maps a node
# name to the attributes build_graph() expects ('op' nodes vs 'data' nodes).
nodes_attributes = {
    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # Reshape layers
    'reshape_pack': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'reshape_pack_data': {'value': None, 'shape': None, 'kind': 'data'},
    'reshape_split': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'reshape_split_data': {'value': None, 'shape': None, 'kind': 'data'},
    'reshape_unpack': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'reshape_unpack_data': {'value': None, 'shape': None, 'kind': 'data'},
    'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'},
    'strided_slice_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Transpose layer
    'reshape_split/Permute_before': {'type': 'Permute', 'kind': 'op', 'op': 'Permute'},
    'reshape_split/Permute_before_data': {'value': None, 'shape': None, 'kind': 'data'},
    'reshape_pack/Permute_after': {'type': 'Permute', 'kind': 'op', 'op': 'Permute'},
    'reshape_pack/Permute_after_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Softmax layer
    'softmax_1': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
    'softmax_1_data': {'value': None, 'shape': None, 'kind': 'data'},
}
class ReshapeSoftmaxReshapeTests(unittest.TestCase):
    """Tests for the PixelLinkReshape middle-stage graph transformation."""

    def test_1(self):
        """Reshape->Softmax->Reshape->StridedSlice is rewritten with Permutes.

        Builds the original NHWC subgraph, applies PixelLinkReshape, and
        compares the result against a hand-built reference graph.
        """
        # Original graph: split-reshape -> pack-reshape -> softmax ->
        # unpack-reshape -> strided_slice, all in NHWC layout.
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'reshape_split'),
                             ('reshape_split', 'reshape_split_data'),
                             ('reshape_split_data', 'reshape_pack'),
                             ('reshape_pack', 'reshape_pack_data'),
                             ('reshape_pack_data', 'softmax_1'),
                             ('softmax_1', 'softmax_1_data'),
                             ('softmax_1_data', 'reshape_unpack'),
                             ('reshape_unpack', 'reshape_unpack_data'),
                             ('reshape_unpack_data', 'strided_slice'),
                             ('strided_slice', 'strided_slice_data'),
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 16])},
                             'reshape_split': {'dim': np.array([1, 227, 227, 8, 2])},
                             'reshape_split_data': {'shape': np.array([1, 227, 227, 8, 2])},
                             'softmax_1_data': {'shape': np.array([1 * 227 * 227 * 8, 2])},
                             'reshape_pack': {'dim': np.array([1 * 227 * 227 * 8, 2])},
                             'reshape_pack_data': {'shape': np.array([1 * 227 * 227 * 8, 2])},
                             'reshape_unpack': {'dim': np.array([1, 227, 227, 8, 2])},
                             'reshape_unpack_data': {'shape': np.array([1, 227, 227, 8, 2])},
                             'strided_slice': {
                                 'slices': [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 8, 1),
                                            slice(1, 2, 1)],
                                 'shrink_axis_mask': [False, False, False, False, True],
                                 'new_axis_mask': [False, False, False, False, False]},
                             'strided_slice_data': {'shape': np.array([1, 227, 227, 8])},
                             })
        graph.graph['layout'] = 'NHWC'

        # Reference graph: Permute inserted before the split-reshape and after
        # the pack-reshape, with strided_slice and unpack-reshape swapped.
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'reshape_split/Permute_before'),
                                 ('reshape_split/Permute_before', 'reshape_split/Permute_before_data'),
                                 ('reshape_split/Permute_before_data', 'reshape_split'),
                                 ('reshape_split', 'reshape_split_data'),
                                 ('reshape_split_data', 'reshape_pack'),
                                 ('reshape_pack', 'reshape_pack/Permute_after_data'),
                                 ('reshape_pack/Permute_after_data', 'reshape_pack/Permute_after'),
                                 ('reshape_pack/Permute_after', 'reshape_pack_data'),
                                 ('reshape_pack_data', 'softmax_1'),
                                 ('softmax_1', 'softmax_1_data'),
                                 ('softmax_1_data', 'strided_slice'),
                                 ('strided_slice', 'reshape_unpack_data'),
                                 ('reshape_unpack_data', 'reshape_unpack'),
                                 ('reshape_unpack', 'strided_slice_data')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 16])},
                                 'reshape_split/Permute_before_data': {'shape': np.array([1, 227, 16, 227])},
                                 'reshape_split_data': {'shape': np.array([1, 227, 227, 8, 2])},
                                 'reshape_pack_data': {'shape': np.array([1, 2, 1 * 227 * 227 * 8])},
                                 'reshape_pack/Permute_after_data': {'shape': np.array([1, 227 * 227 * 8, 2])},
                                 'softmax_1_data': {'shape': np.array([1, 2, 1 * 227 * 227 * 8])},
                                 'reshape_unpack_data': {'shape': np.array([1, 1, 227 * 227 * 8])},
                                 'strided_slice_data': {'shape': np.array([1, 227, 227, 8])}
                                 })

        pattern = PixelLinkReshape()
        pattern.find_and_replace_pattern(graph)

        # Graphs must match from the output node backwards, op attrs included.
        (flag, resp) = compare_graphs(graph, graph_ref, 'strided_slice_data', check_op_attrs=True)
        self.assertTrue(flag, resp)
| 1.898438
| 2
|
backend/app/src/v1/routes.py
|
alexandersumer/Notare
| 2
|
12774303
|
# -*- coding: utf-8 -*-
###
### DO NOT CHANGE THIS FILE
###
### The code is auto generated, your change will be overwritten by
### code generating.
###
from __future__ import absolute_import
from .api.logout import Logout
from .api.login import Login
from .api.createAccount import Createaccount
from .api.notes import Notes
from .api.notes_note_id import NotesNoteId
from .api.videos import Videos
from .api.videos_video_id_tag import VideosVideoIdTag
from .api.tags import Tags
# Maps each generated resource class to its URL rule(s) and endpoint name.
routes = [
    dict(resource=Logout, urls=["/logout"], endpoint="logout"),
    dict(resource=Login, urls=["/login"], endpoint="login"),
    dict(resource=Createaccount, urls=["/createAccount"], endpoint="createAccount"),
    dict(resource=Notes, urls=["/notes"], endpoint="notes"),
    dict(resource=NotesNoteId, urls=["/notes/<int:note_id>"], endpoint="notes_note_id"),
    dict(resource=Videos, urls=["/videos"], endpoint="videos"),
    dict(
        resource=VideosVideoIdTag,
        urls=["/videos/<video_id>/tag"],
        endpoint="videos_video_id_tag",
    ),
    dict(resource=Tags, urls=["/tags"], endpoint="tags"),
]
| 1.695313
| 2
|
0x0D-NoSQL/101-students.py
|
JoseAVallejo12/holbertonschool-web_back_end
| 0
|
12774304
|
#!/usr/bin/env python3
""" Top students """
def top_students(mongo_collection: object):
    """Return all students in ``mongo_collection`` sorted by average score.

    Each document in the result keeps the student's ``name`` and gains an
    ``averageScore`` field: the mean of its ``topics.score`` values.
    """
    pipeline = [
        {
            "$project": {
                "name": "$name",
                "averageScore": {"$avg": "$topics.score"},
            }
        },
        {"$sort": {"averageScore": -1}},
    ]
    return mongo_collection.aggregate(pipeline)
| 3.375
| 3
|
cronicl/triggers/cron_trigger.py
|
joocer/cronicl
| 0
|
12774305
|
<reponame>joocer/cronicl
"""
cron based trigger
Partial implementation of scheduled trigger using cron notation.
"""
from .base_trigger import BaseTrigger
import datetime
from datetime import timedelta
from ..utils.cron import is_now
from ..exceptions import MissingInformationError
import threading
# Use Event().wait(timeout) as the sleep primitive for the trigger loop.
sleep = threading.Event().wait
def next_event(s):
    """
    Forecast the time of the next event based on a cron-like
    specification of the job schedule (minute hour dom month dow).
    Only the minute and hour fields support explicit values.
    """
    now = datetime.datetime.now().replace(second=0, microsecond=0)
    event = now
    minute, hour, dom, month, dow = s.split(" ")
    # Forecasting is only implemented for the minute and hour fields.
    for field, label in ((dow, "DOW"), (month, "Month"), (dom, "DOM")):
        if field != "*":
            raise NotImplementedError(
                "Event forecasting with %s not supported" % label)
    if minute != "*":
        event = event.replace(minute=int(minute))
        if event < now:
            event += timedelta(hours=1)
    if hour != "*":
        event = event.replace(hour=int(hour))
        if event < now:
            event += timedelta(days=1)
    return event
def seconds_until_next_event(s):
    """Whole number of seconds from now until the next scheduled event."""
    remaining = next_event(s) - datetime.datetime.now()
    return remaining.total_seconds() // 1
class CronTrigger(BaseTrigger):
    """
    Trigger based on a schedule defined as per cron.

    Requires a 'schedule' keyword argument holding the cron expression.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if "schedule" not in kwargs:
            raise MissingInformationError("cron trigger requires 'schedule' parameter")
        self.schedule = kwargs["schedule"]
        # Append the schedule to the label so log output identifies the trigger.
        if self.label:
            self.label = self.label + " - " + self.schedule
        else:
            self.label = self.schedule

    def engage(self):
        """
        Based on the main loop of cron:
        - Examine the task schedule, compute how far in the future it must run.
        - Sleep for that period of time.
        - On awakening and after verifying the correct time, execute the task.
        - Repeat
        """
        while True:
            if is_now(self.schedule):
                self.on_event(str(datetime.datetime.now().isoformat()))
                # sleep past the current minute so the event doesn't fire twice
                sleep(60)
            seconds = seconds_until_next_event(self.schedule)
            if seconds < 1:
                # the forecast can land in the past just after an event fires
                print("negative sleep, waiting 10 seconds")
                sleep(10)
            else:
                print(f"sleeping for {seconds} seconds")
                sleep(seconds)
| 2.859375
| 3
|
setup.py
|
truthiswill/wait4disney
| 106
|
12774306
|
from setuptools import setup, find_packages
# Package metadata for the Shanghai Disney waiting-time history tools.
setup(
    name="disney",
    version="1.0",
    description="A history of Shanghai Disney waiting time",
    long_description="A history of Shanghai Disney waiting time",
    license="Apache License",
    url="http://s.gaott.info",
    author="gtt116",
    author_email="<EMAIL>",
    packages=find_packages(),
    include_package_data=True,
    platforms="any",
    install_requires=[],
    scripts=[],
    entry_points={
        "console_scripts": [
            "disney-fetch = disney.fetch:main",
            "disney-publish = disney.publish:main",
        ]
    },
)
| 1.375
| 1
|
excel_helper.py
|
MrBigBang/android_strings_translator_py
| 4
|
12774307
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
' excel_helper.py '
__author__ = '<NAME>'
############## main code ###############
from openpyxl import Workbook
from openpyxl import load_workbook
import os
import const
import datetime
class ExcelHelper(object):
    """Helper for appending translation records to an .xlsx workbook.

    The original file used Python-2-only ``raise Exception, msg`` syntax,
    which is a SyntaxError on Python 3; this version uses call syntax,
    which works on both.
    """

    def __init__(self, path, file):
        super(ExcelHelper, self).__init__()
        self.__getxlspath(path, file)

    def __getxlspath(self, path, file):
        """Validate ``file`` as an .xlsx name, ensure ``path`` exists,
        and remember the joined workbook path."""
        # find() returns -1 when there is no dot at all, so a missing
        # extension hits this check instead of raising a bare ValueError
        # (as the original index() call did).
        if file.find('.') <= 0:
            raise Exception('file (%s) is not valid' % file)
        file_strs = file.split('.')
        if len(file_strs) < 2 or file_strs[1] != 'xlsx':
            raise Exception('file (%s) is not valid is not (.xlsx) type file' % file)
        try:
            os.makedirs(path)
        except OSError:
            # makedirs fails if the directory already exists; only complain
            # when the path exists but is not a directory.
            if not os.path.isdir(path):
                raise IOError('path (%s) is not a dir' % path)
        xls_file = os.path.join(path, file)
        if os.path.exists(xls_file) and not os.path.isfile(xls_file):
            raise Exception('(%s) is not valid file name' % file)
        self.__xls_file = xls_file

    def __getwb(self):
        """Return the workbook, creating a fresh one if the file is absent."""
        if not os.path.exists(self.__xls_file):
            return Workbook()
        return load_workbook(self.__xls_file)

    def record(self, translated_datas):
        """Append one row per translated entry and save the workbook.

        translated_datas maps a name to a dict keyed by const.SOURCE and
        const.TARGET.  A header row is written once, when A1 is empty.
        """
        wb = self.__getwb()
        ws = wb.active
        headers = ['Name', const.SOURCE, const.TARGET, 'Editor', 'Date']
        if ws['A1'].value is None:
            for col in range(len(headers)):
                ws.cell(row=1, column=col + 1).value = headers[col]
        edit_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for name in translated_datas:
            ws.append([name, translated_datas[name][const.SOURCE], translated_datas[name][const.TARGET], const.EDITOR, edit_time])
        wb.save(self.__xls_file)
if __name__ == '__main__':
    # Smoke test: validate the target path/name for ./test.xlsx.
    eh = ExcelHelper('./', 'test.xlsx')
| 2.84375
| 3
|
examples/tsne/data.py
|
e-/ANN
| 19
|
12774308
|
<reponame>e-/ANN
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import argparse
import sys
import struct
# CLI definition: output path plus optional sample file, binary flag, and
# the t-SNE parameters written into the output header.
parser = argparse.ArgumentParser(description='Generate input data for the tsne example')
parser.add_argument('path', type=str, help='output path')
parser.add_argument('--sample', type=str, help='sample or random')
parser.add_argument('--binary', dest='binary', action='store_true', default=False, help='binary input')
parser.add_argument('-n', type=int, default=60000, help='# of rows to write')
parser.add_argument('-d', type=int, default=784, help='# of dimensions to write')
parser.add_argument('--theta', '-t', type=float, default=0.5, help='theta')
parser.add_argument('--perplexity', '-p', type=float, default=10, help='target perplexity')
parser.add_argument('--output-dims', '-o', type=int, default=2, help='output dimensionality')
parser.add_argument('--max-iter', '-i', type=int, default=300, help='maximum # of iterations')
if __name__ == '__main__':
    args = parser.parse_args()

    def _write_header(outf):
        # The t-SNE config header is identical in all three output modes;
        # it was previously duplicated inline three times.
        for value in (args.n, args.d, args.theta, args.perplexity,
                      args.output_dims, args.max_iter):
            print(value, file=outf)

    if args.sample:
        if not args.binary:
            # Text sample: copy the first n lines verbatim after the header.
            with open(args.sample, 'r') as inf:
                with open(args.path, 'w') as outf:
                    _write_header(outf)
                    lines = inf.readlines()
                    for i in range(args.n):
                        print(lines[i], file=outf, end='')
        else:
            # Binary sample: rows of d little-endian float32 values.
            with open(args.sample, 'rb') as inf:
                with open(args.path, 'w') as outf:
                    _write_header(outf)
                    for i in range(args.n):
                        inf.seek(i * 4 * args.d)
                        floats = struct.unpack('f' * args.d, inf.read(args.d * 4))
                        print(' '.join([str(f) for f in floats]), file=outf)
    else:
        # No sample given: emit n rows of uniform random values in [-1, 1].
        # (An unused `r = random.random()` draw was removed here.)
        with open(args.path, 'w') as outf:
            _write_header(outf)
            for i in range(args.n):
                print(' '.join([str(random.uniform(-1, 1)) for j in range(args.d)]), file=outf)
| 2.671875
| 3
|
transient/linux.py
|
sruffell/transient
| 0
|
12774309
|
<gh_stars>0
import ctypes
from typing import cast
# prctl option: deliver a signal to this process when its parent dies.
PR_SET_PDEATHSIG = 1
# Syscall number for prctl(2) — 157 on x86-64; verify for other architectures.
_PRCTL_SYSCALL = 157
def prctl(option: int, arg2: int = 0, arg3: int = 0, arg4: int = 0, arg5: int = 0) -> int:
    """Invoke the Linux prctl(2) syscall through libc's syscall() wrapper.

    Returns the raw syscall result. The local was renamed so it no longer
    shadows this function's own name.
    """
    syscall = ctypes.CDLL(None).syscall  # type: ignore
    syscall.restype = ctypes.c_int
    syscall.argtypes = (
        ctypes.c_long,  # the syscall number itself
        ctypes.c_int,
        ctypes.c_ulonglong,
        ctypes.c_ulonglong,
        ctypes.c_ulonglong,
        ctypes.c_ulonglong,
    )
    return cast(int, syscall(_PRCTL_SYSCALL, option, arg2, arg3, arg4, arg5))
def set_death_signal(signal: int) -> int:
    """Ask the kernel to send `signal` to this process once its parent exits."""
    result = prctl(PR_SET_PDEATHSIG, signal)
    return result
| 2.75
| 3
|
transfo/utils.py
|
qianyingw/rob-kiwi
| 1
|
12774310
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 16:55:35 2019
From CS230 Code Examples
@author: qwang
"""
import os
import logging
import shutil
import torch
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%%
def save_dict_to_json(d, json_path):
    """
    Save dict of floats to json file
    d: dict of float-castable values (np.float, int, float, etc.)
    """
    with open(json_path, 'w') as fout:
        payload = {key: float(value) for key, value in d.items()}
        json.dump(payload, fout, indent=4)
#%% Checkpoint
def save_checkpoint(state, is_best, checkdir):
    """
    Save model and training parameters at checkpoint + 'last.pth.tar'.
    If is_best==True, also saves checkpoint + 'best.pth.tar'
    Params:
        state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
        is_best: (bool) True if it is the best model seen till now
        checkdir: (string) folder where parameters are to be saved
    """
    # makedirs(..., exist_ok=True) is race-free and creates missing parents,
    # unlike the previous exists()-then-mkdir() pair.
    os.makedirs(checkdir, exist_ok=True)
    filepath = os.path.join(checkdir, 'last.pth.tar')
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkdir, 'best.pth.tar'))
def load_checkpoint(checkfile, model, optimizer=None):
    """
    Load model parameters (state_dict) from checkfile.
    If optimizer is provided, loads state_dict of optimizer assuming it is present in checkpoint.
    Params:
        checkfile: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint
    Raises:
        FileNotFoundError: if checkfile does not exist.
    """
    if not os.path.exists(checkfile):
        # The original `raise("...")` raised a TypeError (a str is not an
        # exception); raise a proper, catchable exception instead.
        raise FileNotFoundError("File doesn't exist {}".format(checkfile))
    checkfile = torch.load(checkfile)
    model.load_state_dict(checkfile['state_dict'])
    if optimizer:
        optimizer.load_state_dict(checkfile['optim_dict'])
    return checkfile
#%% Metrics
def metrics(preds, y, th=0.5):
    """
    Params:
        preds: torch tensor, [batch_size, output_dim]
        y: torch tensor, [batch_size]
        th: threshold applied to the positive-class column preds[:, 1]
    Yields:
        A dictionary of accuracy, f1 score, recall, precision and specificity
    """
    # Keep predictions on the same device as the labels. The previous
    # device_count()==1 check broke on CPU tensors with one visible GPU
    # and on multi-GPU machines.
    y_preds = (preds[:, 1] > th).long().to(y.device)

    ones = torch.ones_like(y_preds)
    zeros = torch.zeros_like(y_preds)

    pos = torch.eq(y_preds, y).sum().item()
    tp = (torch.eq(y_preds, ones) & torch.eq(y, ones)).sum().item()
    tn = (torch.eq(y_preds, zeros) & torch.eq(y, zeros)).sum().item()
    fp = (torch.eq(y_preds, ones) & torch.eq(y, zeros)).sum().item()
    fn = (torch.eq(y_preds, zeros) & torch.eq(y, ones)).sum().item()
    assert pos == tp + tn

    acc = pos / y.shape[0]
    # Guard every ratio against an empty denominator.
    f1 = 2*tp / (2*tp + fp + fn) if (2*tp + fp + fn != 0) else 0
    rec = tp / (tp + fn) if (tp + fn != 0) else 0
    ppv = tp / (tp + fp) if (tp + fp != 0) else 0
    spc = tn / (tn + fp) if (tn + fp != 0) else 0

    return {'accuracy': acc, 'f1': f1, 'recall': rec, 'precision': ppv, 'specificity': spc}
#%% Plot performance
def plot_prfs(prfs_json_path):
    """Plot train/valid loss, F1, accuracy and recall curves per epoch.

    Reads the 'prfs' dict from the given JSON file (keys 'train_<i>' and
    'valid_<i>' per epoch) and saves 'prfs.png' next to the JSON file.
    """
    with open(prfs_json_path) as f:
        dat = json.load(f)

    # Create scores dataframe; 'prfs' holds one train_ and one valid_ entry
    # per epoch, hence the division by two.
    epochs = int(len(dat['prfs'])/2)
    train_df = pd.DataFrame(columns=['Loss', 'Accuracy', 'F1', 'Recall', 'Precision', 'Specificity'])
    valid_df = pd.DataFrame(columns=['Loss', 'Accuracy', 'F1', 'Recall', 'Precision', 'Specificity'])
    for i in range(epochs):
        train_df.loc[i] = list(dat['prfs']['train_'+str(i+1)].values())
        valid_df.loc[i] = list(dat['prfs']['valid_'+str(i+1)].values())

    # Plot
    plt.figure(figsize=(15,5))
    x = np.arange(len(train_df)) + 1
    # Loss / F1
    plt.subplot(1, 2, 1)
    plt.title("Loss and F1")
    plt.plot(x, train_df['Loss'], label="train_loss", color='C5')
    plt.plot(x, valid_df['Loss'], label="val_loss", color='C5', linestyle='--')
    plt.plot(x, train_df['F1'], label="train_f1", color='C9')
    plt.plot(x, valid_df['F1'], label="val_f1", color='C9', linestyle='--')
    plt.xticks(np.arange(2, len(x)+2, step=2))
    plt.legend(loc='upper right')
    # Accuracy / Recall
    plt.subplot(1, 2, 2)
    plt.title("Accuracy and Recall")
    plt.plot(x, train_df['Accuracy'], label="train_acc", color='C0', alpha=0.8)
    plt.plot(x, valid_df['Accuracy'], label="val_acc", color='C0', linestyle='--', alpha=0.8)
    #plt.plot(x, train_df['F1'], label="train_f1", color='C9')
    #plt.plot(x, valid_df['F1'], label="val_f1", color='C9', linestyle='--')
    plt.plot(x, train_df['Recall'], label="train_rec", color='C1', alpha=0.8)
    plt.plot(x, valid_df['Recall'], label="val_rec", color='C1', linestyle='--', alpha=0.8)
    plt.xticks(np.arange(2, len(x)+2, step=2))
    plt.legend(loc='lower right')
    # Save png next to the input JSON
    output_dir = os.path.dirname(prfs_json_path)
    plt.savefig(os.path.join(output_dir, 'prfs.png'))
| 2.453125
| 2
|
user/analysis.py
|
boyayun/tushare
| 0
|
12774311
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import os
import sys
import signal
import time
from datetime import datetime
from datetime import timedelta
# import cv2 as cv
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt # 导入模块 matplotlib.pyplot,并简写成 plt
import numpy as np # 导入模块 numpy,并简写成 np
import csv
# Hit counters per signal tag: index 0 = total occurrences, indices 1-8 =
# hits at the 1/2/3/4/5/10/20/30-day horizons. 'rush' = buy signals.
l_rush = {
    # 'amount0': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    # 'amount1': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    # 'amount2': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'test': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'price__': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'rsi6_12': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma4___9': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma9__18': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
# Same layout for 'run' (sell) signals.
l_run = {
    # 'amount0': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    # 'amount1': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    # 'amount2': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'test': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'price__': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'rsi6_12': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma4___9': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma9__18': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
if os.path.exists('./statistics.csv'):
    csv_data = pd.read_csv('./statistics.csv', header=None)  # read the data
    data = csv_data.values.tolist()
    name = 'init'
    price = 100
    hold = False
    buy = 1
    sell = 1
    # number starts at -1 and total at -100 to cancel out the bogus 'init'
    # entry that is counted on the first name change below.
    number = -1
    total = -100
    for i in data:
        # Row layout: i[0]=code, i[1]=name, i[2]=tag, i[3]=signal type,
        # i[5:] = prices at increasing day offsets.
        code = i[0]
        tag = i[2]
        if i[3] == 'run':
            k = 0
            l_run[tag][0] += 1
            # Count the horizons where the price later fell (sell hit).
            for j in range(1, 9-k):
                if i[5+k] > i[5+j+k]:
                    l_run[tag][j+k] += 1
        elif i[3] == 'rush':
            k = 0
            l_rush[tag][0] += 1
            # Count the horizons where the price later rose (buy hit).
            for j in range(1, 9-k):
                if i[5+k] < i[5+k+j]:
                    l_rush[tag][j+k] += 1
        # print(i[1])
        if i[1] != name:
            # New instrument: flush the previous one's simulated price.
            print('name:', name, round(price, 2))
            number += 1
            name = i[1]
            total += price
            price = 100
            hold = False
        if i[2] == 'ma9__18':
            # Simple back-test on the ma9/ma18 tag: buy on rush, sell on run.
            if i[3] == 'rush' and hold == False:
                hold = True
                buy = i[6]
            if i[3] == 'run' and hold == True:
                hold = False
                sell = i[6]
                price = price*sell/buy
                # print('name:', name, round(price, 2))
    # Flush the final instrument and report the overall average.
    print('name:', name, round(price, 2))
    total += price
    number += 1
    print('total:', round(total, 2), number, round(total/number, 2))
    # exit(0)
def _report(title, stats):
    """Print hit percentages for the 1/2/3/4/5/10/20/30-day horizons.

    Replaces two identical copy-pasted reporting loops; output is unchanged.
    """
    print(title, stats)
    labels = ('1day', '2day', '3day', '4day', '5day', '10day', '20day', '30day')
    for key, value in stats.items():
        if value[0] != 0:
            # value[0] is the total signal count; value[1:] are per-horizon hits.
            pcts = [v / value[0] * 100 for v in value[1:]]
            print(key + ':' + ','.join(
                '%s:%.2f%%' % (lab, pct) for lab, pct in zip(labels, pcts)))


_report('rush', l_rush)
_report('\nrun', l_run)
| 2.03125
| 2
|
src/test/test_xml2fasta.py
|
yutake27/HMDM
| 2
|
12774312
|
<filename>src/test/test_xml2fasta.py
import os
import sys
sys.path.append(os.path.abspath('..'))
import xml2fasta
# Smoke-test xml2fasta on two sample BLAST XML reports (the two call sites
# were identical except for the file stem).
_BASE = '../../blast-xml/pdbaa_20200712/'
for _stem in ('1bxo_1', '4gg1_1'):
    xml2fasta.xml2fasta(_BASE + _stem + '.xml', _BASE + _stem + '.fasta')
| 1.875
| 2
|
tests/unit/datasources_test.py
|
jamesmistry/weaveq
| 0
|
12774313
|
<reponame>jamesmistry/weaveq
# -*- coding: utf-8 -*-
"""@package datasources_test
Tests for weaveq.datasources
"""
import unittest
import tempfile
import json
import os
import types
import sys
from weaveq.datasources import AppDataSourceBuilder, JsonLinesDataSource, JsonDataSource, CsvDataSource, ElasticsearchDataSource
from weaveq import wqexception
class TestConfig(unittest.TestCase):
"""Tests Config class
"""
def test_data_source_introspection(self):
"""Data sources are correctly discovered and indexed.
"""
expected_data_sources = ["json_lines", "jsl", "json", "js", "elasticsearch", "el", "csv"]
expected_data_sources.sort()
subject = AppDataSourceBuilder({})
self.assertEquals(len(subject._source_type_mappings), len(expected_data_sources))
self.assertEquals(subject._source_type_mappings["json_lines"], JsonLinesDataSource)
self.assertEquals(subject._source_type_mappings["jsl"], JsonLinesDataSource)
self.assertEquals(subject._source_type_mappings["json"], JsonDataSource)
self.assertEquals(subject._source_type_mappings["js"], JsonDataSource)
self.assertEquals(subject._source_type_mappings["csv"], CsvDataSource)
self.assertEquals(subject.valid_source_types, ", ".join(expected_data_sources))
def test_uri_parse_valid(self):
"""Parse a correctly formatted source type out of a URI string.
"""
subject = AppDataSourceBuilder({})
self.assertEquals(subject._parse_uri("json_lines:/test/uri"), {"source_type":"json_lines", "uri":"/test/uri", "data_source_class":JsonLinesDataSource})
def test_uri_parse_case_insensitive_source_type(self):
"""Parse a correctly formatted source type out of a URI string, regardless of source type case.
"""
subject = AppDataSourceBuilder({})
self.assertEquals(subject._parse_uri("JSON_lines:/test/uri"), {"source_type":"json_lines", "uri":"/test/uri", "data_source_class":JsonLinesDataSource})
def test_uri_parse_alternative_source_type_ident(self):
"""Parse a correctly formatted source type out of a URI string, using an alternative source type ident string.
"""
subject = AppDataSourceBuilder({})
self.assertEquals(subject._parse_uri("jsl:/test/uri"), {"source_type":"json_lines", "uri":"/test/uri", "data_source_class":JsonLinesDataSource})
def test_parse_uri_invalid(self):
"""Parse a missing source type from a URI string
"""
subject = AppDataSourceBuilder({})
with self.assertRaises(wqexception.DataSourceBuildError):
subject._parse_uri("/test/uri")
def test_parse_uri_not_greedy(self):
"""Make sure the source type parsing finishes at the first colon
"""
subject = AppDataSourceBuilder({})
self.assertEquals(subject._parse_uri("json_lines:not_a_type:/test/uri"), {"source_type":"json_lines", "uri":"not_a_type:/test/uri", "data_source_class":JsonLinesDataSource})
def test_parse_uri_invalid_source_type(self):
"""Parse an invalid source type from a URI string
"""
subject = AppDataSourceBuilder({})
with self.assertRaises(wqexception.DataSourceBuildError):
subject._parse_uri("invalid_source_type:/test/uri")
def test_datasource_construction(self):
"""Construct a DataSource object correctly from a URI and filter string.
"""
subject = AppDataSourceBuilder({"data_sources":{"elasticsearch":{"hosts":["127.0.0.1:5601"]}}})
constructed_datasource = subject("elasticsearch:test_index_name", "test_filter_string")
self.assertTrue(isinstance(constructed_datasource, ElasticsearchDataSource))
self.assertEquals(constructed_datasource.index_name, "test_index_name")
self.assertEquals(constructed_datasource.filter_string, "test_filter_string")
def test_elasticds_no_hosts_config(self):
"""Elasticsearch datasource is not configured with hosts.
"""
with self.assertRaises(wqexception.DataSourceBuildError):
subject = AppDataSourceBuilder({"data_sources":{"elasticsearch":{}}})("elasticsearch:test_index_name", "test_filter_string")
def test_elasticds_default_config(self):
"""Elasticsearch datasource config items are correctly set to defaults.
"""
subject = AppDataSourceBuilder({"data_sources":{"elasticsearch":{"hosts":["127.0.0.1:5601"]}}})("elasticsearch:test_index_name", "test_filter_string")
self.assertEquals(subject.config["hosts"], ["127.0.0.1:5601"])
self.assertEquals(subject.config["timeout"], 10)
self.assertEquals(subject.config["use_ssl"], False)
self.assertEquals(subject.config["verify_certs"], False)
self.assertEquals(subject.config["ca_certs"], None)
self.assertEquals(subject.config["client_cert"], None)
self.assertEquals(subject.config["client_key"], None)
def test_elasticds_supplied_config(self):
"""Elasticsearch datasource config applied correctly
"""
subject = AppDataSourceBuilder({"data_sources":{"elasticsearch":{"hosts":["127.0.0.1:5601","10.10.10.1:1280"],"timeout":20,"use_ssl":True,"verify_certs":True,"ca_certs":"/tmp/ca_certs","client_cert":"/tmp/client_cert","client_key":"/tmp/client_key"}}})("elasticsearch:test_index_name", "test_filter_string")
self.assertEquals(subject.config["hosts"], ["127.0.0.1:5601","10.10.10.1:1280"])
self.assertEquals(subject.config["timeout"], 20)
self.assertEquals(subject.config["use_ssl"], True)
self.assertEquals(subject.config["verify_certs"], True)
self.assertEquals(subject.config["ca_certs"], "/tmp/ca_certs")
self.assertEquals(subject.config["client_cert"], "/tmp/client_cert")
self.assertEquals(subject.config["client_key"], "/tmp/client_key")
def test_json_lines_batch_load(self):
    """json_lines data source batch loads a file successfully."""
    test_data = '{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"}\n{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"}\n{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = JsonLinesDataSource(path, None)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_lines_stream_load(self):
    """json_lines data source stream loads a file successfully."""
    test_data = '{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"}\n{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"}\n{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = JsonLinesDataSource(path, None)
        # Collect everything the stream yields, in order.
        result = [record for record in subject.stream()]
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(result, [{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_lines_load_error(self):
    """json_lines data source encounters error loading file."""
    # Note: the final line is deliberately truncated (missing closing brace).
    test_data = '{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"}\n{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"}\n{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        # Removed the unused `result = []` local from the original.
        subject = JsonLinesDataSource(path, None)
        with self.assertRaises(Exception):
            subject.batch()
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_lines_utf8(self):
    """json_lines data source handles UTF-8 OK."""
    # test_data is already encoded to bytes, so a single write works on
    # both Python 2 and 3 (the original had two identical branches).
    test_data = u'{"กว่า":"κόσμε", "test_field_2a":"test_value_2a"}\n{"test_field_1b":"いろはにほへとちりぬるを", "Heizölrückstoßabdämpfung":"test_value_2b"}\n{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}'.encode("utf-8")
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        data_file.write(test_data)
    try:
        subject = JsonLinesDataSource(path, None)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{u"กว่า":u"κόσμε", "test_field_2a":"test_value_2a"},{"test_field_1b":u"いろはにほへとちりぬるを", u"Heizölrückstoßabdämpfung":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_batch_load(self):
    """json data source batch loads a file successfully."""
    test_data = '[{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}]'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = JsonDataSource(path, None)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_stream_load(self):
    """json data source stream loads a file successfully."""
    test_data = '[{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}]'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = JsonDataSource(path, None)
        # Collect everything the stream yields, in order.
        result = [record for record in subject.stream()]
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(result, [{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_no_list_root(self):
    """json data source stream cannot load a document without a list root."""
    # Root is a dict, not a list, which the datasource must reject.
    test_data = '{"a":{"test_field_1a":"test_value_1a", "test_field_2a":"test_value_2a"},"b":{"test_field_1b":"test_value_1b", "test_field_2b":"test_value_2b"},"c":{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}}'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = JsonDataSource(path, None)
        with self.assertRaises(wqexception.DataSourceError):
            subject.batch()
    finally:
        os.close(fd)
        os.unlink(path)
def test_json_utf8(self):
    """json data source handles UTF-8 OK."""
    # test_data is already encoded to bytes, so a single write works on
    # both Python 2 and 3 (the original had two identical branches).
    test_data = u'[{"กว่า":"κόσμε", "test_field_2a":"test_value_2a"},{"test_field_1b":"いろはにほへとちりぬるを", "Heizölrückstoßabdämpfung":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}]'.encode("utf-8")
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        data_file.write(test_data)
    try:
        subject = JsonDataSource(path, None)
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{u"กว่า":u"κόσμε", "test_field_2a":"test_value_2a"},{"test_field_1b":u"いろはにほへとちりぬるを", u"Heizölrückstoßabdämpfung":"test_value_2b"},{"test_field_1c":"test_value_1c", "test_field_2c":"test_value_2c"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_csv_batch_load(self):
    """csv data source batch loads a file successfully with field names."""
    test_data = '"field_a","field_b","field_c"\n"row0cola","row0colb","row0colc"\n"row1cola","row1colb","row1colc"\n"row2cola","row2colb","row2colc"\n'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = CsvDataSource(path, None, {"first_row_names": True})
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{"field_a":"row0cola", "field_b":"row0colb", "field_c":"row0colc"},{"field_a":"row1cola", "field_b":"row1colb", "field_c":"row1colc"},{"field_a":"row2cola", "field_b":"row2colb", "field_c":"row2colc"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_csv_stream_load(self):
    """csv data source stream loads a file successfully with field names."""
    test_data = '"field_a","field_b","field_c"\n"row0cola","row0colb","row0colc"\n"row1cola","row1colb","row1colc"\n"row2cola","row2colb","row2colc"\n'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = CsvDataSource(path, None, {"first_row_names": True})
        # Collect everything the stream yields, in order.
        result = [record for record in subject.stream()]
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(result, [{"field_a":"row0cola", "field_b":"row0colb", "field_c":"row0colc"},{"field_a":"row1cola", "field_b":"row1colb", "field_c":"row1colc"},{"field_a":"row2cola", "field_b":"row2colb", "field_c":"row2colc"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_csv_no_field_names(self):
    """csv data source loads a file with no field names."""
    test_data = '"row0cola","row0colb","row0colc"\n"row1cola","row1colb","row1colc"\n"row2cola","row2colb","row2colc"\n'
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        if sys.version_info.major >= 3:
            data_file.write(bytes(test_data, "utf-8"))
        else:
            data_file.write(test_data)
    try:
        subject = CsvDataSource(path, None, {"first_row_names": False})
        # Without a header row, columns are auto-named column_1, column_2, ...
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{"column_1":"row0cola", "column_2":"row0colb", "column_3":"row0colc"},{"column_1":"row1cola", "column_2":"row1colb", "column_3":"row1colc"},{"column_1":"row2cola", "column_2":"row2colb", "column_3":"row2colc"}])
    finally:
        os.close(fd)
        os.unlink(path)
def test_csv_utf8(self):
    """csv input with UTF-8 characters."""
    # test_data is already encoded to bytes, so a single write works on
    # both Python 2 and 3 (the original had two identical branches).
    test_data = u'"กว่า","áðan","τὴν"\n"κόσμε","row0colb","row0colc"\n"row1cola","Heizölrückstoßabdämpfung","row1colc"\n"row2cola","row2colb","いろはにほへとちりぬるを"\n'.encode("utf-8")
    fd, path = tempfile.mkstemp()
    with open(path, "wb") as data_file:
        data_file.write(test_data)
    try:
        subject = CsvDataSource(path, None, {"first_row_names": True})
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(subject.batch(), [{u"กว่า":u"κόσμε", u"áðan":"row0colb", u"τὴν":"row0colc"},{u"กว่า":"row1cola", u"áðan":u"Heizölrückstoßabdämpfung", u"τὴν":"row1colc"},{u"กว่า":"row2cola", u"áðan":"row2colb", u"τὴν":u"いろはにほへとちりぬるを"}])
    finally:
        os.close(fd)
        os.unlink(path)
| 2.390625
| 2
|
src/toil/lib/threading.py
|
danieldanciu/toil
| 0
|
12774314
|
<filename>src/toil/lib/threading.py
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 5.14.2018: copied into Toil from https://github.com/BD2KGenomics/bd2k-python-lib
from __future__ import absolute_import
from future.utils import raise_
from builtins import range
import sys
import threading
if sys.version_info >= (3, 0):
from threading import BoundedSemaphore
else:
from threading import _BoundedSemaphore as BoundedSemaphore
class BoundedEmptySemaphore( BoundedSemaphore ):
    """
    A bounded semaphore that is initially empty.

    Every permit is acquired at construction time, so acquire() blocks
    (or fails with blocking=False) until another thread calls release().
    """
    def __init__( self, value=1, verbose=None ):
        # Python 3's BoundedSemaphore.__init__ accepts only `value`; passing
        # `verbose` raises TypeError. Forward it only when explicitly given
        # (Python 2's _BoundedSemaphore accepted it).
        if verbose is None:
            super( BoundedEmptySemaphore, self ).__init__( value )
        else:
            super( BoundedEmptySemaphore, self ).__init__( value, verbose )
        # Drain all permits so the semaphore starts empty.
        for i in range( value ):
            assert self.acquire( blocking=False )
class ExceptionalThread(threading.Thread):
    """
    A thread whose join() method re-raises exceptions raised during run(). While join() is
    idempotent, the exception is only re-raised during the first invocation of join() that
    successfully joined the thread. If join() times out, no exception will be re-raised even
    though an exception might already have occurred in run().

    When subclassing this thread, override tryRun() instead of run().

    >>> def f():
    ...     assert 0
    >>> t = ExceptionalThread(target=f)
    >>> t.start()
    >>> t.join()
    Traceback (most recent call last):
    ...
    AssertionError

    >>> class MyThread(ExceptionalThread):
    ...     def tryRun( self ):
    ...         assert 0
    >>> t = MyThread()
    >>> t.start()
    >>> t.join()
    Traceback (most recent call last):
    ...
    AssertionError
    """
    # Holds (type, value, traceback) captured from a failed run();
    # None until an exception occurs, reset to None once re-raised.
    exc_info = None

    def run( self ):
        try:
            self.tryRun( )
        except:
            # Stash the full exception info so join() can re-raise it in the
            # joining thread, then propagate it in this thread as usual.
            self.exc_info = sys.exc_info( )
            raise

    def tryRun( self ):
        # Default behaviour: delegate to threading.Thread.run (invokes target).
        super( ExceptionalThread, self ).run( )

    def join( self, *args, **kwargs ):
        super( ExceptionalThread, self ).join( *args, **kwargs )
        # Only re-raise after a successful (non-timed-out) join, and consume
        # the stored exception so subsequent join() calls stay idempotent.
        if not self.is_alive( ) and self.exc_info is not None:
            type, value, traceback = self.exc_info
            self.exc_info = None
            raise_(type, value, traceback)
# noinspection PyPep8Naming
class defaultlocal(threading.local):
    """
    Thread-local storage whose attributes start from the given defaults
    in every thread.

    >>>
    >>> l = defaultlocal( foo=42 )
    >>> def f(): print(l.foo)
    >>> t = threading.Thread(target=f)
    >>> t.start() ; t.join()
    42
    """
    def __init__(self, **kwargs):
        super(defaultlocal, self).__init__()
        # Seed this thread's attribute dict with the defaults.
        vars(self).update(kwargs)
| 2.296875
| 2
|
grid/utils.py
|
parthatom/Grid
| 0
|
12774315
|
"""Utility functions."""
import os
def exec_os_cmd(command):
    """Run *command* in a shell and return its captured standard output."""
    with os.popen(command) as stream:
        return stream.read()
| 2.125
| 2
|
__init__.py
|
ishay2b/KittiBox
| 0
|
12774316
|
<filename>__init__.py<gh_stars>0
def git_root():
    '''Return the root directory of the enclosing git repository.'''
    import subprocess
    proc = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
                            stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    # Strip the trailing newline and decode the bytes git emits.
    return out.rstrip().decode('utf-8')
| 2.25
| 2
|
services/backend/project/api/search.py
|
kzkaneoka/custom-job-search
| 0
|
12774317
|
<gh_stars>0
from flask import Blueprint, jsonify, request
from project.api.sites import Indeed
search_blueprint = Blueprint("search", __name__)
@search_blueprint.route("/search", methods=["POST"])
def search_jobs():
    """Handle POST /search: scrape Indeed for jobs matching the JSON payload.

    Expects a JSON body with "words", "location" and "offset" keys.
    Returns a JSON envelope {"status", "message"[, "data"]} with HTTP 200 on
    success and 400 on a missing/invalid payload or empty result.
    """
    post_data = request.get_json()
    response_object = {"status": "fail", "message": "Invalid payload."}
    if not post_data:
        return jsonify(response_object), 400
    try:
        words = post_data["words"]
        location = post_data["location"]
        offset = post_data["offset"]
        jobs = Indeed(words, location, offset).search()
        # "data" is present (possibly empty) whenever the payload parsed.
        response_object["data"] = {}
        if jobs:
            response_object["data"]["jobs"] = jobs
            response_object["data"]["words"] = words
            response_object["data"]["location"] = location
            response_object["data"]["offset"] = offset
            response_object["status"] = "success"
            response_object["message"] = "Successfully searched."
            return jsonify(response_object), 200
        response_object["message"] = "Sorry. Can't find jobs."
        # jsonify on every path for a consistent response type
        # (the original returned the bare dict here and on KeyError).
        return jsonify(response_object), 400
    except KeyError:
        return jsonify(response_object), 400
| 2.71875
| 3
|
tests/tpath/dn3/conf.py
|
kajigga/pop
| 48
|
12774318
|
DYNE = {'dn1': ['dn1']}
| 1.273438
| 1
|
2015/15_ScienceforHungryPeople/ingredient.py
|
deanearlwright/AdventOfCode
| 1
|
12774319
|
# ======================================================================
# Science for Hungry People
# Advent of Code 2015 Day 15 -- <NAME> -- https://adventofcode.com
#
# Python implementation by Dr. <NAME> III
# ======================================================================
# ======================================================================
# i n g r e d i e n t . p y
# ======================================================================
"Ingredient for the Advent of Code 2015 Day 15 puzzle"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import re
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8
RE_INGREDIENT = re.compile("([A-Za-z]+): capacity (-?[0-9]+), durability (-?[0-9]+)," +
" flavor (-?[0-9]+), texture (-?[0-9]+), calories (-?[0-9]+)")
# ======================================================================
# Ingredient
# ======================================================================
class Ingredient(object):   # pylint: disable=R0902, R0205
    "Object for Science for Hungry People"

    def __init__(self, text=None, part2=False):
        "Initialize from an optional description line"
        # 1. Defaults: an all-zero ingredient
        self.part2 = part2
        self.text = text
        self.name = ""
        self.qualities = [0, 0, 0, 0]
        self.cals = 0
        # 2. Parse the description line, if one was given
        if text:
            self._parse(text)

    def _parse(self, text):
        "Fill in name, qualities and calories from a description line"
        match = RE_INGREDIENT.match(text)
        if match is None:
            print("Unable to parse", text)
            return
        name, capacity, durability, flavor, texture, calories = match.groups()
        self.name = name
        self.qualities = [int(capacity), int(durability), int(flavor), int(texture)]
        self.cals = int(calories)

    def properties(self, teaspoons=1):
        "Return the property scores scaled by the number of teaspoons"
        return [teaspoons * quality for quality in self.qualities]

    def calories(self, teaspoons=1):
        "Return the calories contributed by this many teaspoons"
        return teaspoons * self.cals
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end i n g r e d i e n t . p y end
# ======================================================================
| 2.515625
| 3
|
utils/data.py
|
liuaoy/deep-time-series
| 0
|
12774320
|
from torch import Tensor, Generator
from typing import TypeVar, List, Optional, Tuple, Sequence
from torch import default_generator
from torch.utils.data import Dataset, Subset
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
from torch._utils import _accumulate
from torch import randperm
import torch
class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    This local class shadows torch.utils.data.Subset (imported above) and
    additionally forwards inverse_transform to the wrapped dataset.

    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """
    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        # Translate the subset-local index into the underlying dataset index.
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)

    def inverse_transform(self, x):
        # Delegate when the wrapped dataset supports inverse_transform;
        # otherwise pass x through unchanged.
        if hasattr(self.dataset, "inverse_transform"):
            return self.dataset.inverse_transform(x)
        else:
            return x
def order_split(dataset, lengths):
    r"""
    Split a dataset into contiguous, in-order, non-overlapping subsets.

    Unlike torch's random_split, indices are assigned sequentially. At most
    one entry of ``lengths`` may be -1, meaning "whatever remains"; it is
    replaced in place with the computed remainder.

    >>> order_split(range(10), [3, 7])
    >>> order_split(range(10), [3, -1])

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (list): lengths of splits to be produced

    Raises:
        ValueError: if no -1 placeholder is present and the lengths do not
            sum to the dataset length.
    """
    try:
        idx = lengths.index(-1)
    except ValueError:
        # No placeholder: the lengths must cover the dataset exactly.
        # (Cannot verify that dataset is Sized.)
        if sum(lengths) != len(dataset):  # type: ignore
            raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    else:
        # Replace the -1 placeholder with the remaining length. sum(lengths)
        # still contains the -1 itself, hence the "+ 1" correction.
        # (The original computed len - sum + 1, overshooting by 2.)
        lengths[idx] = len(dataset) - (sum(lengths) + 1)
    # Sequential indices instead of a random permutation.
    indices = torch.arange(sum(lengths), dtype=torch.long).tolist()
    return [Subset(dataset, indices[offset - length : offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
| 2.78125
| 3
|
molsysmt/_private/digestion/group_indices.py
|
uibcdf/MolModMTs
| 0
|
12774321
|
<reponame>uibcdf/MolModMTs
import numpy as np
def digest_group_indices(group_indices):
    """Normalize a group-indices argument.

    Accepts 'all' (any capitalization), a single integer, or an iterable of
    integers; returns 'all' or an int64 numpy array. Anything else is
    returned unchanged.
    """
    kind = type(group_indices)
    if kind == str:
        # Only the literal 'all' spellings are valid strings.
        if group_indices not in ('all', 'All', 'ALL'):
            raise ValueError()
        return 'all'
    if kind in (int, np.int64, np.int32):
        # Promote a scalar index to a one-element array.
        return np.array([group_indices], dtype='int64')
    if hasattr(group_indices, '__iter__'):
        return np.array(group_indices, dtype='int64')
    return group_indices
| 2.8125
| 3
|
octopus/api/graph.py
|
ZarvisD/octopus
| 2
|
12774322
|
from graphviz import Digraph
from octopus.api.edge import (EDGE_UNCONDITIONAL,
EDGE_CONDITIONAL_TRUE, EDGE_CONDITIONAL_FALSE,
EDGE_FALLTHROUGH, EDGE_CALL)
import logging
log = logging.getLogger(__name__)
log.setLevel(level=logging.DEBUG)
def insert_edges_to_graph(graph, edges, call):
    """Add the given edges to a graphviz graph, color-coded by edge type.

    Call edges are drawn only when `call` is true; any other unrecognized
    edge type (including call edges when `call` is false) raises.
    """
    # Color per edge type; EDGE_CALL is handled separately (gated on `call`).
    color_by_type = {
        EDGE_UNCONDITIONAL: 'blue',
        EDGE_CONDITIONAL_TRUE: 'green',
        EDGE_CONDITIONAL_FALSE: 'red',
        EDGE_FALLTHROUGH: 'cyan',
    }
    # remove duplicate edges
    for edge in list(set(edges)):
        if edge.type in color_by_type:
            graph.edge(edge.node_from, edge.node_to,
                       color=color_by_type[edge.type])
        elif edge.type == EDGE_CALL and call:
            graph.edge(edge.node_from, edge.node_to, color='yellow')
        else:
            raise Exception('Edge type unknown')
class Graph(object):
    """Renders basic blocks and their edges as a graphviz digraph."""

    def __init__(self, basicblocks, edges, functions=None,
                 filename='graph.gv', design=None):
        # NOTE(review): `functions` is accepted but not stored here;
        # presumably kept for API symmetry with CFGGraph — confirm.
        self.basicblocks = basicblocks
        self.edges = edges
        self.filename = filename
        # Default graphviz node attributes applied to every block node.
        self.design = design or {'shape': 'box', 'fontname': 'Courier',
                                 'fontsize': '30.0', 'rank': 'same'}

    def view_ssa(self, call=False, view=True):
        # Convenience wrapper: render with SSA-form instruction listings.
        self.view(view=view, call=call, ssa=True)

    def view_simplify(self, call=False, view=True):
        # Convenience wrapper: render block names only, no instructions.
        self.view(view=view, call=call, simplify=True)

    def view(self, view=True, simplify=False, call=False, ssa=False):
        """Build the digraph and render it to self.filename.

        simplify: label nodes with block names only.
        ssa: label nodes with SSA instruction listings instead of details.
        call: also draw call edges.
        view: open the rendered output after writing it.
        """
        g = Digraph(self.filename, filename=self.filename)
        with g.subgraph(name='global', node_attr=self.design) as c:
            c.label = 'global'
            # create all the basicblocks (blocks)
            for basicblock in self.basicblocks:
                if simplify:
                    # create node
                    c.node(basicblock.name, label=basicblock.name)
                else:
                    if ssa:
                        label = basicblock.instructions_ssa()
                    else:
                        label = basicblock.instructions_details()
                    # the escape sequences "\n", "\l" and "\r"
                    # divide the label into lines, centered,
                    # left-justified, and right-justified, respectively.
                    label = label.replace('\n', '\l')
                    # create node
                    c.node(basicblock.name, label=label)
        # insert edges on the graph
        insert_edges_to_graph(g, self.edges, call)
        g.render(self.filename, view=view)
        # g.view()
class CFGGraph(Graph):
    """Graph specialized for a CFG object; can render one cluster per function."""

    def __init__(self, cfg, filename='graph.cfg.gv', design=None):
        Graph.__init__(self, cfg.basicblocks, cfg.edges, filename=filename,
                       design=design)
        self.cfg = cfg

    def view_functions_ssa(self, call=False, view=True):
        # Convenience wrapper: per-function rendering with SSA listings.
        self.view_functions(view=view, call=call, ssa=True)

    def view_functions_simplify(self, call=False, view=True):
        # Convenience wrapper: per-function rendering, block names only.
        self.view_functions(view=view, call=call, simplify=True)

    def view_functions(self, view=True, simplify=False, call=False, ssa=False, color='grey'):
        """Render a graphviz digraph with one cluster subgraph per function."""
        g = Digraph('G', filename=self.filename)
        g.attr(rankdir='TB')
        g.attr(overlap='scale')
        g.attr(splines='spline')
        g.attr(ratio='fill')
        count = 0
        for func in self.cfg.functions:
            with g.subgraph(name='cluster_%d' % count, node_attr=self.design) as c:
                # Show both names when the preferred name differs from the raw one.
                if func.name == func.prefered_name:
                    name = func.name
                else:
                    name = func.prefered_name + ' - ' + func.name
                c.attr(label=name)
                c.attr(color=color)
                c.attr(fontsize='50.0')
                c.attr(overlap='false')
                c.attr(splines='spline')
                c.attr(ratio='fill')
                # create all the basicblocks (blocks)
                for basicblock in func.basicblocks:
                    if simplify:
                        # create node
                        c.node(basicblock.name, label=basicblock.name, splines='true')
                    else:
                        if ssa:
                            label = basicblock.instructions_ssa()
                        else:
                            label = basicblock.instructions_details()
                        # the escape sequences "\n", "\l" and "\r"
                        # divide the label into lines, centered,
                        # left-justified, and right-justified, respectively.
                        label = label.replace('\n', '\l')
                        # create node
                        c.node(basicblock.name, label=label)
            count += 1
        # insert edges on the graph
        insert_edges_to_graph(g, self.cfg.edges, call)
        g.render(self.filename, view=view)
        # g.view()
| 2.75
| 3
|
qc/__init__.py
|
awohns/stdpopsim
| 0
|
12774323
|
<gh_stars>0
# Main entry point for stdpopsim_qc
# Species definitions.
from . import homo_sapiens_qc # NOQA
from . import drosophlia_melanogaster_qc # NOQA
| 1.15625
| 1
|
pyro/contrib/bnn/__init__.py
|
Capri2014/pyro
| 10
|
12774324
|
from pyro.contrib.bnn.hidden_layer import HiddenLayer
__all__ = [
"HiddenLayer",
]
| 1.0625
| 1
|
Curso/paquete/42_map.py
|
jsalmoralp/Python-Proyecto-Apuntes
| 0
|
12774325
|
"""
Map: Aplica una función a dcada elemento de una lista iterable, dvolviendo otra lista.
"""
def elevar_cuadrado(num):
    """Return the square of *num*."""
    return pow(num, 2)
# numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
numeros = list(range(1, 11)) # Del 1 al 10
print(numeros)
numeros_elevados = list(map(elevar_cuadrado, numeros))
| 4.125
| 4
|
quarkchain/experimental/random_sampling_simulator.py
|
QuarkChain/pyquarkchain
| 237
|
12774326
|
<filename>quarkchain/experimental/random_sampling_simulator.py
import random

# Monte-Carlo estimate of the probability that at least one shard committee
# is captured by attackers when committees are drawn by shuffling the
# validator pool each round.

committee_size = 150
shard_size = 1024
pool_size = 150 * 1024
# Percentage of attackers in pool
attacker_p = 0.15
attacker_n = int(attacker_p * pool_size)
# Attack threshold (a committee with t percent of attackers)
attacker_tn = int(committee_size / 3)
# Monte-carlo trials
trials = 100000
# Pool members: 1 - attacker; 0 - honest validator
pool = [1 for i in range(attacker_n)]
pool.extend([0 for i in range(pool_size - attacker_n)])
attacked_trials = 0
for trial in range(trials):
    # Periodically report the running probability estimate.
    if trial != 0 and trial % 10 == 0:
        print("Trial %d, attack prob: %f" % (trial, attacked_trials / trial))
    random.shuffle(pool)
    # Slice the shuffled pool into per-shard committees; the trial counts as
    # attacked as soon as any committee reaches the attacker threshold.
    for j in range(shard_size):
        if sum(pool[j * committee_size : (j + 1) * committee_size]) >= attacker_tn:
            attacked_trials += 1
            break
print("Attack prob: %f" % (attacked_trials / trials))
| 2.71875
| 3
|
example_project/development.py
|
EnvSys/django-moderation
| 97
|
12774327
|
from example_project.settings import *
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
| 1.21875
| 1
|
systrade/trading/brokers.py
|
pdghawk/systrade
| 1
|
12774328
|
""" Module for Brokers
Brokers hold data, and provide it or subsets of it on request
when requesting price for buying and selling, prices will likely differ
"""
import copy
import pandas as pd
from pandas.tseries.offsets import DateOffset
class PaperBroker:
    """Homemade paper-trading brokerage backed by a historical price DataFrame.

    Args:
        data_df: pandas DataFrame of prices, indexed by time, with one
            column per ticker name.

    Keyword Args:
        slippage_time: pandas DateOffset delay between a price request and
            the time whose price is actually used (default: 0 seconds).
        transaction_cost: flat cost charged per transaction (default 0).
        spread_pct: buy/sell spread as a percentage on [0, 100]; half the
            spread is applied on each side of the mid price (default 0).
    """

    def __init__(self,
                 data_df,
                 slippage_time=DateOffset(seconds=0),
                 transaction_cost=0.0,
                 spread_pct=0.0):
        if isinstance(data_df, pd.DataFrame):
            self._historical_data = data_df
        else:
            raise TypeError("data_df supplied to PaperBroker should be a pandas DataFrame")
        if isinstance(slippage_time, pd.DateOffset):
            self._slippage_time = slippage_time
        else:
            # fixed typo in the original message ("splippage")
            raise TypeError("slippage_time should be a pandas DateOffset")
        if isinstance(transaction_cost, (float, int)):
            if transaction_cost >= 0:
                self.transaction_cost = transaction_cost
            else:
                raise ValueError("transaction fee cannot be < 0")
        else:
            raise TypeError("transaction_cost should be a number")
        if isinstance(spread_pct, (float, int)):
            if 0.0 <= spread_pct <= 100.0:
                self.spread_pct = spread_pct
            else:
                # The check accepts [0, 100]; the original message wrongly
                # claimed the range was [0, 1].
                raise ValueError("spread_pct should be a percentage: on [0, 100]")
        else:
            raise TypeError("spread_pct should be a number")

    def clone(self):
        """Return an independent deep copy of this broker."""
        return copy.deepcopy(self)

    def next_extant_time(self, time):
        """Return the first timestamp in the data at or after *time*.

        Raises:
            ValueError: if *time* is later than the last available timestamp.
        """
        if time <= self._historical_data.index.max():
            # get_indexer replaces Index.get_loc(key, method='backfill'),
            # whose `method` argument was removed in pandas 2.0.
            t_ind = self._historical_data.index.get_indexer([time], method='backfill')[0]
            return self._historical_data.index[t_ind]
        raise ValueError("requesting a time later than available in data")

    # ---------- information requests ------------------------------------------
    def get_timeindex_subset(self, t0, t1):
        """Return a deep copy of the time index restricted to [t0, t1]."""
        if not isinstance(t0, pd.Timestamp):
            raise TypeError("t0 should be a pandas timestamp")
        if not isinstance(t1, pd.Timestamp):
            raise TypeError("t1 should be a pandas timestamp")
        if t0 < self._historical_data.index.min():
            raise ValueError("requesting data prior to earliest time")
        if t1 > self._historical_data.index.max():
            raise ValueError("requesting data after latest time")
        return copy.deepcopy(self._historical_data.loc[t0:t1].index)

    def get_firstlast_times(self):
        """Return the (earliest, latest) timestamps available."""
        t0 = self._historical_data.index.min()
        t1 = self._historical_data.index.max()
        return t0, t1

    def get_tick_list(self):
        """Return the list of all ticker names."""
        return self._historical_data.columns.to_list()

    def get_price_list(self, ticker_list, time0, time1):
        """Return prices for the given ticker(s) between time0 and time1."""
        if isinstance(ticker_list, str):
            ticker_list = [ticker_list]
        if set(ticker_list).issubset(self._historical_data.columns):
            return self._historical_data.loc[time0:time1][ticker_list]
        raise ValueError("ticker_list contained tickers that do not exist in historical data")

    def get_data_subset(self, ticker, time):
        """Return prices for *ticker* from *time* to the end of the data."""
        max_time = self._historical_data.index.max()
        return self.get_price_list(ticker, time, max_time)

    def get_unslipped_price(self, ticker, time):
        """Return the raw price of *ticker* at *time*, with no slippage applied."""
        time = self.next_extant_time(time)
        if ticker in self._historical_data:
            return self._historical_data.loc[time][ticker]
        raise ValueError("ticker:", ticker, " not available in historical_data")

    def get_price(self, ticker, time):
        """Return (price, transaction_cost, effective_time) after slippage."""
        time = time + self._slippage_time
        time = self.next_extant_time(time)
        if ticker in self._historical_data:
            return self._historical_data.loc[time][ticker], self.transaction_cost, time
        raise ValueError("ticker:", ticker, " not available in historical_data")

    def get_buy_price(self, ticker, time):
        """Return (buy price, fee, effective_time); adds half the spread."""
        p, f, t = self.get_price(ticker, time)
        return p * (1.0 + self.spread_pct / 200.0), f, t

    def get_sell_price(self, ticker, time):
        """Return (sell price, fee, effective_time); subtracts half the spread."""
        p, f, t = self.get_price(ticker, time)
        return p * (1.0 - self.spread_pct / 200.0), f, t
| 3.21875
| 3
|
htpt/frame.py
|
ben-jones/facade
| 0
|
12774329
|
# <NAME>
# Fall 2013
# htpt
# frame.py: ensure in-order delivery of frames for the htpt project
import threading
import struct
#from random import randint
from buffers import Buffer
from constants import *
class FramingException(Exception):
    """Raised for errors in htpt frame assembly or parsing."""
    pass
class SeqNumber():
    """Thread-safe, wrapping sequence-number generator shared process-wide."""
    # Start at -1 so the first generated sequence number is 0.
    _seqNum = -1
    _lock = threading.Lock()

    @classmethod
    def setSeqNum(cls, seqNum):
        """Force the counter to a specific value."""
        with cls._lock:
            cls._seqNum = seqNum

    @classmethod
    def getSequenceAndIncrement(cls):
        """
        In a thread safe manner, get the sequence number and increment,
        wrapping around at MAX_SEQ_NUM.
        Note: this function is called only when a new client connects
        """
        with cls._lock:
            cls._seqNum = (cls._seqNum + 1) % MAX_SEQ_NUM
            return cls._seqNum
class SessionID():
    """Thread-safe generator of session IDs for newly connected clients."""
    _sessionID = 0
    _lock = threading.Lock()

    @classmethod
    def setSessionID(cls, sessionID):
        """Force the session counter to a specific value."""
        with cls._lock:
            cls._sessionID = sessionID

    @classmethod
    def getSessionIDAndIncrement(cls):
        """
        In a thread safe manner, get the session ID, wrapping around at
        MAX_SESSION_NUM.
        Note: this function is called only when a new client connects
        """
        with cls._lock:
            cls._sessionID = (cls._sessionID + 1) % MAX_SESSION_NUM
            return cls._sessionID
class Assembler():
    """Builds outgoing frames: a 4-byte header followed by the payload."""

    def __init__(self, sessionID=0):
        """Set up the sequence-number source and remember the session ID."""
        self.seqNum = SeqNumber()
        self.setSessionID(sessionID)

    def setSessionID(self, sessionID):
        """
        Record the session ID for this sender.
        Note: the client calls this after receiving the server's ACK,
        using the ID extracted by the Disassembler.
        """
        self.sessionID = sessionID

    def getSessionID(self):
        """Return the stored session ID (used when building header flags)."""
        return self.sessionID

    def generateFlags(self, **kwargs):
        """Build the 8-bit flags field.

        Keyword args: more_data and SYN, each a boolean integer (0/1).
        Layout: [ more_data | SYN | X | X | X | X | X | X ]
        Example: generateFlags(more_data=1, SYN=0)
        """
        flags = 0
        if kwargs.get('more_data'):
            flags |= 0x80   # bit 7: more data pending
        if kwargs.get('SYN'):
            flags |= 0x40   # bit 6: session setup
        return flags

    def getSeqNum(self):
        """Return the next sequence number."""
        return self.seqNum.getSequenceAndIncrement()

    def getHeaders(self, **kwargs):
        """Create a 4 byte header in network byte order.

        16-bit sequence num | 8-bit session ID | 8-bit flags, packed as
        unsigned short (H) | unsigned char (B) | unsigned char (B).
        """
        self.sequenceNumber = self.getSeqNum()
        self.sessionID = self.getSessionID()
        self.flags = self.generateFlags(**kwargs)
        return struct.pack('!HBB', self.sequenceNumber, self.sessionID, self.flags)

    def assemble(self, data, **kwargs):
        """Return headers + data for the given payload string.

        **kwargs is a dict of flags to be set in the headers.
        """
        return self.getHeaders(**kwargs) + data
class Disassembler:
    """Class to Disassemble a decoded packet into headers+data before sending to buffers"""

    def __init__(self, callback):
        # callback is invoked by the Buffer when in-order data can be flushed
        self.callback = callback
        # allocate a buffer to receive data
        self.buffer = Buffer()
        self.buffer.addCallback(self.callback)

    def disassemble(self, frame):
        """
        Disassemble received (decoded) frame to headers and data
        Parameters: frame is the raw bytes after decoding url or cookie
        headers are the first 4 bytes, data is what follows.
        should be called from main() after urlEncode.decode(). raw data,
        seqNum are then sent to Buffer.recvData() to flush it.
        we assume data is simply a string
        """
        # split to headers + data
        headers = frame[:4]
        self.retrieveHeaders(headers)
        data = frame[4:]
        # receive, reorder and flush at buffer
        # print "In disassemble: {} {}".format(data, self.buffer.buffer)
        self.buffer.recvData(data, self.seqNum)
        return data

    def retrieveHeaders(self, headers):
        """Extract 4 byte header to seqNum, sessionID, Flags"""
        seqNum, sessionID, flags = parseHeaders(headers)
        self.seqNum = seqNum
        # retrieve flags
        self.flags = flags
        # retrieved header sets the session ID if SYN flag is set
        # also add a check: if SYN flag not checked then
        # self.sessionid == headerTuple[1]
        #if self.flags & (1<<7):
        # NOTE(review): the SYN check above is commented out, so the session
        # ID is unconditionally overwritten on every received frame - confirm
        self.setSessionID(sessionID)
        # TODO: if flags = '1000' i.e. more_data, then send pull_request to
        # server for more data

    def getSessionID(self):
        """Return session ID to upper abstraction"""
        return self.sessionID

    def setSessionID(self, sessionID=0):
        """Set sessionID at client or server"""
        self.sessionID = sessionID

    #def flush(self):
    #    self.buffer.flush()
def parseHeaders(headers):
    """Unpack a 4-byte network-order header.

    Layout: unsigned short sequence number, unsigned char session ID,
    unsigned char flag byte.  Returns the tuple (seqNum, sessionID, flags).
    """
    seq_num, session_id, flag_bits = struct.unpack('!HBB', headers)
    return seq_num, session_id, flag_bits
def initServerConnection(frame, passwords, callback):
    """
    Respond to an initialization request from a client
    1. ensure that the password is correct-> do this by verifying that
    it matches one of the passwords in the list
    2. initialize the sessionID for this client
    3. return an assembler and disassembler for this client
    Note: this is a module method, it is not associated with an object
    Returns (False, False) on a bad password, otherwise
    (Assembler, Disassembler) already bound to the new session ID.
    """
    # parse the headers
    headers = frame[:4]
    seqNum, sessionID, flags = parseHeaders(headers)
    data = frame[4:]
    # Part 1: validate the password
    # if this is a bad login attempt, then return False
    if data not in passwords:
        # NOTE(review): Python 2 print statement; it also echoes the raw
        # frame (i.e. the attempted password) - consider removing for prod
        print "len: {} frame: {} seqNum: {} sessionID: {} flags: {} ".format(len(frame), frame, str(seqNum), str(sessionID), str(flags))
        return False, False
    # Part 2: initialize the session id using the SessionID class methods
    sessionID = SessionID.getSessionIDAndIncrement()
    # Part 3: return an assembler and disassembler for the client
    sender = Assembler()
    # print "seqNum: {}".format(sender.seqNum._seqNum)
    sender.setSessionID(sessionID)
    receiver = Disassembler(callback)
    receiver.setSessionID(sessionID)
    return sender, receiver
| 2.703125
| 3
|
tests/fakes/serial.py
|
ltowarek/arinna
| 0
|
12774330
|
<reponame>ltowarek/arinna<filename>tests/fakes/serial.py
#!/usr/bin/env python3
class FakeSerial:
    """In-memory stand-in for a serial port, used by the tests.

    Records the last payload passed to write() and serves a caller-set
    response buffer one byte at a time through read()/read_until().
    """

    def __init__(self):
        # most recent payload given to write(); None until the first write
        self._last_written_data = None
        # pending bytes to serve via read(); None until a response is set
        self._response = None
        self.read_data = []

    @property
    def last_written_data(self):
        """Payload from the most recent write() call."""
        return self._last_written_data

    @property
    def response(self):
        """Mutable buffer of bytes still waiting to be read."""
        return self._response

    @response.setter
    def response(self, value):
        # copy into a bytearray so read() can consume it destructively
        self._response = bytearray(value)

    def write(self, data):
        """Record *data* as the last thing written to the fake port."""
        self._last_written_data = data

    def read(self):
        """Yield the queued response one byte (as a bytes object) at a time."""
        while self._response:
            head = self._response.pop(0)
            yield bytes([head])

    def read_until(self, expected):
        """Consume bytes up to and including *expected*; return the prefix."""
        pieces = []
        for chunk in self.read():
            if chunk == expected:
                break
            pieces.append(chunk)
        return b''.join(pieces)
| 2.78125
| 3
|
day4/openCVEx.9.py
|
minssoj/Learning_cnn
| 0
|
12774331
|
import cv2
import numpy as np

VIDEO_PATH = '../datasets/opencv/fish.mp4'

# Demo 1: play the video frame by frame.
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(VIDEO_PATH)
while True:
    _ret, frame = cap.read()
    # at end of stream cap.read() returns (False, None); the original
    # passed None straight into cv2.resize and crashed
    if not _ret:
        break
    frame = cv2.resize(frame, (500, 400))
    cv2.imshow('opencv camera', frame)
    k = cv2.waitKey(1)  # wait 1 msec
    if k == 27 or k == 13:  # ESC or Enter quits
        break
cap.release()
cv2.destroyAllWindows()

# Demo 2: red-ish HSV mask on the same video.
# BUG FIX: the original reused the capture after cap.release(), so every
# read() failed; the video must be re-opened for the second pass.
cap = cv2.VideoCapture(VIDEO_PATH)
while True:
    _ret, frame = cap.read()
    if not _ret:
        break
    frame = cv2.resize(frame, (500, 400))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h = hsv[:, :, 0]  # hue
    s = hsv[:, :, 1]  # saturation
    v = hsv[:, :, 2]  # value (brightness)
    img = np.zeros(h.shape, dtype=np.uint8)
    # keep strongly saturated pixels whose hue is in the red wrap-around band
    img[((h < 50) | (h > 200)) & (s > 100)] = 255
    cv2.imshow('opencv camera', img)
    k = cv2.waitKey(1)  # wait 1 msec
    if k == 27 or k == 13:
        break
cap.release()
cv2.destroyAllWindows()
| 2.984375
| 3
|
tests/test_node.py
|
account-login/arggen
| 2
|
12774332
|
<filename>tests/test_node.py<gh_stars>1-10
from arggen import Root, Block, Condition, If, ElseIf, Else, Context, collect_node
def test_block():
    """A Block renders as its head line plus an indented, braced body."""
    block = Block('head')
    block.add_child('asdf')
    block.add_child('1234')
    # [1:] strips the leading newline of the triple-quoted literal
    assert '\n'.join(block.to_source(0)) == '''
head {
    asdf
    1234
}'''[1:]
def test_if():
    """An If node renders as 'if (cond) { ... }'."""
    ifthen = If('abc')
    ifthen.add_child('asdf')
    assert '\n'.join(ifthen.to_source(0)) == '''
if (abc) {
    asdf
}'''[1:]
def test_condition():
    """A Condition chains If / ElseIf / Else branches on single lines."""
    cond = Condition()
    ifthen = If('a').add_child('aaa')
    elseif = ElseIf('b').add_child('bbb')
    cond.add_child(ifthen).add_child(elseif)
    assert '\n'.join(cond.to_source(0)) == '''
if (a) {
    aaa
} else if (b) {
    bbb
}'''[1:]
    # appending an Else afterwards extends the same chain
    elsethen = Else().add_child('ccc')
    cond.add_child(elsethen)
    assert '\n'.join(cond.to_source(0)) == '''
if (a) {
    aaa
} else if (b) {
    bbb
} else {
    ccc
}'''[1:]
def test_condition_dangling_else():
    """An Else with no preceding If collapses to its bare body."""
    cond = Condition().add_child(
        Else().add_child('asdf'))
    assert '\n'.join(cond.to_source(0)) == 'asdf'
def test_nested():
    """Nested Blocks/Conditions indent one extra level per depth."""
    block = Block('head')\
        .add_child('123')\
        .add_child('asdf')\
        .add_child(Condition()
                   .add_child(If('a')
                              .add_child('aaa'))
                   .add_child(ElseIf('b')
                              .add_child(Block('bbb')
                                         .add_child('BBB'))))
    assert '\n'.join(block.to_source(0)) == '''
head {
    123
    asdf
    if (a) {
        aaa
    } else if (b) {
        bbb {
            BBB
        }
    }
}'''[1:]
def test_collect_node():
    """collect_node turns a context-manager generator into the node tree."""
    def g(ctx: Context):
        with ctx.BLOCK('head'):
            yield 'abc'
            with ctx.IF('a'):
                yield 'AAA'
            with ctx.CONDITION():
                with ctx.IF('b'):
                    yield 'BBB'
                with ctx.ELSE():
                    yield 'CCC'
            yield 'zzz'
    # expected tree mirrors the with-block nesting above
    assert collect_node(g) == \
        Root().add_child(Block('head')\
            .add_child('abc')\
            .add_child(If('a').add_child('AAA'))\
            .add_child(Condition()
                       .add_child(If('b').add_child('BBB'))
                       .add_child(Else().add_child('CCC')))\
            .add_child('zzz'))
| 2.625
| 3
|
sparql-client/tests/genquery.py
|
vlastocom/sparql-client
| 28
|
12774333
|
<reponame>vlastocom/sparql-client<filename>sparql-client/tests/genquery.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Send the SPARQL query stored in code.rq to DBpedia and print the raw reply."""
# de-duplicated: the original imported urllib.request/error twice each
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request

# read the SPARQL statement; 'with' closes the handle (the original leaked it)
with open('code.rq') as rq_file:
    statement = rq_file.read()
query = {'query': statement, 'format': 'xml'}
qs = six.moves.urllib.parse.urlencode(query)
print(qs)

url = 'http://dbpedia.org/sparql?' + qs

opener = \
    six.moves.urllib.request.build_opener(six.moves.urllib.request.HTTPHandler)
six.moves.urllib.request.install_opener(opener)

req = six.moves.urllib.request.Request(url)
# req.add_header("Accept", "application/xml")
try:
    conn = six.moves.urllib.request.urlopen(req, timeout=10)
except Exception:
    conn = None
if not conn:
    raise IOError('Failure in open')

data = conn.read()
conn.close()
print(data)
| 2.125
| 2
|
python/src/data_structure/data_structure.py
|
yipwinghong/Algorithm
| 9
|
12774334
|
# coding=utf-8
# Definition for singly-linked list.
class ListNode(object):
    """Node of a singly-linked list."""
    def __init__(self, x, next=None):
        self.val = x      # payload
        self.next = next  # successor node, or None at the tail
class DoubleNode(object):
    """Node of a doubly-linked list keyed by *key* (e.g. for an LRU cache)."""
    def __init__(self, key, val, pre=None, next=None):
        self.key = key
        self.val = val
        self.pre = pre    # predecessor node, or None at the head
        self.next = next  # successor node, or None at the tail
# Definition for a binary tree node.
class TreeNode(object):
    """Node of a binary tree."""
    def __init__(self, x, left=None, right=None):
        self.val = x
        self.left = left    # left subtree root, or None
        self.right = right  # right subtree root, or None
class TrieNode(object):
    """Node of a 26-way trie (one child slot per lowercase letter)."""

    def __init__(self, end=False):
        # one slot per letter; None marks a missing child.
        # ([None] * 26 replaces the original append loop - same result)
        self.children = [None] * 26
        # True when a word terminates at this node
        self.end = end

    def set_end(self):
        """Mark this node as the end of a stored word."""
        self.end = True

    @property
    def is_end(self):
        """Whether a stored word terminates at this node."""
        return self.end
class RandomNode(object):
    """Linked-list node that also carries an arbitrary 'random' pointer."""
    def __init__(self, val, next, random):
        self.val = val
        self.next = next      # successor node
        self.random = random  # pointer to any node in the list (or None)
class GraphNode(object):
    """Node of a graph given as an adjacency list."""
    def __init__(self, val, neighbors):
        self.val = val
        self.neighbors = neighbors  # list of adjacent GraphNode objects
class QuadTreeNode(object):
    """Node of a quad tree; leaves carry *val*, inner nodes four children."""
    def __init__(self, val, is_leaf, top_left,
                 top_right, bottom_left, bottom_right):
        self.val = val
        self.is_leaf = is_leaf  # True when the four children are unused
        self.top_left = top_left
        self.top_right = top_right
        self.bottom_left = bottom_left
        self.bottom_right = bottom_right
| 3.84375
| 4
|
final_project/poi_id.py
|
puthli/Udacity_ud120
| 0
|
12774335
|
<reponame>puthli/Udacity_ud120
#!/usr/bin/python
# poi_id.py
# Creates a dataset and classifier definition fot the
# final project in the udacity ud120 machine learning introduction course
# Note: script changed to run on python 3.6
#
# to run: python3 poi_id.py
import matplotlib.pyplot as plt
import numpy
#
from sklearn.naive_bayes import GaussianNB
#
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
#
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import StandardScaler
#
from sklearn.grid_search import GridSearchCV
#
import sys
import pickle
#
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
def tuneDecisionTreeClassifier(features, labels):
    """Grid-search min_samples_split / max_features for a DecisionTree.

    Prints the best parameter combination; returns nothing.
    """
    classifier = tree.DecisionTreeClassifier()
    parameters = {'min_samples_split':[2, 4, 6, 8, 10], 'max_features':[2,3,4,5]}
    tuned_classifier = GridSearchCV(classifier, parameters)
    tuned_classifier.fit(features, labels)
    print("best estimator = ", tuned_classifier.best_params_)
def getTunedDecisionTree(features, labels):
    """Return a classifier fitted with the tuned hyper-parameters.

    NOTE: despite the name, this currently returns a RandomForestClassifier
    (it scored better; the DecisionTree version is kept commented out).
    """
    #return tree.DecisionTreeClassifier(min_samples_split=8, max_features=4)
    clf = RandomForestClassifier(n_estimators=10, min_samples_split=8, max_features=4)
    clf.fit(features, labels)
    print("Feature importances", clf.feature_importances_ )
    return clf
def tuneSVM(features, labels):
    """Grid-search C / kernel for an SVC; prints the best parameters.

    NOTE(review): very slow, and the nested fit_transform chain below is a
    workaround because the Pipeline did not work inside GridSearchCV here.
    """
    classifier = svm.SVC()
    selector = SelectKBest(k=4)
    scaler = StandardScaler()
    pca = PCA()
    parameters = {'C':[1, 10, 20, 50, 100, 200, 500], 'kernel':['linear', 'rbf']}
    #unfortunately the pipeline does not work with GridSearchCV
    tuned_classifier = GridSearchCV(classifier.fit(
        scaler.fit_transform(selector.fit_transform(pca.fit_transform(features, labels), labels), labels), labels), parameters)
    tuned_classifier.fit(features, labels)
    print("best estimator = ", tuned_classifier.best_params_)
def getTunedSVM():
    """Return an unfitted Pipeline: PCA -> SelectKBest -> scale -> linear SVC."""
    # returns a SVM in a pipeline
    # salary and total_payments are correlated, the same goes for other features, so use PCA to factor those out.
    pca = PCA()
    # to prevent overfitting, a limited number of the best components is selected
    # number of components was determined with trial and error from 3 to all
    selector = SelectKBest(k=4)
    # scale features for svm so each feature is treated with equal importance
    scaler = StandardScaler()
    # create the tuned classifier (tuned with a GridSearchCV)
    classifier = svm.SVC(kernel = 'linear', C=50)
    # put the pca, selector, scaler and classifier in a pipeline for the grader
    pipeline = Pipeline([("pca",pca),("selector", selector), ("scaler",scaler),("classifier",classifier)])
    return pipeline
def getTunedNB():
    """Return an unfitted Pipeline: PCA -> SelectKBest -> scale -> GaussianNB."""
    # returns a GaussianNB classifier in a pipeline
    # salary and total_payments are correlated, the same goes for other features, so use PCA to factor those out.
    pca = PCA()
    # to prevent overfitting, a limited number of the best components is selected
    # number of components was determined with trial and error from 3 to all
    selector = SelectKBest(k=4)
    # scale features so each feature is treated with equal importance
    scaler = StandardScaler()
    # create the tuned classifier (tuned with a GridSearchCV)
    classifier = GaussianNB()
    # put the pca, selector, scaler and classifier in a pipeline for the grader
    pipeline = Pipeline([("pca",pca),("selector", selector), ("scaler",scaler),("classifier",classifier)])
    return pipeline
# create extra derived features
# not all derived features are used, but all were played around with and visualised in a scatterplot
def create_features(dataset):
    """Add derived financial/email features to every person in *dataset*.

    BUG FIX: the original ignored its parameter and read the module-level
    data_dict; this version operates on (and returns) the dict passed in.
    Fields holding the sentinel string "NaN" make the arithmetic raise,
    in which case all derived features are set to "NaN" as well.
    """
    for key in dataset.keys():
        element = dataset[key]
        from_messages = element['from_messages']
        from_poi_to_this_person = element['from_poi_to_this_person']
        from_this_person_to_poi = element['from_this_person_to_poi']
        to_messages = element['to_messages']
        shared_receipt_with_poi = element['shared_receipt_with_poi']
        total_payments = element['total_payments']
        total_stock_value = element['total_stock_value']
        try:
            # divide by 1 to force numeric interpretation ("NaN" strings raise)
            element['total_value'] = (total_payments + total_stock_value) / 1
            element['total_poi_emails'] = (from_poi_to_this_person + from_this_person_to_poi ) / 1
            element['total_poi_emails_as_fraction_of_total'] = (from_this_person_to_poi+from_poi_to_this_person) / (to_messages+from_messages)
            element['from_poi_emails_as_fraction_of_total'] = (from_poi_to_this_person) / (from_messages)
            element['to_poi_emails_as_fraction_of_total'] = (from_poi_to_this_person) / (to_messages)
            element['shared_receipts_with_poi_as_fraction_of_inbox'] = (shared_receipt_with_poi) / (to_messages)
        except (TypeError, ZeroDivisionError):
            # narrowed from a bare except: only non-numeric fields or a
            # zero message count are expected failure modes here
            element['total_value'] = "NaN"
            element['total_poi_emails'] = "NaN"
            element['total_poi_emails_as_fraction_of_total'] = "NaN"
            element['from_poi_emails_as_fraction_of_total'] = "NaN"
            element['to_poi_emails_as_fraction_of_total'] = "NaN"
            element['shared_receipts_with_poi_as_fraction_of_inbox'] ="NaN"
    return dataset
# not used
# considered looking for references to LJM in inboxes, sent by POI's before 14 august 2001
# realised that most people in the data set are not in the mail zip file
# but luckily decided to tune decision tree with derived features first.
def count_ljm_references(key, element):
    """Count inbox emails mentioning "LJM" for the person named by *key*.

    Increments element['LJM_mentions_in_sent_mail'] for each matching email.
    Unused in the final pipeline (most people in the dataset have no maildir
    folder); kept for reference.
    """
    sys.path.append( "../tools/" )
    from parse_out_email_text import parseOutText
    import os
    # get email references to LJM (Debt hiding vehicle)
    element['LJM_mentions_in_sent_mail'] = 0.
    keyparts = key.split()
    initial = keyparts[-1]
    lastname = keyparts[0]
    if len(initial) == 1:
        email_address = lastname.lower() + "-" + initial.lower()
        path = "../maildir/" + email_address + "/inbox/"
        if os.path.exists(path):
            for filename in os.listdir(path):
                # 'with' closes each email (the original leaked file handles)
                with open(path + filename, "r") as email:
                    email_text = parseOutText(email)
                # BUG FIX: str.find() returns -1 (truthy) when the substring
                # is absent, so the original counted *every* email; use a
                # membership test instead
                if "LJM" in email_text:
                    element['LJM_mentions_in_sent_mail'] += 1
            print("Processing key ", key, " LJM Mentions: ", element['LJM_mentions_in_sent_mail'])
def visualise_features(data_dictionary, feature_x, feature_y, show=False, text_labels=True):
    """Scatter-plot two features of the dataset and save it as a PNG.

    POIs are drawn red, non-POIs green.  *show* opens an interactive window;
    *text_labels* annotates each point with the person's name (useful for
    outlier evaluation).  "NaN" values plot at 0.
    """
    plt.clf()
    n = len(data_dictionary)
    x = numpy.zeros(n)
    y = numpy.zeros(n)
    colours = []
    labels = list(data_dictionary.keys())
    # BUG FIX: the original looped range(0, n-1), silently dropping the last
    # person and leaving `colours` one entry short of x/y
    for i, label in enumerate(labels):
        x_data = data_dictionary[label][feature_x]
        y_data = data_dictionary[label][feature_y]
        if x_data != "NaN":
            x[i] = x_data
        if y_data != "NaN":
            y[i] = y_data
        colours.append('red' if data_dictionary[label]['poi'] else 'green')
        if text_labels:
            plt.annotate(label, (x[i], y[i]))
    plt.scatter(x, y, color=colours)
    plt.xlabel(feature_x)
    plt.ylabel(feature_y)
    if show:
        plt.show()
    filename = feature_x + "-" + feature_y + ".png"
    plt.savefig(filename, bbox_inches='tight')
### Task 1: Select what features you'll use.
#Moved further down (after feature creation)
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "rb") as data_file:
    data_dict = pickle.load(data_file)
### Task 2: Remove outliers
# visually identified the TOTAL outlier in a scatterplot
# (pop with default None so a missing key does not raise)
data_dict.pop('TOTAL', None)
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
my_dataset = create_features(data_dict)
# write scatterplot to disk for use in headless docker instance
visualise_features(my_dataset, 'total_value', 'shared_receipts_with_poi_as_fraction_of_inbox', False, False)
visualise_features(my_dataset, 'total_value', 'shared_receipt_with_poi', False, False)
# features selected by hand after visualising in a scatterplot
# ('poi' must come first: featureFormat treats it as the label)
features_list = ['poi', 'total_payments', 'total_stock_value', 'exercised_stock_options'
    , 'shared_receipts_with_poi_as_fraction_of_inbox', 'long_term_incentive']
# print the used features to the log so they are visible above the scores.
print(features_list)
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, remove_all_zeroes=True, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
# --- Results shown below of initial classifiers
#
# --- Initially SVM gives bad recall: Accuracy: 0.85733 Precision: 0.37085 Recall: 0.10050
# SVC experiments:
# linear kernel initially gives improved precision and recall over rbf kernel
# rbf: Accuracy: 0.85700 Precision: 0.14976 Recall: 0.01550 F1: 0.02809
# linear: Accuracy: 0.85707 Precision: 0.37004 Recall: 0.10250 F1: 0.16053
#
# --- GaussianNB gives reasonable recall and good Precision, but was unable to tune further
# Adding more features reduced the Recall score.
# Results:
# Pipeline(steps=[('pca', PCA(copy=True, iterated_power='auto', n_components=None, random_state=None,
# svd_solver='auto', tol=0.0, whiten=False)), ('scaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('classifier', GaussianNB(priors=None))])
# Accuracy: 0.85253 Precision: 0.41627 Recall: 0.26350 F1: 0.32272 F2: 0.28437
# Total predictions: 15000 True positives: 527 False positives: 739 False negatives: 1473 True negatives: 12261
#
# --- Decision Tree gives best Precision and Recall and is in the final script
# out of the box decision tree gives reasonable recall and precision: Accuracy: 0.79107 Precision: 0.23799 Recall: 0.25750 F1: 0.24736
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# --- Best results with tuned linear SVC still have disappointing recall:
# Pipeline(steps=[('pca', PCA(copy=True, n_components=None, whiten=False)), ('selector', SelectKBest(k=17, score_func=<function f_classif at 0x7f847445f378>)), ('scaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('classifier', SVC(C=50, cache_size=200, class_weight=None, coef0=0.0,
# decision_function_shape=None, degree=3, gamma='auto', kernel='linear',
# max_iter=-1, probability=False, random_state=None, shrinking=True,
# tol=0.001, verbose=False))])
# Accuracy: 0.83967 Precision: 0.33630 Recall: 0.20800 F1: 0.25703 F2: 0.22518
# Total predictions: 15000 True positives: 416 False positives: 821 False negatives: 1584 True negatives: 12179
#
# --- Tuned DecisionTreeClassifier gives adequate Recall and Precision scores
# DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
# max_features=3, max_leaf_nodes=None, min_samples_leaf=1,
# min_samples_split=8, min_weight_fraction_leaf=0.0,
# presort=False, random_state=None, splitter='best')
# Accuracy: 0.83080 Precision: 0.34905 Recall: 0.31100 F1: 0.32893 F2: 0.31793
# Total predictions: 15000 True positives: 622 False positives: 1160 False negatives: 1378 True negatives: 11840
#
# --- Tuned RandomForestClassifier gives even better scores
# RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
# max_depth=None, max_features=4, max_leaf_nodes=None,
# min_impurity_split=1e-07, min_samples_leaf=1,
# min_samples_split=8, min_weight_fraction_leaf=0.0,
# n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
# verbose=0, warm_start=False)
# Accuracy: 0.87367 Precision: 0.53892 Recall: 0.36350 F1: 0.43416 F2: 0.38881
# Total predictions: 15000 True positives: 727 False positives: 622 False negatives: 1273 True negatives: 12378
# --- Tune the classifier
# tuneDecisionTreeClassifier(features, labels)
#
# --- note: tuneSVM Takes hours to run
# tuneSVM(features, labels)
# clf = getTunedSVM()
# clf = getTunedNB()
# the classifier must be named 'clf' for dump_classifier_and_data below;
# the (random-forest) "tuned decision tree" scored best - see notes above
clf = getTunedDecisionTree(features, labels)
# Evaluation by the cross_validation in tester.py
import tester
tester.main()
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
dump_classifier_and_data(clf, my_dataset, features_list)
| 2.953125
| 3
|
binlin/utils/log.py
|
UKPLab/inlg2019-revisiting-binlin
| 1
|
12774336
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import sys
def setup_logging():
    """Build the application logger: DEBUG console output, matplotlib muted.

    Returns the configured 'main' logger.
    """
    main_logger = get_main_logger()
    add_console_handler(main_logger, level=logging.DEBUG)
    mute_matplotlib_handler()
    return main_logger
def get_main_logger(level=logging.DEBUG):
    """Return the top-level 'main' logger at *level*, detached from root.

    Propagation is disabled so messages are emitted only by handlers
    attached to this logger, never duplicated by the root logger.
    """
    logger = logging.getLogger('main')
    logger.setLevel(level)
    # idiom fix: propagate is documented as a boolean (was the int 0)
    logger.propagate = False
    return logger
def add_console_handler(logger, level):
    """Attach a stdout StreamHandler at *level* with a compact format."""
    # Based on: https://stackoverflow.com/questions/25187083/python-logging-to-multiple-handlers-at-different-log-levels
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('[%(name)5s] [%(levelname)9s] %(message)s'))
    logger.addHandler(handler)
def add_file_handler(logger, level, log_fn):
    """Attach a FileHandler writing detailed records to *log_fn*.

    Returns the logger for chaining.
    """
    # https://stackoverflow.com/questions/10973362/python-logging-function-name-file-name-line-number-using-a-single-file#10974508
    detailed = logging.Formatter(
        '[%(asctime)s - %(levelname)8s] [%(filename)20s: %(lineno)3s] %(message)s',
        datefmt='%Y-%m-%d,%H:%M:%S')
    handler = logging.FileHandler(os.path.abspath(log_fn))
    handler.setLevel(level)
    handler.setFormatter(detailed)
    logger.addHandler(handler)
    return logger
def mute_matplotlib_handler():
    """Raise matplotlib's logger to WARNING to hide its DEBUG chatter."""
    logging.getLogger('matplotlib').setLevel(logging.WARNING)
logger = setup_logging()
| 2.984375
| 3
|
lib/LineNotify.py
|
fukuyama012/watch_angle
| 1
|
12774337
|
# -*- coding: utf-8 -*-
import requests
from config import config
class LineNotify(object):
    """NotifyClass for LINE

    Thin wrapper around the LINE Notify REST API; the bearer token is read
    from config.AUTHORITY_TOKEN by default.
    """
    NOTIFY_API_URL = "https://notify-api.line.me/api/notify"

    def __init__(self, api_url = NOTIFY_API_URL, authority = config.AUTHORITY_TOKEN):
        # NOTE(review): the default token is evaluated at import time -
        # later changes to config will not affect instances built with defaults
        self.apiUrl = api_url
        self.headers = {"Authorization" : "Bearer "+ authority}

    def notify(self, message):
        """POST *message* as a notification; returns the requests.Response."""
        payload = {"message" : message}
        #payload = {"message" : message, 'stickerPackageId': 2, 'stickerId': 144}
        return self._post(payload)

    def _post(self, payload):
        # NOTE(review): files="" is falsy, so requests effectively ignores
        # it - presumably it could simply be dropped; confirm before removing
        return requests.post(self.apiUrl, headers = self.headers, data = payload, files = "")
| 2.828125
| 3
|
RestAdm/utils/log_middleware.py
|
xeroCBW/testmodel
| 0
|
12774338
|
<filename>RestAdm/utils/log_middleware.py<gh_stars>0
import json
import socket
import time
import logging
from django.http import QueryDict
from django.utils.deprecation import MiddlewareMixin
class RequestLogMiddleware(MiddlewareMixin):
    """Django middleware that logs every request/response to the 'api' logger.

    Captures method, path, user, remote address, request/response bodies
    (JSON responses only) and total run time.
    """

    def __init__(self, get_response=None):
        self.get_response = get_response
        # dedicated logger so API traffic can be routed to its own handler
        self.apiLogger = logging.getLogger('api')
        # super().__init__()

    def process_request(self, request):
        # stamp the start time; process_response computes the elapsed time
        request.start_time = time.time()

    def process_response(self, request, response):
        # print('----RequestLogMiddleware start ....---')
        request_body = dict()
        if request.method == 'POST':
            request_body = request.POST
        if request.method == 'PUT':
            # Django does not parse PUT bodies; decode them like a query string
            request_body = QueryDict(request.body)
        # if response['content-type'] == 'application/json':
        #     if getattr(response, 'streaming', False):
        #         response_body = '<<<Streaming>>>'
        #     else:
        #         response_body = response.content
        # else:
        #     response_body = '<<<Not JSON>>>'
        response_body = dict()
        # only DRF responses expose accepted_media_type / .data
        if hasattr(response,'accepted_media_type') and response.accepted_media_type == 'application/json':
            response_body = response.data
        else:
            response_body = "<<<NOT JSON>>>"
        log_data = {
            'user': request.user.pk,
            'remote_address': request.META['REMOTE_ADDR'],
            'server_hostname': socket.gethostname(),
            'request_method': request.method,
            'request_path': request.get_full_path(),
            'request_body': request_body,
            'response_status': response.status_code,
            'response_body': response_body,
            'run_time': time.time() - request.start_time,
        }
        # print(json.dumps(log_data,indent=4,ensure_ascii=False))
        # try:
        #     body = json.loads(request.body)
        # except Exception:
        #     body = dict()
        # body.update(dict(request.POST))
        self.apiLogger.info("{} {} {} {} {} {}".format(
            request.user, request.method, request.path,json.dumps(log_data,ensure_ascii=False),
            response.status_code, response.reason_phrase))
        return response
| 2.09375
| 2
|
custom_components/sg_renamer.py
|
swissglider/homeassistant_custome_components
| 0
|
12774339
|
<gh_stars>0
"""
Renames the friendly_name from entity_name and writtes it into the entity_registry.
Version V.0.0.1
Package:
sg_renamer.py
configuration.yaml:
# entity_name musst be --> <<group_name>> --> without spaces and
sg_renamer:
- platform_name: hue --> plattform_name to rename all entities
domain:
light --> domains to rename from the platform
For more details about this Class, please refer to the documentation at
https://github.com/swissglider/homeassistant_custome_components
"""
import logging
import yaml
import homeassistant.helpers.entity as entity_helper
from homeassistant.helpers.event import track_state_change
from homeassistant.components.group import ENTITY_ID_FORMAT as GROUP_ID_FORMAT
# Initialize the logger
_LOGGER = logging.getLogger(__name__)
# The domain of your component. Equal to the filename of your component.
DOMAIN = "sg_renamer"
def setup(hass, config):
    """Home Assistant entry point: register the sg_renamer.rename service.

    Reads the sg_renamer section of configuration.yaml; each panel must
    provide 'platform_name' and may provide a whitespace-separated 'domain'
    list restricting which entity domains get renamed.
    Returns True on success, False on a configuration error.
    """
    conf = config.get(DOMAIN, {})
    to_rename_comp = {}
    for panel in conf:
        if 'platform_name' not in panel:
            # message fix: the missing key is 'platform_name', not 'name'
            _LOGGER.critical("Error in config: required parameter 'platform_name' is not set")
            return False
        platform_name = panel['platform_name']
        domain_filter = []
        if 'domain' in panel:
            # split on whitespace; avoids shadowing the builtin `filter`
            domain_filter = panel['domain'].split()
        to_rename_comp[platform_name] = {
            'platform_name': platform_name,
            'domain': domain_filter
        }
    name_changer = NameChanger(to_rename_comp, hass, DOMAIN)
    hass.services.register(DOMAIN, 'rename', name_changer.rename)
    # Return boolean to indicate that initialization was successful.
    return True
class NameChanger:
    """Rewrites entity-registry friendly names for configured platforms."""

    def __init__(self, to_rename_comp, hass, domain):
        self._to_rename_comp = to_rename_comp  # platform_name -> config dict
        self._hass = hass
        self._domain = domain

    def rename(self, call):
        """Service handler: rename all matching entities for every platform."""
        for comp_name in self._to_rename_comp:
            comp = self._to_rename_comp[comp_name]
            entities = self._get_all_entities(comp['domain'])
            entities = self._get_filtered_by_plattform(entities, comp['platform_name'])
            self._rename_all_frienly_names(entities)

    def _get_all_entities(self, domain_filters):
        """Return entity_ids, restricted to *domain_filters* when non-empty."""
        if not domain_filters:
            return self._hass.states.entity_ids()
        entities = []
        for domain_filter in domain_filters:
            entities = entities + self._hass.states.entity_ids(domain_filter)
        return entities

    def _get_filtered_by_plattform(self, entities, platform_name):
        """Keep only entities whose registry platform equals *platform_name*."""
        return_entities = []
        PATH_REGISTRY = 'entity_registry.yaml'
        path = self._hass.config.path(PATH_REGISTRY)
        with open(path) as fp:
            # safe_load: the registry is plain YAML; avoids the unsafe
            # arbitrary-object construction of bare yaml.load
            data = yaml.safe_load(fp)
        for entity_id, info in data.items():
            if (entity_id in entities) and (str(info['platform']) == str(platform_name)):
                return_entities.append(entity_id)
        return return_entities

    def _get_friendly_name(self, name):
        """Turn an id like 'living_room.lamp' into 'Living Room Lamp'."""
        if name:
            name = name.replace(".", " ")
            name = name.replace("_", " ")
            name = name.title()
        return name

    # ========================================================
    # ========================== Change Registry Name
    # ========================================================
    def _changeFriendlyName(self, enitity_name, friendly_name):
        """POST the new friendly name to the entity_registry HTTP API."""
        import requests
        import json
        url = 'http://localhost:8123/api/config/entity_registry/' + str(enitity_name)
        payload = {'name': str(friendly_name)}
        headers = {'content-type': 'application/json'}
        r = requests.post(url, data=json.dumps(payload), headers=headers)
        # BUG FIX: 'is not 200' compared object identity with an int literal,
        # which is not a reliable value comparison; use != instead
        if r.status_code != 200:
            _LOGGER.warning('Frienly Name(' + friendly_name + ') change was not successfull for Entity: ' + str(enitity_name) + " Status-Code: " + str(r.status_code))

    def _rename_all_frienly_names(self, entities):
        """Change every entity's registry name to a derived friendly name."""
        for entity in entities:
            object_name = entity.partition('.')[2]
            friendly_name = object_name.partition('_')[2]
            friendly_name = self._get_friendly_name(friendly_name)
            self._changeFriendlyName(entity, friendly_name)
| 2.375
| 2
|
utils.py
|
hyang1990/model_based_energy_constrained_compression
| 16
|
12774340
|
<gh_stars>10-100
import os
import torch
from torch.utils.data.sampler import SubsetRandomSampler, Sampler
from torchvision import datasets, transforms
import torch.nn.functional as F
class SubsetSequentialSampler(Sampler):
    r"""Samples elements sequentially from a given list of indices, without replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # yield the stored indices in exactly their given order
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
def get_mnist32(batch_size, val_batch_size, data_root='./mnist_dataset', train=True, val=True, **kwargs):
    """Build 32x32 MNIST data loaders (downloads the dataset if needed).

    Returns the single loader when only one is built, otherwise the list
    [train_loader][, test_loader], train_loader4eval — where
    train_loader4eval is the training set in evaluation order/batching.
    Extra **kwargs are forwarded to each DataLoader.
    """
    data_root = os.path.expanduser(os.path.join(data_root, 'mnist-data'))
    kwargs.pop('input_size', None)
    num_workers = kwargs.setdefault('num_workers', 1)
    print("Building MNIST data loader with {} workers".format(num_workers))
    # shared preprocessing, hoisted from three identical copies:
    # upscale to 32x32, tensorize, normalize with MNIST mean/std
    tfm = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    ds = []
    if train:
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(root=data_root, train=True, download=True, transform=tfm),
            batch_size=batch_size, shuffle=True, **kwargs)
        ds.append(train_loader)
    if val:
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(root=data_root, train=False, download=True, transform=tfm),
            batch_size=val_batch_size, shuffle=False, **kwargs)
        ds.append(test_loader)
    # training data in eval batching/order, used to measure train accuracy
    train_loader4eval = torch.utils.data.DataLoader(
        datasets.MNIST(root=data_root, train=True, download=True, transform=tfm),
        batch_size=val_batch_size, shuffle=False, **kwargs)
    ds.append(train_loader4eval)
    ds = ds[0] if len(ds) == 1 else ds
    return ds
def get_data_loaders(data_dir, dataset='imagenet', batch_size=32, val_batch_size=512, num_workers=0, nsubset=-1,
                     normalize=None):
    """Return (train, val, train-for-eval) DataLoaders for the named dataset.

    dataset: 'imagenet' (ImageFolder layout under *data_dir*) or 'mnist-32'.
    nsubset: if > 0, the training loader samples from a fixed random subset
        of that many images instead of the whole training set.
    normalize: optional transform; defaults to standard ImageNet statistics.

    NOTE(review): the 'mnist-32' branch ignores data_dir, nsubset and
    normalize -- confirm that is intended.
    """
    if dataset == 'imagenet':
        traindir = os.path.join(data_dir, 'train')
        valdir = os.path.join(data_dir, 'val')
        if normalize is None:
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))
        if nsubset > 0:
            # Restrict training to a fixed random subset of the images.
            rand_idx = torch.randperm(len(train_dataset))[:nsubset]
            print('use a random subset of data:')
            print(rand_idx)
            train_sampler = SubsetRandomSampler(rand_idx)
        else:
            train_sampler = None
        # DataLoader forbids shuffle together with a sampler, hence the guard.
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
            num_workers=num_workers, pin_memory=True, sampler=train_sampler)
        val_loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
            batch_size=val_batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True)
        # use 10K training data to see the training performance
        train_loader4eval = torch.utils.data.DataLoader(
            datasets.ImageFolder(traindir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])),
            batch_size=val_batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True,
            sampler=SubsetRandomSampler(torch.randperm(len(train_dataset))[:10000]))
        return train_loader, val_loader, train_loader4eval
    elif dataset == 'mnist-32':
        return get_mnist32(batch_size=batch_size, val_batch_size=val_batch_size, num_workers=num_workers)
    else:
        raise NotImplementedError
def ncorrect(output, target, topk=(1,)):
    """Return the number of correct predictions at each k in *topk*.

    ``output`` is a (batch, classes) score tensor and ``target`` a (batch,)
    tensor of class indices.  For each k, the count (as a float) of samples
    whose true class is among the k highest-scoring classes is returned,
    in the same order as ``topk``.
    """
    kmax = max(topk)
    _, top_idx = output.topk(kmax, 1, True, True)
    # (batch, kmax) -> (kmax, batch): row j holds every sample's j-th guess.
    top_idx = top_idx.t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
    return [hits[:k].float().sum().item() for k in topk]
def eval_loss_acc1_acc5(model, data_loader, loss_func=None, cuda=True, class_offset=0):
    """Evaluate *model* over *data_loader*.

    Returns (mean loss, top-1 accuracy, top-5 accuracy).  The loss is the
    batch-averaged value of ``loss_func(model, data, target)`` and stays
    0.0 when ``loss_func`` is None; accuracies are fractions over all
    samples.  ``class_offset`` is added to every target label before
    scoring (e.g. for class-remapped checkpoints).
    """
    val_loss = 0.0
    val_acc1 = 0.0
    val_acc5 = 0.0
    num_data = 0
    with torch.no_grad():
        model.eval()
        for data, target in data_loader:
            num_data += target.size(0)
            # NOTE(review): mutates the loader's target tensor in place.
            target.data += class_offset
            if cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            if loss_func is not None:
                val_loss += loss_func(model, data, target).item()
            # val_loss += F.cross_entropy(output, target).item()
            nc1, nc5 = ncorrect(output.data, target.data, topk=(1, 5))
            val_acc1 += nc1
            val_acc5 += nc5
            # print('acc:{}, {}'.format(nc1 / target.size(0), nc5 / target.size(0)))
    val_loss /= len(data_loader)
    val_acc1 /= num_data
    val_acc5 /= num_data
    return val_loss, val_acc1, val_acc5
def cross_entropy(input, target, label_smoothing=0.0, size_average=True):
    """Cross entropy that accepts label smoothing.

    With ``label_smoothing <= 0`` this is exactly ``F.cross_entropy``.
    Otherwise the one-hot targets are softened: the true class keeps
    probability ``1 - label_smoothing`` and the remaining mass is spread
    uniformly over the other classes.

    Args:
        input: (batch, classes) unnormalised scores.
        target: (batch,) long tensor of class indices.
        label_smoothing: probability mass moved off the true class.
        size_average: if False, the batch sum is returned instead of the mean.

    Examples::
        input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
        input = torch.autograd.Variable(input, requires_grad=True)
        target = torch.LongTensor([1, 2])
        loss = cross_entropy(input, target)
        loss.backward()
    """
    if label_smoothing <= 0.0:
        return F.cross_entropy(input, target)
    assert input.dim() == 2 and target.dim() == 1
    num_classes = input.size(1)
    soft_targets = torch.zeros_like(input)
    soft_targets.scatter_(1, target.unsqueeze(1), 1)
    # Clamp turns the hard one-hot into the smoothed distribution.
    soft_targets = torch.clamp(soft_targets,
                               max=1.0 - label_smoothing,
                               min=label_smoothing / (num_classes - 1.0))
    per_sample = torch.sum(-soft_targets * F.log_softmax(input, dim=1), dim=1)
    return torch.mean(per_sample) if size_average else torch.sum(per_sample)
def joint_loss(model, data, target, teacher_model, distill, label_smoothing=0.0):
    """Classification loss optionally blended with a distillation term.

    ``distill`` weights an MSE penalty between the student's and the
    teacher's outputs: 0 means pure (label-smoothed) cross entropy,
    1 means pure distillation, and values in between mix the two.
    """
    def criterion(pred, y):
        return cross_entropy(pred, y, label_smoothing=label_smoothing)

    output = model(data)
    if distill <= 0.0:
        return criterion(output, target)
    with torch.no_grad():
        teacher_output = teacher_model(data).data
    distill_loss = torch.mean((output - teacher_output) ** 2)
    if distill >= 1.0:
        return distill_loss
    class_loss = criterion(output, target)
    # print("distill loss={:.4e}, class loss={:.4e}".format(distill_loss, class_loss))
    return distill * distill_loss + (1.0 - distill) * class_loss
def argmax(a):
    """Return the index of the first maximal element of *a*."""
    return max(enumerate(a), key=lambda pair: pair[1])[0]
def expand_user(path):
    """Expand a leading '~' in *path* and return the absolute form."""
    home_expanded = os.path.expanduser(path)
    return os.path.abspath(home_expanded)
def model_snapshot(model, new_file, old_file=None, verbose=False):
    """Persist *model*'s parameters (moved to CPU) to *new_file*.

    If *old_file* is given and exists it is deleted first, so a training
    loop can keep only the most recent snapshot.  ``DataParallel`` wrappers
    are unwrapped so the saved keys carry no ``module.`` prefix.
    """
    from collections import OrderedDict
    import torch
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    if old_file and os.path.exists(expand_user(old_file)):
        if verbose:
            print("Removing old model {}".format(expand_user(old_file)))
        os.remove(expand_user(old_file))
    if verbose:
        print("Saving model to {}".format(expand_user(new_file)))
    cpu_state = OrderedDict()
    for name, tensor in model.state_dict().items():
        # Snapshots are always stored on the CPU so they load anywhere.
        cpu_state[name] = tensor.cpu() if tensor.is_cuda else tensor
    torch.save(cpu_state, expand_user(new_file))
| 2.5
| 2
|
Algorithms/max_unique.py
|
ridwanmsharif/Algorithms
| 2
|
12774341
|
seen = []  # NOTE(review): unused at module level; max_diff keeps its own state
# Produces the length of the longest substring
# that is comprised of just unique characters.
def max_diff(string):
    """Return the length of the longest substring of *string* whose
    characters are all distinct.

    Classic sliding-window scan: ``window_start`` is advanced just past the
    previous occurrence of a repeated character, so every character is
    visited at most once per pointer and the scan is O(len(string)).

    Bug fix: the previous implementation shrank the window until only one
    unique character remained whenever a duplicate was seen, losing the
    count of characters still inside the window (it returned 2 instead of
    3 for "dvdf").  Using a last-seen-index map also generalises the
    function beyond lowercase ASCII.
    """
    last_pos = {}      # char -> index of its most recent occurrence
    window_start = 0
    best = 0
    for i, ch in enumerate(string):
        prev = last_pos.get(ch)
        if prev is not None and prev >= window_start:
            # ch repeats inside the current window: move past its last use.
            window_start = prev + 1
        last_pos[ch] = i
        best = max(best, i - window_start + 1)
    return best
def assn_num(char):
    """Map a lowercase letter to its 0-based alphabet position ('a' -> 0)."""
    return ord(char) - ord('a')
| 3.375
| 3
|
toutiao-backend/Test/6-cache/test_cache.py
|
weiyunfei520/toutiao
| 0
|
12774342
|
import requests, json
"""登录测试 POST /v1_0/authorizations"""
# Login test: POST /v1_0/authorizations against a locally running server.
url = 'http://127.0.0.1:5000/v1_0/authorizations'
# Redis sentinel endpoints guarding the SMS-code cache.
REDIS_SENTINELS = [('127.0.0.1', '26380'),
                   ('127.0.0.1', '26381'),
                   ('127.0.0.1', '26382'),]
REDIS_SENTINEL_SERVICE_NAME = 'mymaster'
from redis.sentinel import Sentinel
_sentinel = Sentinel(REDIS_SENTINELS)
redis_master = _sentinel.master_for(REDIS_SENTINEL_SERVICE_NAME)
# Pre-seed the SMS verification code so the login below succeeds.
redis_master.set('app:code:13161933309', '123456')
# Build a raw application/json request body
data = json.dumps({'mobile': '13161933309', 'code': '123456'})
# Send the POST with a raw application/json body via requests
resp = requests.post(url, data=data, headers={'Content-Type': 'application/json'})
print(resp.json())
token = resp.json()['data']['token']
print(token)
"""测试 查询缓存获取用户信息 /v1_0/user"""
# Test: fetch the cached user profile via GET /v1_0/user with the JWT.
url = 'http://127.0.0.1:5000/v1_0/user'
headers = {'Authorization': 'Bearer {}'.format(token)}
resp = requests.get(url, headers=headers)
print(resp.json())
| 2.921875
| 3
|
skills_taxonomy_v2/pipeline/tk_data_analysis/get_bulk_metadata.py
|
nestauk/skills-taxonomy-v2
| 3
|
12774343
|
<filename>skills_taxonomy_v2/pipeline/tk_data_analysis/get_bulk_metadata.py
"""
The TextKernel data is stored in 686 separate files each with 100k job adverts.
In this script we extract some key metadata for each job advert to be stored in a single dictionary.
This will be useful for some analysis pieces.
"""
import boto3
import pandas as pd
from tqdm import tqdm
import json
import gzip
import os
from collections import defaultdict
from skills_taxonomy_v2 import BUCKET_NAME
from skills_taxonomy_v2.getters.s3_data import get_s3_data_paths, save_to_s3, load_s3_data
s3 = boto3.resource("s3")
def _save_metadata_chunk(output_dir, file_num, file_list, date_list, meta_list,
                         job_list, location_list):
    """Write the five per-job-id metadata dicts for one chunk to S3.

    One JSON file per metadata kind, all sharing the same chunk number,
    matching the directory layout the analysis code reads back.
    Extracted because the identical five-save stanza previously appeared
    twice (mid-loop checkpoint and final flush).
    """
    for subdir, payload in (
        ("metadata_file", file_list),
        ("metadata_date", date_list),
        ("metadata_meta", meta_list),
        ("metadata_job", job_list),
        ("metadata_location", location_list),
    ):
        save_to_s3(
            s3,
            BUCKET_NAME,
            payload,
            os.path.join(output_dir, f"{subdir}/{file_num}.json"),
        )

if __name__ == "__main__":
    tk_data_path = "inputs/data/textkernel-files/"
    output_dir = "outputs/tk_data_analysis_new_method/"
    all_tk_data_paths = get_s3_data_paths(
        s3, BUCKET_NAME, tk_data_path, file_types=["*.jsonl*"]
    )
    file_num = 0
    count_tk_files = 0
    # Per-chunk accumulators: job_id -> list of values seen for that id.
    job_id_file_list = defaultdict(list)
    job_id_date_list = defaultdict(list)
    job_id_meta_list = defaultdict(list)
    job_id_job_list = defaultdict(list)
    job_id_location_list = defaultdict(list)
    # Whole-run counters (never reset between chunks).
    all_tk_date_count = defaultdict(int)
    all_tk_region_count = defaultdict(int)
    all_tk_subregion_count = defaultdict(int)
    for file_name in tqdm(all_tk_data_paths):
        data = load_s3_data(s3, BUCKET_NAME, file_name)
        for d in data:
            # Save out as little info as possible to make file smaller
            job_id_file_list[d["job_id"]].append(file_name.split(tk_data_path)[1])
            job_id_date_list[d["job_id"]].append(d.get("date"))
            if d.get("date"):
                all_tk_date_count[d.get("date")] += 1
            else:
                all_tk_date_count["Not given"] += 1
            job_id_meta_list[d["job_id"]].append([
                d.get("source_website"),
                d.get("language"),
            ])
            organization_industry = d.get("organization_industry")
            job_id_job_list[d["job_id"]].append([
                d.get("job_title"),
                organization_industry.get("label")
                if organization_industry
                else None,
            ])
            region = d.get("region")
            subregion = d.get("subregion")
            job_id_location_list[d["job_id"]].append([
                d.get("location_name"),
                d.get("location_coordinates"),
                region.get("label") if region else None,
                subregion.get("label") if subregion else None,
            ])
            if region:
                all_tk_region_count[region.get("label")] += 1
            else:
                all_tk_region_count["Not given"] += 1
            if subregion:
                all_tk_subregion_count[subregion.get("label")] += 1
            else:
                all_tk_subregion_count["Not given"] += 1
        count_tk_files += 1
        if count_tk_files == 50:
            # Flush every 50 input files so no single output gets huge.
            print("Saving data ...")
            _save_metadata_chunk(
                output_dir, file_num,
                job_id_file_list, job_id_date_list, job_id_meta_list,
                job_id_job_list, job_id_location_list,
            )
            file_num += 1
            count_tk_files = 0
            job_id_file_list = defaultdict(list)
            job_id_date_list = defaultdict(list)
            job_id_meta_list = defaultdict(list)
            job_id_job_list = defaultdict(list)
            job_id_location_list = defaultdict(list)
    # Flush whatever is left after the last full chunk.
    print("Saving remainder data ...")
    _save_metadata_chunk(
        output_dir, file_num,
        job_id_file_list, job_id_date_list, job_id_meta_list,
        job_id_job_list, job_id_location_list,
    )
    print("Saving counts data ...")
    save_to_s3(
        s3,
        BUCKET_NAME,
        all_tk_date_count,
        os.path.join(output_dir, "metadata_date/all_tk_date_count.json"),
    )
    save_to_s3(
        s3,
        BUCKET_NAME,
        all_tk_region_count,
        os.path.join(output_dir, "metadata_location/all_tk_region_count.json"),
    )
    save_to_s3(
        s3,
        BUCKET_NAME,
        all_tk_subregion_count,
        os.path.join(output_dir, "metadata_location/all_tk_subregion_count.json"),
    )
| 2.515625
| 3
|
cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_poe_oper.py
|
Maikor/ydk-py
| 0
|
12774344
|
""" Cisco_IOS_XE_poe_oper
This module contains a collection of YANG definitions for
monitoring power over ethernet feature in a Network Element.
Copyright (c) 2016\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class IlpowerPdClass(Enum):
    """
    IlpowerPdClass (Enum Class)
    Name of the power class
    .. data:: poe_null = 0
    Power class null
    .. data:: poe_unknown = 1
    Power class unknown
    .. data:: poe_cisco = 2
    Power class cisco
    .. data:: poe_ieee0 = 3
    IEEE power class 0
    .. data:: poe_ieee1 = 4
    IEEE power class 1
    .. data:: poe_ieee2 = 5
    IEEE power class 2
    .. data:: poe_ieee3 = 6
    IEEE power class 3
    .. data:: poe_ieee4 = 7
    IEEE power class 4
    .. data:: poe_ieee5 = 8
    IEEE power class 5
    .. data:: poe_ieee_unknown_class = 9
    IEEE power class unknown
    """
    # Auto-generated YDK enum binding; members mirror the YANG identities.
    poe_null = Enum.YLeaf(0, "poe-null")
    poe_unknown = Enum.YLeaf(1, "poe-unknown")
    poe_cisco = Enum.YLeaf(2, "poe-cisco")
    poe_ieee0 = Enum.YLeaf(3, "poe-ieee0")
    poe_ieee1 = Enum.YLeaf(4, "poe-ieee1")
    poe_ieee2 = Enum.YLeaf(5, "poe-ieee2")
    poe_ieee3 = Enum.YLeaf(6, "poe-ieee3")
    poe_ieee4 = Enum.YLeaf(7, "poe-ieee4")
    poe_ieee5 = Enum.YLeaf(8, "poe-ieee5")
    poe_ieee_unknown_class = Enum.YLeaf(9, "poe-ieee-unknown-class")
class PoeOperData(Entity):
    """
    Information about POEs
    .. attribute:: poe_port
    List of POE interfaces, keyed by interface name
    **type**\: list of  :py:class:`PoePort <ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper.PoeOperData.PoePort>`
    """
    # Auto-generated YDK binding; the structure mirrors the YANG model
    # Cisco-IOS-XE-poe-oper.  Do not hand-edit field registrations.
    _prefix = 'poe-ios-xe-oper'
    _revision = '2018-02-04'
    def __init__(self):
        super(PoeOperData, self).__init__()
        self._top_entity = None
        self.yang_name = "poe-oper-data"
        self.yang_parent_name = "Cisco-IOS-XE-poe-oper"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # Register the "poe-port" YANG list: one entry per POE interface.
        self._child_classes = OrderedDict([("poe-port", ("poe_port", PoeOperData.PoePort))])
        self._leafs = OrderedDict()
        self.poe_port = YList(self)
        self._segment_path = lambda: "Cisco-IOS-XE-poe-oper:poe-oper-data"
        self._is_frozen = True
    def __setattr__(self, name, value):
        self._perform_setattr(PoeOperData, [], name, value)
    class PoePort(Entity):
        """
        List of POE interfaces, keyed by interface name
        .. attribute:: intf_name  (key)
        Name of the POE interface
        **type**\: str
        .. attribute:: poe_intf_enabled
        POE interface admin state
        **type**\: bool
        .. attribute:: power_used
        Power used by PD device
        **type**\: :py:class:`Decimal64<ydk.types.Decimal64>`
        **range:** \-92233720368547758.08..92233720368547758.07
        .. attribute:: pd_class
        Class of the PD device
        **type**\: :py:class:`IlpowerPdClass <ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper.IlpowerPdClass>`
        """
        _prefix = 'poe-ios-xe-oper'
        _revision = '2018-02-04'
        def __init__(self):
            super(PoeOperData.PoePort, self).__init__()
            self.yang_name = "poe-port"
            self.yang_parent_name = "poe-oper-data"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = ['intf_name']
            self._child_classes = OrderedDict([])
            # Leaf registrations: (python attr, (YLeaf descriptor, type info)).
            self._leafs = OrderedDict([
                ('intf_name', (YLeaf(YType.str, 'intf-name'), ['str'])),
                ('poe_intf_enabled', (YLeaf(YType.boolean, 'poe-intf-enabled'), ['bool'])),
                ('power_used', (YLeaf(YType.str, 'power-used'), ['Decimal64'])),
                ('pd_class', (YLeaf(YType.enumeration, 'pd-class'), [('ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper', 'IlpowerPdClass', '')])),
            ])
            self.intf_name = None
            self.poe_intf_enabled = None
            self.power_used = None
            self.pd_class = None
            self._segment_path = lambda: "poe-port" + "[intf-name='" + str(self.intf_name) + "']"
            self._absolute_path = lambda: "Cisco-IOS-XE-poe-oper:poe-oper-data/%s" % self._segment_path()
            self._is_frozen = True
        def __setattr__(self, name, value):
            self._perform_setattr(PoeOperData.PoePort, ['intf_name', 'poe_intf_enabled', 'power_used', 'pd_class'], name, value)
    def clone_ptr(self):
        # Required by the YDK runtime: returns a fresh top-level entity.
        self._top_entity = PoeOperData()
        return self._top_entity
| 1.976563
| 2
|
Twitter_Sentiment_Analysis.py
|
FiazBinSayeed/Twitter-Sentiment-Analysis
| 1
|
12774345
|
<filename>Twitter_Sentiment_Analysis.py
import tweepy
from textblob import TextBlob
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import sys
plt.style.use('fivethirtyeight')
# Twitter API credentials (fill in before running).
api_key = ""
api_secret_key = ""
access_token = ""
access_token_secret = ""
auth_handler = tweepy.OAuthHandler(consumer_key = api_key, consumer_secret = api_secret_key)
auth_handler.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth_handler)
search_term = "Google"
tweet_amount = 100
tweets = tweepy.Cursor(api.search, q=search_term, lang='en').items(tweet_amount)
# Sum of TextBlob polarities over all fetched tweets; a positive total
# indicates overall positive sentiment for the search term.
polarity = 0
for tweet in tweets:
    # Strip retweet markers and leading @mentions before scoring.
    final_text = tweet.text.replace('RT', '')
    if final_text.startswith(' @'):
        position = final_text.index(':')
        final_text = final_text[position+2:]
    if final_text.startswith('@'):
        position = final_text.index(' ')
        # BUG FIX: was final_text[position+2] (a single character),
        # which discarded the tweet body; slice like the branch above.
        final_text = final_text[position+2:]
    analysis = TextBlob(final_text)
    polarity += analysis.polarity
print(polarity)
| 3.125
| 3
|
train.py
|
ihsangkcl/RFM
| 0
|
12774346
|
<filename>train.py
import torch
from utils.utils import data_prefetcher_two, cal_fam, setup_seed, calRes
from pretrainedmodels import xception
import utils.datasets_profiles as dp
from torch.utils.data import DataLoader
from torch.optim import Adam
import numpy as np
import argparse
import random
import time
np.set_printoptions(precision=3)
# Command-line configuration for RFM training / evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--device', default="cuda:0", type=str)
parser.add_argument('--modelname', default="xception", type=str)
parser.add_argument('--distributed', default=False, action='store_true')
parser.add_argument('--upper', default="xbase", type=str,
                    help='the prefix used in save files')
# eH/eW: maximum height/width of the random erasing mask used by RFM.
parser.add_argument('--eH', default=120, type=int)
parser.add_argument('--eW', default=120, type=int)
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--max_batch', default=500000, type=int)
parser.add_argument('--num_workers', default=4, type=int)
parser.add_argument('--logbatch', default=3000, type=int)
parser.add_argument('--savebatch', default=30000, type=int)
parser.add_argument('--seed', default=5, type=int)
parser.add_argument('--lr', default=0.0002, type=float, help='learning rate')
parser.add_argument('--pin_memory', '-p', default=False, action='store_true')
parser.add_argument('--resume_model', default=None)
parser.add_argument('--resume_optim', default=None)
parser.add_argument('--save_model', default=True, action='store_true')
parser.add_argument('--save_optim', default=False, action='store_true')
args = parser.parse_args()
modelname = args.modelname
upper = args.upper
#args.resume_model = "./models/baseline.pth"
#args.resume_model = "./models/xbase_xception_model_batch_12000"
# NOTE(review): this hard-coded assignment overrides any --resume_model
# given on the command line -- confirm this is intentional.
args.resume_model = "./models/dffd_xception_model-RFM_"
#args.resume_model = "./models/dffd_xception_model-baseline_"
def Eval(model, lossfunc, dtloader):
    """Run *model* over *dtloader* without gradients.

    Returns (per-sample mean loss, all targets, all positive-class scores).
    Scores are the softmax probability of class 1, so downstream AUC/TPR
    metrics treat this as a binary real/fake problem.  Inputs are moved
    to the current CUDA device.
    """
    model.eval()
    sumloss = 0.
    y_true_all = None
    y_pred_all = None
    with torch.no_grad():
        for (j, batch) in enumerate(dtloader):
            x, y_true = batch
            y_pred = model.forward(x.cuda())
            # Weight each batch's loss by its size for a per-sample mean.
            loss = lossfunc(y_pred, y_true.cuda())
            sumloss += loss.detach()*len(x)
            # Keep only the probability of the positive (index 1) class.
            y_pred = torch.nn.functional.softmax(
                y_pred.detach(), dim=1)[:, 1].flatten()
            if y_true_all is None:
                y_true_all = y_true
                y_pred_all = y_pred
            else:
                y_true_all = torch.cat((y_true_all, y_true))
                y_pred_all = torch.cat((y_pred_all, y_pred))
    return sumloss/len(y_true_all), y_true_all.detach(), y_pred_all.detach()
def Log(log):
    """Echo *log* to stdout and append it to this run's log file."""
    print(log)
    with open("./logs/"+upper+"_"+modelname+".log", "a") as logfile:
        logfile.write(log+"\n")
if __name__ == "__main__":
    # Record the run configuration at the top of the log file.
    Log("\nModel:%s BatchSize:%d lr:%f" % (modelname, args.batch_size, args.lr))
    torch.cuda.set_device(args.device)
    setup_seed(args.seed)
    print("cudnn.version:%s enabled:%s benchmark:%s deterministic:%s" % (torch.backends.cudnn.version(), torch.backends.cudnn.enabled, torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic))
    MAX_TPR_4 = 0.
    # eval() resolves the constructor named by --modelname (e.g. xception).
    model = eval(modelname)(num_classes=2, pretrained=False).cuda()
    if args.distributed:
        model = torch.nn.DataParallel(model)
    optim = Adam(model.parameters(), lr=args.lr, weight_decay=0)
    if args.resume_model is not None:
        model.load_state_dict(torch.load(args.resume_model,map_location='cuda:0'))
    if args.resume_optim is not None:
        optim.load_state_dict(torch.load(args.resume_optim))
    lossfunc = torch.nn.CrossEntropyLoss()
    # Real (R) and fake (F) splits plus validation / test sets.
    dataset = dp.Stylespace()
    trainsetR = dataset.getTrainsetR()
    trainsetF = dataset.getTrainsetF()
    validset = dataset.getValidset()
    testsetR = dataset.getTestsetR()
    TestsetList, TestsetName = dataset.getsetlist(real=False, setType=2)
    setup_seed(args.seed)
    traindataloaderR = DataLoader(
        trainsetR,
        batch_size=int(args.batch_size/2),
        shuffle=True,
        pin_memory=args.pin_memory,
        num_workers=args.num_workers
    )
    traindataloaderF = DataLoader(
        trainsetF,
        batch_size=int(args.batch_size/2),
        shuffle=True,
        pin_memory=args.pin_memory,
        num_workers=args.num_workers
    )
    validdataloader = DataLoader(
        validset,
        batch_size=args.batch_size*2,
        pin_memory=args.pin_memory,
        num_workers=args.num_workers
    )
    testdataloaderR = DataLoader(
        testsetR,
        batch_size=args.batch_size*2,
        pin_memory=args.pin_memory,
        num_workers=args.num_workers
    )
    testdataloaderList = []
    for tmptestset in TestsetList:
        testdataloaderList.append(
            DataLoader(
                tmptestset,
                batch_size=args.batch_size*2,
                pin_memory=args.pin_memory,
                num_workers=args.num_workers
            )
        )
    print("Loaded model")
    batchind = 0
    e = 0
    sumcnt = 0
    sumloss = 0.
    # NOTE(review): the RFM training loop below is commented out, so this
    # script currently only evaluates -- batchind stays 0, making
    # `batchind % args.logbatch == 0` true on every pass of the loop.
    while True:
        # prefetcher = data_prefetcher_two(traindataloaderR, traindataloaderF)
        # data, y_true = prefetcher.next()
        # while data is not None and batchind < args.max_batch:
        # stime = time.time()
        # sumcnt += len(data)
        # ''' ↓ the implementation of RFM ↓ '''
        # model.eval()
        # mask = cal_fam(model, data)
        # imgmask = torch.ones_like(mask)
        # imgh = imgw = 224
        # for i in range(len(mask)):
        # maxind = np.argsort(mask[i].cpu().numpy().flatten())[::-1]
        # pointcnt = 0
        # for pointind in maxind:
        # pointx = pointind//imgw
        # pointy = pointind % imgw
        # if imgmask[i][0][pointx][pointy] == 1:
        # maskh = random.randint(1, args.eH)
        # maskw = random.randint(1, args.eW)
        # sh = random.randint(1, maskh)
        # sw = random.randint(1, maskw)
        # top = max(pointx-sh, 0)
        # bot = min(pointx+(maskh-sh), imgh)
        # lef = max(pointy-sw, 0)
        # rig = min(pointy+(maskw-sw), imgw)
        # imgmask[i][:, top:bot, lef:rig] = torch.zeros_like(imgmask[i][:, top:bot, lef:rig])
        # pointcnt += 1
        # if pointcnt >= 3:
        # break
        # data = imgmask * data + (1-imgmask) * (torch.rand_like(data)*2-1.)
        # ''' ↑ the implementation of RFM ↑ '''
        # model.train()
        # y_pred = model.forward(data)
        # loss = lossfunc(y_pred, y_true)
        # flood = (loss-0.04).abs() + 0.04
        # sumloss += loss.detach()*len(data)
        # data, y_true = prefetcher.next()
        # optim.zero_grad()
        # flood.backward()
        # optim.step()
        # batchind += 1
        # print("Train %06d loss:%.5f avgloss:%.5f lr:%.6f time:%.4f" % (batchind, loss, sumloss/sumcnt, optim.param_groups[0]["lr"], time.time()-stime), end="\r")
        if batchind % args.logbatch == 0:
            print()
            # Log("epoch:%03d batch:%06d loss:%.5f avgloss:%.5f" % (e, batchind, loss, sumloss/sumcnt))
            loss_valid, y_true_valid, y_pred_valid = Eval(model, lossfunc, validdataloader)
            ap, acc, AUC, TPR_2, TPR_3, TPR_4, fprs, tprs ,ths = calRes(y_true_valid, y_pred_valid)
            Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f %s" % (AUC, TPR_2, TPR_3, TPR_4, "validset"))
            # Each fake test set is scored together with the shared real set.
            loss_r, y_true_r, y_pred_r = Eval(model, lossfunc, testdataloaderR)
            sumAUC = sumTPR_2 = sumTPR_3 = sumTPR_4 = sumFPRS = sumTPRS = sumTHS = 0
            for i, tmptestdataloader in enumerate(testdataloaderList):
                loss_f, y_true_f, y_pred_f = Eval(model, lossfunc, tmptestdataloader)
                ap, acc, AUC, TPR_2, TPR_3, TPR_4, fprs, tprs, ths = calRes(torch.cat((y_true_r, y_true_f)), torch.cat((y_pred_r, y_pred_f)))
                sumAUC += AUC
                sumTPR_2 += TPR_2
                sumTPR_3 += TPR_3
                sumTPR_4 += TPR_4
                np.savetxt('./logs/fprs'+TestsetName[i]+'.out', fprs, delimiter=',')
                np.savetxt('./logs/tprs'+TestsetName[i]+'.out', tprs, delimiter=',')
                #np.savetxt('./logs/fprs'+TestsetName[i]+'.out', fprs, delimiter=',')
                #sumFPRS += fprs
                #sumTPRS += tprs
                #sumTHS += ths
                Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f %s" % (AUC, TPR_2, TPR_3, TPR_4, TestsetName[i]))
            if len(testdataloaderList) > 1:
                Log("AUC:%.6f TPR_2:%.6f TPR_3:%.6f TPR_4:%.6f Test" %
                    (sumAUC/len(testdataloaderList), sumTPR_2/len(testdataloaderList), sumTPR_3/len(testdataloaderList), sumTPR_4/len(testdataloaderList)))
            TPR_4 = (sumTPR_4)/len(testdataloaderList)
            # Snapshot on a fixed schedule or whenever mean TPR@1e-4 improves.
            if batchind % args.savebatch == 0 or TPR_4 > MAX_TPR_4:
                MAX_TPR_4 = TPR_4
                if args.save_model:
                    torch.save(model.state_dict(), "./models/" + upper+"_"+modelname+"_model_batch_"+str(batchind))
                if args.save_optim:
                    torch.save(optim.state_dict(), "./models/" + upper+"_"+modelname+"_optim_batch_"+str(batchind))
            print("-------------------------------------------")
        # e += 1
| 1.976563
| 2
|
calibration/StereographicCalibration.py
|
sebalander/sebaPhD
| 6
|
12774347
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 13 19:00:40 2016
@author: sebalander
"""
from numpy import zeros, sqrt, array, tan, arctan, prod, cos
from cv2 import Rodrigues
from lmfit import minimize, Parameters
#from calibration import calibrator
#xypToZplane = calibrator.xypToZplane
#
## %% ========== ========== PARAMETER HANDLING ========== ==========
#def formatParameters(rVec, tVec, linearCoeffs, distCoeffs):
# params = Parameters()
#
# if prod(rVec.shape) == 9:
# rVec = Rodrigues(rVec)[0]
#
# rVec = rVec.reshape(3)
#
# for i in range(3):
# params.add('rvec%d'%i,
# value=rVec[i], vary=True)
# params.add('tvec%d'%i,
# value=tVec[i], vary=True)
#
# # image center
# params.add('cameraMatrix0',
# value=linearCoeffs[0], vary=False)
# params.add('cameraMatrix1',
# value=linearCoeffs[1], vary=False)
#
# # k
# params.add('distCoeffs',
# value=distCoeffs, vary=False)
#
# return params
#
#def retrieveParameters(params):
# '''
#
# '''
# rvec = zeros((3,1))
# tvec = zeros((3,1))
# for i in range(3):
# rvec[i,0] = params['rvec%d'%i].value
# tvec[i,0] = params['tvec%d'%i].value
#
# cameraMatrix = zeros(2)
# cameraMatrix[0] = params['cameraMatrix0'].value
# cameraMatrix[1] = params['cameraMatrix1'].value
#
# distCoeffs = params['distCoeffs'].value
#
# return rvec, tvec, cameraMatrix, distCoeffs
# %% ========== ========== DIRECT ========== ==========
def radialDistort(rh, k, quot=False, der=False):
    '''
    Stereographic distortion model: maps the undistorted radius rh to the
    distorted radius rd = k * tan(arctan(rh) / 2).

    returns distorted radius using distortion coefficient k
    optionally it returns the distortion quotient rpp = rp * q;
    with der=True the derivatives of the quotient w.r.t. rh and k are
    also returned.

    NOTE(review): mutates k in place via ``k.shape = 1`` (expects a
    one-element ndarray) -- confirm callers tolerate this.
    '''
    k.shape = 1
    th = arctan(rh)
    tanth = tan(th / 2)
    rd = k * tanth
    if der:
        # rpp wrt rp
        dDdH = k / cos(th / 2)**2 / 2 / (1 + rh**2)
        # calculate quotient
        q = rd / rh
        # q wrt rpp
        dQdH = ((dDdH - q) / rh).reshape((1, -1)) # deriv wrt undistorted coords
        dQdK = (tanth / rh).reshape((1, -1))
        if quot:
            return q, dQdH, dQdK
        else:
            # NOTE(review): derivatives here are still those of the
            # quotient, not of rd -- confirm this is intended.
            return rd, dQdH, dQdK
    else:
        if quot:
            return rd / rh
        else:
            return rd
## we asume that intrinsic distortion paramters is just a scalar: distCoeffs=k
#def direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs):
# # format as matrix
# try:
# rVec.reshape(3)
# rVec = Rodrigues(rVec)[0]
# except:
# pass
#
# xyz = rVec.dot(fiducialPoints[0].T)+tVec
#
# xp = xyz[0]/xyz[2]
# yp = xyz[1]/xyz[2]
#
# rp = sqrt(xp**2 + yp**2)
# thetap = arctan(rp)
#
# rpp = distCoeffs*tan(thetap/2)
#
# rpp_rp = rpp/rp
#
# xpp = xp*rpp_rp
# ypp = yp*rpp_rp
#
# u = xpp + linearCoeffs[0]
# v = ypp + linearCoeffs[1]
#
# return array([u,v]).reshape((fiducialPoints.shape[1],1,2))
#
#def residualDirect(params, fiducialPoints, imageCorners):
# rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params)
#
# projectedCorners = direct(fiducialPoints,
# rVec,
# tVec,
# linearCoeffs,
# distCoeffs)
#
# return imageCorners[:,0,:] - projectedCorners[:,0,:]
#
#def calibrateDirect(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
# initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj
#
# out = minimize(residualDirect,
# initialParams,
# args=(fiducialPoints,
# imageCorners))
#
# rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params)
#
# return rvecOpt, tvecOpt, out.params
# %% ========== ========== INVERSE ========== ==========
def radialUndistort(rd, k, quot=False, der=False):
    '''
    takes distorted radius and returns the radius undistorted
    optionally it returns the undistortion quotient rd = rh * q

    Exact inverse of the stereographic model in radialDistort:
    rh = tan(2 * arctan(rd / k)).
    '''
    # The stereographic mapping is invertible in closed form, so retVal
    # (the "undistortion succeeded" flag) is always True here.
    k.shape = -1
    thetap = 2 * arctan(rd / k)
    rh = tan(thetap)
    retVal = True
    if der:
        # derivatives come from the forward (direct) model
        q, dQdH, dQdK = radialDistort(rh, k, quot, der)
        if quot:
            return q, retVal, dQdH, dQdK
        else:
            return rh, retVal, dQdH, dQdK
    else:
        if quot:
            # returns q
            return rd / rh, retVal
        else:
            return rh, retVal
#def inverse(imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
#
# xpp = imageCorners[:,0,0]-linearCoeffs[0]
# ypp = imageCorners[:,0,1]-linearCoeffs[1]
# rpp = sqrt(xpp**2 + ypp**2)
#
# thetap = 2*arctan(rpp/distCoeffs)
#
# rp = tan(thetap)
#
# rp_rpp = rp/rpp
#
# xp = xpp * rp_rpp
# yp = ypp * rp_rpp
#
# # project to z=0 plane. perhaps calculate faster with homography function?
# XYZ = xypToZplane(xp, yp, rVec, tVec)
#
# return XYZ
#
#
#def residualInverse(params, fiducialPoints, imageCorners):
# rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params)
#
# projectedFiducialPoints = inverse(imageCorners,
# rVec,
# tVec,
# linearCoeffs,
# distCoeffs)
#
# return fiducialPoints[0,:,:2] - projectedFiducialPoints[0,:,:2]
#
#def calibrateInverse(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
# initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj
#
# out = minimize(residualInverse,
# initialParams,
# args=(fiducialPoints,
# imageCorners))
#
# rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params)
#
# return rvecOpt, tvecOpt, out.params
| 2.15625
| 2
|
org/apache/helix/messaging/handling/HelixTaskResult.py
|
davzhang/helix-python-binding
| 3
|
12774348
|
# package org.apache.helix.messaging.handling
#from org.apache.helix.messaging.handling import *
#from java.util import HashMap
#from java.util import Map
class HelixTaskResult:
    """Mutable result record for a Helix message-handling task.

    Python port of org.apache.helix.messaging.handling.HelixTaskResult:
    carries a success flag, a status message, a free-form string map,
    an interrupted flag and an optional exception.
    """
    def __init__(self):
        self._success = False
        self._message = ""
        self._taskResultMap = {}
        self._interrupted = False
        self._exception = None
    def isSucess(self):
        """Return True when the task completed successfully.

        (The spelling matches the original Java API and is kept for
        backward compatibility.)
        """
        return self._success
    def isInterrupted(self):
        """Return True when the task was interrupted."""
        return self._interrupted
    def setInterrupted(self, interrupted):
        """Record whether the task was interrupted (boolean)."""
        self._interrupted = interrupted
    def setSuccess(self, success):
        """Record whether the task succeeded (boolean)."""
        self._success = success
    def getMessage(self):
        """Return the human-readable status message (str)."""
        return self._message
    def setMessage(self, message):
        """Set the human-readable status message (str)."""
        self._message = message
    def getTaskResultMap(self):
        """Return the mutable str -> str map of task result details."""
        return self._taskResultMap
    def setException(self, e):
        """Attach the exception that caused the task to fail."""
        self._exception = e
    def getException(self):
        """Return the recorded exception, or None if none was set."""
        return self._exception
| 2.171875
| 2
|
tests/test_unpipe.py
|
python-pipe/hellp
| 123
|
12774349
|
from sspipe import p, px, unpipe
def test_unpipe_active():
    """unpipe() on a real pipe object must yield a plain callable."""
    pipeline = px + 1 | px * 5
    plain = unpipe(pipeline)
    assert plain(0) == 5
def test_unpipe_passive():
    """unpipe() must pass an ordinary callable through behaviourally unchanged."""
    plain = unpipe(lambda value: (value + 1) * 5)
    assert plain(0) == 5
| 2.75
| 3
|
src/scripts/x+y2kmeans.py
|
ai-ku/upos
| 4
|
12774350
|
<filename>src/scripts/x+y2kmeans.py
#!/usr/bin/env python
# NOTE(review): Python 2 script (print statement, dict.iteritems) --
# it will not run under Python 3 without porting.
import sys, gzip #argparse
from optparse import OptionParser
from collections import defaultdict as dd
parser = OptionParser()
#parser = argparse.ArgumentParser(description='Finds unique x-y pairs and concatenates their vectors. Requires scode output to stdin.')
parser.add_option('-p', '--pairs', help='wordsub output', dest="pairs")
(args, op) = parser.parse_args()
# Open the pairs file, transparently handling gzip compression.
if args.pairs.endswith('.gz'):
    f = gzip.open(args.pairs)
else:
    f = open(args.pairs)
pairs = dd(int)
# Count occurrences of each tab-separated (word, substitute) pair.
for line in f:
    line = line.strip().split("\t")
    pairs[(line[0], line[1])] += 1
scode_x = {}
scode_y = {}
# Read scode output from stdin: lines starting "0:" carry word (x)
# vectors, lines starting "1:" carry substitute (y) vectors.
for line in sys.stdin:
    if line.startswith('0:'):
        add_to = scode_x
    elif line.startswith('1:'):
        add_to = scode_y
    line = line[2:].strip().split("\t")
    add_to[line[0]] = "\t".join(line[2:])
# Emit one row per unique pair: word_sub, count, x-vector, y-vector.
for pair, count in pairs.iteritems():
    word, sub = pair
    print "%s_%s\t%d\t%s\t%s" % (word, sub, count, scode_x[word], scode_y[sub])
| 2.421875
| 2
|