Dataset schema (one row per source file):

| Column | Type | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | string (length) | 40 | 40 | no |
| size | int64 | 5 | 2.06M | no |
| ext | string (10 distinct values) |  |  | no |
| lang | string (1 distinct value) |  |  | no |
| max_stars_repo_path | string (length) | 3 | 248 | no |
| max_stars_repo_name | string (length) | 5 | 125 | no |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_stars_repo_licenses | list (length) | 1 | 10 | no |
| max_stars_count | int64 | 1 | 191k | yes |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 | yes |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_path | string (length) | 3 | 248 | no |
| max_issues_repo_name | string (length) | 5 | 125 | no |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_issues_repo_licenses | list (length) | 1 | 10 | no |
| max_issues_count | int64 | 1 | 67k | yes |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 | yes |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_path | string (length) | 3 | 248 | no |
| max_forks_repo_name | string (length) | 5 | 125 | no |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 | no |
| max_forks_repo_licenses | list (length) | 1 | 10 | no |
| max_forks_count | int64 | 1 | 105k | yes |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 | yes |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 | yes |
| content | string (length) | 5 | 2.06M | no |
| avg_line_length | float64 | 1 | 1.02M | no |
| max_line_length | int64 | 3 | 1.03M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
| count_classes | int64 | 0 | 1.6M | no |
| score_classes | float64 | 0 | 1 | no |
| count_generators | int64 | 0 | 651k | no |
| score_generators | float64 | 0 | 1 | no |
| count_decorators | int64 | 0 | 990k | no |
| score_decorators | float64 | 0 | 1 | no |
| count_async_functions | int64 | 0 | 235k | no |
| score_async_functions | float64 | 0 | 1 | no |
| count_documentation | int64 | 0 | 1.04M | no |
| score_documentation | float64 | 0 | 1 | no |
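Each record below follows this schema. As a minimal sketch of how such a table could be inspected, assuming it has been exported to a local Parquet file (the file name `code_files.parquet` and the use of pandas are assumptions, not part of this dump):

```python
# Minimal sketch, not the dataset's official loader: read a hypothetical
# Parquet export of this table and rank files by documentation score.
import pandas as pd

df = pd.read_parquet("code_files.parquet")  # hypothetical local export
top_documented = (
    df[["hexsha", "max_stars_repo_name", "max_stars_repo_path", "size", "score_documentation"]]
    .sort_values("score_documentation", ascending=False)
    .head(10)
)
print(top_documented.to_string(index=False))
```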
hexsha: 9a9a50511abf52afcd61e02d8aeff1032454c0a6 | size: 3,379 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): utils/videoJob.py @ dbpeng/aws-lambda-python-example-zencoder (head 3c3e2d2ea88be824a62e41f16d6bdd79deeef2a0) | licenses: ["MIT"]
max_stars_count: 1 | events: 2018-05-01T11:54:33.000Z → 2018-05-01T11:54:33.000Z
max_issues_count: 1 | events: 2021-06-01T22:18:53.000Z → 2021-06-01T22:18:53.000Z
max_forks_count: null | events: null → null
content:
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from datetime import datetime
import json
from base import Session, engine, Base
from enum import Enum
VIDEOS_S3_PATH = os.environ["VIDEOS_S3_PATH"]
class VideoJobState(Enum):
INIT = 0
DONE = 3
CANCEL = 4
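# SQLAlchemy ORM model for one row of the TranscodingJob table; the methods below are plain getters/setters around the mapped columns.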
class VideoTranscodeJob(Base):
__tablename__ = "TranscodingJob"
id = Column("ID", Integer, primary_key=True)
src = Column("Src", String(100))
dst = Column("Dst", String(100))
playUrl = Column("VideoUrl", String(256))
config = Column("Config", String(100))
vendor = Column("Vendor", String(100))
jobId = Column("JobId", String(100))
progress = Column("Progress", Integer)
webhook = Column("Webhook", String(300))
created_At = Column("Created_At", DateTime, default=datetime.now)
updated_At = Column("Updated_At", DateTime, onupdate=datetime.now)
def __init__(self):
self.progress = 0
def setConfig(self, config):
self.config = config
filename = "profiles/"+self.config+".json"
with open(filename, 'r') as f:
datastore = json.load(f)
self.configContext = datastore
def getConfig(self):
return self.config
def getConfigContext(self):
return self.configContext
def setSrc(self, src):
self.src = src
def getSrc(self):
return self.src
def setPlaybackUrl(self, url):
# TODO: should validate url scheme here
self.playUrl = url
def getPlaybackUrl(self):
return self.playUrl
def setDst(self, dst):
# this part needs a revamp, we should not by default assume it's HLS
self.dst = VIDEOS_S3_PATH + dst + "/playlist.m3u8"
def getDst(self):
return self.dst
def setVendor(self, vendorId):
self.vendor = vendorId
def getVendor(self):
return self.vendor
def setJobId(self, jobid):
self.jobId = jobid
def getJobId(self):
return self.jobId
def setWebhook(self, url):
self.webhook = url
def getWebhook(self):
return self.webhook
def setProgress(self, status):
self.progress = status
def getProgress(self):
return self.progress
    def getCreatedTime(self):
        return self.created_At
    def getUpdatedTime(self):
        return self.updated_At
def setId(self, id):
self.id = id
def getId(self):
return self.id
def getJobDescription(self):
# self.configContext['input'] = self.getSrc()
for output in self.configContext['output']:
output['base_url'] = self.getDst()
return self.configContext
def submit(self):
pass
# if __name__ == "__main__":
# session = Session()
# vjob = VideoTranscodeJob()
# vjob.setSrc("s3://wowza-video/hk33456678.mp4")
# vjob.setDst("13ffjsdhr")
# vjob.setConfig("zen-hls")
# vjob.setJobId("13556245")
# vjob.setVendor("zencoder")
# session.add(vjob)
# session.commit()
# # jobs = session.query(VideoTranscodeJob).all()
# # for job in jobs:
# # job.setProgress(4)
# # session.commit()
# session.close()
avg_line_length: 25.406015 | max_line_length: 76 | alphanum_fraction: 0.629772
count_classes: 2,520 | score_classes: 0.745783 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 787 | score_documentation: 0.232909

hexsha: 9a9af8b29d8ddd5b44627798d65817d8e0c206e0 | size: 3,411 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): alipay/aop/api/domain/MybankCreditSceneprodCommonQueryModel.py @ antopen/alipay-sdk-python-all (head 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c) | licenses: ["Apache-2.0"]
max_stars_count: 213 | events: 2018-08-27T16:49:32.000Z → 2021-12-29T04:34:12.000Z
max_issues_count: 29 | events: 2018-09-29T06:43:00.000Z → 2021-09-02T03:27:32.000Z
max_forks_count: 59 | events: 2018-08-27T16:59:26.000Z → 2022-03-25T10:08:15.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
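# Alipay SDK domain model: plain properties for each request field plus to_alipay_dict/from_alipay_dict (de)serialization helpers.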
class MybankCreditSceneprodCommonQueryModel(object):
def __init__(self):
self._app_seq_no = None
self._ext_param = None
self._operation_type = None
self._org_code = None
self._product_code = None
self._seq_no = None
@property
def app_seq_no(self):
return self._app_seq_no
@app_seq_no.setter
def app_seq_no(self, value):
self._app_seq_no = value
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
self._ext_param = value
@property
def operation_type(self):
return self._operation_type
@operation_type.setter
def operation_type(self, value):
self._operation_type = value
@property
def org_code(self):
return self._org_code
@org_code.setter
def org_code(self, value):
self._org_code = value
@property
def product_code(self):
return self._product_code
@product_code.setter
def product_code(self, value):
self._product_code = value
@property
def seq_no(self):
return self._seq_no
@seq_no.setter
def seq_no(self, value):
self._seq_no = value
def to_alipay_dict(self):
params = dict()
if self.app_seq_no:
if hasattr(self.app_seq_no, 'to_alipay_dict'):
params['app_seq_no'] = self.app_seq_no.to_alipay_dict()
else:
params['app_seq_no'] = self.app_seq_no
if self.ext_param:
if hasattr(self.ext_param, 'to_alipay_dict'):
params['ext_param'] = self.ext_param.to_alipay_dict()
else:
params['ext_param'] = self.ext_param
if self.operation_type:
if hasattr(self.operation_type, 'to_alipay_dict'):
params['operation_type'] = self.operation_type.to_alipay_dict()
else:
params['operation_type'] = self.operation_type
if self.org_code:
if hasattr(self.org_code, 'to_alipay_dict'):
params['org_code'] = self.org_code.to_alipay_dict()
else:
params['org_code'] = self.org_code
if self.product_code:
if hasattr(self.product_code, 'to_alipay_dict'):
params['product_code'] = self.product_code.to_alipay_dict()
else:
params['product_code'] = self.product_code
if self.seq_no:
if hasattr(self.seq_no, 'to_alipay_dict'):
params['seq_no'] = self.seq_no.to_alipay_dict()
else:
params['seq_no'] = self.seq_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditSceneprodCommonQueryModel()
if 'app_seq_no' in d:
o.app_seq_no = d['app_seq_no']
if 'ext_param' in d:
o.ext_param = d['ext_param']
if 'operation_type' in d:
o.operation_type = d['operation_type']
if 'org_code' in d:
o.org_code = d['org_code']
if 'product_code' in d:
o.product_code = d['product_code']
if 'seq_no' in d:
o.seq_no = d['seq_no']
return o
avg_line_length: 29.405172 | max_line_length: 79 | alphanum_fraction: 0.588977
count_classes: 3,294 | score_classes: 0.965699 | count_generators: 0 | score_generators: 0 | count_decorators: 1,489 | score_decorators: 0.436529 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 424 | score_documentation: 0.124304

hexsha: 9a9d13bd5f6b65068699065c4f4e5d2b6027979d | size: 32,570 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): train_end2end.py @ lyn1874/daml (head edd89c3baf018cdb407208d137364fcefd913896) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 18:23:08 2019
This script is for training the experiment end to end
@author: li
"""
import tensorflow as tf
import models.AE as AE
import optimization.loss_tf as loss_tf
from data import read_frame_temporal as rft
import numpy as np
import os
import math
import cv2
import shutil
import const
def train_end2end(args, data_set, model_type, motion_method, version=0, bg_ind=None, augment_opt="none"):
model_mom_for_load_data = args.datadir
path_mom = args.expdir
if data_set == "ucsd1":
stat = [8,6,2,5]
train_ucsd1(stat, model_type, motion_method, version)
elif data_set == "ucsd2":
stat = [8,6,2,4]
train_ucsd2(stat, model_type, motion_method, version)
elif data_set == "avenue":
stat = [6,6,2,4]
        train_avenue(stat, model_type, motion_method, augment_opt, version)
elif data_set == "shanghaitech_allinone":
stat = [6,6,2,4]
train_shanghaitech_allinone(stat, model_type, version)
elif data_set == "shanghaitech_multiple":
stat = [6,6,2,4]
train_shanghaitech_multiple(stat, model_type, motion_method,
version, bg_ind)
# elif data_set is "moving_mnist":
# # 6, 6, 1, 4
# train_moving_mnist(model_mom_for_load_data, path_mom, stat, model_type, version)
def train_fps(model_mom_for_load_data, path_mom):
# 31,32,33,34
version = 0
interval_group = np.arange(11)[1:] * 2
learn_opt = "learn_fore"
data_set = "ucsd2"
motion_method = "conv3d"
model_type = "2d_2d_pure_unet"
time_step = 6
args.z_mse_ratio = 0.001
for single_interval in interval_group:
delta = single_interval
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
single_interval, version,
None, 4, learn_opt)
def train_ucsd1_group():
stat = [8, 6, 2, 5]
model_type = "2d_2d_pure_unet"
motion_method = "convlstm"
version = [0, 1, 2, 3]
for single_version in version:
train_ucsd1(stat, model_type, motion_method, single_version)
def train_ucsd1(stat, model_type, motion_method, version):
data_set = "ucsd1"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
interval, version,
None,
num_enc_layer, "learn_fore")
def train_ucsd2_group():
stat = [8, 6, 2, 4]
model_type = "2d_2d_pure_unet"
motion_method = "convlstm"
for single_version in [2, 3]:
train_ucsd2(stat, model_type, motion_method, single_version)
def train_ucsd2(stat, model_type, motion_method, version):
data_set = "ucsd2"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method, interval,
version, None, num_enc_layer, "learn_fore")
def train_avenue_group():
data_dir = args.datadir
model_dir = args.expdir
stat = [6, 6, 2, 4]
motion_method = "conv3d"
augment_opt = "none"
for single_version in [2, 3]:
        train_avenue(stat, "2d_2d_pure_unet", motion_method, augment_opt, single_version)
def train_avenue(stat, model_type, motion_method, augment_opt, version):
data_set = "avenue"
args.augment_option = augment_opt
if augment_opt == "add_dark_auto":
learn_opt = "learn_full"
else:
learn_opt = "learn_fore"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
interval, version,
None,
num_enc_layer, learn_opt)
def train_shanghaitech_allinone(stat, model_type, version):
motion_method = "conv3d"
time_step, delta, interval, num_enc_layer = stat
data_set = "shanghaitech"
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_fore")
def train_shanghaitech_multiple(stat, model_type, motion_method, version,
bg_ind=None):
if bg_ind[0] == 0:
bg_ind = [2, 3, 7, 9, 11]
for single_bg_ind in bg_ind:
train_shanghaitech_for_per_bg(args.datadir, args.expdir, stat, model_type, motion_method,
single_bg_ind, version)
def train_shanghaitech_for_per_bg(model_mom_for_load_data, path_mom, stat, model_type, motion_method, bg_ind, version):
time_step, delta, interval, num_enc_layer = stat
data_set = "shanghaitech"
train_model(model_mom_for_load_data, path_mom, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_fore",
bg_index_pool=[bg_ind])
def train_moving_mnist():
motion_method = "conv3d"
data_set = "moving_mnist"
version = 2
model_type = "2d_2d_unet_no_shortcut"
z_mse_ratio = 0.001
args.z_mse_ratio = z_mse_ratio
num_layer = [5]
stat_group = [[6, 2, 1]]
for single_layer in num_layer:
for single_stat in stat_group:
time_step, delta, interval = single_stat
num_enc_layer = single_layer
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_full")
def train_moving_mnist_single_digit(model_group):
"""This function train a pure autoencoder for moving mnist single digit dataset
The goal of this type of experiments is to hope the latent can show some pattern between
anomalies and normal"""
motion_method = "conv3d"
data_set = "moving_mnist_single_digit"
version = 1 # version 1 means the activation layer in the last convolutional block is changed from
    # leaky-relu to tanh
args.z_mse_ratio = 0.001
num_layer = [5, 4]
stat = [6, 2, 1]
for model_type in model_group:
for single_layer in num_layer:
time_step, delta, interval = stat
num_enc_layer = single_layer
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_full")
def train_seq2seq(version):
data_set = "ucsd2"
motion_method = "conv3d"
model_type = "many_to_one"
for time_step in [4, 6, 8]:
stat = [time_step, 2, 2, 4]
train_model(args.datadir, args.expdir, data_set, stat[0], stat[1], model_type,
motion_method, stat[2], version, None, stat[-1], "learn_fore", None)
def train_model(model_mom_for_load_data, path_mom, data_set, time_step, delta, model_type, motion_method,
single_interval, version, ckpt_dir, num_enc_layer, learn_opt, bg_index_pool=None):
print("-------------------Start to train the model------------------------------")
args.data_set = data_set
interval_input = np.array([single_interval])
bg_index = None
args.num_encoder_layer = num_enc_layer
args.num_decoder_layer = num_enc_layer
args.time_step = time_step
args.single_interval = single_interval
args.delta = delta
args.learn_opt = learn_opt
args.bg_index_pool = bg_index_pool
model_dir = path_mom + "ano_%s_motion_end2end/" % args.data_set
if not bg_index_pool:
model_dir = model_dir + "time_%d_delta_%d_gap_%d_%s_%s_%s_enc_%d_version_%d" % (time_step,
delta, single_interval, model_type, motion_method, learn_opt, num_enc_layer, version)
else:
model_dir = model_dir + "time_%d_delta_%d_gap_%d_%s_%s_%s_enc_%d_bg_%d_version_%d" % (
time_step,
delta, single_interval, model_type, motion_method,
learn_opt, num_enc_layer, bg_index_pool[0], version)
tmf = TrainMainFunc(args, model_mom_for_load_data, model_dir, ckpt_dir, time_step, interval_input, delta,
train_index=bg_index,
bg_index_pool=bg_index_pool)
tmf.build_running()
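# Load frame sequences for the chosen dataset; for shanghaitech the frames are read per background index and concatenated.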
def read_data(model_mom, data_set, concat_option, time_step, interval_input, delta, bg_index_pool=None):
if data_set != "shanghaitech":
train_im, test_im, imshape, targ_shape = rft.get_video_data(model_mom, data_set).forward()
train_im_interval, in_shape, out_shape = rft.read_frame_interval_by_dataset(data_set, train_im,
time_step, concat_option,
interval_input, delta)
else:
train_im_group = []
if not bg_index_pool:
bg_index_pool = np.arange(13)[1:]
for single_bg_index in bg_index_pool:
if single_bg_index < 10:
bg_index = "bg_index_0%d" % single_bg_index
else:
bg_index = "bg_index_%d" % single_bg_index
print("--------loading data from bg %s---------------" % bg_index)
test_im, test_la, imshape, targ_shape = rft.get_video_data(model_mom, args.data_set).forward(bg_index)
test_im_interval, in_shape, out_shape = rft.read_frame_interval_by_dataset(data_set, test_im,
time_step, concat_option,
interval=interval_input,
delta=delta)
train_im_group.append(test_im_interval)
train_im_interval = np.array([v for j in train_im_group for v in j])
return train_im_interval, imshape, targ_shape, in_shape, out_shape
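# Wraps the whole training run: builds the TF graph, trains the reconstruction path first and the latent-motion branch afterwards, and periodically validates and checkpoints.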
class TrainMainFunc(object):
def __init__(self, args, model_mom, model_dir, ckpt_dir, time_step, interval_input=np.array([1]), delta=None,
train_index=None, bg_index_pool=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
concat_option = "conc_tr"
train_im_interval, imshape, targ_shape, in_shape, out_shape = read_data(model_mom, args.data_set,
concat_option, time_step,
interval_input, delta,
bg_index_pool=bg_index_pool)
args.output_dim = targ_shape[-1]
if concat_option == "conc_tr":
args.num_prediction = 1
else:
args.num_prediction = out_shape[0]
self.args = args
self.model_mom = model_mom
self.model_dir = model_dir
self.ckpt_dir = ckpt_dir
self.data_set = args.data_set
self.train_index = train_index
self.temp_shape = [in_shape, out_shape]
self.targ_shape = targ_shape
self.imshape = imshape
self.output_dim = args.output_dim
self.concat = "conc_tr"
self.time_step = time_step
self.delta = delta
self.interval = interval_input[0]
self.test_im = train_im_interval
self.input_option = args.input_option
self.augment_option = args.augment_option
self.darker_value = args.darker_value
self.learn_opt = args.learn_opt
self.model_type = args.model_type
self.z_mse_ratio = args.z_mse_ratio
[lrate_g_step, lrate_g], [lrate_z_step, lrate_z], [epoch, batch_size] = const.give_learning_rate_for_init_exp(self.args)
self.lrate_g_decay_step = lrate_g_step
self.lrate_g_init = lrate_g
self.lrate_z_decay_step = lrate_z_step
self.lrate_z_init = lrate_z
self.batch_size = batch_size
self.max_epoch = epoch
print(args)
def read_tensor(self):
imh, imw, ch = self.targ_shape
placeholder_shape = [None, 2, self.temp_shape[0][0]]
shuffle_option = True
if "/project/" in self.model_dir:
repeat = 20
else:
repeat = 1
images_in = tf.placeholder(tf.string, shape=placeholder_shape, name='tr_im_path')
image_queue = rft.dataset_input(self.model_mom, self.data_set, images_in, self.learn_opt,
self.temp_shape, self.imshape, self.targ_shape[:2], self.batch_size,
augment_option=self.augment_option,
darker_value=self.darker_value,
conc_option=self.concat, shuffle=shuffle_option,
train_index=None,
epoch_size=repeat)
image_init = image_queue.make_initializable_iterator()
image_batch = image_init.get_next()
x_input = image_batch[0] # [batch_size, num_input_channel, imh, imw, ch]
x_output = image_batch[1] # [batch_size, self.output_dim, imh, imw, ch]
im_background = image_batch[-1]
print("=========================================")
print("The input of the model", x_input)
print("The output of the model", x_output)
print("The background of the data", im_background)
print("=========================================")
        x_input = tf.concat([x_input, x_output], axis=1)  # the background has already been subtracted
if self.learn_opt == "learn_fore":
x_real_input = x_input + im_background
else:
x_real_input = x_input
self.x_real_input = tf.transpose(x_real_input, perm=(1, 0, 2, 3, 4))
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # num_frame, batch_size, imh, imw, ch
# the last input of x_input is for prediction
im_background = tf.transpose(im_background, perm=(1, 0, 2, 3, 4)) # num_frame, batch_size, imh, imw, ch
if "crop" in self.input_option:
x_input = tf.reshape(x_input, shape=[(self.time_step + 1) * self.batch_size, imh, imw, ch])
crop_size = self.input_option.strip().split("crop_")[1]
crop_h, crop_w = crop_size.strip().split("_")
crop_h, crop_w = int(crop_h), int(crop_w)
x_input_crop, stride_size, crop_box_h_w = rft.get_crop_image(x_input, crop_h, crop_w)
x_input_crop = tf.concat([x_input_crop],
axis=0) # [num_regions, (num_time+1)*batch_size, crop_height, crop_weight,ch]
num_box = x_input_crop.get_shape().as_list()[0]
x_input_crop = tf.reshape(x_input_crop,
shape=[num_box, self.time_step + 1, self.batch_size, crop_h, crop_w, ch])
x_input_crop = tf.transpose(x_input_crop, perm=(1, 0, 2, 3, 4, 5))
x_input_crop = tf.reshape(x_input_crop,
shape=[self.time_step + 1, num_box * self.batch_size, crop_h, crop_w, ch])
x_input = x_input_crop # [time, num_box*batch, croph, cropw, ch]
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # [batch, time, c_h, c_w, ch]
x_input = tf.random.shuffle(x_input)
if crop_h >= 128:
x_input = x_input[:4] # this is for batch size
print("The actual number of box", num_box)
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # [time, batch, c_h, c_w, ch]
self.x_real_input = x_input
return images_in, x_input, image_init, im_background
def build_graph(self):
num_recons_output = self.time_step
image_placeholder, x_input, image_init, im_background = self.read_tensor()
# --build encoder-------------#
model_use = AE.DAML(self.args)
p_x_recons, p_x_pred, latent_space_gt, latent_space_pred = model_use.forward(x_input)
if "crop" not in self.input_option:
if self.learn_opt == "learn_full":
print("====the reconstruction is full frame=============")
elif self.learn_opt == "learn_fore":
print("====the reconstruction is frame - background=====")
if self.model_type != "many_to_one":
p_x_recons = p_x_recons + im_background
p_x_pred = p_x_pred + im_background
if self.model_type == "2d_2d_pure_unet":
x_recons_gt = self.x_real_input[1:self.time_step] # [num_recons, batch_size, imh, imw, ch]
elif self.model_type == "2d_2d_unet_no_shortcut":
x_recons_gt = self.x_real_input[:self.time_step]
else:
x_recons_gt = []
x_pred_gt = self.x_real_input[-1:]
print("=============================================================")
print("----the input for the model-----------------", x_input)
print("----the groundtruth for reconstruction------", x_recons_gt)
print("----the reconstructed frames----------------", p_x_recons)
print("----the groundtruth for prediction----------", x_pred_gt)
print("----the predicted frame---------------------", p_x_pred)
print("----the gt latent space---------------------", latent_space_gt)
print("----the predicted latent space--------------", latent_space_pred)
print("=============================================================")
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
if "moving_mnist" not in self.data_set:
mse_pixel = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x_recons_gt, p_x_recons), (-1, -2, -3)))
else:
mse_pixel = tf.keras.losses.binary_crossentropy(y_true=x_recons_gt, y_pred=p_x_recons,
from_logits=False)
mse_pixel = tf.reduce_mean(tf.reduce_sum(mse_pixel, (-1, -2, -3)))
mse_latent = tf.reduce_mean(
tf.reduce_sum(tf.squared_difference(latent_space_gt, latent_space_pred), (-1, -2, -3)))
        elif self.model_type == "many_to_one":
mse_pixel = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x_pred_gt-im_background, p_x_pred-im_background), (-1, -2, -3)))
mse_latent = tf.constant(0.0)
z_mse_ratio_placeholder = tf.placeholder(tf.float32, name="ratio_for_z_mse")
if self.model_type != "many_to_one":
loss_tot = mse_pixel + mse_latent * z_mse_ratio_placeholder
else:
loss_tot = mse_pixel
var_tot = tf.trainable_variables()
[print(v) for v in var_tot if 'kernel' in v.name]
# print("==========================================")
# print("encoder decoder trainable variables")
# [print(v) for v in var_tot if 'motion_latent' not in v.name]
# print("==========================================")
# print("motion trainable variables")
# [print(v) for v in var_tot if 'motion_latent' in v.name]
var_0 = var_tot
loss_tot = tf.add_n([loss_tot, tf.add_n(
[tf.nn.l2_loss(v) for v in var_0 if 'kernel' in v.name or 'weight' in v.name]) * args.regu_par])
g_lrate = tf.placeholder(tf.float32, name='g_lrate')
train_op_0 = loss_tf.train_op(loss_tot, g_lrate, var_opt=var_0, name='train_op_tot')
z_lrate = tf.placeholder(tf.float32, name='z_lrate')
if self.model_type != "many_to_one":
var_motion = [v for v in var_tot if 'motion_latent' in v.name]
loss_motion = mse_latent
loss_motion = tf.add_n([loss_motion, tf.add_n(
[tf.nn.l2_loss(v) for v in var_motion if 'kernel' in v.name or 'weight' in v.name]) * args.regu_par])
train_op_z = loss_tf.train_op(loss_motion, z_lrate, var_opt=var_motion, name='train_latent_z')
train_z_group = [z_lrate, train_op_z]
else:
train_z_group = [z_lrate, []]
saver_set_all = tf.train.Saver(tf.trainable_variables(), max_to_keep=1)
input_group = [image_init, image_placeholder, z_mse_ratio_placeholder]
loss_group = [mse_pixel, mse_latent, loss_tot]
train_group = [g_lrate, train_op_0, saver_set_all]
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
im_stat = [p_x_recons, x_recons_gt, p_x_pred, x_pred_gt]
else:
im_stat = [p_x_pred, x_pred_gt]
return input_group, loss_group, train_group, train_z_group, im_stat
def build_train_op(self, sess, image_init, placeholder_group,
x_train, single_epoch, num_epoch_for_full, loss_group, train_op_group):
train_op_0, train_op_z = train_op_group
image_placeholder, z_mse_placeholder, g_lrate_placeholder, z_lrate_placeholder = placeholder_group
sess.run(image_init.initializer, feed_dict={image_placeholder: x_train})
num_tr_iter_per_epoch = np.shape(x_train)[0] // self.batch_size
lrate_g_npy = self.lrate_g_init * math.pow(0.1, math.floor(float(single_epoch) / float(self.lrate_g_decay_step)))
lrate_z_npy = self.lrate_z_init * math.pow(0.1, math.floor(float(single_epoch - num_epoch_for_full) / float(self.lrate_z_decay_step)))
loss_per_epoch = []
if single_epoch <= num_epoch_for_full:
fetches_tr = [train_op_0]
else:
fetches_tr = [train_op_z]
fetches_tr.append(loss_group)
for single_iter in range(num_tr_iter_per_epoch):
_, _loss_group = sess.run(fetches=fetches_tr, feed_dict={z_mse_placeholder: self.z_mse_ratio,
g_lrate_placeholder: lrate_g_npy,
z_lrate_placeholder: lrate_z_npy})
loss_per_epoch.append(_loss_group)
return np.mean(loss_per_epoch, axis=0)
def build_val_op(self, sess, image_init, image_placeholder, x_val, loss_group, image_stat, image_path, single_epoch):
sess.run(image_init.initializer, feed_dict={image_placeholder: x_val})
num_val_iter_per_epoch = np.shape(x_val)[0] // self.batch_size
# image_stat: [p_x_recons, p_x_pred, x_recons_gt, x_pred_gt]
# or
# image_stat: [p_x_pred, x_pred_gt]
loss_val_per_epoch = []
for single_val_iter in range(num_val_iter_per_epoch):
if single_val_iter != num_val_iter_per_epoch - 1:
_loss_val = sess.run(fetches=loss_group)
else:
_loss_val, _stat_use = sess.run(fetches=[loss_group, image_stat])
for single_input, single_path in zip(_stat_use, image_path):
for j in range(np.shape(single_input)[0]):
im_use = single_input[j, :]
shape_use = np.array(np.shape(im_use)[1:])
cv2.imwrite(os.path.join(single_path, "epoch_%d_frame_%d.jpg" % (single_epoch, j)),
(plot_canvas(im_use, shape_use)).astype('uint8')[:, :, ::-1])
loss_val_per_epoch.append(_loss_val)
return np.mean(loss_val_per_epoch, axis=0)
def build_running(self):
im_path = os.path.join(self.model_dir, 'recons_gt')
recons_path = os.path.join(self.model_dir, 'p_x_recons')
im_pred_path = os.path.join(self.model_dir, 'pred_gt')
pred_path = os.path.join(self.model_dir, 'p_x_pred')
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
path_group = [recons_path, im_path, pred_path, im_pred_path]
else:
path_group = [pred_path, im_pred_path]
for i in path_group:
if not os.path.exists(i):
os.makedirs(i)
with tf.Graph().as_default():
input_group, loss_group, train_group, train_z_group, im_stat = self.build_graph()
image_init, image_placeholder, z_mse_ratio_placeholder = input_group
mse_pixel_loss, mse_latent_loss, mse_tot = loss_group
g_lrate, train_op, saver = train_group
#z_lrate, train_z_op = train_z_group
saver_restore = None
tot_num_frame = np.shape(self.test_im)[0]
test_im_shuffle = self.test_im[np.random.choice(np.arange(tot_num_frame),
tot_num_frame,
replace=False)]
placeholder_group = [image_placeholder, z_mse_ratio_placeholder, g_lrate, train_z_group[0]]
loss_group = [mse_pixel_loss, mse_latent_loss]
train_group = [train_op, train_z_group[-1]]
if "ucsd" in self.data_set:
x_train = test_im_shuffle[:-self.batch_size * 4]
x_val = test_im_shuffle[-self.batch_size * 4:]
elif "avenue" in self.data_set or "shanghaitech" in self.data_set:
x_train = test_im_shuffle[:-self.batch_size * 20]
x_val = test_im_shuffle[-self.batch_size * 20:]
else:
x_train = test_im_shuffle[:-self.batch_size * 2]
x_val = test_im_shuffle[-self.batch_size * 2:]
            if self.data_set == "ucsd1" and self.model_type != "many_to_one":
num_epoch_for_full = 25
else:
num_epoch_for_full = self.lrate_g_decay_step
checkpoint_path = self.model_dir + '/model.ckpt'
print("====================================================================================")
print("There are %d frames in total" % np.shape(self.test_im)[0])
print("The shape of training and validation images", np.shape(x_train), np.shape(x_val))
print(
"%d input frames are loaded with %d stride for predicting furture frame at time t+%d" % (self.time_step,
self.interval,
self.delta))
print("The lr for whole process start from %.4f and decay 0.1 every %d epoch" % (
self.lrate_g_init, self.lrate_g_decay_step))
print("The lr for motion process start from %.4f and decay 0.1 every %d epoch" % (
self.lrate_z_init, self.lrate_z_decay_step))
print("The ratio for the latent space mse loss== ", self.z_mse_ratio)
print("The used background index is:", self.train_index)
print("I am only focusing on the reconstruction for the first %d epochs" % num_epoch_for_full)
print("====================================================================================")
with tf.Session() as sess:
                if self.ckpt_dir is None:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
else:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver_restore.restore(sess, self.ckpt_dir)
print("restore parameter from ", self.ckpt_dir)
loss_tr_tot = np.zeros([self.max_epoch, 2])
loss_val_tot = []
try:
for single_epoch in range(self.max_epoch):
loss_per_epoch = self.build_train_op(sess, image_init, placeholder_group, x_train,
single_epoch, num_epoch_for_full, loss_group,
train_group)
loss_tr_tot[single_epoch, :] = loss_per_epoch
print("Epoch %d with training pixel mse loss %.3f z mse %.3f" % (single_epoch,
loss_tr_tot[single_epoch, 0],
loss_tr_tot[single_epoch, 1]))
if single_epoch % 5 == 0 or single_epoch == self.max_epoch - 1:
# sess, image_init, image_placeholder, x_val, loss_group, image_stat, image_path, single_epoch)
loss_val_per_epoch = self.build_val_op(sess, image_init, image_placeholder, x_val, loss_group, im_stat,
path_group, single_epoch)
loss_val_tot.append(loss_val_per_epoch)
print("Epoch %d with validation pixel mse loss %.3f z mse %.3f" % (single_epoch,
loss_val_tot[-1][0],
loss_val_tot[-1][1]))
if np.isnan(loss_tr_tot[single_epoch, 0]):
np.save(self.model_dir + '/tr_loss', loss_tr_tot)
np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
if single_epoch % 5 == 0 and single_epoch != 0:
np.save(self.model_dir + '/tr_loss', loss_tr_tot)
np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
saver.save(sess, checkpoint_path, global_step=single_epoch)
if single_epoch == self.max_epoch - 1:
saver.save(sess, checkpoint_path, global_step=single_epoch)
np.save(self.model_dir + '/tr_loss', loss_tr_tot)
np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
except tf.errors.OutOfRangeError:
print("---oh my god, my model again could't read the data----")
print("I am at step", single_iter, single_iter // num_tr_iter_per_epoch)
np.save(os.path.join(self.model_dir, 'tr_loss'), loss_tr_tot)
np.save(os.path.join(self.model_dir, 'val_loss'), np.array(loss_val_tot))
saver.save(sess, checkpoint_path, global_step=single_epoch)
pass
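# Tile a batch of frames into a single grid image (up to ny columns) for visualization.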
def plot_canvas(image, imshape, ny=8):
if np.shape(image)[0] < ny:
ny = np.shape(image)[0]
nx = np.shape(image)[0] // ny
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
targ_height, targ_width = imshape[0], imshape[1]
if np.shape(image)[-1] == 1:
image = np.repeat(image, 3, -1)
imshape[-1] = 3
canvas = np.empty((targ_height * nx, targ_width * ny, 3))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
canvas[(nx - i - 1) * targ_height:(nx - i) * targ_height,
j * targ_width:(j + 1) * targ_width, :] = np.reshape(image[i * ny + j], imshape)
return (canvas * 255.0).astype('uint8')
if __name__ == '__main__':
args = const.args
print("-------------------------------------------------------------------")
print("------------------argument for current experiment------------------")
print("-------------------------------------------------------------------")
for arg in vars(args):
print(arg, getattr(args, arg))
print("-------------------------------------------------------------------")
print(type(args.version), args.version)
if args.version == 0:
print("only running experiment once")
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=args.version, bg_ind=None, augment_opt="none")
else:
for s_version in range(args.version):
print("running experiment for version %d" % s_version)
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=s_version, bg_ind=None, augment_opt="none")
avg_line_length: 49.725191 | max_line_length: 142 | alphanum_fraction: 0.576727
count_classes: 20,833 | score_classes: 0.639638 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 5,099 | score_documentation: 0.156555

hexsha: 9a9d1d892502aafdc91f1a1eaee4fb13e479814b | size: 10,781 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): cellfinder_napari/detect.py @ neuromusic/cellfinder-napari (head 9a58a3b2174c5cb4c740ace6373744b5bcc4cc3d) | licenses: ["BSD-3-Clause"]
max_stars_count: 7 | events: 2021-03-03T11:58:24.000Z → 2021-12-24T08:40:12.000Z
max_issues_count: 87 | events: 2021-03-08T18:58:26.000Z → 2022-03-30T15:37:08.000Z
max_forks_count: 5 | events: 2021-05-26T19:23:50.000Z → 2022-03-06T13:03:13.000Z
content:
import napari
from pathlib import Path
from magicgui import magicgui
from typing import List
from cellfinder_napari.utils import brainglobe_logo
# TODO:
# how to store & fetch pre-trained models?
# TODO: params to add
NETWORK_VOXEL_SIZES = [5, 1, 1]
CUBE_WIDTH = 50
CUBE_HEIGHT = 20
CUBE_DEPTH = 20
# If using ROI, how many extra planes to analyse
MIN_PLANES_ANALYSE = 0
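# Build the napari/magicgui widget that configures cell detection and runs cellfinder in a background thread.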
def detect():
from math import ceil
# from fancylog import fancylog
# import cellfinder_napari as program_for_log
from napari.qt.threading import thread_worker
from cellfinder_core.main import main as cellfinder_run
from cellfinder_core.classify.cube_generator import get_cube_depth_min_max
from imlib.cells.cells import Cell
from .utils import cells_to_array
DEFAULT_PARAMETERS = dict(
voxel_size_z=5,
voxel_size_y=2,
voxel_size_x=2,
Soma_diameter=16.0,
ball_xy_size=6,
ball_z_size=15,
Ball_overlap=0.6,
Filter_width=0.2,
Threshold=10,
Cell_spread=1.4,
Max_cluster=100000,
Trained_model=Path.home(),
Start_plane=0,
End_plane=0,
Number_of_free_cpus=2,
Analyse_local=False,
Debug=False,
)
@magicgui(
header=dict(
widget_type="Label",
            label=f'<h1><img src="{brainglobe_logo}" width="100">cellfinder</h1>',
),
detection_label=dict(
widget_type="Label",
label="<h3>Cell detection</h3>",
),
data_options=dict(
widget_type="Label",
label="<b>Data:</b>",
),
detection_options=dict(
widget_type="Label",
label="<b>Detection:</b>",
),
classification_options=dict(
widget_type="Label",
label="<b>Classification:</b>",
),
misc_options=dict(
widget_type="Label",
label="<b>Misc:</b>",
),
voxel_size_z=dict(
value=DEFAULT_PARAMETERS["voxel_size_z"],
label="Voxel size (z)",
step=0.1,
),
voxel_size_y=dict(
value=DEFAULT_PARAMETERS["voxel_size_y"],
label="Voxel size (y)",
step=0.1,
),
voxel_size_x=dict(
value=DEFAULT_PARAMETERS["voxel_size_x"],
label="Voxel size (x)",
step=0.1,
),
Soma_diameter=dict(
value=DEFAULT_PARAMETERS["Soma_diameter"], step=0.1
),
ball_xy_size=dict(
value=DEFAULT_PARAMETERS["ball_xy_size"], label="Ball filter (xy)"
),
ball_z_size=dict(
value=DEFAULT_PARAMETERS["ball_z_size"], label="Ball filter (z)"
),
Ball_overlap=dict(value=DEFAULT_PARAMETERS["Ball_overlap"], step=0.1),
Filter_width=dict(value=DEFAULT_PARAMETERS["Filter_width"], step=0.1),
Threshold=dict(value=DEFAULT_PARAMETERS["Threshold"], step=0.1),
Cell_spread=dict(value=DEFAULT_PARAMETERS["Cell_spread"], step=0.1),
Max_cluster=dict(
value=DEFAULT_PARAMETERS["Max_cluster"], min=0, max=10000000
),
Trained_model=dict(value=DEFAULT_PARAMETERS["Trained_model"]),
Start_plane=dict(
value=DEFAULT_PARAMETERS["Start_plane"], min=0, max=100000
),
End_plane=dict(
value=DEFAULT_PARAMETERS["End_plane"], min=0, max=100000
),
Number_of_free_cpus=dict(
value=DEFAULT_PARAMETERS["Number_of_free_cpus"]
),
Analyse_local=dict(
value=DEFAULT_PARAMETERS["Analyse_local"], label="Analyse local"
),
Debug=dict(value=DEFAULT_PARAMETERS["Debug"]),
# Classification_batch_size=dict(max=4096),
call_button=True,
persist=True,
reset_button=dict(widget_type="PushButton", text="Reset defaults"),
)
def widget(
header,
detection_label,
data_options,
viewer: napari.Viewer,
Signal_image: napari.layers.Image,
Background_image: napari.layers.Image,
voxel_size_z: float,
voxel_size_y: float,
voxel_size_x: float,
detection_options,
Soma_diameter: float,
ball_xy_size: float,
ball_z_size: float,
Ball_overlap: float,
Filter_width: float,
Threshold: int,
Cell_spread: float,
Max_cluster: int,
classification_options,
Trained_model: Path,
misc_options,
Start_plane: int,
End_plane: int,
Number_of_free_cpus: int,
Analyse_local: bool,
Debug: bool,
reset_button,
) -> List[napari.types.LayerDataTuple]:
"""
Parameters
----------
Signal_image : napari.layers.Image
Image layer containing the labelled cells
Background_image : napari.layers.Image
Image layer without labelled cells
voxel_size_z : float
Size of your voxels in the axial dimension
voxel_size_y : float
Size of your voxels in the y direction (top to bottom)
voxel_size_x : float
Size of your voxels in the x direction (left to right)
Soma_diameter : float
The expected in-plane soma diameter (microns)
ball_xy_size : float
Elliptical morphological in-plane filter size (microns)
ball_z_size : float
Elliptical morphological axial filter size (microns)
Ball_overlap : float
Fraction of the morphological filter needed to be filled
to retain a voxel
Filter_width : float
Laplacian of Gaussian filter width (as a fraction of soma diameter)
Threshold : int
Cell intensity threshold (as a multiple of noise above the mean)
Cell_spread : float
Cell spread factor (for splitting up cell clusters)
Max_cluster : int
Largest putative cell cluster (in cubic um) where splitting
should be attempted
Trained_model : Path
Trained model file path
Start_plane : int
First plane to process (to process a subset of the data)
End_plane : int
Last plane to process (to process a subset of the data)
Number_of_free_cpus : int
How many CPU cores to leave free
Analyse_local : bool
Only analyse planes around the current position
Debug : bool
Increase logging
reset_button :
Reset parameters to default
"""
def add_layers(points):
points, rejected = cells_to_array(points)
viewer.add_points(
rejected,
name="Rejected",
size=15,
n_dimensional=True,
opacity=0.6,
symbol="ring",
face_color="lightskyblue",
visible=False,
metadata=dict(point_type=Cell.UNKNOWN),
)
viewer.add_points(
points,
name="Detected",
size=15,
n_dimensional=True,
opacity=0.6,
symbol="ring",
face_color="lightgoldenrodyellow",
metadata=dict(point_type=Cell.CELL),
)
@thread_worker
def run(
signal,
background,
voxel_sizes,
Soma_diameter,
ball_xy_size,
ball_z_size,
Start_plane,
End_plane,
Ball_overlap,
Filter_width,
Threshold,
Cell_spread,
Max_cluster,
Trained_model,
Number_of_free_cpus,
# Classification_batch_size,
):
points = cellfinder_run(
signal,
background,
voxel_sizes,
soma_diameter=Soma_diameter,
ball_xy_size=ball_xy_size,
ball_z_size=ball_z_size,
start_plane=Start_plane,
end_plane=End_plane,
ball_overlap_fraction=Ball_overlap,
log_sigma_size=Filter_width,
n_sds_above_mean_thresh=Threshold,
soma_spread_factor=Cell_spread,
max_cluster_size=Max_cluster,
trained_model=Trained_model,
n_free_cpus=Number_of_free_cpus,
# batch_size=Classification_batch_size,
)
return points
if End_plane == 0:
End_plane = len(Signal_image.data)
voxel_sizes = (voxel_size_z, voxel_size_y, voxel_size_x)
if Trained_model == Path.home():
Trained_model = None
if Analyse_local:
current_plane = viewer.dims.current_step[0]
# so a reasonable number of cells in the plane are detected
planes_needed = MIN_PLANES_ANALYSE + int(
ceil((CUBE_DEPTH * NETWORK_VOXEL_SIZES[0]) / voxel_size_z)
)
Start_plane, End_plane = get_cube_depth_min_max(
current_plane, planes_needed
)
Start_plane = max(0, Start_plane)
End_plane = min(len(Signal_image.data), End_plane)
worker = run(
Signal_image.data,
Background_image.data,
voxel_sizes,
Soma_diameter,
ball_xy_size,
ball_z_size,
Start_plane,
End_plane,
Ball_overlap,
Filter_width,
Threshold,
Cell_spread,
Max_cluster,
Trained_model,
Number_of_free_cpus,
# Classification_batch_size,
)
worker.returned.connect(add_layers)
worker.start()
widget.header.value = (
"<p>Efficient cell detection in large images.</p>"
'<p><a href="https://cellfinder.info" style="color:gray;">Website</a></p>'
'<p><a href="https://docs.brainglobe.info/cellfinder/napari-plugin" style="color:gray;">Documentation</a></p>'
'<p><a href="https://github.com/brainglobe/cellfinder-napari" style="color:gray;">Source</a></p>'
'<p><a href="https://www.biorxiv.org/content/10.1101/2020.10.21.348771v2" style="color:gray;">Citation</a></p>'
"<p><small>For help, hover the cursor over each parameter.</small>"
)
widget.header.native.setOpenExternalLinks(True)
@widget.reset_button.changed.connect
def restore_defaults(event=None):
for name, value in DEFAULT_PARAMETERS.items():
getattr(widget, name).value = value
return widget
avg_line_length: 32.969419 | max_line_length: 119 | alphanum_fraction: 0.576384
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 8,857 | score_decorators: 0.821538 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,419 | score_documentation: 0.317132

hexsha: 9a9de9279be39ea51b643d07bcacfa3cc557f3f2 | size: 1,414 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): setup.py @ paxtonfitzpatrick/nltools (head 9d52e2e1d665a21feb641ab16424e450aca0c971) | licenses: ["MIT"]
max_stars_count: 65 | events: 2018-08-26T19:39:11.000Z → 2022-02-20T10:32:58.000Z
max_issues_count: 138 | events: 2018-08-15T22:31:45.000Z → 2022-02-14T18:23:46.000Z
max_forks_count: 18 | events: 2018-08-23T16:52:35.000Z → 2022-02-24T01:52:27.000Z
content:
from setuptools import setup, find_packages
version = {}
with open("nltools/version.py") as f:
exec(f.read(), version)
with open("requirements.txt") as f:
requirements = f.read().splitlines()
extra_setuptools_args = dict(tests_require=["pytest"])
setup(
name="nltools",
version=version["__version__"],
author="Cosan Lab",
author_email="luke.j.chang@dartmouth.edu",
url="https://cosanlab.github.io/nltools",
python_requires=">=3.6",
install_requires=requirements,
extras_require={"interactive_plots": ["ipywidgets>=5.2.2"]},
packages=find_packages(exclude=["nltools/tests"]),
package_data={"nltools": ["resources/*"]},
include_package_data=True,
license="LICENSE.txt",
description="A Python package to analyze neuroimaging data",
long_description="nltools is a collection of python tools to perform "
"preprocessing, univariate GLMs, and predictive "
"multivariate modeling of neuroimaging data. It is the "
"analysis engine powering www.neuro-learn.org.",
keywords=["neuroimaging", "preprocessing", "analysis", "machine-learning"],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
],
**extra_setuptools_args
)
avg_line_length: 35.35 | max_line_length: 79 | alphanum_fraction: 0.681047
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 740 | score_documentation: 0.523338

hexsha: 9a9e35c047e006353fb6423b17d95459f785de56 | size: 4,028 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): {{ cookiecutter.repo_name }}/src/config/config.py @ johanngerberding/cookiecutter-data-science (head db44c48cdce4886d42b610c04e758d758f834e32) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import os
import warnings
from dotenv import find_dotenv, load_dotenv
from yacs.config import CfgNode as ConfigurationNode
from pathlib import Path
# Please configure your own settings here #
# YACS overwrite these settings using YAML
__C = ConfigurationNode()
### EXAMPLE ###
"""
# data augmentation parameters with albumentations library
__C.DATASET.AUGMENTATION = ConfigurationNode()
__C.DATASET.AUGMENTATION.BLURRING_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_NOISE_PROB = 0.25
__C.DATASET.AUGMENTATION.GAUSS_VAR_LIMIT =(10.0, 40.0)
__C.DATASET.AUGMENTATION.BLUR_LIMIT = 7
...
# model backbone configs
__C.MODEL.BACKBONE = ConfigurationNode()
__C.MODEL.BACKBONE.NAME = 'mobilenet_v2'
__C.MODEL.BACKBONE.RGB = True
__C.MODEL.BACKBONE.PRETRAINED_PATH = 'C:/data-science/kaggle/bengali.ai/models/mobilenet_v2-b0353104.pth'
# model head configs
__C.MODEL.HEAD = ConfigurationNode()
__C.MODEL.HEAD.NAME = 'simple_head_module'
__C.MODEL.HEAD.ACTIVATION = 'leaky_relu'
__C.MODEL.HEAD.OUTPUT_DIMS = [168, 11, 7]
__C.MODEL.HEAD.INPUT_DIM = 1280 # mobilenet_v2
__C.MODEL.HEAD.HIDDEN_DIMS = [512, 256]
__C.MODEL.HEAD.BATCH_NORM = True
__C.MODEL.HEAD.DROPOUT = 0.4
"""
def get_cfg_defaults():
"""
Get a yacs CfgNode object with default values for my_project.
"""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern recommended by the YACS repo.
# It will be subsequently overwritten with local YAML.
return __C.clone()
def combine_cfgs(path_cfg_data: Path=None, path_cfg_override: Path=None):
"""
    An internal-facing routine that combines CFGs in the order provided.
:param path_cfg_data: path to path_cfg_data files
:param path_cfg_override: path to path_cfg_override actual
:return: cfg_base incorporating the overwrite.
"""
if path_cfg_data is not None:
path_cfg_data=Path(path_cfg_data)
if path_cfg_override is not None:
path_cfg_override=Path(path_cfg_override)
# Path order of precedence is:
# Priority 1, 2, 3, 4 respectively
# .env > other CFG YAML > data.yaml > default.yaml
# Load default lowest tier one:
# Priority 4:
cfg_base = get_cfg_defaults()
# Merge from the path_data
# Priority 3:
if path_cfg_data is not None and path_cfg_data.exists():
cfg_base.merge_from_file(path_cfg_data.absolute())
# Merge from other cfg_path files to further reduce effort
# Priority 2:
if path_cfg_override is not None and path_cfg_override.exists():
cfg_base.merge_from_file(path_cfg_override.absolute())
# Merge from .env
# Priority 1:
list_cfg = update_cfg_using_dotenv()
    if list_cfg:
cfg_base.merge_from_list(list_cfg)
return cfg_base
def update_cfg_using_dotenv() -> list:
"""
In case when there are dotenvs, try to return list of them.
# It is returning a list of hard overwrite.
:return: empty list or overwriting information
"""
# If .env not found, bail
if find_dotenv() == '':
warnings.warn(".env files not found. YACS config file merging aborted.")
return []
# Load env.
load_dotenv(find_dotenv(), verbose=True)
# Load variables
list_key_env = {
"DATASET.TRAIN_DATA_PATH",
"DATASET.VAL_DATA_PATH",
"MODEL.BACKBONE.PRETRAINED_PATH",
"MODEL.SOLVER.LOSS.LABELS_WEIGHTS_PATH"
}
# Instantiate return list.
path_overwrite_keys = []
# Go through the list of key to be overwritten.
for key in list_key_env:
# Get value from the env.
        value = os.getenv(key)
# If it is none, skip. As some keys are only needed during training and others during the prediction stage.
if value is None:
continue
# Otherwise, adding the key and the value to the dictionary.
path_overwrite_keys.append(key)
path_overwrite_keys.append(value)
return path_overwrite_keys
avg_line_length: 30.984615 | max_line_length: 115 | alphanum_fraction: 0.712512
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,536 | score_documentation: 0.629593

hexsha: 9a9e673814218a6b691d7522f64cfb8d20627d8f | size: 475 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): section_7/ex 30.py @ thiagofreitascarneiro/Python-avancado-Geek-University (head 861b742ad6b30955fcbe63274b8cf8afc6ca028f) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
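# Read 10 numbers into each of two lists and collect the values present in both (without duplicates).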
list1 = []
list2 = []
list3 = []
cont = 0
while cont < 10:
    valor = int(input('Enter a number for list 1: '))
    list1.append(valor)
    valor2 = int(input('Enter a number for list 2: '))
list2.append(valor2)
cont = cont + 1
if cont == 10:
for i in list1:
if i in list2:
if i not in list3:
list3.append(i)
print(list1)
print(list2)
print(f'The numbers present in both lists are: {list3}')
avg_line_length: 23.75 | max_line_length: 65 | alphanum_fraction: 0.562105
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 122 | score_documentation: 0.255765

hexsha: 9a9ee79fbb5396d6313eb8172811069d5e290bd2 | size: 7,693 | ext: py | lang: Python
repo (max_stars / max_issues / max_forks): scripts/eval/eval.py @ p0l0satik/PlaneDetector (head 60d7330537b90ff0ca74247cd6dac2ca7fc627bc) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import os
from shutil import rmtree
import cv2
import docker
import numpy as np
import open3d as o3d
from pypcd import pypcd
from src.metrics import metrics
from src.metrics.metrics import multi_value, mean
from src.parser import loaders, create_parser
UNSEGMENTED_COLOR = np.asarray([0, 0, 0], dtype=int)
algos = {
"ddpff": "ddpff:1.0"
}
all_plane_metrics = [
metrics.iou,
metrics.dice,
metrics.precision,
metrics.recall,
metrics.fScore
]
CLOUDS_DIR = "input"
PREDICTIONS_DIR = "output"
annot_sorters = {
'tum': lambda x: x,
'icl_tum': lambda x: int(x),
'icl': lambda x: x
}
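# Convert a colour-coded annotation image into a flat array of integer plane labels (0 = unsegmented).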
def read_labels(annot_frame_path: str) -> np.array:
annot_image = cv2.imread(annot_frame_path)
label_colors = annot_image.reshape((annot_image.shape[0] * annot_image.shape[1], 3))
labels = np.zeros(label_colors.shape[0], dtype=int)
unique_colors = np.unique(label_colors, axis=0)
for index, color in enumerate(unique_colors):
color_indices = np.where(np.all(label_colors == color, axis=-1))[0]
if not np.array_equal(color, UNSEGMENTED_COLOR):
labels[color_indices] = index + 1
return labels
def predict_labels(algo_name: str):
if os.path.exists(PREDICTIONS_DIR):
rmtree(PREDICTIONS_DIR)
os.mkdir(PREDICTIONS_DIR)
current_dir_abs = os.path.abspath(os.path.curdir)
path_to_input = os.path.join(current_dir_abs, CLOUDS_DIR)
path_to_output = os.path.join(current_dir_abs, PREDICTIONS_DIR)
# for filename in os.listdir(path_to_input):
# folder_path = os.path.join(path_to_output, filename[:-4])
# os.mkdir(folder_path)
# pcd = o3d.io.read_point_cloud(os.path.join(path_to_input, filename))
# o3d.io.write_point_cloud(os.path.join(folder_path, filename), pcd)
# np.save(
# os.path.join(folder_path, "{}.npy".format(filename[:-4])),
# np.ones(np.asarray(pcd.points).shape[0], dtype=int)
# )
client = docker.from_env()
docker_image_name = algos[algo_name]
container = client.containers.run(
docker_image_name,
volumes=[
'{}:/app/build/input'.format(path_to_input),
'{}:/app/build/output'.format(path_to_output)
],
detach=True
)
for line in container.logs(stream=True):
print(line.strip())
def prepare_clouds(dataset_path: str, loader_name: str):
if os.path.exists(CLOUDS_DIR):
rmtree(CLOUDS_DIR)
os.mkdir(CLOUDS_DIR)
loader = loaders[loader_name](dataset_path)
for depth_frame_num in range(loader.get_frame_count()):
pcd_points = loader.read_pcd(depth_frame_num)
cloud_filepath = os.path.join(CLOUDS_DIR, "{:04d}.pcd".format(depth_frame_num))
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(pcd_points)
# o3d.io.write_point_cloud(cloud_filepath, pcd)
pc = pypcd.make_xyz_point_cloud(pcd_points)
pc.width = loader.cam_intrinsics.width
pc.height = loader.cam_intrinsics.height
pc.save_pcd(cloud_filepath, compression='binary')
def get_filepaths_for_dir(dir_path: str):
filenames = os.listdir(dir_path)
file_paths = [os.path.join(dir_path, filename) for filename in filenames]
return file_paths
def get_path_to_frames(annot_path: str, loader_name: str) -> [(str, str)]:
sort_by = annot_sorters[loader_name]
cloud_file_paths = sorted(get_filepaths_for_dir(CLOUDS_DIR), key=lambda x: sort_by(os.path.split(x)[-1][:-4]))
prediction_folders = sorted(get_filepaths_for_dir(PREDICTIONS_DIR), key=lambda x: sort_by(os.path.split(x)[-1]))
prediction_grouped_file_paths = [
list(filter(lambda x: x.endswith(".npy"), get_filepaths_for_dir(folder))) for folder in prediction_folders
]
annot_file_paths = sorted(get_filepaths_for_dir(annot_path), key=lambda x: sort_by(os.path.split(x)[-1][:-4]))
return zip(cloud_file_paths, annot_file_paths, prediction_grouped_file_paths)
def visualize_pcd_labels(pcd_points: np.array, labels: np.array, filename: str = None):
colors = np.concatenate([UNSEGMENTED_COLOR.astype(dtype=float).reshape(-1, 3), np.random.rand(np.max(labels), 3)])
pcd_for_vis = o3d.geometry.PointCloud()
pcd_for_vis.points = o3d.utility.Vector3dVector(pcd_points)
pcd_for_vis.paint_uniform_color([0, 0, 0])
pcd_for_vis.colors = o3d.utility.Vector3dVector(colors[labels])
if filename is None:
o3d.visualization.draw_geometries([pcd_for_vis])
else:
o3d.io.write_point_cloud(filename, pcd_for_vis)
def dump_info(info, file=None):
print(info)
if file is not None:
print(info, file=file)
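# Run one algorithm over every frame, pick its best prediction per frame by mean IoU, and log per-frame and averaged plane metrics.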
def measure_algo(algo_name: str, annot_path: str, loader_name: str, log_file):
metrics_average = {metric.__name__: 0 for metric in all_plane_metrics}
dump_info("-------------Results for algo: '{}'--------------".format(algo_name), log_file)
predict_labels(algo_name)
for cloud_frame_path, annot_frame_path, prediction_group in get_path_to_frames(annot_path, loader_name):
pcd_points = np.asarray(o3d.io.read_point_cloud(cloud_frame_path).points)
gt_labels = read_labels(annot_frame_path)
# remove zero depth (for TUM)
zero_depth_mask = np.sum(pcd_points == 0, axis=-1) == 3
pcd_points = pcd_points[~zero_depth_mask]
gt_labels = gt_labels[~zero_depth_mask]
# Find the best annotation from algorithm for frame
max_mean_index = 0
max_mean = 0
for prediction_index, prediction_frame_path in enumerate(prediction_group):
pred_labels = np.load(prediction_frame_path)
# remove zero depth (for TUM)
pred_labels = pred_labels[~zero_depth_mask]
metric_res = mean(pcd_points, pred_labels, gt_labels, metrics.iou)
if metric_res > max_mean:
max_mean = metric_res
max_mean_index = prediction_index
# Load chosen predictions
chosen_prediction_path = prediction_group[max_mean_index]
pred_labels = np.load(chosen_prediction_path)
pred_labels = pred_labels[~zero_depth_mask]
# visualize_pcd_labels(pcd_points, pred_labels)
# Print metrics results
dump_info("********Result for frame: '{}'********".format(os.path.split(cloud_frame_path)[-1][:-4]), log_file)
dump_info(multi_value(pcd_points, pred_labels, gt_labels), log_file)
for metric in all_plane_metrics:
metric_res = mean(pcd_points, pred_labels, gt_labels, metric)
metrics_average[metric.__name__] += metric_res
dump_info("Mean {0}: {1}".format(metric.__name__, metric_res), log_file)
dump_info("--------------------------------------------------------", log_file)
dump_info("----------------Average of algo: '{}'----------------".format(algo_name), log_file)
for metric_name, sum_value in metrics_average.items():
dump_info(
"Average {0} for dataset is: {1}".format(metric_name, sum_value / len(os.listdir(CLOUDS_DIR))),
log_file
)
dump_info("--------------------------------------------------------", log_file)
dump_info("----------------End of algo: '{}'--------------------".format(algo_name), log_file)
dump_info("--------------------------------------------------------", log_file)
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
prepare_clouds(args.dataset_path, args.loader)
with open("results.txt", 'w') as log_file:
for algo_name in algos.keys():
measure_algo(algo_name, args.annotations_path, args.loader, log_file)
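# Example invocation (a sketch only: the real flag names come from create_parser(),
# which is defined elsewhere; the names below are assumed from the attributes used
# above -- args.dataset_path, args.annotations_path, args.loader -- and the script
# name and paths are hypothetical):
#   python evaluate_planes.py --dataset_path /data/tum/rgbd --annotations_path /data/tum/annot --loader tum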
| avg_line_length = 38.273632 | max_line_length = 118 | alphanum_fraction = 0.66203 | count_classes = 0 | score_classes = 0 | count_generators = 0 | score_generators = 0 | count_decorators = 0 | score_decorators = 0 | count_async_functions = 0 | score_async_functions = 0 | count_documentation = 1,337 | score_documentation = 0.173794 |
hexsha = 9a9f85fc451de9881426ccefc8e13f03669bb8d6 | size = 491 | ext = py | lang = Python
| max_stars_repo_path = cosmogrb/utils/fits_file.py | max_stars_repo_name = wematthias/cosmogrb | max_stars_repo_head_hexsha = 09852eb4e6e7315bbede507e19a2d57f1b927c3f | max_stars_repo_licenses = ["BSD-2-Clause"] | max_stars_count = 3 | max_stars_repo_stars_event_min_datetime = 2020-03-08T18:20:32.000Z | max_stars_repo_stars_event_max_datetime = 2022-03-10T17:27:26.000Z
| max_issues_repo_path = cosmogrb/utils/fits_file.py | max_issues_repo_name = wematthias/cosmogrb | max_issues_repo_head_hexsha = 09852eb4e6e7315bbede507e19a2d57f1b927c3f | max_issues_repo_licenses = ["BSD-2-Clause"] | max_issues_count = 11 | max_issues_repo_issues_event_min_datetime = 2020-03-04T17:21:15.000Z | max_issues_repo_issues_event_max_datetime = 2020-06-09T12:20:00.000Z
| max_forks_repo_path = cosmogrb/utils/fits_file.py | max_forks_repo_name = wematthias/cosmogrb | max_forks_repo_head_hexsha = 09852eb4e6e7315bbede507e19a2d57f1b927c3f | max_forks_repo_licenses = ["BSD-2-Clause"] | max_forks_count = 5 | max_forks_repo_forks_event_min_datetime = 2020-03-18T18:05:05.000Z | max_forks_repo_forks_event_max_datetime = 2022-03-21T16:06:38.000Z |
from responsum.utils.fits_file import FITSFile, FITSExtension as FE
import pkg_resources
class FITSExtension(FE):
# I use __new__ instead of __init__ because I need to use the classmethod .from_columns instead of the
# constructor of fits.BinTableHDU
def __init__(self, data_tuple, header_tuple):
creator = "COSMOGRB v.%s" % (pkg_resources.get_distribution("cosmogrb").version)
super(FITSExtension, self).__init__(data_tuple, header_tuple, creator=creator)
| avg_line_length = 32.733333 | max_line_length = 106 | alphanum_fraction = 0.757637 | count_classes = 399 | score_classes = 0.812627 | count_generators = 0 | score_generators = 0 | count_decorators = 0 | score_decorators = 0 | count_async_functions = 0 | score_async_functions = 0 | count_documentation = 160 | score_documentation = 0.325866 |
hexsha = 9a9fb2cd7765697e57d5b413e5af8232b235432f | size = 121,557 | ext = py | lang = Python
| max_stars_repo_path = mi/instrument/seabird/sbe26plus/driver.py | max_stars_repo_name = rhan1498/marine-integrations | max_stars_repo_head_hexsha = ad94c865e0e4cc7c8fd337870410c74b57d5c826 | max_stars_repo_licenses = ["BSD-2-Clause"] | max_stars_count = null | max_stars_repo_stars_event_min_datetime = null | max_stars_repo_stars_event_max_datetime = null
| max_issues_repo_path = mi/instrument/seabird/sbe26plus/driver.py | max_issues_repo_name = rhan1498/marine-integrations | max_issues_repo_head_hexsha = ad94c865e0e4cc7c8fd337870410c74b57d5c826 | max_issues_repo_licenses = ["BSD-2-Clause"] | max_issues_count = null | max_issues_repo_issues_event_min_datetime = null | max_issues_repo_issues_event_max_datetime = null
| max_forks_repo_path = mi/instrument/seabird/sbe26plus/driver.py | max_forks_repo_name = rhan1498/marine-integrations | max_forks_repo_head_hexsha = ad94c865e0e4cc7c8fd337870410c74b57d5c826 | max_forks_repo_licenses = ["BSD-2-Clause"] | max_forks_count = null | max_forks_repo_forks_event_min_datetime = null | max_forks_repo_forks_event_max_datetime = null |
"""
@package mi.instrument.seabird.sbe26plus.ooicore.driver
@file /Users/unwin/OOI/Workspace/code/marine-integrations/mi/instrument/seabird/sbe26plus/ooicore/driver.py
@author Roger Unwin
@brief Driver for the ooicore
Release notes:
None.
"""
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
import re
import time
import string
from mi.core.log import get_logger
log = get_logger()
from mi.instrument.seabird.driver import SeaBirdInstrumentDriver
from mi.instrument.seabird.driver import SeaBirdProtocol
from mi.instrument.seabird.driver import NEWLINE
from mi.instrument.seabird.driver import ESCAPE
from mi.core.util import dict_equal
from mi.core.common import BaseEnum
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.chunker import StringChunker
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import SampleException
from mi.core.exceptions import InstrumentStateException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentTimeoutException
from pyon.agent.agent import ResourceAgentState
# default timeout.
TIMEOUT = 60 # setsampling can take longer than 10 seconds over a slow connection.
TIDE_REGEX = r'tide: start time = +(\d+ [A-Za-z]{3} \d{4} \d+:\d+:\d+), p = +([\-\d\.]+), pt = +([\-\d\.]+), t = +([\-\d\.]+)\r\n'
TIDE_REGEX_MATCHER = re.compile(TIDE_REGEX)
WAVE_REGEX = r'(wave: start time =.*?wave: end burst\r\n)'
WAVE_REGEX_MATCHER = re.compile(WAVE_REGEX, re.DOTALL)
STATS_REGEX = r'(deMeanTrend.*?H1/100 = [\d\.e+]+\r\n)'
STATS_REGEX_MATCHER = re.compile(STATS_REGEX, re.DOTALL)
TS_REGEX = r'( +)([\-\d\.]+) +([\-\d\.]+) +([\-\d\.]+)\r\n'
TS_REGEX_MATCHER = re.compile(TS_REGEX)
DC_REGEX = r'(Pressure coefficients.+?)TA3 = [\d+e\.].+?\r\n'
DC_REGEX_MATCHER = re.compile(DC_REGEX, re.DOTALL)
DS_REGEX = r'(SBE 26plus V.+?)logging = [\w, ].+?\r\n'
DS_REGEX_MATCHER = re.compile(DS_REGEX, re.DOTALL)
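# Illustrative samples of the instrument output the matchers above are meant to
# capture. The line shapes follow directly from the regular expressions; the
# numeric values and dates are hypothetical, not captured from a real SBE 26plus.
#
#   TIDE_REGEX:  'tide: start time = 05 Oct 2012 01:10:54, p = 14.5385, pt = 24.228, t = 23.8404\r\n'
#   TS_REGEX:    '   14.5385  24.228  23.8404\r\n'   (sample returned by the ts command)
#   WAVE_REGEX:  a block running from 'wave: start time = ...' through 'wave: end burst\r\n'
#   STATS_REGEX: a block running from 'deMeanTrend...' through 'H1/100 = <value>\r\n'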
###
# Driver Constant Definitions
###
class ScheduledJob(BaseEnum):
ACQUIRE_STATUS = 'acquire_status'
CALIBRATION_COEFFICIENTS = 'calibration_coefficients'
CLOCK_SYNC = 'clock_sync'
class DataParticleType(BaseEnum):
RAW = CommonDataParticleType.RAW
TIDE_PARSED = 'presf_tide_measurement'
WAVE_BURST = 'presf_wave_burst'
DEVICE_STATUS = 'presf_operating_status'
DEVICE_CALIBRATION = 'presf_calibration_coefficients'
STATISTICS = 'presf_wave_statistics'
class InstrumentCmds(BaseEnum):
"""
Device specific commands
Represents the commands the driver implements and the string that must be sent to the instrument to
execute the command.
"""
SETSAMPLING = 'setsampling'
DISPLAY_STATUS = 'ds'
QUIT_SESSION = 'qs'
DISPLAY_CALIBRATION = 'dc'
START_LOGGING = 'start'
STOP_LOGGING = 'stop'
SET_TIME = 'settime'
SET = 'set'
GET = 'get'
TAKE_SAMPLE = 'ts'
SEND_LAST_SAMPLE = "sl"
class ProtocolState(BaseEnum):
"""
Protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
"""
Protocol events
Extends protocol events to the set defined in the base class.
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
### Common driver commands, should these be promoted? What if the command isn't supported?
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE # TS
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE # START
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE # STOP
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS # DS
ACQUIRE_CONFIGURATION = "PROTOCOL_EVENT_ACQUIRE_CONFIGURATION" # DC
SEND_LAST_SAMPLE = "PROTOCOL_EVENT_SEND_LAST_SAMPLE" # SL
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
PING_DRIVER = DriverEvent.PING_DRIVER
SETSAMPLING = 'PROTOCOL_EVENT_SETSAMPLING'
QUIT_SESSION = 'PROTOCOL_EVENT_QUIT_SESSION'
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
# Different event because we don't want to expose this as a capability
SCHEDULED_CLOCK_SYNC = 'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_CONFIGURATION = ProtocolEvent.ACQUIRE_CONFIGURATION
SEND_LAST_SAMPLE = ProtocolEvent.SEND_LAST_SAMPLE
QUIT_SESSION = ProtocolEvent.QUIT_SESSION
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
class Parameter(DriverParameter):
"""
Device parameters
"""
# DS
DEVICE_VERSION = 'DEVICE_VERSION' # str,
SERIAL_NUMBER = 'SERIAL_NUMBER' # str,
DS_DEVICE_DATE_TIME = 'DateTime' # str for now, later ***
USER_INFO = 'USERINFO' # str,
QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER = 'QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER' # float,
QUARTZ_PRESSURE_SENSOR_RANGE = 'QUARTZ_PRESSURE_SENSOR_RANGE' # float,
EXTERNAL_TEMPERATURE_SENSOR = 'ExternalTemperature' # bool,
CONDUCTIVITY = 'CONDUCTIVITY' # bool,
IOP_MA = 'IOP_MA' # float,
VMAIN_V = 'VMAIN_V' # float,
VLITH_V = 'VLITH_V' # float,
LAST_SAMPLE_P = 'LAST_SAMPLE_P' # float,
LAST_SAMPLE_T = 'LAST_SAMPLE_T' # float,
LAST_SAMPLE_S = 'LAST_SAMPLE_S' # float,
# DS/SETSAMPLING
TIDE_INTERVAL = 'TIDE_INTERVAL' # int,
TIDE_MEASUREMENT_DURATION = 'TIDE_MEASUREMENT_DURATION' # int,
TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS = 'TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS' # int,
WAVE_SAMPLES_PER_BURST = 'WAVE_SAMPLES_PER_BURST' # float,
WAVE_SAMPLES_SCANS_PER_SECOND = 'WAVE_SAMPLES_SCANS_PER_SECOND' # float; e.g. 4.0 scans/sec corresponds to a 0.25 s wave sample duration
USE_START_TIME = 'USE_START_TIME' # bool,
USE_STOP_TIME = 'USE_STOP_TIME' # bool,
TXWAVESTATS = 'TXWAVESTATS' # bool,
TIDE_SAMPLES_PER_DAY = 'TIDE_SAMPLES_PER_DAY' # float,
WAVE_BURSTS_PER_DAY = 'WAVE_BURSTS_PER_DAY' # float,
MEMORY_ENDURANCE = 'MEMORY_ENDURANCE' # float,
NOMINAL_ALKALINE_BATTERY_ENDURANCE = 'NOMINAL_ALKALINE_BATTERY_ENDURANCE' # float,
TOTAL_RECORDED_TIDE_MEASUREMENTS = 'TOTAL_RECORDED_TIDE_MEASUREMENTS' # float,
TOTAL_RECORDED_WAVE_BURSTS = 'TOTAL_RECORDED_WAVE_BURSTS' # float,
TIDE_MEASUREMENTS_SINCE_LAST_START = 'TIDE_MEASUREMENTS_SINCE_LAST_START' # float,
WAVE_BURSTS_SINCE_LAST_START = 'WAVE_BURSTS_SINCE_LAST_START' # float,
TXREALTIME = 'TxTide' # bool,
TXWAVEBURST = 'TxWave' # bool,
NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS = 'NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS' # int,
USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC = 'USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC' # bool,
AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR = 'AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR'
AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR = 'AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR'
PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM = 'PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM' # float,
SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND = 'SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND' # int,
MIN_ALLOWABLE_ATTENUATION = 'MIN_ALLOWABLE_ATTENUATION' # float,
MIN_PERIOD_IN_AUTO_SPECTRUM = 'MIN_PERIOD_IN_AUTO_SPECTRUM' # float,
MAX_PERIOD_IN_AUTO_SPECTRUM = 'MAX_PERIOD_IN_AUTO_SPECTRUM' # float,
HANNING_WINDOW_CUTOFF = 'HANNING_WINDOW_CUTOFF' # float,
SHOW_PROGRESS_MESSAGES = 'SHOW_PROGRESS_MESSAGES' # bool,
STATUS = 'STATUS' # str,
LOGGING = 'LOGGING' # bool,
# Device prompts.
class Prompt(BaseEnum):
"""
sbe26plus io prompts.
"""
COMMAND = 'S>'
BAD_COMMAND = '? cmd S>'
AUTOSAMPLE = 'S>'
CONFIRMATION_PROMPT = 'proceed Y/N ?'
###############################################################################
# Data Particles
################################################################################
# presf_tide_measurement
class SBE26plusTideSampleDataParticleKey(BaseEnum):
TIMESTAMP = "date_time_string"
PRESSURE = "absolute_pressure" # p = calculated and stored pressure (psia).
PRESSURE_TEMP = "pressure_temp" # pt = calculated pressure temperature (not stored) (C).
TEMPERATURE = "seawater_temperature" # t = calculated and stored temperature (C).
class SBE26plusTideSampleDataParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.TIDE_PARSED
def _build_parsed_values(self):
"""
Take something in the autosample format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("in SBE26plusTideSampleDataParticle._build_parsed_values")
match1 = TIDE_REGEX_MATCHER.match(self.raw_data) ## Tide sample from streaming
match2 = TS_REGEX_MATCHER.match(self.raw_data) ## Tide sample from TS command
if not (match1 or match2):
raise SampleException("No regex match of parsed sample data: [%s]" % self.raw_data)
if(match1):
match = match1
else:
match = match2
# initialize
timestamp = None
pressure = None
pressure_temp = None
temperature = None
try:
# Only streaming outputs a timestamp
text_timestamp = None
if(match1):
text_timestamp = match.group(1)
py_timestamp = time.strptime(text_timestamp, "%d %b %Y %H:%M:%S")
self.set_internal_timestamp(unix_time=time.mktime(py_timestamp))
pressure = float(match.group(2))
pressure_temp = float(match.group(3))
temperature = float(match.group(4))
except ValueError:
raise SampleException("ValueError while decoding floats in data: [%s]" %
self.raw_data)
result = [{DataParticleKey.VALUE_ID: SBE26plusTideSampleDataParticleKey.TIMESTAMP,
DataParticleKey.VALUE: text_timestamp},
{DataParticleKey.VALUE_ID: SBE26plusTideSampleDataParticleKey.PRESSURE,
DataParticleKey.VALUE: pressure},
{DataParticleKey.VALUE_ID: SBE26plusTideSampleDataParticleKey.PRESSURE_TEMP,
DataParticleKey.VALUE: pressure_temp},
{DataParticleKey.VALUE_ID: SBE26plusTideSampleDataParticleKey.TEMPERATURE,
DataParticleKey.VALUE: temperature}]
return result
# presf_wave_burst
class SBE26plusWaveBurstDataParticleKey(BaseEnum):
TIMESTAMP = "date_time_string" # start time of wave measurement.
PTFREQ = "ptemp_frequency" # ptfreq = pressure temperature frequency (Hz);
PTRAW = "absolute_pressure_burst" # calculated pressure temperature number
class SBE26plusWaveBurstDataParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.WAVE_BURST
def _build_parsed_values(self):
"""
Take something in the autosample format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
start_time_pat = r'wave: start time = +(\d+ [A-Za-z]{3} \d{4} \d+:\d+:\d+)'
start_time_matcher = re.compile(start_time_pat)
ptfreq_pat = r'wave: ptfreq = ([\d\.]+)'
ptfreq_matcher = re.compile(ptfreq_pat)
ptraw_pat = r' *(-?\d+\.\d+)'
ptraw_matcher = re.compile(ptraw_pat)
# initialize
timestamp = None
ptfreq = None
ptraw = []
for line in self.raw_data.split(NEWLINE):
log.debug("SBE26plusWaveBurstDataParticle._build_parsed_values LINE = " + repr(line))
matched = False
# skip blank lines
if len(line) == 0:
matched = True
match = start_time_matcher.match(line)
if match:
matched = True
try:
text_timestamp = match.group(1)
py_timestamp = time.strptime(text_timestamp, "%d %b %Y %H:%M:%S")
self.set_internal_timestamp(unix_time=time.mktime(py_timestamp))
except ValueError:
raise SampleException("ValueError while decoding floats in data: [%s]" %
self.raw_data)
match = ptfreq_matcher.match(line)
if match:
matched = True
try:
ptfreq = float(match.group(1))
except ValueError:
raise SampleException("ValueError while decoding floats in data: [%s]" %
self.raw_data)
match = ptraw_matcher.match(line)
if match:
matched = True
try:
ptraw.append(float(match.group(1)))
except ValueError:
raise SampleException("ValueError while decoding floats in data: [%s]" %
self.raw_data)
if 'wave: end burst' in line:
matched = True
log.debug("End of record detected")
if not matched:
raise SampleException("No regex match of parsed sample data: ROW: [%s]" % line)
result = [{DataParticleKey.VALUE_ID: SBE26plusWaveBurstDataParticleKey.TIMESTAMP,
DataParticleKey.VALUE: text_timestamp},
{DataParticleKey.VALUE_ID: SBE26plusWaveBurstDataParticleKey.PTFREQ,
DataParticleKey.VALUE: ptfreq},
{DataParticleKey.VALUE_ID: SBE26plusWaveBurstDataParticleKey.PTRAW,
DataParticleKey.VALUE: ptraw}]
return result
# presf_wave_statistics
class SBE26plusStatisticsDataParticleKey(BaseEnum):
# deMeanTrend
DEPTH = "depth"
TEMPERATURE = "temperature"
SALINITY = "salinity"
DENSITY = "density"
# Auto-Spectrum Statistics:
N_AGV_BAND = "n_avg_band"
TOTAL_VARIANCE = "ass_total_variance"
TOTAL_ENERGY = "ass_total_energy"
SIGNIFICANT_PERIOD = "ass_sig_wave_period"
SIGNIFICANT_WAVE_HEIGHT = "ass_sig_wave_height"
# Time Series Statistics:
TSS_WAVE_INTEGRATION_TIME = "tss_wave_integration_time"
TSS_NUMBER_OF_WAVES = "tss_number_of_waves"
TSS_TOTAL_VARIANCE = "tss_total_variance"
TSS_TOTAL_ENERGY = "tss_total_energy"
TSS_AVERAGE_WAVE_HEIGHT = "tss_avg_wave_height"
TSS_AVERAGE_WAVE_PERIOD = "tss_avg_wave_period"
TSS_MAXIMUM_WAVE_HEIGHT = "tss_max_wave_height"
TSS_SIGNIFICANT_WAVE_HEIGHT = "tss_sig_wave_height"
TSS_SIGNIFICANT_WAVE_PERIOD = "tss_sig_wave_period"
TSS_H1_10 = "tss_10_wave_height"
TSS_H1_100 = "tss_1_wave_height"
class SBE26plusStatisticsDataParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.STATISTICS
class StatisticType(BaseEnum):
NONE = 0
AUTO = 1
TSS = 2
def _build_parsed_values(self):
"""
Take something in the autosample format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
dtsd_matcher = re.compile(r'depth = +([\d\.e+-]+), temperature = +([\d\.e+-]+), salinity = +([\d\.e+-]+), density = +([\d\.e+-]+)')
#going to err on the side of VERBOSE methinks...
single_var_matchers = {
"nAvgBand": re.compile(r' nAvgBand = (\d+)'),
"total variance": re.compile(r' total variance = ([\d\.e+-]+)'),
"total energy": re.compile(r' total energy = ([\d\.e+-]+)'),
"significant period": re.compile(r' significant period = ([\d\.e+-]+)'),
"a significant wave height":re.compile(r' significant wave height = ([\d\.e+-]+)'),
"wave integration time": re.compile(r' wave integration time = (\d+)'),
"number of waves": re.compile(r' number of waves = (\d+)'),
"total variance": re.compile(r' total variance = ([\d\.e+-]+)'),
"total energy": re.compile(r' total energy = ([\d\.e+-]+)'),
"average wave height": re.compile(r' average wave height = ([\d\.e+-]+)'),
"average wave period": re.compile(r' average wave period = ([\d\.e+-]+)'),
"maximum wave height": re.compile(r' maximum wave height = ([\d\.e+-]+)'),
"significant wave height": re.compile(r' significant wave height = ([\d\.e+-]+)'),
"t significant wave period":re.compile(r' significant wave period = ([\d\.e+-]+)'),
"H1/10": re.compile(r' H1/10 = ([\d\.e+-]+)'),
"H1/100": re.compile(r' H1/100 = ([\d\.e+-]+)')
}
# Initialize
depth = None
temperature = None
salinity = None
density = None
single_var_matches = {
"nAvgBand": None,
"total variance": None,
"total energy": None,
"significant period": None,
"significant wave height": None,
"wave integration time": None,
"number of waves": None,
"total variance": None,
"total energy": None,
"average wave height": None,
"average wave period": None,
"maximum wave height": None,
"t significant wave height":None,
"t significant wave period":None,
"t total variance": None,
"t total energy": None,
"H1/10": None,
"H1/100": None
}
stat_type = self.StatisticType.NONE
for line in self.raw_data.split(NEWLINE):
if 'Auto-Spectrum Statistics:' in line:
stat_type = self.StatisticType.AUTO
elif 'Time Series Statistics:' in line:
stat_type = self.StatisticType.TSS
match = dtsd_matcher.match(line)
if match:
depth = float(match.group(1))
temperature = float(match.group(2))
salinity = float(match.group(3))
density = float(match.group(4))
for (key, matcher) in single_var_matchers.items():
match = single_var_matchers[key].match(line)
if match:
if key in ["nAvgBand", "wave integration time", "number of waves"]:
single_var_matches[key] = int(match.group(1))
elif key in ["significant wave height", "significant wave period", "total variance", "total energy"] and stat_type == self.StatisticType.TSS:
single_var_matches["t " + key] = float(match.group(1))
else:
single_var_matches[key] = float(match.group(1))
result = [{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.DEPTH,
DataParticleKey.VALUE: depth},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TEMPERATURE,
DataParticleKey.VALUE: temperature},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.SALINITY,
DataParticleKey.VALUE: salinity},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.DENSITY,
DataParticleKey.VALUE: density},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.N_AGV_BAND,
DataParticleKey.VALUE: single_var_matches["nAvgBand"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TOTAL_VARIANCE,
DataParticleKey.VALUE: single_var_matches["total variance"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TOTAL_ENERGY,
DataParticleKey.VALUE: single_var_matches["total energy"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.SIGNIFICANT_PERIOD,
DataParticleKey.VALUE: single_var_matches["significant period"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.SIGNIFICANT_WAVE_HEIGHT,
DataParticleKey.VALUE: single_var_matches["significant wave height"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_WAVE_INTEGRATION_TIME,
DataParticleKey.VALUE: single_var_matches["wave integration time"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_NUMBER_OF_WAVES,
DataParticleKey.VALUE: single_var_matches["number of waves"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_TOTAL_VARIANCE,
DataParticleKey.VALUE: single_var_matches["t total variance"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_TOTAL_ENERGY,
DataParticleKey.VALUE: single_var_matches["t total energy"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_AVERAGE_WAVE_HEIGHT,
DataParticleKey.VALUE: single_var_matches["average wave height"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_AVERAGE_WAVE_PERIOD,
DataParticleKey.VALUE: single_var_matches["average wave period"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_MAXIMUM_WAVE_HEIGHT,
DataParticleKey.VALUE: single_var_matches["maximum wave height"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_SIGNIFICANT_WAVE_HEIGHT,
DataParticleKey.VALUE: single_var_matches["t significant wave height"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_SIGNIFICANT_WAVE_PERIOD,
DataParticleKey.VALUE: single_var_matches["t significant wave period"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_H1_10,
DataParticleKey.VALUE: single_var_matches["H1/10"]},
{DataParticleKey.VALUE_ID: SBE26plusStatisticsDataParticleKey.TSS_H1_100,
DataParticleKey.VALUE: single_var_matches["H1/100"]}]
return result
# presf_calibration_coefficients
class SBE26plusDeviceCalibrationDataParticleKey(BaseEnum):
PCALDATE = 'calibration_date_pressure' # tuple,
PU0 = 'press_coeff_pu0' # float,
PY1 = 'press_coeff_py1' # float,
PY2 = 'press_coeff_py2' # float,
PY3 = 'press_coeff_py3' # float,
PC1 = 'press_coeff_pc1' # float,
PC2 = 'press_coeff_pc2' # float,
PC3 = 'press_coeff_pc3' # float,
PD1 = 'press_coeff_pd1' # float,
PD2 = 'press_coeff_pd2' # float,
PT1 = 'press_coeff_pt1' # float,
PT2 = 'press_coeff_pt2' # float,
PT3 = 'press_coeff_pt3' # float,
PT4 = 'press_coeff_pt4' # float,
FACTORY_M = 'press_coeff_m' # float,
FACTORY_B = 'press_coeff_b' # float,
POFFSET = 'press_coeff_poffset' # float,
TCALDATE = 'calibration_date_temperature' # string,
TA0 = 'temp_coeff_ta0' # float,
TA1 = 'temp_coeff_ta1' # float,
TA2 = 'temp_coeff_ta2' # float,
TA3 = 'temp_coeff_ta3' # float,
CCALDATE = 'calibration_date_cond' # tuple,
CG = 'cond_coeff_cg' # float,
CH = 'cond_coeff_ch' # float,
CI = 'cond_coeff_ci' # float,
CJ = 'cond_coeff_cj' # float,
CTCOR = 'cond_coeff_ctcor' # float,
CPCOR = 'cond_coeff_cpcor' # float,
CSLOPE = 'cond_coeff_cslope' # float,
class SBE26plusDeviceCalibrationDataParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.DEVICE_CALIBRATION
def _build_parsed_values(self):
"""
Take something in the autosample format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("in SBE26plusDeviceCalibrationDataParticle._build_parsed_values")
single_var_matchers = {
SBE26plusDeviceCalibrationDataParticleKey.PCALDATE: (
re.compile(r'Pressure coefficients: +(\d+-[a-zA-Z]+-\d+)'),
lambda match : match.group(1)
),
SBE26plusDeviceCalibrationDataParticleKey.PU0: (
re.compile(r' +U0 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PY1: (
re.compile(r' +Y1 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PY2: (
re.compile(r' +Y2 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PY3: (
re.compile(r' +Y3 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PC1: (
re.compile(r' +C1 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PC2: (
re.compile(r' +C2 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PC3: (
re.compile(r' +C3 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PD1: (
re.compile(r' +D1 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PD2: (
re.compile(r' +D2 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PT1: (
re.compile(r' +T1 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PT2: (
re.compile(r' +T2 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PT3: (
re.compile(r' +T3 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.PT4: (
re.compile(r' +T4 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.FACTORY_M: (
re.compile(r' +M = ([\d\.]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.FACTORY_B: (
re.compile(r' +B = ([\d\.]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.POFFSET: (
re.compile(r' +OFFSET = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.TCALDATE: (
re.compile(r'Temperature coefficients: +(\d+-[a-zA-Z]+-\d+)'),
lambda match : str(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.TA0: (
re.compile(r' +TA0 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.TA1: (
re.compile(r' +TA1 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.TA2: (
re.compile(r' +TA2 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.TA3: (
re.compile(r' +TA3 = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CCALDATE: (
re.compile(r'Conductivity coefficients: +(\d+-[a-zA-Z]+-\d+)'),
lambda match : str(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CG: (
re.compile(r' +CG = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CH: (
re.compile(r' +CH = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CI: (
re.compile(r' +CI = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CJ: (
re.compile(r' +CJ = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CTCOR: (
re.compile(r' +CTCOR = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CPCOR: (
re.compile(r' +CPCOR = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceCalibrationDataParticleKey.CSLOPE: (
re.compile(r' +CSLOPE = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
}
result = [] # Final storage for particle
vals = {} # intermediate storage for particle values so they can be set to null first.
for (key, (matcher, l_func)) in single_var_matchers.iteritems():
vals[key] = None
for line in self.raw_data.split(NEWLINE):
for (key, (matcher, l_func)) in single_var_matchers.iteritems():
match = matcher.match(line)
if match:
vals[key] = l_func(match)
for (key, val) in vals.iteritems():
result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: val})
return result
# presf_operating_status
class SBE26plusDeviceStatusDataParticleKey(BaseEnum):
# DS
DEVICE_VERSION = 'firmware_version' # str,
SERIAL_NUMBER = 'serial_number' # str,
DS_DEVICE_DATE_TIME = 'date_time_string' # str for now, later ***
USER_INFO = 'user_info' # str,
QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER = 'quartz_pressure_sensor_serial_number' # float,
QUARTZ_PRESSURE_SENSOR_RANGE = 'pressure_sensor_range' # float,
EXTERNAL_TEMPERATURE_SENSOR = 'external_temperature_sensor' # bool,
CONDUCTIVITY = 'external_conductivity_sensor' # bool,
IOP_MA = 'operational_current' # float,
VMAIN_V = 'battery_voltage_main' # float,
VLITH_V = 'battery_voltage_lithium' # float,
LAST_SAMPLE_P = 'last_sample_absolute_press' # float,
LAST_SAMPLE_T = 'last_sample_temp' # float,
LAST_SAMPLE_S = 'last_sample_saln' # float,
# DS/SETSAMPLING
TIDE_INTERVAL = 'tide_measurement_interval' # int,
TIDE_MEASUREMENT_DURATION = 'tide_measurement_duration' # int,
TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS = 'wave_samples_between_tide_measurement' # int,
WAVE_SAMPLES_PER_BURST = 'wave_samples_per_burst' # float,
WAVE_SAMPLES_SCANS_PER_SECOND = 'wave_samples_scans_per_second' # float; e.g. 4.0 scans/sec corresponds to a 0.25 s wave sample duration
USE_START_TIME = 'use_start_time' # bool,
#START_TIME = 'logging_start_time' # ***
USE_STOP_TIME = 'use_stop_time' # bool,
#STOP_TIME = 'logging_stop_time' # ***
TXWAVESTATS = 'tx_wave_stats' # bool,
TIDE_SAMPLES_PER_DAY = 'tide_samples_per_day' # float,
WAVE_BURSTS_PER_DAY = 'wave_bursts_per_day' # float,
MEMORY_ENDURANCE = 'memory_endurance' # float,
NOMINAL_ALKALINE_BATTERY_ENDURANCE = 'nominal_alkaline_battery_endurance' # float,
TOTAL_RECORDED_TIDE_MEASUREMENTS = 'total_recorded_tide_measurements' # float,
TOTAL_RECORDED_WAVE_BURSTS = 'total_recorded_wave_bursts' # float,
TIDE_MEASUREMENTS_SINCE_LAST_START = 'tide_measurements_since_last_start' # float,
WAVE_BURSTS_SINCE_LAST_START = 'wave_bursts_since_last_start' # float,
WAVE_SAMPLES_DURATION = 'wave_samples_duration'
TXREALTIME = 'tx_tide_samples' # bool,
TXWAVEBURST = 'tx_wave_bursts' # bool,
NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS = 'num_wave_samples_per_burst_for_wave_statistics' # int,
USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC = 'use_measured_temp_and_cond_for_density_calc' # bool,
PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM = 'pressure_sensor_height_from_bottom' # float,
SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND = 'num_spectral_estimates_for_each_frequency_band' # int,
MIN_ALLOWABLE_ATTENUATION = 'min_allowable_attenuation' # float,
MIN_PERIOD_IN_AUTO_SPECTRUM = 'min_period_in_auto_spectrum' # float,
MAX_PERIOD_IN_AUTO_SPECTRUM = 'max_period_in_auto_spectrum' # float,
HANNING_WINDOW_CUTOFF = 'hanning_window_cutoff' # float,
SHOW_PROGRESS_MESSAGES = 'show_progress_messages' # bool,
STATUS = 'device_status' # str,
LOGGING = 'logging_status' # bool,
class SBE26plusDeviceStatusDataParticle(DataParticle):
"""
Routines for parsing raw data into a data particle structure. Override
the building of values, and the rest should come along for free.
"""
_data_particle_type = DataParticleType.DEVICE_STATUS
def _build_parsed_values(self):
"""
Take something in the autosample format and split it into
values with appropriate tags
@throws SampleException If there is a problem with sample creation
"""
log.debug("in SBE26plusDeviceStatusDataParticle._build_parsed_values")
# VAR_LABEL: (regex, lambda)
single_var_matchers = {
SBE26plusDeviceStatusDataParticleKey.DEVICE_VERSION: (
re.compile(r'SBE 26plus V ([\w.]+) +SN (\d+) +(\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)'),
lambda match : match.group(1)
),
SBE26plusDeviceStatusDataParticleKey.SERIAL_NUMBER: (
re.compile(r'SBE 26plus V ([\w.]+) +SN (\d+) +(\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)'),
lambda match : match.group(2)
),
SBE26plusDeviceStatusDataParticleKey.DS_DEVICE_DATE_TIME: (
re.compile(r'SBE 26plus V ([\w.]+) +SN (\d+) +(\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)'),
lambda match : match.group(3)
),
SBE26plusDeviceStatusDataParticleKey.USER_INFO: (
re.compile(r'user info=(.*)$'),
lambda match : match.group(1)
),
SBE26plusDeviceStatusDataParticleKey.QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER: (
re.compile(r'quartz pressure sensor: serial number = ([\d\.\-]+), range = ([\d\.\-]+) psia'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.QUARTZ_PRESSURE_SENSOR_RANGE: (
re.compile(r'quartz pressure sensor: serial number = ([\d\.\-]+), range = ([\d\.\-]+) psia'),
lambda match : float(match.group(2))
),
SBE26plusDeviceStatusDataParticleKey.EXTERNAL_TEMPERATURE_SENSOR: (
re.compile(r'(external|internal) temperature sensor'),
lambda match : False if (match.group(1)=='internal') else True
),
SBE26plusDeviceStatusDataParticleKey.CONDUCTIVITY: (
re.compile(r'conductivity = (YES|NO)'),
lambda match : False if (match.group(1)=='NO') else True
),
SBE26plusDeviceStatusDataParticleKey.IOP_MA: (
re.compile(r'iop = +([\d\.\-]+) ma vmain = +([\d\.\-]+) V vlith = +([\d\.\-]+) V'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.VMAIN_V: (
re.compile(r'iop = +([\d\.\-]+) ma vmain = +([\d\.\-]+) V vlith = +([\d\.\-]+) V'),
lambda match : float(match.group(2))
),
SBE26plusDeviceStatusDataParticleKey.VLITH_V: (
re.compile(r'iop = +([\d\.\-]+) ma vmain = +([\d\.\-]+) V vlith = +([\d\.\-]+) V'),
lambda match : float(match.group(3))
),
SBE26plusDeviceStatusDataParticleKey.LAST_SAMPLE_P: (
re.compile(r'last sample: p = +([\d\.\-]+), t = +([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.LAST_SAMPLE_T: (
re.compile(r'last sample: p = +([\d\.\-]+), t = +([\d\.\-]+)'),
lambda match : float(match.group(2))
),
SBE26plusDeviceStatusDataParticleKey.LAST_SAMPLE_S: (
re.compile(r'last sample: .*?, s = +([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TIDE_INTERVAL: (
re.compile(r'tide measurement: interval = (\d+).000 minutes, duration = ([\d\.\-]+) seconds'),
lambda match : int(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TIDE_MEASUREMENT_DURATION: (
re.compile(r'tide measurement: interval = (\d+).000 minutes, duration = ([\d\.\-]+) seconds'),
lambda match : int(match.group(2))
),
SBE26plusDeviceStatusDataParticleKey.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS: (
re.compile(r'measure waves every ([\d]+) tide samples'),
lambda match : int(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.WAVE_SAMPLES_PER_BURST: (
re.compile(r'([\d\.\-]+) wave samples/burst at ([\d\.\-]+) scans/sec, duration = ([\d\.\-]+) seconds'),
lambda match : int(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.WAVE_SAMPLES_SCANS_PER_SECOND: (
re.compile(r'([\d\.\-]+) wave samples/burst at ([\d\.\-]+) scans/sec, duration = ([\d\.\-]+) seconds'),
lambda match : float(match.group(2))
),
SBE26plusDeviceStatusDataParticleKey.WAVE_SAMPLES_DURATION: (
re.compile(r'([\d\.\-]+) wave samples/burst at ([\d\.\-]+) scans/sec, duration = ([\d\.\-]+) seconds'),
lambda match : int(match.group(3))
),
SBE26plusDeviceStatusDataParticleKey.USE_START_TIME: (
re.compile(r'logging start time = (do not) use start time'),
lambda match : False if (match.group(1)=='do not') else True
),
SBE26plusDeviceStatusDataParticleKey.USE_STOP_TIME: (
re.compile(r'logging stop time = (do not) use stop time'),
lambda match : False if (match.group(1)=='do not') else True
),
SBE26plusDeviceStatusDataParticleKey.TIDE_SAMPLES_PER_DAY: (
re.compile(r'tide samples/day = (\d+\.\d+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.WAVE_BURSTS_PER_DAY: (
re.compile(r'wave bursts/day = (\d+\.\d+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.MEMORY_ENDURANCE: (
re.compile(r'memory endurance = (\d+\.\d+) days'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.NOMINAL_ALKALINE_BATTERY_ENDURANCE: (
re.compile(r'nominal alkaline battery endurance = (\d+\.\d+) days'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TOTAL_RECORDED_TIDE_MEASUREMENTS: (
re.compile(r'total recorded tide measurements = ([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TOTAL_RECORDED_WAVE_BURSTS: (
re.compile(r'total recorded wave bursts = ([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TIDE_MEASUREMENTS_SINCE_LAST_START: (
re.compile(r'tide measurements since last start = ([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.WAVE_BURSTS_SINCE_LAST_START: (
re.compile(r'wave bursts since last start = ([\d\.\-]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.TXREALTIME: (
re.compile(r'transmit real-time tide data = (YES|NO)'),
lambda match : False if (match.group(1)=='NO') else True
),
SBE26plusDeviceStatusDataParticleKey.TXWAVEBURST: (
re.compile(r'transmit real-time wave burst data = (YES|NO)'),
lambda match : False if (match.group(1)=='NO') else True
),
SBE26plusDeviceStatusDataParticleKey.TXWAVESTATS: (
re.compile(r'transmit real-time wave statistics = (YES|NO)'),
lambda match : False if (match.group(1)=='NO') else True
),
SBE26plusDeviceStatusDataParticleKey.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS: (
re.compile(r' +number of wave samples per burst to use for wave statistics = (\d+)'),
lambda match : int(match.group(1))
),
# combined this into the regex below.
#SBE26plusDeviceStatusDataParticleKey.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC: (
# re.compile(r' +(do not|) use measured temperature and conductivity for density calculation'),
# lambda match : False if (match.group(1)=='do not') else True
#),
SBE26plusDeviceStatusDataParticleKey.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC: (
re.compile(r' +(do not|) use measured temperature (and conductivity |)for density calculation'),
lambda match : True if (match.group(1)=='do not') else False
),
#SBE26plusDeviceStatusDataParticleKey.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR: (
# re.compile(r' +average water temperature above the pressure sensor \(deg C\) = ([\-\d\.]+)'),
# lambda match : float(match.group(1))
#),
#SBE26plusDeviceStatusDataParticleKey.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR: (
# re.compile(r' +average salinity above the pressure sensor \(PSU\) = ([\-\d\.]+)'),
# lambda match : float(match.group(1))
#),
SBE26plusDeviceStatusDataParticleKey.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM: (
re.compile(r' +height of pressure sensor from bottom \(meters\) = ([\d\.]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND: (
re.compile(r' +number of spectral estimates for each frequency band = (\d+)'),
lambda match : int(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.MIN_ALLOWABLE_ATTENUATION: (
re.compile(r' +minimum allowable attenuation = ([\d\.]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.MIN_PERIOD_IN_AUTO_SPECTRUM: (
re.compile(r' +minimum period \(seconds\) to use in auto-spectrum = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.MAX_PERIOD_IN_AUTO_SPECTRUM: (
re.compile(r' +maximum period \(seconds\) to use in auto-spectrum = (-?[\d\.e\-\+]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.HANNING_WINDOW_CUTOFF: (
re.compile(r' +hanning window cutoff = ([\d\.]+)'),
lambda match : float(match.group(1))
),
SBE26plusDeviceStatusDataParticleKey.SHOW_PROGRESS_MESSAGES: (
re.compile(r' +(do not show|show) progress messages'),
lambda match : True if (match.group(1)=='show') else False
),
SBE26plusDeviceStatusDataParticleKey.STATUS: (
re.compile(r'status = ([\w ]+)'),
lambda match : match.group(1)
),
SBE26plusDeviceStatusDataParticleKey.LOGGING: (
re.compile(r'logging = (YES|NO)'),
lambda match : False if (match.group(1)=='NO') else True,
)
}
result = [] # Final storage for particle
vals = {} # intermediate storage for particle values so they can be set to null first.
for (key, (matcher, l_func)) in single_var_matchers.iteritems():
vals[key] = None
for line in self.raw_data.split(NEWLINE):
for (key, (matcher, l_func)) in single_var_matchers.iteritems():
match = matcher.match(line)
if match:
vals[key] = l_func(match)
for (key, val) in vals.iteritems():
result.append({DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: val})
return result
###############################################################################
# Driver
###############################################################################
class SBE26PlusInstrumentDriver(SeaBirdInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
def __init__(self, evt_callback):
"""
Driver constructor.
@param evt_callback Driver process event callback.
"""
#Construct superclass.
SeaBirdInstrumentDriver.__init__(self, evt_callback)
########################################################################
# Superclass overrides for resource query.
########################################################################
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###############################################################################
# Protocol
###############################################################################
class Protocol(SeaBirdProtocol):
"""
Instrument protocol class for sbe26plus driver.
Subclasses CommandResponseInstrumentProtocol
"""
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The sbe26plus newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
SeaBirdProtocol.__init__(self, prompts, newline, driver_event)
# Build sbe26plus protocol state machine.
self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SETSAMPLING, self._handler_command_setsampling)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC, self._handler_command_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_command_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_CONFIGURATION, self._handler_command_acquire_configuration)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.QUIT_SESSION, self._handler_command_quit_session)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_command_get)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_CONFIGURATION, self._handler_command_acquire_configuration)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SEND_LAST_SAMPLE, self._handler_command_send_last_sample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_autosample_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# Add build handlers for device commands.
self._add_build_handler(InstrumentCmds.SETSAMPLING, self._build_setsampling_command)
self._add_build_handler(InstrumentCmds.DISPLAY_STATUS, self._build_simple_command)
self._add_build_handler(InstrumentCmds.QUIT_SESSION, self._build_simple_command)
self._add_build_handler(InstrumentCmds.DISPLAY_CALIBRATION, self._build_simple_command)
self._add_build_handler(InstrumentCmds.SEND_LAST_SAMPLE, self._build_simple_command)
self._add_build_handler(InstrumentCmds.START_LOGGING, self._build_simple_command)
self._add_build_handler(InstrumentCmds.STOP_LOGGING, self._build_simple_command)
self._add_build_handler(InstrumentCmds.SET, self._build_set_command)
self._add_build_handler(InstrumentCmds.SET_TIME, self._build_set_command)
self._add_build_handler(InstrumentCmds.TAKE_SAMPLE, self._build_simple_command)
# Add response handlers for device commands.
self._add_response_handler(InstrumentCmds.SETSAMPLING, self._parse_setsampling_response)
self._add_response_handler(InstrumentCmds.DISPLAY_STATUS, self._parse_ds_response)
self._add_response_handler(InstrumentCmds.DISPLAY_CALIBRATION, self._parse_dc_response)
self._add_response_handler(InstrumentCmds.SEND_LAST_SAMPLE, self._parse_sl_response)
self._add_response_handler(InstrumentCmds.SET, self._parse_set_response)
self._add_response_handler(InstrumentCmds.SET_TIME, self._parse_set_response)
self._add_response_handler(InstrumentCmds.TAKE_SAMPLE, self._parse_ts_response)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
# commands sent to device to be filtered in responses for telnet DA
self._sent_cmds = []
self._chunker = StringChunker(Protocol.sieve_function)
self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
self._add_scheduler_event(ScheduledJob.CALIBRATION_COEFFICIENTS, ProtocolEvent.ACQUIRE_CONFIGURATION)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.SCHEDULED_CLOCK_SYNC)
@staticmethod
def sieve_function(raw_data):
"""
Chunker sieve method to help the chunker identify chunks.
@returns a list of chunks identified, if any. The chunks are all the same type.
"""
sieve_matchers = [TS_REGEX_MATCHER,
TIDE_REGEX_MATCHER,
WAVE_REGEX_MATCHER,
STATS_REGEX_MATCHER,
DS_REGEX_MATCHER,
DC_REGEX_MATCHER]
return_list = []
for matcher in sieve_matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
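# Illustrative only (the sample line is hypothetical): for a buffer holding a
# single complete tide line,
#   data = 'tide: start time = 05 Oct 2012 01:10:54, p = 14.5385, pt = 24.228, t = 23.8404\r\n'
#   Protocol.sieve_function(data)  ->  [(0, len(data))]
# i.e. one (start, end) index pair covering the matched chunk.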
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
events_out = [x for x in events if Capability.has(x)]
return events_out
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
Tell driver superclass to send a state change event.
Superclass will query the state.
"""
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can be COMMAND or AUTOSAMPLE.
@retval (next_state, result), (ProtocolState.COMMAND or
State.AUTOSAMPLE, None) if successful.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentStateException if the device response does not correspond to
an expected state.
"""
timeout = kwargs.get('timeout', TIMEOUT)
next_state = None
result = None
current_state = self._protocol_fsm.get_current_state()
logging = self._is_logging(timeout=timeout)
if logging == True:
next_state = ProtocolState.AUTOSAMPLE
result = ResourceAgentState.STREAMING
elif logging == False:
next_state = ProtocolState.COMMAND
result = ResourceAgentState.IDLE
else:
raise InstrumentStateException('Discover state failed.')
return (next_state, result)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
########################################################################
# Command handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands and not recognized.
"""
# Command device to update parameters and send a config change event.
log.debug("*** IN _handler_command_enter(), updating params")
self._update_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_acquire_sample(self, *args, **kwargs):
"""
Acquire sample from SBE26 Plus.
@retval (next_state, result) tuple, (None, sample dict).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
@throws SampleException if a sample could not be extracted from result.
"""
next_state = None
next_agent_state = None
result = None
kwargs['timeout'] = 45 # samples can take a long time
result = self._do_cmd_resp(InstrumentCmds.TAKE_SAMPLE, *args, **kwargs)
return (next_state, (next_agent_state, result))
def _handler_command_acquire_status(self, *args, **kwargs):
"""
Acquire device status by running the display status (ds) command.
@retval (next_state, (next_agent_state, result)) tuple.
"""
next_state = None
next_agent_state = None
kwargs['timeout'] = 30
result = self._do_cmd_resp(InstrumentCmds.DISPLAY_STATUS, *args, **kwargs)
return (next_state, (next_agent_state, result))
def _handler_command_acquire_configuration(self, *args, **kwargs):
"""
Acquire calibration coefficients by running the display calibration (dc) command.
@retval (next_state, (next_agent_state, result)) tuple.
"""
next_state = None
next_agent_state = None
kwargs['timeout'] = 30
result = self._do_cmd_resp(InstrumentCmds.DISPLAY_CALIBRATION, *args, **kwargs)
return (next_state, (next_agent_state, result))
def _handler_command_send_last_sample(self, *args, **kwargs):
"""
Request the last recorded sample by running the send last sample (sl) command.
@retval (next_state, (next_agent_state, result)) tuple.
"""
next_state = None
next_agent_state = None
kwargs['timeout'] = 30
result = self._do_cmd_resp(InstrumentCmds.SEND_LAST_SAMPLE, *args, **kwargs)
return (next_state, (next_agent_state, result))
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_autosample_clock_sync(self, *args, **kwargs):
"""
execute a clock sync on the leading edge of a second change from
autosample mode. For this command we have to move the instrument
into command mode, do the clock sync, then switch back. If an
exception is thrown we will try to get ourselves back into
streaming and then raise that exception.
@retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
error = None
try:
# Switch to command mode,
self._stop_logging()
# Sync the clock
timeout = kwargs.get('timeout', TIMEOUT)
self._sync_clock(InstrumentCmds.SET_TIME, Parameter.DS_DEVICE_DATE_TIME, TIMEOUT)
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
self._start_logging()
if(error):
raise error
return (next_state, (next_agent_state, result))
def _handler_command_clock_sync(self, *args, **kwargs):
"""
execute a clock sync on the leading edge of a second change
@retval (next_state, result) tuple, (None, (None, )) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
timeout = kwargs.get('timeout', TIMEOUT)
self._sync_clock(InstrumentCmds.SET_TIME, Parameter.DS_DEVICE_DATE_TIME, TIMEOUT)
return (next_state, (next_agent_state, result))
################################
# SET / SETSAMPLING
################################
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
"""
self._verify_not_readonly(*args, **kwargs)
# Retrieve required parameter.
# Raise if no parameter provided, or not a dict.
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
(set_params, ss_params) = self._split_params(**params)
log.debug("SetSampling Params: %s" % ss_params)
log.debug("General Set Params: %s" % set_params)
if set_params != {}:
for (key, val) in set_params.iteritems():
log.debug("KEY = " + str(key) + " VALUE = " + str(val))
result = self._do_cmd_resp(InstrumentCmds.SET, key, val, **kwargs)
if ss_params != {}:
# ONLY do next if a param for it is present
kwargs['expected_prompt'] = ", new value = "
self._do_cmd_resp(InstrumentCmds.SETSAMPLING, ss_params, **kwargs)
self._update_params()
def _build_set_command(self, cmd, param, val):
"""
Build handler for set commands. param=val followed by newline.
String val constructed by param dict formatting function.
@param param the parameter key to set.
@param val the parameter value to set.
@retval The set command string to be sent to the device.
@throws InstrumentProtocolException if the parameter is not valid or
if the formatting function could not accept the value passed.
"""
try:
str_val = self._param_dict.format(param, val)
set_cmd = '%s=%s' % (param, str_val)
set_cmd = set_cmd + NEWLINE
except KeyError:
raise InstrumentParameterException('Unknown driver parameter %s' % param)
return set_cmd
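# Sketch of the string this builder produces (the value text is hedged: exact
# formatting depends on the param dict's formatting function, and the example
# value is hypothetical): for param='TxTide' the result looks like
#   'TxTide=<formatted value>' + NEWLINE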
def _parse_set_response(self, response, prompt):
"""
Parse handler for set command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if set command misunderstood.
"""
if prompt == Prompt.CONFIRMATION_PROMPT:
self._connection.send("y")
time.sleep(0.5)
elif prompt != Prompt.COMMAND:
raise InstrumentProtocolException('Protocol._parse_set_response : Set command not recognized: %s' % response)
def _handler_command_setsampling(self, *args, **kwargs):
"""
Handle the SETSAMPLING event by running the interactive setsampling command-response exchange on the device.
@param args positional arguments to pass to the build handler.
@param timeout=timeout optional wakeup and command timeout.
@retval resp_result The (possibly parsed) response result.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
log.debug(" in _handler_command_setsampling")
next_state = None
kwargs['expected_prompt'] = ", new value = "
result = self._do_cmd_resp(InstrumentCmds.SETSAMPLING, *args, **kwargs)
return (next_state, result)
def _build_setsampling_command(self, foo, *args, **kwargs):
"""
Build handler for setsampling command.
@param args[0] is a dict of the values to change
@throws InstrumentParameterException if a passed parameter is outside of allowed ranges.
"""
log.debug("_build_setsampling_command setting _sampling_args")
self._sampling_args = args[0]
for (arg, val) in self._sampling_args.items():
# assert int
if arg in [Parameter.WAVE_SAMPLES_PER_BURST,
Parameter.TIDE_INTERVAL,
Parameter.TIDE_MEASUREMENT_DURATION,
Parameter.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS,
Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS,
Parameter.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND
]:
if type(val) != int:
raise InstrumentParameterException("incorrect type for " + str(arg))
# assert float
if arg in [Parameter.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR,
Parameter.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR,
Parameter.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM,
Parameter.WAVE_SAMPLES_SCANS_PER_SECOND,
Parameter.MIN_ALLOWABLE_ATTENUATION,
Parameter.MIN_PERIOD_IN_AUTO_SPECTRUM,
Parameter.MAX_PERIOD_IN_AUTO_SPECTRUM,
Parameter.HANNING_WINDOW_CUTOFF
]:
if type(val) != float:
raise InstrumentParameterException("incorrect type for " + str(arg))
# assert bool
if arg in [Parameter.USE_START_TIME,
Parameter.USE_STOP_TIME,
Parameter.TXWAVESTATS,
Parameter.SHOW_PROGRESS_MESSAGES,
Parameter.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC
]:
if type(val) != bool:
raise InstrumentParameterException("incorrect type for " + str(arg))
return InstrumentCmds.SETSAMPLING + NEWLINE
def _parse_setsampling_response(self, response, prompt): #(self, cmd, *args, **kwargs):
"""
Parse handler for the setsampling command. Times out rather than looping
forever if the prompts cannot be parsed in a timely manner.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if set command misunderstood.
@throws InstrumentTimeoutException if we don't parse the setsampling prompts in a timely manner
"""
desired_prompt = ", new value = "
done = False
starttime = time.time()
while not done:
if (starttime + TIMEOUT < time.time()):
raise InstrumentTimeoutException("failed to parse set sample string in a timely(%ds) manor" % TIMEOUT)
(prompt, response) = self._get_response(expected_prompt=desired_prompt)
self._promptbuf = ''
self._linebuf = ''
time.sleep(0.1)
log.debug("mmprompt = " + str(prompt))
log.debug("mmresponse = " + str(response))
if "tide interval (integer minutes) " in response:
if Parameter.TIDE_INTERVAL in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.TIDE_INTERVAL]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "tide measurement duration (seconds)" in response:
if Parameter.TIDE_MEASUREMENT_DURATION in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.TIDE_MEASUREMENT_DURATION]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "measure wave burst after every N tide samples" in response:
if Parameter.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "number of wave samples per burst (multiple of 4)" in response:
if Parameter.WAVE_SAMPLES_PER_BURST in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.WAVE_SAMPLES_PER_BURST]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "wave Sample duration (0.25, 0.50, 0.75, 1.0) seconds" in response:
# WAVE_SAMPLES_SCANS_PER_SECOND = 4, wave Sample duration = 1/4...
if Parameter.WAVE_SAMPLES_SCANS_PER_SECOND in self._sampling_args:
val = float(1.0 / float(self._sampling_args[Parameter.WAVE_SAMPLES_SCANS_PER_SECOND]))
self._connection.send(self._float_to_string(val) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "use start time (y/n)" in response:
if Parameter.USE_START_TIME in self._sampling_args:
self._connection.send(self._true_false_to_string(self._sampling_args[Parameter.USE_START_TIME]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "use stop time (y/n)" in response:
if Parameter.USE_STOP_TIME in self._sampling_args:
self._connection.send(self._true_false_to_string(self._sampling_args[Parameter.USE_STOP_TIME]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "TXWAVESTATS (real-time wave statistics) (y/n)" in response:
if Parameter.TXWAVESTATS in self._sampling_args:
if self._sampling_args[Parameter.TXWAVESTATS] == False:
done = True
self._connection.send(self._true_false_to_string(self._sampling_args[Parameter.TXWAVESTATS]) + NEWLINE)
else:
# We default to 'no' for consistency's sake. We might want to change this behavior because the
# parameter affects the ability to set other parameters. Options: default to 'no' when not
# explicitly provided (what we do now), decide yes/no based on the parameters to be set, or
# raise an exception if the value is incorrect for the parameter set.
self._connection.send(self._true_false_to_string(False) + NEWLINE)
done = True
elif "show progress messages (y/n) = " in response:
if Parameter.SHOW_PROGRESS_MESSAGES in self._sampling_args:
self._connection.send(self._true_false_to_string(self._sampling_args[Parameter.SHOW_PROGRESS_MESSAGES]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "number of wave samples per burst to use for wave statistics = " in response:
if Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "use measured temperature and conductivity for density calculation (y/n) = " in response or \
"use measured temperature for density calculation " in response:
if Parameter.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC in self._sampling_args:
self._connection.send(self._true_false_to_string(self._sampling_args[Parameter.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "average water temperature above the pressure sensor (deg C) = " in response:
if Parameter.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "average salinity above the pressure sensor (PSU) = " in response:
if Parameter.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "height of pressure sensor from bottom (meters) = " in response:
if Parameter.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "number of spectral estimates for each frequency band = " in response:
if Parameter.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND in self._sampling_args:
self._connection.send(self._int_to_string(self._sampling_args[Parameter.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "minimum allowable attenuation = " in response:
if Parameter.MIN_ALLOWABLE_ATTENUATION in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.MIN_ALLOWABLE_ATTENUATION]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "minimum period (seconds) to use in auto-spectrum = " in response:
if Parameter.MIN_PERIOD_IN_AUTO_SPECTRUM in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.MIN_PERIOD_IN_AUTO_SPECTRUM]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "maximum period (seconds) to use in auto-spectrum = " in response:
if Parameter.MAX_PERIOD_IN_AUTO_SPECTRUM in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.MAX_PERIOD_IN_AUTO_SPECTRUM]) + NEWLINE)
else:
self._connection.send(NEWLINE)
elif "hanning window cutoff = " in response:
done = True
if Parameter.HANNING_WINDOW_CUTOFF in self._sampling_args:
self._connection.send(self._float_to_string(self._sampling_args[Parameter.HANNING_WINDOW_CUTOFF]) + NEWLINE)
else:
self._connection.send(NEWLINE)
# the remaining prompts apply to real-time wave statistics
# show progress messages (y/n) = n, new value = y
# number of wave samples per burst to use for wave statistics = 512, new value = 555
# use measured temperature and conductivity for density calculation (y/n) = y, new value =
# height of pressure sensor from bottom (meters) = 600.0, new value = 55
# number of spectral estimates for each frequency band = 5, new value =
# minimum allowable attenuation = 0.0025, new value =
# minimum period (seconds) to use in auto-spectrum = 0.0e+00, new value =
# maximum period (seconds) to use in auto-spectrum = 1.0e+06, new value =
# hanning window cutoff = 0.10, new value =
# resetting number of wave samples per burst to 512
# resetting number of samples to use for wave statistics to 512
else:
raise InstrumentProtocolException('Unexpected setsampling prompt: %s%s' % (str(response), str(prompt)))
prompt = ""
while prompt != Prompt.COMMAND:
(prompt, response) = self._get_response(expected_prompt=Prompt.COMMAND)
log.debug("WARNING!!! UNEXPECTED RESPONSE " + repr(response))
# Update params after changing them.
self._update_params()
# Verify that parameters set via setsampling match the latest parameter scan.
device_parameters = self._param_dict.get_config()
for k in self._sampling_args.keys():
try:
log.debug("self._sampling_args " + k + " = " + str(self._sampling_args[k]))
except:
log.debug("self._sampling_args " + k + " = ERROR")
try:
log.debug("device_parameters " + k + " = " + str(device_parameters[k]))
except:
log.debug("device_parameters " + k + " = ERROR")
if self._sampling_args[k] != device_parameters[k]:
log.debug("FAILURE: " + str(k) + " was " + str(device_parameters[k]) + " and should have been " + str(self._sampling_args[k]))
raise InstrumentParameterException("FAILURE: " + str(k) + " was " + str(device_parameters[k]) + " and should have been " + str(self._sampling_args[k]))
def _split_params(self, **params):
log.debug("PARAMS = " + str(params))
ss_params = {}
set_params = {}
ss_keys = [Parameter.TIDE_INTERVAL,
Parameter.TIDE_MEASUREMENT_DURATION,
Parameter.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS,
Parameter.WAVE_SAMPLES_PER_BURST,
Parameter.WAVE_SAMPLES_SCANS_PER_SECOND,
Parameter.USE_START_TIME,
Parameter.USE_STOP_TIME,
Parameter.TXWAVESTATS,
Parameter.SHOW_PROGRESS_MESSAGES,
Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS,
Parameter.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC,
Parameter.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR,
Parameter.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR,
Parameter.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM,
Parameter.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND,
Parameter.MIN_ALLOWABLE_ATTENUATION,
Parameter.MIN_PERIOD_IN_AUTO_SPECTRUM,
Parameter.MAX_PERIOD_IN_AUTO_SPECTRUM,
Parameter.HANNING_WINDOW_CUTOFF]
for (key, value) in params.iteritems():
if key in ss_keys:
ss_params[key] = value
else:
set_params[key] = value
return(set_params, ss_params)
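# Added illustration (not part of the original driver): given a mixed dict such as
# {Parameter.TIDE_INTERVAL: 4, Parameter.TXREALTIME: True}, _split_params places
# TIDE_INTERVAL in ss_params (it appears in ss_keys above) and TXREALTIME in set_params,
# so the caller issues SET for TXREALTIME and SETSAMPLING for TIDE_INTERVAL.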
########################################################################
# Quit Session.
########################################################################
def _handler_command_quit_session(self, *args, **kwargs):
"""
Perform a command-response on the device.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
@param timeout=timeout optional wakeup and command timeout.
@retval resp_result The (possibly parsed) response result.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
next_state = None
next_agent_state = None
result = self._do_cmd_no_resp(InstrumentCmds.QUIT_SESSION, *args, **kwargs)
return (next_state, (next_agent_state, result))
########################################################################
# Autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode.
@retval (next_state, result) tuple, (ProtocolState.AUTOSAMPLE,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
kwargs['expected_prompt'] = Prompt.COMMAND
kwargs['timeout'] = 30
next_state = None
result = None
# Issue start command and switch to autosample if successful.
self._start_logging()
next_state = ProtocolState.AUTOSAMPLE
next_agent_state = ResourceAgentState.STREAMING
return (next_state, (next_agent_state, result))
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
@retval (next_state, result) tuple, (ProtocolState.COMMAND,
None) if successful.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command misunderstood or
incorrect prompt received.
"""
next_state = None
result = None
# Wake up the device, continuing until autosample prompt seen.
timeout = kwargs.get('timeout', TIMEOUT)
self._wakeup_until(timeout, Prompt.AUTOSAMPLE)
self._stop_logging(timeout)
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
return (next_state, (next_agent_state, result))
def _handler_autosample_exit(self, *args, **kwargs):
"""
Exit autosample state.
"""
pass
########################################################################
# Common handlers.
########################################################################
########################################################################
# Test handlers.
########################################################################
########################################################################
# Direct access handlers.
########################################################################
def _handler_command_start_direct(self, *args, **kwargs):
"""
"""
next_state = None
result = None
next_state = ProtocolState.DIRECT_ACCESS
next_agent_state = ResourceAgentState.DIRECT_ACCESS
return (next_state, (next_agent_state, result))
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
"""
"""
next_state = None
result = None
next_agent_state = None
self._do_cmd_direct(data)
# add sent command to list for 'echo' filtering in callback
self._sent_cmds.append(data)
return (next_state, (next_agent_state, result))
def _handler_direct_access_stop_direct(self):
"""
@throw InstrumentProtocolException on invalid command
"""
result = None
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
return (next_state, (next_agent_state, result))
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state.
"""
pass
########################################################################
# Startup parameter handlers
########################################################################
def apply_startup_params(self):
"""
Apply all startup parameters. First we check the instrument to see
if we need to set the parameters. If they are already set
correctly then we don't do anything.
If we need to set parameters then we might need to transition to
command first. Then we will transition back when complete.
@todo: This feels odd. It feels like some of this logic should
be handled by the state machine. It's a pattern that we
may want to review. I say this because this command
needs to be run from autosample or command mode.
@raise: InstrumentProtocolException if not in command or streaming
"""
# Let's give it a try in unknown state
log.debug("CURRENT STATE: %s" % self.get_current_state())
if (self.get_current_state() != ProtocolState.COMMAND and
self.get_current_state() != ProtocolState.AUTOSAMPLE):
raise InstrumentProtocolException("Not in command or autosample state. Unable to apply startup params")
logging = self._is_logging()
# If we are in streaming mode and our configuration on the
# instrument matches what we think it should be then we
# don't need to do anything.
if(not self._instrument_config_dirty()):
return True
error = None
try:
if(logging):
# Switch to command mode,
self._stop_logging()
self._apply_params()
# Catch all error so we can put ourself back into
# streaming. Then rethrow the error
except Exception as e:
error = e
finally:
# Switch back to streaming
if(logging):
self._start_logging()
if(error):
raise error
def _instrument_config_dirty(self):
"""
Read the startup config and compare that to what the instrument
is configured to. If they differ then return True
@return: True if the startup config doesn't match the instrument
@raise: InstrumentParameterException
"""
# Refresh the param dict cache
# Let's assume we have already run this command recently
#self._do_cmd_resp(InstrumentCmds.DISPLAY_STATUS)
self._do_cmd_resp(InstrumentCmds.DISPLAY_CALIBRATION)
startup_params = self._param_dict.get_startup_list()
log.debug("Startup Parameters: %s" % startup_params)
for param in startup_params:
if not Parameter.has(param):
raise InstrumentParameterException()
if (self._param_dict.get(param) != self._param_dict.get_config_value(param)):
log.debug("DIRTY: %s %s != %s" % (param, self._param_dict.get(param), self._param_dict.get_config_value(param)))
return True
log.debug("Clean instrument config")
return False
########################################################################
# Private helpers.
########################################################################
def _is_logging(self, *args, **kwargs):
"""
Wake up the instrument and inspect the prompt to determine if we
are in streaming
@param: timeout - Command timeout
@return: True - instrument logging, False - not logging,
None - unknown logging state
@raise: InstrumentProtocolException if we can't identify the prompt
"""
basetime = self._param_dict.get_current_timestamp()
prompt = self._wakeup(timeout=TIMEOUT, delay=0.3)
self._update_params()
pd = self._param_dict.get_all(basetime)
return pd.get(Parameter.LOGGING)
def _start_logging(self, timeout=TIMEOUT):
"""
Command the instrument to start logging
@param timeout: how long to wait for a prompt
@return: True if successful
@raise: InstrumentProtocolException if failed to start logging
"""
log.debug("Start Logging!")
if(self._is_logging()):
return True
self._do_cmd_no_resp(InstrumentCmds.START_LOGGING, timeout=timeout)
time.sleep(1)
# Prompt device until command prompt is seen.
self._wakeup_until(timeout, Prompt.COMMAND)
if not self._is_logging(timeout):
raise InstrumentProtocolException("failed to start logging")
return True
def _stop_logging(self, timeout=TIMEOUT):
"""
Command the instrument to stop logging
@param timeout: how long to wait for a prompt
@return: True if successful
@raise: InstrumentTimeoutException if prompt isn't seen
@raise: InstrumentProtocolException failed to stop logging
"""
log.debug("Stop Logging!")
# Issue the stop command.
self._do_cmd_resp(InstrumentCmds.STOP_LOGGING)
time.sleep(1)
# Prompt device until command prompt is seen.
self._wakeup_until(timeout, Prompt.COMMAND)
if self._is_logging(timeout):
raise InstrumentProtocolException("failed to stop logging")
return True
def _build_simple_command(self, cmd):
"""
Build handler for basic sbe26plus commands.
@param cmd the simple sbe37 command to format.
@retval The command to be sent to the device.
"""
return cmd + NEWLINE
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="acquire status")
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="sync clock")
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="start autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="stop autosample")
self._cmd_dict.add(Capability.ACQUIRE_CONFIGURATION, display_name="get configuration data")
self._cmd_dict.add(Capability.SEND_LAST_SAMPLE, display_name="get last sample")
self._cmd_dict.add(Capability.QUIT_SESSION, display_name="quit session")
def _build_param_dict(self):
"""
Populate the parameter dictionary with sbe26plus parameters.
For each parameter key, add match string, match lambda function,
and value formatting function for set commands.
"""
# Add parameter handlers to parameter dict.
# DS
ds_line_01 = r'SBE 26plus V ([\w.]+) +SN (\d+) +(\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)' # NOT DONE #
ds_line_02 = r'user info=(.*)$'
ds_line_03 = r'quartz pressure sensor: serial number = ([\d\.\-]+), range = ([\d\.\-]+) psia'
ds_line_04 = r'(external|internal) temperature sensor' # NOT DONE #
ds_line_05 = r'conductivity = (YES|NO)'
ds_line_06 = r'iop = +([\d\.\-]+) ma vmain = +([\d\.\-]+) V vlith = +([\d\.\-]+) V'
ds_line_07a = r'last sample: p = +([\d\.\-]+), t = +([\d\.\-]+), s = +([\d\.\-]+)'
ds_line_07b = r'last sample: p = +([\d\.\-]+), t = +([\d\.\-]+)'
ds_line_08 = r'tide measurement: interval = (\d+)\.000 minutes, duration = ([\d\.\-]+) seconds'
ds_line_09 = r'measure waves every ([\d\.\-]+) tide samples'
ds_line_10 = r'([\d\.]+) wave samples/burst at ([\d\.]+) scans/sec, duration = ([\d\.]+) seconds'
#ds_line_11 = r'logging start time = (\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)' # NOT DONE #
ds_line_11b = r'logging start time = (do not) use start time'
#ds_line_12 = r'logging stop time = (\d{2} [a-zA-Z]{3,4} \d{4} +[\d:]+)' # NOT DONE #
ds_line_12b = r'logging stop time = (do not) use stop time'
ds_line_13 = r'tide samples/day = (\d+.\d+)'
ds_line_14 = r'wave bursts/day = (\d+.\d+)'
ds_line_15 = r'memory endurance = (\d+.\d+) days'
ds_line_16 = r'nominal alkaline battery endurance = (\d+\.\d+) days'
#ds_line_16_b = r'deployments longer than 2 years are not recommended with alkaline batteries'
ds_line_17 = r'total recorded tide measurements = ([\d\.\-]+)'
ds_line_18 = r'total recorded wave bursts = ([\d\.\-]+)'
ds_line_19 = r'tide measurements since last start = ([\d\.\-]+)'
ds_line_20 = r'wave bursts since last start = ([\d\.\-]+)'
ds_line_21 = r'transmit real-time tide data = (YES|NO)'
ds_line_22 = r'transmit real-time wave burst data = (YES|NO)'
ds_line_23 = r'transmit real-time wave statistics = (YES|NO)'
# real-time wave statistics settings:
ds_line_24 = r' +number of wave samples per burst to use for wave statistics = (\d+)'
ds_line_25 = r' +(do not |)use measured temperature (and conductivity |)for density calculation'
# average water temperature above the pressure sensor (deg C) = -273.0
ds_line_26 = r' +average water temperature above the pressure sensor \(deg C\) = +([\-\d\.]+)' # float
ds_line_27 = r' +average salinity above the pressure sensor \(PSU\) = +([\-\d\.]+)' # float
ds_line_28 = r' +height of pressure sensor from bottom \(meters\) = ([\-\d\.]+)'
ds_line_29 = r' +number of spectral estimates for each frequency band = (\d+)'
ds_line_30 = r' +minimum allowable attenuation = ([\d\.]+)'
ds_line_31 = r' +minimum period \(seconds\) to use in auto-spectrum = (-?[\d\.e\-\+]+)'
ds_line_32 = r' +maximum period \(seconds\) to use in auto-spectrum = (-?[\d\.e\-\+]+)'
ds_line_33 = r' +hanning window cutoff = ([\d\.]+)'
ds_line_34 = r' +(do not show|show) progress messages' # NOT DONE #
ds_line_35 = r'status = (logging|waiting|stopped)' # status = stopped by user
ds_line_36 = r'logging = (YES|NO)' # logging = NO, send start command to begin logging
# Next 2 work together to pull 2 values out of a single line.
#
self._param_dict.add(Parameter.DEVICE_VERSION,
ds_line_01,
lambda match : string.upper(match.group(1)),
self._string_to_string,
type=ParameterDictType.STRING,
display_name="Firmware Version",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.SERIAL_NUMBER,
ds_line_01,
lambda match : string.upper(match.group(2)),
self._string_to_string,
type=ParameterDictType.STRING,
display_name="Serial Number",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.DS_DEVICE_DATE_TIME,
ds_line_01,
lambda match : string.upper(match.group(3)),
self._string_to_numeric_date_time_string,
type=ParameterDictType.STRING,
display_name="Instrument Time",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY) # will need to make this a date time once that is sorted out
self._param_dict.add(Parameter.USER_INFO,
ds_line_02,
lambda match : string.upper(match.group(1)),
self._string_to_string,
type=ParameterDictType.STRING,
display_name="User Info")
#
# Next 2 work together to pull 2 values out of a single line.
#
self._param_dict.add(Parameter.QUARTZ_PRESSURE_SENSOR_SERIAL_NUMBER,
ds_line_03,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Quartz Pressure Sensor Serial Number",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.QUARTZ_PRESSURE_SENSOR_RANGE,
ds_line_03,
lambda match : float(match.group(2)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Quartz Pressure Sensor Range",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.EXTERNAL_TEMPERATURE_SENSOR,
ds_line_04,
lambda match : False if (match.group(1)=='internal') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="External Temperature Sensor",
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
direct_access=True,
default_value=False
)
self._param_dict.add(Parameter.CONDUCTIVITY,
ds_line_05,
lambda match : False if (match.group(1)=='NO') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Report Conductivity",
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
direct_access=True,
default_value=False
)
#
# Next 3 work together to pull 3 values out of a single line.
#
self._param_dict.add(Parameter.IOP_MA,
ds_line_06,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="IOP",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.VMAIN_V,
ds_line_06,
lambda match : float(match.group(2)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="VMain",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.VLITH_V,
ds_line_06,
lambda match : float(match.group(3)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="VLith",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
#
# Next 3 work together to pull 3 values out of a single line.
#
self._param_dict.add(Parameter.LAST_SAMPLE_P,
ds_line_07a,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Last Sample Pressure",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.LAST_SAMPLE_T,
ds_line_07a,
lambda match : float(match.group(2)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Last Sample Temperature",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.LAST_SAMPLE_S,
ds_line_07a,
lambda match : float(match.group(3)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Last Sample Salinity",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
#
# Alternate regex for when S is not present
#
self._param_dict.add(Parameter.LAST_SAMPLE_P,
ds_line_07b,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Last Sample Pressure",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.LAST_SAMPLE_T,
ds_line_07b,
lambda match : float(match.group(2)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Last Sample Temperature",
multi_match=True,
visibility=ParameterDictVisibility.READ_ONLY)
#
# Next 2 work together to pull 2 values out of a single line.
#
self._param_dict.add(Parameter.TIDE_INTERVAL,
ds_line_08,
lambda match : int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Tide Interval",
multi_match=True)
self._param_dict.add(Parameter.TIDE_MEASUREMENT_DURATION,
ds_line_08,
lambda match : int(match.group(2)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Tide Measurement Duration",
multi_match=True)
self._param_dict.add(Parameter.TIDE_SAMPLES_BETWEEN_WAVE_BURST_MEASUREMENTS,
ds_line_09,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Tide Samples Between Wave Burst Measurements")
#
# Next 3 work together to pull 3 values out of a single line.
# 1000 wave samples/burst at 4.00 scans/sec, duration = 250 seconds
self._param_dict.add(Parameter.WAVE_SAMPLES_PER_BURST,
ds_line_10,
lambda match : int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Wave Sample Per Burst",
multi_match=True)
self._param_dict.add(Parameter.WAVE_SAMPLES_SCANS_PER_SECOND,
ds_line_10,
lambda match : float(match.group(2)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Wave Samples Scans Per Second",
multi_match=True)
self._param_dict.add(Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS,
ds_line_10,
lambda match : int(match.group(3)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Number of Wave Samples Per Burst For Wave Stats",
multi_match=True)
self._param_dict.add(Parameter.USE_START_TIME,
ds_line_11b,
lambda match : False if (match.group(1)=='do not') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Use Start Time",
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=False,
direct_access=False,
default_value=False
)
self._param_dict.add(Parameter.USE_STOP_TIME,
ds_line_12b,
lambda match : False if (match.group(1)=='do not') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Use Stop Time",
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=False,
direct_access=False,
default_value=False
)
self._param_dict.add(Parameter.TIDE_SAMPLES_PER_DAY,
ds_line_13,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Tide Samples Per Day")
self._param_dict.add(Parameter.WAVE_BURSTS_PER_DAY,
ds_line_14,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Wave Bursts Per Day")
self._param_dict.add(Parameter.MEMORY_ENDURANCE,
ds_line_15,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Memory Endurance")
self._param_dict.add(Parameter.NOMINAL_ALKALINE_BATTERY_ENDURANCE,
ds_line_16,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Nominal Alkaline Battery Endurance")
self._param_dict.add(Parameter.TOTAL_RECORDED_TIDE_MEASUREMENTS,
ds_line_17,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Total Recorded Tide Measurements")
self._param_dict.add(Parameter.TOTAL_RECORDED_WAVE_BURSTS,
ds_line_18,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Total Recorded Wave Bursts")
self._param_dict.add(Parameter.TIDE_MEASUREMENTS_SINCE_LAST_START,
ds_line_19,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Tide Measuremetns Since Last Start")
self._param_dict.add(Parameter.WAVE_BURSTS_SINCE_LAST_START,
ds_line_20,
lambda match : float(match.group(1)),
self._float_to_string,
visibility=ParameterDictVisibility.READ_ONLY,
type=ParameterDictType.FLOAT,
display_name="Wave Bursts Since Last Start")
self._param_dict.add(Parameter.TXREALTIME,
ds_line_21,
lambda match : False if (match.group(1)=='NO') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Transmit RealTime Tide Data",
startup_param=True,
direct_access=True,
default_value=True
)
self._param_dict.add(Parameter.TXWAVEBURST,
ds_line_22,
lambda match : False if (match.group(1)=='NO') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Transmit RealTime Wave Burst Data",
startup_param=True,
direct_access=True,
default_value=False
)
self._param_dict.add(Parameter.TXWAVESTATS,
ds_line_23,
lambda match : False if (match.group(1)=='NO') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Transmit Wave Stats Data",
)
self._param_dict.add(Parameter.NUM_WAVE_SAMPLES_PER_BURST_FOR_WAVE_STASTICS,
ds_line_24,
lambda match : int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Number of Wave Samples Per Burst For Wave Stats",
)
self._param_dict.add(Parameter.USE_MEASURED_TEMP_AND_CONDUCTIVITY_FOR_DENSITY_CALC,
ds_line_25,
lambda match : False if (match.group(1)=='do not ') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Use Measured Temperature and Conductivity of rDensity Calculation",
)
self._param_dict.add(Parameter.AVERAGE_WATER_TEMPERATURE_ABOVE_PRESSURE_SENSOR,
ds_line_26,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Average Water Temperature Above Pressure Sensor",
)
self._param_dict.add(Parameter.AVERAGE_SALINITY_ABOVE_PRESSURE_SENSOR,
ds_line_27,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Average Salinity Above Pressure Sensor",
)
self._param_dict.add(Parameter.PRESSURE_SENSOR_HEIGHT_FROM_BOTTOM,
ds_line_28,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Pressure Sensor Height From Bottom",
)
self._param_dict.add(Parameter.SPECTRAL_ESTIMATES_FOR_EACH_FREQUENCY_BAND,
ds_line_29,
lambda match : int(match.group(1)),
self._int_to_string,
type=ParameterDictType.INT,
display_name="Spectral Estimates For Each Frequency Band",
)
self._param_dict.add(Parameter.MIN_ALLOWABLE_ATTENUATION,
ds_line_30,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Minimum Allowable Attenuation",
)
self._param_dict.add(Parameter.MIN_PERIOD_IN_AUTO_SPECTRUM,
ds_line_31,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Minimum Period In Auto Spectrum",
)
self._param_dict.add(Parameter.MAX_PERIOD_IN_AUTO_SPECTRUM,
ds_line_32,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Maximum Period In Auto Spectrum",
)
self._param_dict.add(Parameter.HANNING_WINDOW_CUTOFF,
ds_line_33,
lambda match : float(match.group(1)),
self._float_to_string,
type=ParameterDictType.FLOAT,
display_name="Hanning Window Cutoff",
)
self._param_dict.add(Parameter.SHOW_PROGRESS_MESSAGES,
ds_line_34,
lambda match : True if (match.group(1)=='show') else False,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Show Progress Message",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.STATUS,
ds_line_35,
lambda match : string.upper(match.group(1)),
self._string_to_string,
type=ParameterDictType.STRING,
display_name="Status",
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.LOGGING,
ds_line_36,
lambda match : False if (match.group(1)=='NO') else True,
self._true_false_to_string,
type=ParameterDictType.BOOL,
display_name="Logging",
visibility=ParameterDictVisibility.READ_ONLY)
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary. Wake the device then issue
display status and display calibration commands. The parameter
dict will match line output and update itself.
@throws InstrumentTimeoutException if device cannot be timely woken.
@throws InstrumentProtocolException if ds/dc misunderstood.
"""
# Get old param dict config.
old_config = self._param_dict.get_config()
# Issue display commands and parse results.
timeout = kwargs.get('timeout', TIMEOUT)
self._do_cmd_resp(InstrumentCmds.DISPLAY_STATUS, timeout=timeout)
# Get new param dict config. If it differs from the old config,
# tell driver superclass to publish a config change event.
new_config = self._param_dict.get_config()
if not dict_equal(new_config, old_config) and self._protocol_fsm.get_current_state() != ProtocolState.UNKNOWN:
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def _parse_ds_response(self, response, prompt):
"""
Response handler for ds command
"""
if prompt != Prompt.COMMAND:
raise InstrumentProtocolException('ds command not recognized: %s.' % response)
for line in response.split(NEWLINE):
hit_count = self._param_dict.multi_match_update(line)
log.debug(str(hit_count) + "_parse_ds_response -- " + line )
# return the Ds as text
match = DS_REGEX_MATCHER.search(response)
result = None
if match:
result = match.group(1)
log.debug("MATCH = " + str(result))
return result
def _parse_dc_response(self, response, prompt):
"""
Response handler for dc command
"""
if prompt != Prompt.COMMAND:
raise InstrumentProtocolException('dc command not recognized: %s.' % response)
# publish a sample
sample = self._extract_sample(SBE26plusDeviceCalibrationDataParticle, DC_REGEX_MATCHER, response, True)
# return the DC as text
match = DC_REGEX_MATCHER.search(response)
result = None
if match:
result = match.group(1)
return result
def _parse_sl_response(self, response, prompt):
"""
Response handler for sl command
"""
if prompt != Prompt.COMMAND:
raise InstrumentProtocolException('sl command not recognized: %s.' % response)
result = response
log.debug("_parse_sl_response RETURNING RESULT=" + str(result))
return result
def _parse_ts_response(self, response, prompt):
"""
Response handler for ts command.
@param response command response string.
@param prompt prompt following command response.
@retval sample dictionary containing c, t, d values.
@throws InstrumentProtocolException if ts command misunderstood.
@throws InstrumentSampleException if response did not contain a sample
"""
if prompt != Prompt.COMMAND:
raise InstrumentProtocolException('ts command not recognized: %s', response)
result = response
log.debug("_parse_ts_response RETURNING RESULT=" + str(result))
return result
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
@param: chunk - byte sequence that we want to create a particle from
@param: timestamp - port agent timestamp to include in the chunk
"""
if(self._extract_sample(SBE26plusTideSampleDataParticle, TS_REGEX_MATCHER, chunk, timestamp)): return
if(self._extract_sample(SBE26plusTideSampleDataParticle, TIDE_REGEX_MATCHER, chunk, timestamp)): return
if(self._extract_sample(SBE26plusWaveBurstDataParticle, WAVE_REGEX_MATCHER, chunk, timestamp)): return
if(self._extract_sample(SBE26plusStatisticsDataParticle, STATS_REGEX_MATCHER, chunk, timestamp)): return
if(self._extract_sample(SBE26plusDeviceCalibrationDataParticle, DC_REGEX_MATCHER, chunk, timestamp)): return
if(self._extract_sample(SBE26plusDeviceStatusDataParticle, DS_REGEX_MATCHER, chunk, timestamp)): return
########################################################################
# Static helpers to format set commands.
########################################################################
@staticmethod
def _string_to_string(v):
return v
@staticmethod
# Should be renamed boolean_to_string for consistency
def _true_false_to_string(v):
"""
Write a boolean value to string formatted for sbe37 set operations.
@param v a boolean value.
@retval A yes/no string formatted for sbe37 set operations.
@throws InstrumentParameterException if value not a bool.
"""
if not isinstance(v,bool):
raise InstrumentParameterException('Value %s is not a bool.' % str(v))
if v:
return 'y'
else:
return 'n'
@staticmethod
def _int_to_string(v):
"""
Write an int value to string formatted for sbe37 set operations.
@param v An int val.
@retval an int string formatted for sbe37 set operations.
@throws InstrumentParameterException if value not an int.
"""
if not isinstance(v,int):
raise InstrumentParameterException('Value %s is not an int.' % str(v))
else:
return '%i' % v
@staticmethod
def _float_to_string(v):
"""
Write a float value to string formatted for sbe37 set operations.
@param v A float val.
@retval a float string formatted for sbe37 set operations.
@throws InstrumentParameterException if value is not a float.
"""
if not isinstance(v, float):
raise InstrumentParameterException('Value %s is not a float.' % v)
else:
# return '%e' % v  # This returns an exponentially formatted float every time,
# which is not what is needed here.
return str(v)  # return a simple float string
@staticmethod
def _string_to_numeric_date_time_string(date_time_string):
"""
convert string from "21 AUG 2012 09:51:55" to numeric "mmddyyyyhhmmss"
"""
return time.strftime("%m%d%Y%H%M%S", time.strptime(date_time_string, "%d %b %Y %H:%M:%S"))
| 45.054485
| 167
| 0.613276
| 118,300
| 0.973206
| 0
| 0
| 2,677
| 0.022023
| 0
| 0
| 39,752
| 0.327024
|
9a9fc338c15aa55b529d0d570899ecd61a1b41cd
| 514
|
py
|
Python
|
Strings/count-index-find.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 3
|
2022-03-28T09:10:08.000Z
|
2022-03-29T10:47:56.000Z
|
Strings/count-index-find.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | 1
|
2022-03-27T11:52:58.000Z
|
2022-03-27T11:52:58.000Z
|
Strings/count-index-find.py
|
tverma332/python3
|
544c4ec9c726c37293c8da5799f50575cc50852d
|
[
"MIT"
] | null | null | null |
# 1) count = counts how many times a particular word or character appears
x = "Keep grinding keep hustling"
print(x.count("t"))
# 2) index = gets the index of a letter (returns the lowest index)
x="Keep grinding keep hustling"
print(x.index("t")) # will give the lowest index value of (t)
# 3) find = gets the index of a letter (returns the lowest index); returns -1 on failure
x = "Keep grinding keep hustling"
print(x.find("t"))
'''
NOTE : print(x.index("t",34)) : Search starts from index value 34 including 34
'''
| 25.7
| 82
| 0.684825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 438
| 0.85214
|
9aa0a86fc034faf07525b543313701f15dfaa4e4
| 4,526
|
py
|
Python
|
datasets/datasets.py
|
rioyokotalab/ecl-isvr
|
ae274b1b81b1d1c10db008140c477f5893a0c1c3
|
[
"BSD-4-Clause-UC"
] | null | null | null |
datasets/datasets.py
|
rioyokotalab/ecl-isvr
|
ae274b1b81b1d1c10db008140c477f5893a0c1c3
|
[
"BSD-4-Clause-UC"
] | null | null | null |
datasets/datasets.py
|
rioyokotalab/ecl-isvr
|
ae274b1b81b1d1c10db008140c477f5893a0c1c3
|
[
"BSD-4-Clause-UC"
] | 2
|
2021-09-30T02:13:40.000Z
|
2021-12-14T07:33:28.000Z
|
#! -*- coding:utf-8
from typing import Callable, List, Optional
import numpy as np
import torch
import torchvision
__all__ = ["CIFAR10", "FashionMNIST"]
class CIFAR10(torch.utils.data.Dataset):
def __init__(self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
indices: List[int] = None,
data_length: int = None,
shuffle: bool = False):
super(CIFAR10, self).__init__()
self.__datas__ = []
self.__labels__ = []
dataset = torchvision.datasets.CIFAR10(root,
train=train,
transform=transform,
target_transform=target_transform,
download=download)
self.__classes__ = dataset.classes
if indices is None:
indices = list(range(len(dataset)))
for i in indices: # load data and cache it
d, l = dataset[i]
self.__datas__.append(d)
self.__labels__.append(l)
self.__length__ = (len(self.data)
if data_length is None else data_length)
self.__indices__ = np.arange(len(self.data))
self.__shuffle__ = shuffle
if self.shuffle:
np.random.shuffle(self.__indices__)
self.__call_count__ = 0
@property
def data(self): return self.__datas__
@property
def label(self): return self.__labels__
@property
def classes(self): return self.__classes__
@property
def indices(self): return self.__indices__
@property
def shuffle(self): return self.__shuffle__
def __len__(self): return self.__length__
def __getitem__(self, idx):
idx = self.indices[idx % len(self.data)]
d = self.data[idx]
l = self.label[idx]
self.__call_count__ += 1
if self.shuffle and self.__call_count__ >= len(self):
np.random.shuffle(self.__indices__)
self.__call_count__ = 0
return d, l
class FashionMNIST(torch.utils.data.Dataset):
def __init__(self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
indices: List[int] = None,
data_length: int = None,
shuffle: bool = False):
super(FashionMNIST, self).__init__()
self.__datas__ = []
self.__labels__ = []
dataset = torchvision.datasets.FashionMNIST(root,
train=train,
transform=transform,
target_transform=target_transform,
download=download)
self.__classes__ = dataset.classes
if indices is None:
indices = list(range(len(dataset)))
for i in indices: # load data and cache it
d, l = dataset[i]
self.__datas__.append(d)
self.__labels__.append(l)
self.__length__ = (len(self.data)
if data_length is None else data_length)
self.__indices__ = np.arange(len(self.data))
self.__shuffle__ = shuffle
if self.shuffle:
np.random.shuffle(self.__indices__)
self.__call_count__ = 0
@property
def data(self): return self.__datas__
@property
def label(self): return self.__labels__
@property
def classes(self): return self.__classes__
@property
def indices(self): return self.__indices__
@property
def shuffle(self): return self.__shuffle__
def __len__(self): return self.__length__
def __getitem__(self, idx):
idx = self.indices[idx % len(self.data)]
d = self.data[idx]
l = self.label[idx]
self.__call_count__ += 1
if self.shuffle and self.__call_count__ >= len(self):
np.random.shuffle(self.__indices__)
self.__call_count__ = 0
return d, l
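# Minimal usage sketch (illustrative only; the root path and transform below are assumptions,
# not part of the original module):
# import torchvision.transforms as transforms
# dataset = CIFAR10(root="./data", train=True,
#                   transform=transforms.ToTensor(), download=True, shuffle=True)
# image, label = dataset[0]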
| 36.208
| 87
| 0.527176
| 4,351
| 0.961335
| 0
| 0
| 554
| 0.122404
| 0
| 0
| 99
| 0.021874
|
9aa249f279f7113e5bf54c4bf46eea1716af9bd2
| 1,819
|
py
|
Python
|
API/Segmentation_API/detectron_seg.py
|
rogo96/Background-removal
|
e301d288b73074940356fa4fe9c11f11885dc506
|
[
"MIT"
] | 40
|
2020-09-16T02:22:30.000Z
|
2021-12-22T11:30:49.000Z
|
API/Segmentation_API/detectron_seg.py
|
ganjbakhshali/Background-removal
|
39691c0044b824e8beab13e44f2c269e309aec72
|
[
"MIT"
] | 6
|
2020-09-18T02:59:11.000Z
|
2021-09-06T15:44:33.000Z
|
API/Segmentation_API/detectron_seg.py
|
ganjbakhshali/Background-removal
|
39691c0044b824e8beab13e44f2c269e309aec72
|
[
"MIT"
] | 14
|
2020-11-06T09:26:25.000Z
|
2021-10-20T08:00:48.000Z
|
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
class Model:
def __init__(self,confidence_thresh=0.6):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
self.model = DefaultPredictor(cfg)
def get_seg_output(self,image:np.array):
out = self.model(image)['instances']
outputs = [(out.pred_masks[i],out.pred_classes[i]) for i in range(len(out.pred_classes)) if out.pred_classes[i]==0]
return outputs
class Preprocessing:
def __init__(self,kernel,dilate_iter=5,erode_iter=1):
self.kernel = kernel
self.dilate_iter = dilate_iter
self.erode_iter = erode_iter
def get_target_mask(self,masks):
out = np.zeros(masks[0].shape)
for mask in masks:
out += mask
out = np.clip(out,0,1)
return out
def get_trimap(self,masks):
target_mask = self.get_target_mask(masks)
erode = cv2.erode(target_mask.astype('uint8'),self.kernel,iterations=self.erode_iter)
dilate = cv2.dilate(target_mask.astype('uint8'),self.kernel,iterations=self.dilate_iter)
h, w = target_mask.shape
trimap = np.zeros((h, w, 2))
trimap[erode == 1, 1] = 1
trimap[dilate == 0, 0] = 1
return trimap
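# Rough usage sketch (illustrative only; the image path and kernel size are assumptions,
# not part of the original module):
# import cv2, numpy as np
# image = cv2.imread("person.jpg")
# model = Model(confidence_thresh=0.6)
# masks = [mask.cpu().numpy() for mask, cls in model.get_seg_output(image)]
# trimap = Preprocessing(kernel=np.ones((3, 3), np.uint8)).get_trimap(masks)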
| 31.912281
| 124
| 0.6663
| 1,528
| 0.840022
| 0
| 0
| 0
| 0
| 0
| 0
| 163
| 0.08961
|
9aa39e5e7763187b713ab547d0e364010f1b3d6f
| 106
|
py
|
Python
|
examples/plugin_example/setup.py
|
linshoK/pysen
|
2b84a15240c5a47cadd8e3fc8392c54c2995b0b1
|
[
"MIT"
] | 423
|
2021-03-22T08:45:12.000Z
|
2022-03-31T21:05:53.000Z
|
examples/plugin_example/setup.py
|
linshoK/pysen
|
2b84a15240c5a47cadd8e3fc8392c54c2995b0b1
|
[
"MIT"
] | 1
|
2022-02-23T08:53:24.000Z
|
2022-03-23T14:11:54.000Z
|
examples/plugin_example/setup.py
|
linshoK/pysen
|
2b84a15240c5a47cadd8e3fc8392c54c2995b0b1
|
[
"MIT"
] | 9
|
2021-03-26T14:20:07.000Z
|
2022-03-24T13:17:06.000Z
|
from setuptools import setup
setup(
name="example-advanced-package", version="0.0.0", packages=[],
)
| 17.666667
| 66
| 0.698113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.311321
|
9aa3bdf68ace18fc9d168671cbe55ba44bdbac29
| 416
|
py
|
Python
|
setup.py
|
xpac1985/pyASA
|
a6cf470a4d1b731864a1b450e321901636c1ebdf
|
[
"MIT"
] | 10
|
2017-02-05T12:15:19.000Z
|
2020-05-20T14:33:04.000Z
|
setup.py
|
xpac1985/pyASA
|
a6cf470a4d1b731864a1b450e321901636c1ebdf
|
[
"MIT"
] | null | null | null |
setup.py
|
xpac1985/pyASA
|
a6cf470a4d1b731864a1b450e321901636c1ebdf
|
[
"MIT"
] | 3
|
2017-04-02T13:00:28.000Z
|
2020-06-13T23:34:37.000Z
|
from distutils.core import setup
setup(
name='pyASA',
packages=['pyASA'],
version='0.1.0',
description='Wrapper for the Cisco ASA REST API',
author='xpac',
author_email='bjoern@areafunky.net',
url='https://github.com/xpac1985/pyASA',
download_url='https://github.com/xpac1985/pyASA/tarball/0.1.0',
keywords=['cisco', 'asa', 'rest-api', 'wrapper', 'alpha'],
classifiers=[],
)
| 27.733333
| 67
| 0.646635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.497596
|
9aa3ca73beed1f30ce5fdf99995b03ee7f17a719
| 2,441
|
py
|
Python
|
Client.py
|
fimmartins/qpid_protobuf_python
|
b1411088e74b48347aeeaecdf84bbf9c7c9f7662
|
[
"Apache-2.0"
] | 1
|
2015-12-15T19:21:26.000Z
|
2015-12-15T19:21:26.000Z
|
Client.py
|
fimmartins/qpid_protobuf_python
|
b1411088e74b48347aeeaecdf84bbf9c7c9f7662
|
[
"Apache-2.0"
] | null | null | null |
Client.py
|
fimmartins/qpid_protobuf_python
|
b1411088e74b48347aeeaecdf84bbf9c7c9f7662
|
[
"Apache-2.0"
] | null | null | null |
from Qpid import QpidConnection
from mxt1xx_pb2 import *
from commands_pb2 import *
from QpidTypes import *
from qpid.messaging import *
#doc http://qpid.apache.org/releases/qpid-0.14/apis/python/html/
#examples https://developers.google.com/protocol-buffers/docs/pythontutorial
qpidCon = QpidConnection('192.168.0.78', '5672', 'fila_dados_ext', 'mxt_command_qpid')
while not(qpidCon.start()):
print('Trying to reconnect')
response_received = True
def mxt1xx_output_control(activate, pos, qpidCon):
activate = not activate
activate = int(activate == True)
cmd = u_command()
cmd.protocol = pos.firmware.protocol
cmd.serial = pos.firmware.serial
cmd.id = 'Controla Saida ' + str(pos.firmware.serial)
cmd.type = 51
cmd.attempt = 50
cmd.timeout = '2020-12-31 00:00:00'
cmd.handler_type = 2
cmd.transport = 'GPRS'
parameter = cmd.parameter.add()
parameter.id = 'SET_OUTPUT'
parameter.value = '1'
parameter = cmd.parameter.add()
parameter.id = 'SET OUTPUT 1'
parameter.value = str(activate)
parameter = cmd.parameter.add()
parameter.id = 'SET OUTPUT 2'
parameter.value = str(activate)
parameter = cmd.parameter.add()
parameter.id = 'SET OUTPUT 3'
parameter.value = str(activate)
parameter = cmd.parameter.add()
parameter.id = 'SET OUTPUT 4'
parameter.value = str(activate)
message = Message(subject="PB_COMMAND", content=cmd.SerializeToString())
qpidCon.sender.send(message)
return False
while(1):
message = qpidCon.receiver.fetch()
subject = message.subject
print (message.subject + ' received')
if subject == QpidSubjectType.qpid_st_pb_mxt1xx_pos:
pos = mxt1xx_u_position()
pos.ParseFromString(message.content)
print (str(pos.firmware.protocol) + ':' + str(pos.firmware.serial) + ':' + str(pos.firmware.memory_index))
qpidCon.session.acknowledge()
if response_received:
response_received = mxt1xx_output_control(pos.hardware_monitor.outputs.output_1, pos, qpidCon)
if subject == QpidSubjectType.qpid_st_pb_command_response:
res = u_command_response()
res.ParseFromString(message.content)
if res.status == 5:
print('Command response: Success')
response_received = True
else:
print('Command response: ' + str(res.status))
else:
qpidCon.session.acknowledge()
| 31.294872
| 114
| 0.679639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 406
| 0.166325
|
9aa4eade5a06a5cb47e49505af09bdb59f7f1c8a
| 1,574
|
py
|
Python
|
run_all.py
|
EinariTuukkanen/line-search-comparison
|
7daa38779017f26828caa31a53675c8223e6ab8e
|
[
"MIT"
] | null | null | null |
run_all.py
|
EinariTuukkanen/line-search-comparison
|
7daa38779017f26828caa31a53675c8223e6ab8e
|
[
"MIT"
] | null | null | null |
run_all.py
|
EinariTuukkanen/line-search-comparison
|
7daa38779017f26828caa31a53675c8223e6ab8e
|
[
"MIT"
] | null | null | null |
import numpy as np
from example_functions import target_function_dict
from line_search_methods import line_search_dict
from main_methods import main_method_dict
from config import best_params
from helpers import generate_x0
def run_one(_theta, _main_method, _ls_method, params, ls_params):
theta = _theta()
x0 = generate_x0(theta.n, *theta.bounds)
ls_method = _ls_method(ls_params)
main_method = _main_method(params, ls_method)
# print('Correct solution: ', theta.min_values)
result = main_method(theta, np.array(x0))
# print('Found solution: ', result['min_value'])
# print(result_to_string(result))
return result
def result_to_string(result):
perf = result['performance']
ls_perf = perf['line_search']
return ', '.join([str(s) for s in [
result['status'], perf['iterations'], f"{perf['duration']} ms",
ls_perf['iterations'], f"{round(ls_perf['duration'], 2)} ms",
]])
np.warnings.filterwarnings('ignore', category=RuntimeWarning)
for theta in best_params:
for main_method in best_params[theta]:
for line_search in best_params[theta][main_method]:
result = run_one(
target_function_dict[theta],
main_method_dict[main_method],
line_search_dict[line_search],
best_params[theta][main_method][line_search]['params'],
best_params[theta][main_method][line_search]['ls_params'],
)
status = result['status']
print(f"{status}: {theta},{main_method},{line_search}")
| 34.217391
| 74
| 0.670902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 334
| 0.212198
|
9aa4fd6241fe5ed3a825608b2a7990cea4c0d1af
| 5,299
|
py
|
Python
|
bin/runner.py
|
ColorOfLight/ML-term-project
|
047b22fcdd8df7a18abd224ccbf23ae5d981fc97
|
[
"MIT"
] | null | null | null |
bin/runner.py
|
ColorOfLight/ML-term-project
|
047b22fcdd8df7a18abd224ccbf23ae5d981fc97
|
[
"MIT"
] | null | null | null |
bin/runner.py
|
ColorOfLight/ML-term-project
|
047b22fcdd8df7a18abd224ccbf23ae5d981fc97
|
[
"MIT"
] | null | null | null |
# Load Packages
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.metrics import classification_report
from plots import draw_corr_heatmap
import seaborn as sns
import xgboost as xgb
import pickle
from logger import Logger
import os
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import AdaBoostRegressor
from ensemble import Ensemble
from sklearn.impute import SimpleImputer
from ilbeom_lg_v2 import Ilbeom_Linear
from sklearn.model_selection import StratifiedKFold
os.environ["JOBLIB_TEMP_FOLDER"] = "/tmp"
# Variables
train_rate = .8
# The model will be saved in ../models/{model_name}.dat
model_name = 'ensemble-test1'
np.random.seed(0)
names = ['contract date', 'latitude', 'longtitude', 'altitude', '1st region id', '2nd region id', 'road id',
'apartment_id', 'floor', 'angle', 'area', 'parking lot limit', 'parking lot area', 'parking lot external',
'management fee', 'households', 'age of residents', 'builder id', 'completion date', 'built year',
'schools', 'bus stations', 'subway stations', 'price']
non_numeric_names = ['contract date', 'completion date']
tuned_parameters = {
'n_estimators': [100, 200, 400],
'learning_rate': [0.02, 0.04, 0.08, 0.1, 0.4],
'gamma': [0, 1, 2],
'subsample': [0.5, 0.66, 0.75],
'colsample_bytree': [0.6, 0.8, 1],
'max_depth': [6, 7, 8]
# 'learning_rate': [0.02],
# 'gamma': [0],
# 'subsample': [0.5],
# 'colsample_bytree': [0.6],
# 'max_depth': [6]
}
def acc_scorer(model, X, y):
y_pred = model.predict(X)
return get_accuracy(y_pred, y.iloc)
def preprocess(data):
data['angle'] = np.sin(data['angle'])
data['contract date'] = pd.to_datetime(data['contract date'])
data['completion date'] = pd.to_numeric(
data['contract date'] - pd.to_datetime(data['completion date']))
data['contract date'] = pd.to_numeric(
data['contract date'] - data['contract date'].min())
drop_columns = ['1st region id', '2nd region id',
'road id', 'apartment_id', 'builder id', 'built year']
data = data.drop(columns=drop_columns)
drop_columns.append('price')
def normalize(d):
min_max_scaler = preprocessing.MinMaxScaler()
d_scaled = min_max_scaler.fit_transform(d)
return pd.DataFrame(d_scaled, columns=[item for item in names if item not in drop_columns])
return normalize(data)
def get_accuracy(y_pred, y_test):
length = len(y_pred)
_sum = 0
for idx in range(length):
_sum += abs((y_test[idx] - y_pred[idx]) / y_pred[idx])
return 1 - (_sum / length)
# Main
logger = Logger('final')
data = pd.read_csv('../data/data_train.csv',
names=names)
# Fill NaN
def fill_missing_values(data, is_test=False):
new_data = data.drop(columns=non_numeric_names)
imputer = SimpleImputer(missing_values=np.nan, strategy='median')
imputer = imputer.fit(new_data)
new_data = imputer.transform(new_data)
if is_test:
columns = [n for n in names if n not in non_numeric_names]
columns.remove('price')
new_data = pd.DataFrame(
new_data, columns=columns)
else:
new_data = pd.DataFrame(new_data, columns=[n for n in names if n not in non_numeric_names])
for n in non_numeric_names:
new_data[n] = data[n]
return new_data
data = fill_missing_values(data)
y = data['price']
X = data.drop(columns=['price'])
# X_names = list(X)
def get_unique_model():
xg = xgb.XGBRegressor(n_estimators=200, learning_rate=0.02, gamma=0, subsample=0.75,
colsample_bytree=1, max_depth=6)
en = ElasticNet(l1_ratio=0.95, alpha=0.15, max_iter=50000)
ada = AdaBoostRegressor(
learning_rate=0.01, loss='square', n_estimators=100)
lr = Ilbeom_Linear()
lst = [xg, en, ada, lr]
return Ensemble(lst)
# model_n = xgb.XGBRegressor(n_estimators=200, learning_rate=0.02, gamma=0, subsample=0.75,
# colsample_bytree=1, max_depth=6)
model_n = ElasticNet(l1_ratio=0.95, alpha=0.15, max_iter=50000)
model_u = get_unique_model()
def test_cv(model, X, y, n_splits=5):
# print(np.mean(cross_val_score(model, X, y, scoring=acc_scorer, cv=5, n_jobs=-1)))
skf = StratifiedKFold(n_splits=n_splits, shuffle=True)
results = []
for i, (train, test) in enumerate(skf.split(X, y)):
print("Running Fold", i+1, "/", 5)
X_train, X_test = X.iloc[train], X.iloc[test]
y_train, y_test = y.iloc[train], y.iloc[test]
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
results.append(get_accuracy(y_pred, y_test.iloc))
print(f"result: {sum(results) / n_splits}")
# Test each model
# test_cv(model_n, preprocess(X), y)
# test_cv(model_u, X, y)
# Write Answer Sheet
def write_answers(model_n, model_u):
    data = pd.read_csv('../data/data_test.csv',
                       names=[n for n in names if n != 'price'])
data = fill_missing_values(data, is_test=True)
np.savetxt('../data/result_.csv', model_n.predict(preprocess(data)).reshape(-1,1))
np.savetxt('../data/result_unique.csv', model_u.predict(data).reshape(-1, 1))
# write answers
model_n.fit(preprocess(X), y)
model_u.fit(X, y)
write_answers(model_n, model_u)
| 32.115152
| 115
| 0.684846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,452
| 0.274014
|
9aa693424bf8bc328cb722f9e8651b7867acfe8a
| 1,346
|
py
|
Python
|
api/app.py
|
t-kigi/nuxt-chalice-aws-app-template
|
d413752004976911938d2fc26aa864ddae91a34f
|
[
"MIT"
] | null | null | null |
api/app.py
|
t-kigi/nuxt-chalice-aws-app-template
|
d413752004976911938d2fc26aa864ddae91a34f
|
[
"MIT"
] | null | null | null |
api/app.py
|
t-kigi/nuxt-chalice-aws-app-template
|
d413752004976911938d2fc26aa864ddae91a34f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
nuxt-chalice-api のテンプレート実装です。
主に、全体で利用するグローバルスコープのリソースを初期化します。
"""
import os
from chalice import (
Chalice, CognitoUserPoolAuthorizer,
CORSConfig
)
from chalicelib import aws
from chalicelib.env import store
stage = store.mutation(
'chalilce.stage', os.environ.get('STAGE', 'local'))
appname = os.environ.get('APPNAME', 'nuxt-chalice-api')
app = store.mutation(
'chalice.app', Chalice(app_name=appname))
project_dir = os.path.dirname(__file__)
conffile = os.path.join(
project_dir, 'chalicelib', 'env', f'{stage}.yaml')
store.load_config(conffile)
authorizer = store.mutation(
'chalice.authorizer',
CognitoUserPoolAuthorizer(
'MyUserPool', provider_arns=[store.conf('UserPoolARN')])
)
# CORS settings are only needed for local, since requests then come from a different origin
if store.is_local():
cors = CORSConfig(
allow_origin=store.conf('FrontUrl'),
allow_headers=['CognitoAccessToken'],
allow_credentials=True
)
else:
cors = None
store.mutation('chalice.cors', cors)
# Initialize AWS boto3 clients
store.mutation(
'aws.session',
aws.create_session(store.conf('Profile'), store.conf('Region')))
store.mutation(
'aws.cognito-idp', store.get('aws.session').client('cognito-idp'))
# Add per-module routing
from chalicelib.routes import auth, example # noqa
| 22.433333
| 70
| 0.704309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 646
| 0.423885
|
9aa815cea217ed0284d392142fbc2dadb16b41d8
| 2,186
|
py
|
Python
|
examples/plotting/plot_with_matplotlib.py
|
crzdg/acconeer-python-exploration
|
26c16a3164199c58fe2940fe7050664d0d0e1161
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
examples/plotting/plot_with_matplotlib.py
|
crzdg/acconeer-python-exploration
|
26c16a3164199c58fe2940fe7050664d0d0e1161
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
examples/plotting/plot_with_matplotlib.py
|
crzdg/acconeer-python-exploration
|
26c16a3164199c58fe2940fe7050664d0d0e1161
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from acconeer.exptool import configs, utils
from acconeer.exptool.clients import SocketClient, SPIClient, UARTClient
def main():
args = utils.ExampleArgumentParser(num_sens=1).parse_args()
utils.config_logging(args)
if args.socket_addr:
client = SocketClient(args.socket_addr)
elif args.spi:
client = SPIClient()
else:
port = args.serial_port or utils.autodetect_serial_port()
client = UARTClient(port)
config = configs.IQServiceConfig()
config.sensor = args.sensors
config.update_rate = 10
session_info = client.setup_session(config)
depths = utils.get_range_depths(config, session_info)
amplitude_y_max = 1000
fig, (amplitude_ax, phase_ax) = plt.subplots(2)
fig.set_size_inches(8, 6)
fig.canvas.set_window_title("Acconeer matplotlib example")
for ax in [amplitude_ax, phase_ax]:
ax.set_xlabel("Depth (m)")
ax.set_xlim(config.range_interval)
ax.grid(True)
amplitude_ax.set_ylabel("Amplitude")
amplitude_ax.set_ylim(0, 1.1 * amplitude_y_max)
phase_ax.set_ylabel("Phase")
utils.mpl_setup_yaxis_for_phase(phase_ax)
amplitude_line = amplitude_ax.plot(depths, np.zeros_like(depths))[0]
phase_line = phase_ax.plot(depths, np.zeros_like(depths))[0]
fig.tight_layout()
plt.ion()
plt.show()
interrupt_handler = utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
client.start_session()
while not interrupt_handler.got_signal:
info, data = client.get_next()
amplitude = np.abs(data)
phase = np.angle(data)
max_amplitude = np.max(amplitude)
if max_amplitude > amplitude_y_max:
amplitude_y_max = max_amplitude
amplitude_ax.set_ylim(0, 1.1 * max_amplitude)
amplitude_line.set_ydata(amplitude)
phase_line.set_ydata(phase)
if not plt.fignum_exists(1): # Simple way to check if plot is closed
break
fig.canvas.flush_events()
print("Disconnecting...")
plt.close()
client.disconnect()
if __name__ == "__main__":
main()
| 26.987654
| 77
| 0.682068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.070448
|
9aa888a27862f3097e55339b5958acdbaec12723
| 437
|
py
|
Python
|
kryptobot/bots/multi_bot.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 24
|
2018-05-29T13:44:36.000Z
|
2022-03-12T20:41:45.000Z
|
kryptobot/bots/multi_bot.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 23
|
2018-07-08T02:31:18.000Z
|
2020-06-02T04:07:49.000Z
|
kryptobot/bots/multi_bot.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 14
|
2018-08-10T15:44:27.000Z
|
2021-06-14T07:14:52.000Z
|
from .bot import Bot
class MultiBot(Bot):
strategies = []
def __init__(self, strategies, config=None):
super().__init__(strategy=None, config=config)
self.strategies = strategies
# override this to inherit
def __start(self):
for st in self.strategies:
st.add_session(self.session)
st.add_keys(self.config['apis'])
st.run_simulation()
st.start()
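# --- Usage sketch (illustrative addition, not part of the original file) ---
# The strategy object and config layout below are assumptions: each strategy is
# expected to expose add_session, add_keys, run_simulation and start, and the
# config is expected to carry an 'apis' mapping with exchange credentials.
#
# bot = MultiBot(
#     strategies=[my_strategy],
#     config={'apis': {'some_exchange': {'key': '...', 'secret': '...'}}},
# )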
| 24.277778
| 54
| 0.606407
| 414
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.073227
|
9aa8e28e915cdb48539530ca48ffdc1fa280bc82
| 140
|
py
|
Python
|
setup.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 27
|
2018-06-04T19:11:42.000Z
|
2022-02-23T22:46:39.000Z
|
setup.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 7
|
2018-06-09T15:27:51.000Z
|
2021-03-11T20:00:35.000Z
|
setup.py
|
adrienbrunet/mixt
|
d725ec752ce430d135e993bc988bfdf2b8457c4b
|
[
"MIT"
] | 3
|
2018-07-29T10:20:02.000Z
|
2021-11-18T19:55:07.000Z
|
#!/usr/bin/env python
"""Setup file for the ``mixt`` module. Configuration is in ``setup.cfg``."""
from setuptools import setup
setup()
| 15.555556
| 76
| 0.678571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 97
| 0.692857
|
9aa920f4f30751f1feef1f340c733399005558c4
| 1,235
|
py
|
Python
|
venv/lib/python3.9/site-packages/py2app/recipes/PIL/prescript.py
|
dequeb/asmbattle
|
27e8b209de5787836e288a2f2f9b7644ce07563e
|
[
"MIT"
] | 193
|
2020-01-15T09:34:20.000Z
|
2022-03-18T19:14:16.000Z
|
venv/lib/python3.9/site-packages/py2app/recipes/PIL/prescript.py
|
dequeb/asmbattle
|
27e8b209de5787836e288a2f2f9b7644ce07563e
|
[
"MIT"
] | 185
|
2020-01-15T08:38:27.000Z
|
2022-03-27T17:29:29.000Z
|
venv/lib/python3.9/site-packages/py2app/recipes/PIL/prescript.py
|
dequeb/asmbattle
|
27e8b209de5787836e288a2f2f9b7644ce07563e
|
[
"MIT"
] | 23
|
2020-01-24T14:47:18.000Z
|
2022-02-22T17:19:47.000Z
|
def _recipes_pil_prescript(plugins):
try:
import Image
have_PIL = False
except ImportError:
from PIL import Image
have_PIL = True
import sys
def init():
if Image._initialized >= 2:
return
if have_PIL:
try:
import PIL.JpegPresets
sys.modules["JpegPresets"] = PIL.JpegPresets
except ImportError:
pass
for plugin in plugins:
try:
if have_PIL:
try:
# First try absolute import through PIL (for
# Pillow support) only then try relative imports
m = __import__("PIL." + plugin, globals(), locals(), [])
m = getattr(m, plugin)
sys.modules[plugin] = m
continue
except ImportError:
pass
__import__(plugin, globals(), locals(), [])
except ImportError:
print("Image: failed to import")
if Image.OPEN or Image.SAVE:
Image._initialized = 2
return 1
Image.init = init
| 26.276596
| 80
| 0.460729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 136
| 0.110121
|
9aa95eb6fe52df130917d5af87f7b5c65c75b243
| 691
|
py
|
Python
|
app/accounts/views/user_type.py
|
phessabi/eshop
|
6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb
|
[
"Apache-2.0"
] | 1
|
2020-02-04T21:18:31.000Z
|
2020-02-04T21:18:31.000Z
|
app/accounts/views/user_type.py
|
phessabi/eshop
|
6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb
|
[
"Apache-2.0"
] | 12
|
2020-01-01T11:46:33.000Z
|
2022-03-12T00:10:01.000Z
|
app/accounts/views/user_type.py
|
phessabi/eshop
|
6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb
|
[
"Apache-2.0"
] | 1
|
2020-02-18T11:12:48.000Z
|
2020-02-18T11:12:48.000Z
|
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class GetTypeView(APIView):
permission_classes = [IsAuthenticated]
def get(self, request):
user = request.user
if hasattr(user, 'vendor'):
type = 'vendor'
name = user.vendor.name
elif hasattr(user, 'buyer'):
type = 'buyer'
name = user.buyer.name
else:
type = 'admin'
name = user.username
data = {
'name': name,
'type': type,
'username': user.username
}
return Response(data)
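# --- Routing sketch (illustrative addition, not part of the original file) ---
# One way this view could be exposed in a urls.py; the URL prefix and import
# path are assumptions made only for this example.
#
# from django.urls import path
# from accounts.views.user_type import GetTypeView
#
# urlpatterns = [
#     path('accounts/type/', GetTypeView.as_view(), name='user-type'),
# ]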
| 26.576923
| 54
| 0.570188
| 547
| 0.791606
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.085384
|
9aa976fa66600077fd0293cccc1c6dcd3ade5f91
| 9,390
|
py
|
Python
|
Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | 2
|
2022-03-12T04:53:03.000Z
|
2022-03-27T12:39:21.000Z
|
Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | null | null | null |
Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | 2
|
2022-03-12T04:52:21.000Z
|
2022-03-27T12:45:32.000Z
|
# Thinking probabilistically-- Discrete variables!!
import numpy as np
import matplotlib.pyplot as plt
# Statistical inference rests upon probability. Because we can very rarely say anything meaningful with absolute certainty from data, we use probabilistic language to make quantitative statements about data. In this chapter, you will learn how to think probabilistically about discrete quantities: those that can only take certain values, like integers.
# Generating random numbers using the np.random module
# We will be hammering the np.random module for the rest of this course and its sequel. Actually, you will probably call functions from this module more than any other while wearing your hacker statistician hat. Let's start by taking its simplest function, np.random.random() for a test spin. The function returns a random number between zero and one. Call np.random.random() a few times in the IPython shell. You should see numbers jumping around between zero and one.
# In this exercise, we'll generate lots of random numbers between zero and one, and then plot a histogram of the results. If the numbers are truly random, all bars in the histogram should be of (close to) equal height.
# You may have noticed that, in the video, Justin generated 4 random numbers by passing the keyword argument size=4 to np.random.random(). Such an approach is more efficient than a for loop: in this exercise, however, you will write a for loop to experience hacker statistics as the practice of repeating an experiment over and over again.
# Seed the random number generator
np.random.seed(42)
# Initialize random numbers: random_numbers
random_numbers = np.empty(100000)
# Generate random numbers by looping over range(100000)
for i in range(100000):
random_numbers[i] = np.random.random()
# Plot a histogram
_ = plt.hist(random_numbers)
# Show the plot
plt.show()
# The np.random module and Bernoulli trials
# You can think of a Bernoulli trial as a flip of a possibly biased coin. Specifically, each coin flip has a probability p of landing heads (success) and probability 1−p of landing tails (failure). In this exercise, you will write a function to perform n Bernoulli trials, perform_bernoulli_trials(n, p), which returns the number of successes out of n Bernoulli trials, each of which has probability p of success. To perform each Bernoulli trial, use the np.random.random() function, which returns a random number between zero and one.
def perform_bernoulli_trials(n, p):
"""Perform n Bernoulli trials with success probability p
and return number of successes."""
# Initialize number of successes: n_success
n_success = 0
# Perform trials
for i in range(n):
# Choose random number between zero and one: random_number
random_number = np.random.random()
# If less than p, it's a success so add one to n_success
        if random_number < p:
            n_success += 1
return n_success
# How many defaults might we expect?
# Let's say a bank made 100 mortgage loans. It is possible that anywhere between 0 and 100 of the loans will be defaulted upon. You would like to know the probability of getting a given number of defaults, given that the probability of a default is p = 0.05. To investigate this, you will do a simulation. You will perform 100 Bernoulli trials using the perform_bernoulli_trials() function you wrote in the previous exercise and record how many defaults we get. Here, a success is a default. (Remember that the word "success" just means that the Bernoulli trial evaluates to True, i.e., did the loan recipient default?) You will do this for another 100 Bernoulli trials. And again and again until we have tried it 1000 times. Then, you will plot a histogram describing the probability of the number of defaults.
# Seed random number generator
np.random.seed(42)
# Initialize the number of defaults: n_defaults
n_defaults = np.empty(1000)
# Compute the number of defaults
for i in range(1000):
n_defaults[i] = perform_bernoulli_trials(100,0.05)
# Plot the histogram with default number of bins; label your axes
_ = plt.hist(n_defaults, normed= True)
_ = plt.xlabel('number of defaults out of 100 loans')
_ = plt.ylabel('probability')
# Show the plot
plt.show()
# Will the bank fail?
# Plot the number of defaults you got from the previous exercise, in your namespace as n_defaults, as a CDF. The ecdf() function you wrote in the first chapter is available.
# If interest rates are such that the bank will lose money if 10 or more of its loans are defaulted upon, what is the probability that the bank will lose money?
# Compute ECDF: x, y
x, y= ecdf(n_defaults)
# Plot the ECDF with labeled axes
plt.plot(x, y, marker = '.', linestyle ='none')
plt.xlabel('loans')
plt.ylabel('interest')
# Show the plot
plt.show()
# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money
n_lose_money=sum(n_defaults >=10)
# Compute and print probability of losing money
print('Probability of losing money =', n_lose_money / len(n_defaults))
# Sampling out of the Binomial distribution
# Compute the probability mass function for the number of defaults we would expect for 100 loans as in the last section, but instead of simulating all of the Bernoulli trials, perform the sampling using np.random.binomial(). This is identical to the calculation you did in the last set of exercises using your custom-written perform_bernoulli_trials() function, but far more computationally efficient. Given this extra efficiency, we will take 10,000 samples instead of 1000. After taking the samples, plot the CDF as last time. This CDF that you are plotting is that of the Binomial distribution.
# Note: For this exercise and all going forward, the random number generator is pre-seeded for you (with np.random.seed(42)) to save you typing that each time.
# Take 10,000 samples out of the binomial distribution: n_defaults
n_defaults = np.random.binomial(100,0.05,size = 10000)
# Compute CDF: x, y
x, y = ecdf(n_defaults)
# Plot the CDF with axis labels
plt.plot(x,y, marker ='.', linestyle = 'none')
plt.xlabel("Number of Defaults")
plt.ylabel("CDF")
# Show the plot
plt.show()
# Plotting the Binomial PMF
# As mentioned in the video, plotting a nice looking PMF requires a bit of matplotlib trickery that we will not go into here. Instead, we will plot the PMF of the Binomial distribution as a histogram with skills you have already learned. The trick is setting up the edges of the bins to pass to plt.hist() via the bins keyword argument. We want the bins centered on the integers. So, the edges of the bins should be -0.5, 0.5, 1.5, 2.5, ... up to max(n_defaults) + 1.5. You can generate an array like this using np.arange() and then subtracting 0.5 from the array.
# You have already sampled out of the Binomial distribution during your exercises on loan defaults, and the resulting samples are in the NumPy array n_defaults.
# Compute bin edges: bins
bins = np.arange(0, max(n_defaults) + 1.5) - 0.5
# Generate histogram
plt.hist(n_defaults, normed = True, bins = bins)
# Label axes
plt.xlabel('Defaults')
plt.ylabel('PMF')
# Show the plot
plt.show()
# Relationship between Binomial and Poisson distributions
# You just heard that the Poisson distribution is a limit of the Binomial distribution for rare events. This makes sense if you think about the stories. Say we do a Bernoulli trial every minute for an hour, each with a success probability of 0.1. We would do 60 trials, and the number of successes is Binomially distributed, and we would expect to get about 6 successes. This is just like the Poisson story we discussed in the video, where we get on average 6 hits on a website per hour. So, the Poisson distribution with arrival rate equal to np approximates a Binomial distribution for n Bernoulli trials with probability p of success (with n large and p small). Importantly, the Poisson distribution is often simpler to work with because it has only one parameter instead of two for the Binomial distribution.
# Let's explore these two distributions computationally. You will compute the mean and standard deviation of samples from a Poisson distribution with an arrival rate of 10. Then, you will compute the mean and standard deviation of samples from a Binomial distribution with parameters n and p such that np=10.
# Draw 10,000 samples out of Poisson distribution: samples_poisson
samples_poisson = np.random.poisson(10, size=10000)
# Print the mean and standard deviation
print('Poisson: ', np.mean(samples_poisson),
np.std(samples_poisson))
# Specify values of n and p to consider for Binomial: n, p (chosen so that n*p = 10)
n = [20, 100, 1000]
p = [0.5, 0.1, 0.01]
# Draw 10,000 samples for each n,p pair: samples_binomial
for i in range(3):
    samples_binomial = np.random.binomial(n[i], p[i], size=10000)
# Print results
print('n =', n[i], 'Binom:', np.mean(samples_binomial),
np.std(samples_binomial))
# Was 2015 anomalous?
# 1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that there are on average 251/115 no-hitters per season, what is the probability of having seven or more in a season?
# Draw 10,000 samples out of Poisson distribution: n_nohitters
n_nohitters = np.random.poisson(251 / 115, size=10000)
# Compute number of samples that are seven or greater: n_large
n_large = np.sum(n_nohitters >= 7)
# Compute probability of getting seven or more: p_large
p_large = n_large / 10000
# Print the result
print('Probability of seven or more no-hitters:', p_large)
| 47.908163
| 812
| 0.760809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,912
| 0.842419
|
9aacaa2c9c98de085aff50585e25fcd2964d6c96
| 1,008
|
py
|
Python
|
ml/data_engineering/ETL/extract.py
|
alexnakagawa/tools
|
b5e8c047293247c8781d44607968402f637e597e
|
[
"MIT"
] | null | null | null |
ml/data_engineering/ETL/extract.py
|
alexnakagawa/tools
|
b5e8c047293247c8781d44607968402f637e597e
|
[
"MIT"
] | null | null | null |
ml/data_engineering/ETL/extract.py
|
alexnakagawa/tools
|
b5e8c047293247c8781d44607968402f637e597e
|
[
"MIT"
] | null | null | null |
'''
This is an abstract example of Extracting in an ETL pipeline.
Inspired from the "Introduction to Data Engineering" course on Datacamp.com
Author: Alex Nakagawa
'''
import requests
import pandas as pd
import sqlalchemy
# Fetch the Hackernews post
resp = requests.get("https://hacker-news.firebaseio.com/v0/item/16222426.json")
# Print the response parsed as JSON
print(resp.json())
# Assign the score of the test to post_score
post_score = resp.json()['score']
print(post_score)
# Function to extract table to a pandas DataFrame
def extract_table_to_pandas(tablename, db_engine):
query = "SELECT * FROM {}".format(tablename)
return pd.read_sql(query, db_engine)
# Connect to the database using the connection URI
connection_uri = "postgresql://repl:password@localhost:5432/pagila"
db_engine = sqlalchemy.create_engine(connection_uri)
# Extract the film table into a pandas DataFrame
extract_table_to_pandas("film", db_engine)
# Extract the customer table into a pandas DataFrame
extract_table_to_pandas("customer", db_engine)
| 30.545455
| 79
| 0.779762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 623
| 0.618056
|
9aacd4bc00b3363cbb5a9d413afa93f29eedb771
| 531
|
py
|
Python
|
python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | 1
|
2022-03-06T03:49:31.000Z
|
2022-03-06T03:49:31.000Z
|
python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
python/python-algorithm-intervew/11-hash-table/29-jewels-and-stones-3.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
"""
* Jewels and Stones
J represents jewels and S represents the stones you have. How many of the stones in S are jewels? Letters are case-sensitive.
- Example 1
Input : J = "aA", S = "aAAbbbb"
Output : 3
- Example 2
Input : J = "z", S = "ZZ"
Output : 0
"""
import collections
class Solution:
    # Use Counter to skip manual counting
def numJewelsInStones(self, J: str, S: str) -> int:
freqs = collections.Counter(S)
count = 0
for char in J:
count += freqs[char]
return count
if __name__ == '__main__':
solution = Solution()
print(solution.numJewelsInStones("aA", "aAAbbbb"))
| 19.666667
| 55
| 0.585687
| 238
| 0.386992
| 0
| 0
| 0
| 0
| 0
| 0
| 295
| 0.479675
|
9aad0121a197a064fa70a4456dc468491585ad3b
| 774
|
py
|
Python
|
migrations/versions/e1c435b9e9dc_.py
|
vipshae/todo-lister
|
ca639a3efcc243bebe132ca43c1917a28d4e83a6
|
[
"MIT"
] | null | null | null |
migrations/versions/e1c435b9e9dc_.py
|
vipshae/todo-lister
|
ca639a3efcc243bebe132ca43c1917a28d4e83a6
|
[
"MIT"
] | null | null | null |
migrations/versions/e1c435b9e9dc_.py
|
vipshae/todo-lister
|
ca639a3efcc243bebe132ca43c1917a28d4e83a6
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: e1c435b9e9dc
Revises: 2527092d6a89
Create Date: 2020-06-11 14:22:00.453626
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e1c435b9e9dc'
down_revision = '2527092d6a89'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('todolists', 'completed',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('todolists', 'completed',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
| 23.454545
| 65
| 0.652455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 404
| 0.521964
|
9aad26c087264dde6976cf7bacd6c4bf3d397a51
| 1,345
|
py
|
Python
|
test/test_quilted_contacts_list.py
|
cocoroutine/pyquilted
|
dd8644043deec17608e00f46e3ac4562b8879603
|
[
"MIT"
] | 1
|
2019-02-21T20:10:37.000Z
|
2019-02-21T20:10:37.000Z
|
test/test_quilted_contacts_list.py
|
cocoroutine/pyquilted
|
dd8644043deec17608e00f46e3ac4562b8879603
|
[
"MIT"
] | null | null | null |
test/test_quilted_contacts_list.py
|
cocoroutine/pyquilted
|
dd8644043deec17608e00f46e3ac4562b8879603
|
[
"MIT"
] | null | null | null |
import unittest
from pyquilted.quilted.contact import *
from pyquilted.quilted.contacts_list import ContactsList
class TestContactsList(unittest.TestCase):
def test_contact_list(self):
contacts = ContactsList()
email = EmailContact('jon.snow@winterfell.got')
phone = PhoneContact('555-123-4567')
social_dict = {"handle": "@jonsnow", "sites": ['twitter', 'instagram']}
social = SocialContact(**social_dict)
contacts.append(email)
contacts.append(phone)
contacts.append(social)
valid = [
{
'label': 'email',
'value': 'jon.snow@winterfell.got',
'icons': ['fa-envelope'],
'link': 'mailto:jon.snow@winterfell.got'
},
{
'label': 'phone',
'value': '555-123-4567',
'icons': ['fa-phone'],
'link': 'tel:+15551234567'
},
{
'label': 'social',
'value': '@jonsnow',
'icons': ['fa-twitter', 'fa-instagram'],
'link': None
}
]
self.assertEqual(contacts.serialize(), valid)
if __name__ == '__main__':
unittest.main()
| 31.27907
| 79
| 0.475093
| 1,180
| 0.877323
| 0
| 0
| 0
| 0
| 0
| 0
| 345
| 0.256506
|
9aae954a3239c945002696eff2a9d8adff07720d
| 3,110
|
py
|
Python
|
examples/python/macOS/hack_or_die.py
|
kitazaki/NORA_Badge
|
9b04a57235f0763641ffa8e90e499f141dc57570
|
[
"Apache-2.0"
] | null | null | null |
examples/python/macOS/hack_or_die.py
|
kitazaki/NORA_Badge
|
9b04a57235f0763641ffa8e90e499f141dc57570
|
[
"Apache-2.0"
] | null | null | null |
examples/python/macOS/hack_or_die.py
|
kitazaki/NORA_Badge
|
9b04a57235f0763641ffa8e90e499f141dc57570
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import time
import uuid
import Adafruit_BluefruitLE
CHARACTERISTIC_SERVICE_UUID = uuid.UUID('0000fee0-0000-1000-8000-00805f9b34fb')
CHARACTERISTIC_DATA_UUID = uuid.UUID('0000fee1-0000-1000-8000-00805f9b34fb')
provider = Adafruit_BluefruitLE.get_provider()
def main():
provider.clear_cached_data()
adapter = provider.get_default_adapter()
if not adapter.is_powered:
adapter.power_on()
print('Searching for device...')
try:
adapter.start_scan()
device = provider.find_device(service_uuids=[CHARACTERISTIC_SERVICE_UUID])
if device is None:
raise RuntimeError('Failed to find device!')
else:
print(device)
print('device: {0}'.format(device.name))
print('id: {0}'.format(device.id))
finally:
adapter.stop_scan()
print('Connecting to device...')
device.connect()
try:
print('Discovering services...')
device.discover([CHARACTERISTIC_SERVICE_UUID], [CHARACTERISTIC_DATA_UUID])
service = device.find_service(CHARACTERISTIC_SERVICE_UUID)
print('service uuid: {0}'.format(service.uuid))
data = service.find_characteristic(CHARACTERISTIC_DATA_UUID)
print('characteristic uuid: {0}'.format(data.uuid))
print('Writing Data..')
bs = bytes(range(16))
bs = b'\x77\x61\x6E\x67\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\x00\x00\x00\x00\xE1\x0C\x06\x17\x2D\x23\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\xfe\xc6\xc6'
data.write_value(bs)
time.sleep(0.1)
bs = b'\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\xfe\xc6\xc0\xc0\xc6\xc6\xc6\xfe'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\x00\xc6\xcc\xd8\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7c\x6c\x6c'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x7c\x00\x00\x00\x00\x00\x00\x00\x6c\x78\x70\x60\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\xce\xc6\xc6\xc6\xc6'
data.write_value(bs)
time.sleep(0.1)
bs = b'\xce\xf8\x00\x00\x00\x30\x30\x30\x30\x30\x30\x30\x30\x00\x00\x00'
data.write_value(bs)
time.sleep(0.1)
bs = b'\xfe\xc0\xc0\xfe\xc0\xc0\xc0\xfe\x00\x00\x00\x00\x00\x00\x00\x00'
data.write_value(bs)
time.sleep(3)
print('Writing done.')
finally:
device.disconnect()
provider.initialize()
provider.run_mainloop_with(main)
| 37.02381
| 82
| 0.632797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,077
| 0.346302
|
9aaec48386d244bd541a612785f13979caec8fe3
| 4,902
|
py
|
Python
|
turkish_morphology/validate_test.py
|
nogeeky/turkish-morphology
|
64881f23dad87c6f470d874030f6b5f33fe1a9eb
|
[
"Apache-2.0"
] | 157
|
2019-05-20T13:05:43.000Z
|
2022-03-23T16:36:31.000Z
|
turkish_morphology/validate_test.py
|
OrenBochman/turkish-morphology
|
8f33046722ce204ccc51739687921ab041bed254
|
[
"Apache-2.0"
] | 9
|
2019-09-11T08:17:12.000Z
|
2022-03-15T18:29:01.000Z
|
turkish_morphology/validate_test.py
|
OrenBochman/turkish-morphology
|
8f33046722ce204ccc51739687921ab041bed254
|
[
"Apache-2.0"
] | 30
|
2019-09-29T06:50:01.000Z
|
2022-03-13T15:31:10.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for turkish_morphology.validate."""
import os
from turkish_morphology import analysis_pb2
from turkish_morphology import validate
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
_TESTDATA_DIR = "turkish_morphology/testdata"
def _read_file(path):
with open(path, "r") as f:
read = f.read()
return read
def _read_analysis(basename):
path = os.path.join(_TESTDATA_DIR, f"{basename}.pbtxt")
return text_format.Parse(_read_file(path), analysis_pb2.Analysis())
class AnalysisTest(parameterized.TestCase):
@parameterized.named_parameters([
{
"testcase_name": "SingleInflectionalGroupsWithProperFeature",
"basename": "araba_with_proper",
},
{
"testcase_name": "SingleInflectionalGroupsWithoutProperFeature",
"basename": "araba_without_proper",
},
{
"testcase_name": "MultipleInflectionalGroupsWithProperFeature",
"basename": "yasa_with_proper",
},
{
"testcase_name": "MultipleInflectionalGroupsWithoutProperFeature",
"basename": "yasa_without_proper",
},
])
def test_success(self, basename):
analysis = _read_analysis(basename)
actual = validate.analysis(analysis)
self.assertIsNone(actual)
@parameterized.named_parameters([
{
"testcase_name": "AnalysisMissingInflectionalGroups",
"basename": "invalid_empty_analysis",
"message": "Analysis is missing inflectional groups",
},
{
"testcase_name": "InflectionalGroupMissingPartOfSpeechTag",
"basename": "invalid_ig_missing_pos",
"message": "Inflectional group 2 is missing part-of-speech tag",
},
{
"testcase_name": "InflectionalGroupEmptyPartOfSpeechTag",
"basename": "invalid_ig_empty_pos",
"message": "Inflectional group 2 part-of-speech tag is empty",
},
{
"testcase_name": "FirstInflectionalGroupMissingRoot",
"basename": "invalid_first_ig_missing_root",
"message": "Inflectional group 1 is missing root",
},
{
"testcase_name": "DerivedInflectionalGroupMissingDerivation",
"basename": "invalid_derived_ig_missing_derivation",
"message": "Inflectional group 2 is missing derivational affix",
},
{
"testcase_name": "AffixMissingFeature",
"basename": "invalid_affix_missing_feature",
"message": "Affix is missing feature",
},
{
"testcase_name": "DerivationalAffixMissingMetaMorpheme",
"basename": "invalid_derivational_affix_missing_meta_morpheme",
"message": "Derivational affix is missing meta-morpheme",
},
{
"testcase_name": "DerivationalAffixEmptyMetaMorpheme",
"basename": "invalid_derivational_affix_empty_meta_morpheme",
"message": "Derivational affix meta-morpheme is empty",
},
{
"testcase_name": "FeatureMissingCategory",
"basename": "invalid_feature_missing_category",
"message": "Feature is missing category",
},
{
"testcase_name": "FeatureEmptyCategory",
"basename": "invalid_feature_empty_category",
"message": "Feature category is empty",
},
{
"testcase_name": "FeatureMissingValue",
"basename": "invalid_feature_missing_value",
"message": "Feature is missing value",
},
{
"testcase_name": "FeatureEmptyValue",
"basename": "invalid_feature_empty_value",
"message": "Feature value is empty",
},
{
"testcase_name": "RootMissingMorpheme",
"basename": "invalid_root_missing_morpheme",
"message": "Root is missing morpheme",
},
{
"testcase_name": "RootEmptyMorpheme",
"basename": "invalid_root_empty_morpheme",
"message": "Root morpheme is empty",
},
])
def test_raises_exception(self, basename, message):
analysis = _read_analysis(basename)
with self.assertRaisesRegexp(validate.IllformedAnalysisError, message):
validate.analysis(analysis)
if __name__ == "__main__":
absltest.main()
| 33.346939
| 76
| 0.659935
| 3,695
| 0.753774
| 0
| 0
| 3,644
| 0.74337
| 0
| 0
| 2,908
| 0.593227
|
9aaf20b86321deb4ac2d2c3951af5c3c52764470
| 115
|
py
|
Python
|
rplint/__main__.py
|
lpozo/rplint
|
907cb5342827b2c38e79721bc2dc99b3b6f7912b
|
[
"MIT"
] | 7
|
2020-09-10T15:39:07.000Z
|
2021-02-15T17:45:04.000Z
|
rplint/__main__.py
|
lpozo/rplint
|
907cb5342827b2c38e79721bc2dc99b3b6f7912b
|
[
"MIT"
] | 6
|
2020-11-11T02:42:37.000Z
|
2021-03-17T01:00:27.000Z
|
rplint/__main__.py
|
lpozo/rplint
|
907cb5342827b2c38e79721bc2dc99b3b6f7912b
|
[
"MIT"
] | 3
|
2020-11-11T02:10:22.000Z
|
2020-12-12T01:02:29.000Z
|
#!/usr/bin/env python3
from .main import rplint
if __name__ == "__main__":
rplint.main(prog_name=__package__)
| 19.166667
| 38
| 0.730435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.278261
|
9ab1353597b9195d65b8c371888b502f56866647
| 3,368
|
py
|
Python
|
physicspy/optics/jones.py
|
suyag/physicspy
|
f2b29a72cb08b1de170274b3e35c3d8eda32f9e1
|
[
"MIT"
] | null | null | null |
physicspy/optics/jones.py
|
suyag/physicspy
|
f2b29a72cb08b1de170274b3e35c3d8eda32f9e1
|
[
"MIT"
] | null | null | null |
physicspy/optics/jones.py
|
suyag/physicspy
|
f2b29a72cb08b1de170274b3e35c3d8eda32f9e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import division
from numpy import sqrt, cos, sin, arctan, exp, abs, pi, conj, real
from numpy import array, dot, sum
class JonesVector:
""" A Jones vector class to represent polarized EM waves """
def __init__(self,Jarray=array([1,0])):
self.Jx = Jarray[0]
self.Jy = Jarray[1]
def size(self):
""" Jones vector size """
return sqrt(dot(self.toArray().conj(),self.toArray()).real)
def normalize(self):
""" Normalized Jones vector """
result = self
try:
size = result.size()
if size == 0:
raise Exception('Zero-sized Jones vector cannot be normalized')
result.Jx /= size
result.Jy /= size
except Exception as inst:
print "Error: ",inst
finally:
return result
def toArray(self):
""" Convert into array format """
return array([self.Jx, self.Jy])
def rotate(self,phi):
""" Rotated Jones vector
Argument:
phi - rotation angle in radians (clockwise is positive)
"""
R = array([[cos(phi), sin(phi)], \
[-sin(phi), cos(phi)]])
return JonesVector(dot(R, self.toArray()))
def waveplate(self,G):
""" Waveplate with arbitrary retardance
Slow axis (or "c axis") is along X
Argument:
G - retartandance in phase units
(e.g. one wavelength retardance is G = 2 * pi)
"""
W0 = array([[exp(-1j*G/2), 0], \
[0, exp(1j*G/2)]])
return JonesVector(dot(W0, self.toArray()))
def waveplateRot(self,phi,G):
""" Waveplate matrix with arbitrary rotation
Arguments:
phi - rotation angle in radians
(clockwise is positive)
G - retardance in phase units
(e.g. one wavelength retardance is G = 2 * pi)
"""
return self.rotate(phi).waveplate(G).rotate(-phi)
def pol(self,phi):
""" Polarizer matrix """
P = array([[cos(phi)**2, cos(phi)*sin(phi)], \
[sin(phi)*cos(phi), sin(phi)**2]])
return JonesVector(dot(P, self.toArray()))
def mirrormetal(self,n,k,th):
""" Reflection off a metal mirror
Incoming and reflected beams are assumed to be in the X plane
"""
dr = mphase(n,k,th);
W0 = array([[dr[3]*exp(-1j*dr[1]), 0],\
[0, dr[2]*exp(-1j*dr[0])]])
return JonesVector(dot(W0, self.toArray()))
def intensity(self):
""" Intensity from electric field vector """
return real(self.Jx)**2 + real(self.Jy)**2
def mphase(n,k,th):
""" Calculate phase shift and reflectance of a metal in the s and p directions"""
u = sqrt(0.5 *((n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
v = sqrt(0.5*(-(n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
ds = arctan(2*v*cos(th)/(u**2+v**2-cos(th)**2));
dp = arctan(2*v*cos(th)*(n**2-k**2-2*u**2)/(u**2+v**2-(n**2+k**2)**2*cos(th)**2));
if(dp < 0):
dp = dp+pi;
rs = abs((cos(th) - (u+v*1j))/(cos(th) + (u+v*1j)))
rp = abs(((n**2 + k**2)*cos(th) - (u+v*1j))/((n**2 + k**2)*cos(th) + (u+v*1j)));
return array([ds, dp, rs, rp])
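# --- Usage sketch (illustrative addition, not part of the original module) ---
# Sends a horizontally polarized Jones vector through a quarter-wave plate
# rotated by 45 degrees and then through a vertical polarizer; the optical
# layout is chosen purely for demonstration.
if __name__ == '__main__':
    horizontal = JonesVector(array([1, 0]))
    after_qwp = horizontal.waveplateRot(pi / 4, pi / 2)  # quarter-wave plate at 45 degrees
    after_pol = after_qwp.pol(pi / 2)                    # vertical (y-axis) polarizer
    print('|E| in:', horizontal.size(),
          '| after QWP:', after_qwp.size(),
          '| after polarizer:', after_pol.size())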
| 34.367347
| 101
| 0.518705
| 2,551
| 0.757423
| 0
| 0
| 0
| 0
| 0
| 0
| 1,101
| 0.3269
|
9ab5d8227882ea8202fdc93b49f22e935bbc0e93
| 2,560
|
py
|
Python
|
aiida/cmdline/params/options/config.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2020-10-01T17:11:58.000Z
|
2020-10-01T17:11:58.000Z
|
aiida/cmdline/params/options/config.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | 17
|
2020-03-11T17:04:05.000Z
|
2020-05-01T09:34:45.000Z
|
aiida/cmdline/params/options/config.py
|
louisponet/aiida-core
|
3214236df66a3792ee57fe38a06c0c3bb65861ab
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=cyclic-import
"""
.. py:module::config
:synopsis: Convenience class for configuration file option
"""
import click_config_file
import yaml
from .overridable import OverridableOption
def yaml_config_file_provider(handle, cmd_name): # pylint: disable=unused-argument
"""Read yaml config file from file handle."""
return yaml.safe_load(handle)
class ConfigFileOption(OverridableOption):
"""
Wrapper around click_config_file.configuration_option that increases reusability.
Example::
CONFIG_FILE = ConfigFileOption('--config', help='A configuration file')
@click.command()
@click.option('computer_name')
@CONFIG_FILE(help='Configuration file for computer_setup')
def computer_setup(computer_name):
click.echo(f"Setting up computer {computername}")
computer_setup --config config.yml
with config.yml::
---
computer_name: computer1
"""
def __init__(self, *args, **kwargs):
"""
Store the default args and kwargs.
:param args: default arguments to be used for the option
:param kwargs: default keyword arguments to be used that can be overridden in the call
"""
kwargs.update({'provider': yaml_config_file_provider, 'implicit': False})
super().__init__(*args, **kwargs)
def __call__(self, **kwargs):
"""
Override the stored kwargs, (ignoring args as we do not allow option name changes) and return the option.
:param kwargs: keyword arguments that will override those set in the construction
:return: click_config_file.configuration_option constructed with args and kwargs defined during construction
and call of this instance
"""
kw_copy = self.kwargs.copy()
kw_copy.update(kwargs)
return click_config_file.configuration_option(*self.args, **kw_copy)
| 36.056338
| 116
| 0.605078
| 1,550
| 0.605469
| 0
| 0
| 0
| 0
| 0
| 0
| 1,981
| 0.773828
|
9ab6d13a500341cc43c1e83dfab97d3f76d1b8d3
| 460
|
py
|
Python
|
vaccine_feed_ingest/runners/ct/state/parse.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | 27
|
2021-04-24T02:11:18.000Z
|
2021-05-17T00:54:45.000Z
|
vaccine_feed_ingest/runners/ct/state/parse.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | 574
|
2021-04-06T18:09:11.000Z
|
2021-08-30T07:55:06.000Z
|
vaccine_feed_ingest/runners/ct/state/parse.py
|
jeremyschlatter/vaccine-feed-ingest
|
215f6c144fe5220deaccdb5db3e96f28b7077b3f
|
[
"MIT"
] | 47
|
2021-04-23T05:31:14.000Z
|
2021-07-01T20:22:46.000Z
|
#!/usr/bin/env python
import json
import pathlib
import sys
input_dir = pathlib.Path(sys.argv[2])
output_dir = pathlib.Path(sys.argv[1])
output_file = output_dir / "data.parsed.ndjson"
results = []
for input_file in input_dir.glob("data.raw.*.json"):
with input_file.open() as fin:
results.extend(json.load(fin)["results"])
with output_file.open("w") as fout:
for result in results:
json.dump(result, fout)
fout.write("\n")
| 23
| 52
| 0.680435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.16087
|
9ab9d917b353cf0f8ea3e285cac62732af59e404
| 563
|
py
|
Python
|
python_learning/exception_redefinition.py
|
KonstantinKlepikov/all-python-ml-learning
|
a8a41347b548828bb8531ccdab89c622a0be20e1
|
[
"MIT"
] | null | null | null |
python_learning/exception_redefinition.py
|
KonstantinKlepikov/all-python-ml-learning
|
a8a41347b548828bb8531ccdab89c622a0be20e1
|
[
"MIT"
] | null | null | null |
python_learning/exception_redefinition.py
|
KonstantinKlepikov/all-python-ml-learning
|
a8a41347b548828bb8531ccdab89c622a0be20e1
|
[
"MIT"
] | 1
|
2020-12-23T19:32:51.000Z
|
2020-12-23T19:32:51.000Z
|
# example of redefinition __repr__ and __str__ of exception
class MyBad(Exception):
def __str__(self):
return 'My mistake!'
class MyBad2(Exception):
def __repr__(self):
        return 'Not callable'  # because the built-in method already has __str__
try:
raise MyBad('spam')
except MyBad as X:
print(X) # My mistake!
print(X.args) # ('spam',)
try:
raise MyBad2('spam')
except MyBad2 as X:
print(X) # spam
print(X.args) # ('spam',)
raise MyBad('spam') # __main__.MyBad: My mistake!
# raise MyBad2('spam') # __main__.MyBad2: spam
| 20.107143
| 65
| 0.648313
| 191
| 0.339254
| 0
| 0
| 0
| 0
| 0
| 0
| 257
| 0.456483
|
9abaab450ac2ca5229b853ff9168c5720ce319bf
| 7,998
|
py
|
Python
|
difPy/dif.py
|
ppizarror/Duplicate-Image-Finder
|
371d70454531d1407b06d98f3e3bdc5e3fc03f49
|
[
"MIT"
] | null | null | null |
difPy/dif.py
|
ppizarror/Duplicate-Image-Finder
|
371d70454531d1407b06d98f3e3bdc5e3fc03f49
|
[
"MIT"
] | null | null | null |
difPy/dif.py
|
ppizarror/Duplicate-Image-Finder
|
371d70454531d1407b06d98f3e3bdc5e3fc03f49
|
[
"MIT"
] | null | null | null |
import skimage.color
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import imghdr
import time
"""
Duplicate Image Finder (DIF): function that searches a given directory for images and finds duplicate/similar images among them.
Outputs the number of found duplicate/similar image pairs with a list of the filenames having lower resolution.
"""
class dif:
def compare_images(directory, show_imgs=False, similarity="normal", px_size=50, delete=False):
"""
directory (str)......folder path to search for duplicate/similar images
show_imgs (bool).....False = omits the output and doesn't show found images
True = shows duplicate/similar images found in output
similarity (str)....."normal" = searches for duplicates, recommended setting, MSE < 200
"high" = serached for exact duplicates, extremly sensitive to details, MSE < 0.1
"low" = searches for similar images, MSE < 1000
px_size (int)........recommended not to change default value
resize images to px_size height x width (in pixels) before being compared
                             the higher the pixel size, the more computational resources and time required
delete (bool)........! please use with care, as this cannot be undone
lower resolution duplicate images that were found are automatically deleted
OUTPUT (set).........a set of the filenames of the lower resolution duplicate images
"""
# list where the found duplicate/similar images are stored
duplicates = []
lower_res = []
imgs_matrix = dif.create_imgs_matrix(directory, px_size)
# search for similar images, MSE < 1000
if similarity == "low":
ref = 1000
        # search for exact duplicate images, extremely sensitive, MSE < 0.1
elif similarity == "high":
ref = 0.1
# normal, search for duplicates, recommended, MSE < 200
else:
ref = 200
main_img = 0
compared_img = 1
nrows, ncols = px_size, px_size
srow_A = 0
erow_A = nrows
srow_B = erow_A
erow_B = srow_B + nrows
while erow_B <= imgs_matrix.shape[0]:
while compared_img < (len(image_files)):
# select two images from imgs_matrix
imgA = imgs_matrix[srow_A: erow_A, # rows
0: ncols] # columns
imgB = imgs_matrix[srow_B: erow_B, # rows
0: ncols] # columns
# compare the images
rotations = 0
while image_files[main_img] not in duplicates and rotations <= 3:
if rotations != 0:
imgB = dif.rotate_img(imgB)
err = dif.mse(imgA, imgB)
if err < ref:
if show_imgs:
dif.show_img_figs(imgA, imgB, err)
dif.show_file_info(compared_img, main_img)
dif.add_to_list(image_files[main_img], duplicates)
dif.check_img_quality(directory, image_files[main_img], image_files[compared_img], lower_res)
rotations += 1
srow_B += nrows
erow_B += nrows
compared_img += 1
srow_A += nrows
erow_A += nrows
srow_B = erow_A
erow_B = srow_B + nrows
main_img += 1
compared_img = main_img + 1
msg = "\n***\nFound " + str(len(duplicates)) + " duplicate image pairs in " + str(
len(image_files)) + " total images.\n\nThe following files have lower resolution:"
print(msg)
print(lower_res, "\n")
time.sleep(0.5)
if delete:
usr = input("Are you sure you want to delete all lower resolution duplicate images? (y/n)")
if str(usr) == "y":
dif.delete_imgs(directory, set(lower_res))
else:
print("Image deletion canceled.")
return set(lower_res)
else:
return set(lower_res)
def _process_directory(directory):
directory += os.sep
if not os.path.isdir(directory):
raise FileNotFoundError(f"Directory: " + directory + " does not exist")
return directory
# Function that searches the folder for image files, converts them to a matrix
def create_imgs_matrix(directory, px_size):
directory = dif._process_directory(directory)
global image_files
image_files = []
# create list of all files in directory
folder_files = [filename for filename in os.listdir(directory)]
# create images matrix
counter = 0
for filename in folder_files:
if not os.path.isdir(directory + filename) and imghdr.what(directory + filename):
img = cv2.imdecode(np.fromfile(directory + filename, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
if type(img) == np.ndarray:
img = img[..., 0:3]
img = cv2.resize(img, dsize=(px_size, px_size), interpolation=cv2.INTER_CUBIC)
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
if counter == 0:
imgs_matrix = img
image_files.append(filename)
counter += 1
else:
imgs_matrix = np.concatenate((imgs_matrix, img))
image_files.append(filename)
return imgs_matrix
# Function that calulates the mean squared error (mse) between two image matrices
def mse(imageA, imageB):
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
# Function that plots two compared image files and their mse
def show_img_figs(imageA, imageB, err):
fig = plt.figure()
plt.suptitle("MSE: %.2f" % (err))
# plot first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(imageA, cmap=plt.cm.gray)
plt.axis("off")
# plot second image
ax = fig.add_subplot(1, 2, 2)
plt.imshow(imageB, cmap=plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
# Function for rotating an image matrix by a 90 degree angle
def rotate_img(image):
image = np.rot90(image, k=1, axes=(0, 1))
return image
# Function for printing filename info of plotted image files
def show_file_info(compared_img, main_img):
print("Duplicate file: " + image_files[main_img] + " and " + image_files[compared_img])
# Function for appending items to a list
def add_to_list(filename, list):
list.append(filename)
# Function for checking the quality of compared images, appends the lower quality image to the list
def check_img_quality(directory, imageA, imageB, list):
directory = dif._process_directory(directory)
size_imgA = os.stat(directory + imageA).st_size
size_imgB = os.stat(directory + imageB).st_size
if size_imgA > size_imgB:
dif.add_to_list(imageB, list)
else:
dif.add_to_list(imageA, list)
def delete_imgs(directory, filenames_set):
directory = dif._process_directory(directory)
print("\nDeletion in progress...")
deleted = 0
for filename in filenames_set:
try:
os.remove(directory + filename)
print("Deleted file:", filename)
deleted += 1
except:
print("Could not delete file:", filename)
print("\n***\nDeleted", deleted, "duplicates.")
| 41.65625
| 128
| 0.572893
| 7,625
| 0.953363
| 0
| 0
| 0
| 0
| 0
| 0
| 2,686
| 0.335834
|
9abc03c9cf82f6250f6e274347a435222a3060a0
| 1,572
|
py
|
Python
|
minmax.py
|
jeffmorais/estrutura-de-dados
|
e7088df4fe753af106b4642c5e147d578a466c3b
|
[
"MIT"
] | 1
|
2016-02-16T13:52:00.000Z
|
2016-02-16T13:52:00.000Z
|
minmax.py
|
jeffmorais/estrutura-de-dados
|
e7088df4fe753af106b4642c5e147d578a466c3b
|
[
"MIT"
] | null | null | null |
minmax.py
|
jeffmorais/estrutura-de-dados
|
e7088df4fe753af106b4642c5e147d578a466c3b
|
[
"MIT"
] | null | null | null |
# The min_max function must run in O(n) and the code may not use any
# Python built-in helpers for the job (sort, min, max, etc.)
# No loops (while, for) are allowed; the function must be recursive,
# or delegate the solution to a purely recursive helper
import unittest
def bora(cont, seq, min, max):
if cont < len(seq):
if int(seq[cont]) > int(seq[cont + 1]) and int(seq[cont]) > max:
max = int(seq[cont])
if int(seq[cont]) < int(seq[cont + 1]) and int(seq[cont]) < min:
min = int(seq[cont])
cont = cont + 1
if cont == (len(seq) - 1):
if int(seq[len(seq) - 1]) > max:
max = int(seq[len(seq) - 1])
if int(seq[len(seq) - 1]) < min:
min = int(seq[len(seq) - 1])
return (min, max)
return bora(cont, seq, min, max)
def min_max(seq):
    '''
    :param seq: a sequence
    :return: (min, max)
    Returns a tuple whose first value (min) is the minimum
    value of the sequence seq.
    The second value is the maximum (max) of the sequence
    O(n)
    '''
if len(seq) == 0:
return (None, None)
if len(seq) == 1:
return seq[0], seq[0]
val = bora(0, seq, seq[0], seq[0])
return val
class MinMaxTestes(unittest.TestCase):
def test_lista_vazia(self):
self.assertTupleEqual((None, None), min_max([]))
def test_lista_len_1(self):
self.assertTupleEqual((1, 1), min_max([1]))
def test_lista_consecutivos(self):
self.assertTupleEqual((0, 500), min_max(list(range(501))))
if __name__ == '__main__':
unittest.main()
| 29.111111
| 72
| 0.588422
| 319
| 0.200629
| 0
| 0
| 0
| 0
| 0
| 0
| 482
| 0.303145
|
9abd21b74954fe3eba3090f8582e570668b4381d
| 3,927
|
py
|
Python
|
news-category-classifcation/build_vocab.py
|
lyeoni/pytorch-nlp-tutorial
|
8cc490adc6cc92d458548e0e73fbbf1db575f049
|
[
"MIT"
] | 1,433
|
2018-12-14T06:20:28.000Z
|
2022-03-31T14:12:50.000Z
|
news-category-classifcation/build_vocab.py
|
itsshaikaslam/nlp-tutorial-1
|
6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a
|
[
"MIT"
] | 14
|
2019-04-03T08:30:23.000Z
|
2021-07-11T11:41:05.000Z
|
news-category-classifcation/build_vocab.py
|
itsshaikaslam/nlp-tutorial-1
|
6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a
|
[
"MIT"
] | 306
|
2018-12-20T09:41:24.000Z
|
2022-03-31T05:07:14.000Z
|
import argparse
import pickle
from tokenization import Vocab, Tokenizer
TOKENIZER = ('treebank', 'mecab')
def argparser():
p = argparse.ArgumentParser()
# Required parameters
p.add_argument('--corpus', default=None, type=str, required=True)
p.add_argument('--vocab', default=None, type=str, required=True)
# Other parameters
p.add_argument('--pretrained_vectors', default=None, type=str)
p.add_argument('--is_sentence', action='store_true',
help='Whether the corpus is already split into sentences')
p.add_argument('--tokenizer', default='treebank', type=str,
help='Tokenizer used for input corpus tokenization: ' + ', '.join(TOKENIZER))
p.add_argument('--max_seq_length', default=1024, type=int,
help='The maximum total input sequence length after tokenization')
p.add_argument('--unk_token', default='<unk>', type=str,
help='The representation for any unknown token')
p.add_argument('--pad_token', default='<pad>', type=str,
help='The representation for the special token of padding token')
p.add_argument('--bos_token', default='<bos>', type=str,
help='The representation for the special token of beginning-of-sequence token')
p.add_argument('--eos_token', default='<eos>', type=str,
help='The representation for the special token of end-of-sequence token')
p.add_argument('--min_freq', default=3, type=int,
help='The minimum frequency required for a token')
p.add_argument('--lower', action='store_true',
help='Whether to convert the texts to lowercase')
config = p.parse_args()
return config
def load_pretrained(fname):
"""
Load pre-trained FastText word vectors
:param fname: text file containing the word vectors, one per line.
"""
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
print('Loading {} word vectors(dim={})...'.format(n, d))
word2vec_dict = {}
for line in fin:
tokens = line.rstrip().split(' ')
word2vec_dict[tokens[0]] = list(map(float, tokens[1:]))
print('#pretrained_word_vectors:', len(word2vec_dict))
return word2vec_dict
if __name__=='__main__':
config = argparser()
print(config)
# Select tokenizer
config.tokenizer = config.tokenizer.lower()
if config.tokenizer==TOKENIZER[0]:
from nltk.tokenize import word_tokenize
tokenization_fn = word_tokenize
elif config.tokenizer ==TOKENIZER[1]:
from konlpy.tag import Mecab
tokenization_fn = Mecab().morphs
tokenizer = Tokenizer(tokenization_fn=tokenization_fn,
is_sentence=config.is_sentence,
max_seq_length=config.max_seq_length)
# Tokenization & read tokens
list_of_tokens = []
    with open(config.corpus, 'r', encoding='utf-8', errors='ignore') as reader:
for li, line in enumerate(reader):
text = ' '.join(line.split('\t')[1:]).strip()
list_of_tokens += tokenizer.tokenize(text)
# Build vocabulary
vocab = Vocab(list_of_tokens=list_of_tokens,
unk_token=config.unk_token,
pad_token=config.pad_token,
bos_token=config.bos_token,
eos_token=config.eos_token,
min_freq=config.min_freq,
lower=config.lower)
vocab.build()
if config.pretrained_vectors:
pretrained_vectors = load_pretrained(fname=config.pretrained_vectors)
vocab.from_pretrained(pretrained_vectors=pretrained_vectors)
print('Vocabulary size: ', len(vocab))
# Save vocabulary
with open(config.vocab, 'wb') as writer:
pickle.dump(vocab, writer)
print('Vocabulary saved to', config.vocab)
| 40.071429
| 98
| 0.638146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,145
| 0.291571
|
9abd5d0a8f6f8a824f776810d4a5b66aeca261fa
| 650
|
py
|
Python
|
lambda-sfn-terraform/src/LambdaFunction.py
|
extremenelson/serverless-patterns
|
c307599ab2759567c581c37d70561e85b0fa8788
|
[
"MIT-0"
] | 1
|
2022-01-12T17:22:02.000Z
|
2022-01-12T17:22:02.000Z
|
lambda-sfn-terraform/src/LambdaFunction.py
|
extremenelson/serverless-patterns
|
c307599ab2759567c581c37d70561e85b0fa8788
|
[
"MIT-0"
] | null | null | null |
lambda-sfn-terraform/src/LambdaFunction.py
|
extremenelson/serverless-patterns
|
c307599ab2759567c581c37d70561e85b0fa8788
|
[
"MIT-0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import os
from aws_lambda_powertools import Logger
logger = Logger()
client = boto3.client('stepfunctions')
sfnArn = os.environ['SFN_ARN']
def lambda_handler(event, context):
# TODO implement
logger.info(f"Received Choice: {event['Choice']}")
response = client.start_execution(
stateMachineArn=sfnArn,
input=json.dumps(event)
)
logger.info(f"Received Response: {response}")
return {
'statusCode': 200,
'body': json.dumps(response,default=str)
}
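# Illustrative invocation (the event shape is assumed from the handler above):
#   lambda_handler({"Choice": "Approve"}, None)
# starts an execution of the state machine referenced by SFN_ARN, passing the
# whole event as its JSON input, and returns a 200 response whose body is the
# serialized start_execution result.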
| 23.214286
| 68
| 0.676923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.349231
|
9abd6d106252aee5d79f8c8f78a07cba499bc3da
| 3,068
|
py
|
Python
|
tests/encryption/aes_decrypter.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 176
|
2015-01-02T13:55:39.000Z
|
2022-03-12T11:44:37.000Z
|
tests/encryption/aes_decrypter.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 495
|
2015-01-13T06:47:06.000Z
|
2022-03-12T11:07:03.000Z
|
tests/encryption/aes_decrypter.py
|
dfjxs/dfvfs
|
a4154b07bb08c3c86afa2847f3224189dd80c138
|
[
"Apache-2.0"
] | 62
|
2015-02-23T08:19:38.000Z
|
2022-03-18T06:01:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the AES decrypter object."""
import unittest
from dfvfs.encryption import aes_decrypter
from dfvfs.lib import definitions
from tests.encryption import test_lib
class AESDecrypterTestCase(test_lib.DecrypterTestCase):
"""Tests for the AES decrypter object."""
_AES_INITIALIZATION_VECTOR = b'This is an IV456'
_AES_KEY = b'This is a key123'
def testInitialization(self):
"""Tests the initialization method."""
# Test missing arguments.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter()
# Test unsupported block cipher mode.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode='bogus', key=self._AES_KEY)
# Test missing initialization vector.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC, key=self._AES_KEY)
# Test missing initialization vector with valid block cipher mode.
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=self._AES_KEY)
# Test incorrect key size.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_ECB, key=b'Wrong key size')
# Test incorrect initialization vector type.
with self.assertRaises(TypeError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector='Wrong IV type', key=self._AES_KEY)
# Test incorrect initialization vector size.
with self.assertRaises(ValueError):
aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=b'Wrong IV size', key=self._AES_KEY)
def testDecrypt(self):
"""Tests the Decrypt method."""
decrypter = aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=self._AES_INITIALIZATION_VECTOR,
key=self._AES_KEY)
# Test full decryption.
expected_decrypted_data = b'This is secret encrypted text!!!'
decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe'
b'\x92j\xf0\x1d(\xb9\x9f\xad\x13', finalize=True)
self.assertEqual(decrypted_data, expected_decrypted_data)
self.assertEqual(remaining_encrypted_data, b'')
# Reset decrypter.
decrypter = aes_decrypter.AESDecrypter(
cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=self._AES_INITIALIZATION_VECTOR,
key=self._AES_KEY)
# Test partial decryption.
partial_encrypted_data = (
b'2|\x7f\xd7\xff\xbay\xf9\x95?\x81\xc7\xaafV\xceB\x01\xdb8E7\xfe')
decrypted_data, remaining_encrypted_data = decrypter.Decrypt(
partial_encrypted_data)
self.assertEqual(decrypted_data, b'')
self.assertEqual(remaining_encrypted_data, partial_encrypted_data)
if __name__ == '__main__':
unittest.main()
| 33.714286
| 77
| 0.730769
| 2,796
| 0.911343
| 0
| 0
| 0
| 0
| 0
| 0
| 848
| 0.276402
|
9abfb5ca61ed6e49fce34592c1824290b02d1d23
| 4,460
|
py
|
Python
|
Crash Course on Python/WEEK 5/solutions.py
|
atharvpuranik/Google-IT-Automation-with-Python-Professional-Certificate
|
4d8fd587fa85ea4db62db6142fbb58cd9c29bb69
|
[
"MIT"
] | 42
|
2020-04-28T09:06:21.000Z
|
2022-01-09T01:01:55.000Z
|
Crash Course on Python/WEEK 5/solutions.py
|
vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate
|
d87dffe924de218f73d61d27689798646824ed6c
|
[
"MIT"
] | null | null | null |
Crash Course on Python/WEEK 5/solutions.py
|
vaquarkhan/Google-IT-Automation-with-Python-Professional-Certificate
|
d87dffe924de218f73d61d27689798646824ed6c
|
[
"MIT"
] | 52
|
2020-05-12T05:29:46.000Z
|
2022-01-26T21:24:08.000Z
|
#Q2
# “If you have an apple and I have an apple and we exchange these apples then
# you and I will still each have one apple. But if you have an idea and I have
# an idea and we exchange these ideas, then each of us will have two ideas.”
# George Bernard Shaw
class Person:
apples = 0
ideas = 0
johanna = Person()
johanna.apples = 1
johanna.ideas = 1
martin = Person()
martin.apples = 2
martin.ideas = 1
def exchange_apples(you, me):
    #Here, despite G.B. Shaw's quote, our characters have started with different amounts of apples so we can better observe the results.
    #We're going to have Martin and Johanna exchange ALL their apples with one another.
#Hint: how would you switch values of variables,
#so that "you" and "me" will exchange ALL their apples with one another?
#Do you need a temporary variable to store one of the values?
#You may need more than one line of code to do that, which is OK.
temp=you.apples
you.apples=me.apples
me.apples=temp
return you.apples, me.apples
def exchange_ideas(you, me):
#"you" and "me" will share our ideas with one another.
#What operations need to be performed, so that each object receives
#the shared number of ideas?
#Hint: how would you assign the total number of ideas to
#each idea attribute? Do you need a temporary variable to store
#the sum of ideas, or can you find another way?
#Use as many lines of code as you need here.
you.ideas =me.ideas+you.ideas
me.ideas =you.ideas
return you.ideas, me.ideas
exchange_apples(johanna, martin)
print("Johanna has {} apples and Martin has {} apples".format(johanna.apples, martin.apples))
exchange_ideas(johanna, martin)
print("Johanna has {} ideas and Martin has {} ideas".format(johanna.ideas, martin.ideas))
#Q3
# define a basic city class
class City:
name = ""
country = ""
elevation = 0
population = 0
# create a new instance of the City class and
# define each attribute
city1 = City()
city1.name = "Cusco"
city1.country = "Peru"
city1.elevation = 3399
city1.population = 358052
# create a new instance of the City class and
# define each attribute
city2 = City()
city2.name = "Sofia"
city2.country = "Bulgaria"
city2.elevation = 2290
city2.population = 1241675
# create a new instance of the City class and
# define each attribute
city3 = City()
city3.name = "Seoul"
city3.country = "South Korea"
city3.elevation = 38
city3.population = 9733509
def max_elevation_city(min_population):
# Initialize the variable that will hold
# the information of the city with
# the highest elevation
highest_elevation=0
return_city =""
# Evaluate the 1st instance to meet the requirements:
# does city #1 have at least min_population and
# is its elevation the highest evaluated so far?
if (city1.population>min_population):
if(highest_elevation<city1.elevation):
highest_elevation=city1.elevation
return_city = ("{}, {}".format(city1.name,city1.country))
# Evaluate the 2nd instance to meet the requirements:
# does city #2 have at least min_population and
# is its elevation the highest evaluated so far?
if(city2.population>min_population):
if (highest_elevation<city2.elevation):
highest_elevation=city2.elevation
return_city = ("{}, {}".format(city2.name,city2.country))
# Evaluate the 3rd instance to meet the requirements:
# does city #3 have at least min_population and
# is its elevation the highest evaluated so far?
if(city3.population>min_population):
if (highest_elevation<city3.elevation):
highest_elevation=city3.elevation
return_city = ("{}, {}".format(city3.name,city3.country))
#Format the return string
if return_city!="":
return return_city
else:
return ""
print(max_elevation_city(100000)) # Should print "Cusco, Peru"
print(max_elevation_city(1000000)) # Should print "Sofia, Bulgaria"
print(max_elevation_city(10000000)) # Should print ""
#Q5
class Furniture:
color = ""
material = ""
table = Furniture()
table.color="brown"
table.material="wood"
couch = Furniture()
couch.color="red"
couch.material="leather"
def describe_furniture(piece):
return ("This piece of furniture is made of {} {}".format(piece.color, piece.material))
print(describe_furniture(table))
# Should be "This piece of furniture is made of brown wood"
print(describe_furniture(couch))
# Should be "This piece of furniture is made of red leather"
| 31.188811
| 140
| 0.722646
| 153
| 0.034274
| 0
| 0
| 0
| 0
| 0
| 0
| 2,346
| 0.525538
|
9ac1c767370071e77aa1a0a522794a49b7886db3
| 205
|
py
|
Python
|
python/test/is_prime.test.py
|
hotate29/kyopro_lib
|
20085381372d2555439980c79887ca6b0809bb77
|
[
"MIT"
] | null | null | null |
python/test/is_prime.test.py
|
hotate29/kyopro_lib
|
20085381372d2555439980c79887ca6b0809bb77
|
[
"MIT"
] | 2
|
2020-10-13T17:02:12.000Z
|
2020-10-17T16:04:48.000Z
|
python/test/is_prime.test.py
|
hotate29/kyopro_lib
|
20085381372d2555439980c79887ca6b0809bb77
|
[
"MIT"
] | null | null | null |
# verification-helper: PROBLEM http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ALDS1_1_C
from python.lib.is_prime import isprime
print(sum(isprime(int(input())) for _ in range(int(input()))))
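# Example run (hypothetical input; the first line is the count of numbers that
# follow, and isprime is assumed to return a truthy value for primes):
#   input:  5, 2, 3, 4, 5, 6  ->  output: 3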
| 25.625
| 97
| 0.756098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 97
| 0.473171
|
9ac242f669af4d52c4d497c2811debd7113e2d03
| 691
|
py
|
Python
|
utils/pad.py
|
Zenodia/nativePytorch_NMT
|
bfced09eb6e5476d34619dfc0dd41d4ed610248f
|
[
"MIT"
] | 60
|
2018-09-28T07:53:11.000Z
|
2020-11-06T11:59:07.000Z
|
utils/pad.py
|
Pravin74/transformer-pytorch
|
c31e163ed57321e405771ef7fb556d4d92fd5efb
|
[
"MIT"
] | 2
|
2021-02-15T14:08:08.000Z
|
2021-09-12T12:52:37.000Z
|
utils/pad.py
|
Pravin74/transformer-pytorch
|
c31e163ed57321e405771ef7fb556d4d92fd5efb
|
[
"MIT"
] | 18
|
2018-09-28T07:56:35.000Z
|
2020-11-24T00:11:33.000Z
|
import torch
import numpy as np
PAD_TOKEN_INDEX = 0
def pad_masking(x, target_len):
# x: (batch_size, seq_len)
batch_size, seq_len = x.size()
padded_positions = x == PAD_TOKEN_INDEX # (batch_size, seq_len)
pad_mask = padded_positions.unsqueeze(1).expand(batch_size, target_len, seq_len)
return pad_mask
def subsequent_masking(x):
# x: (batch_size, seq_len - 1)
batch_size, seq_len = x.size()
subsequent_mask = np.triu(np.ones(shape=(seq_len, seq_len)), k=1).astype('uint8')
subsequent_mask = torch.tensor(subsequent_mask).to(x.device)
subsequent_mask = subsequent_mask.unsqueeze(0).expand(batch_size, seq_len, seq_len)
return subsequent_mask
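# Usage sketch with hypothetical token ids (0 is the pad id); runs only when
# this file is executed directly, so importing the module is unaffected.
if __name__ == '__main__':
    x = torch.tensor([[5, 7, PAD_TOKEN_INDEX], [3, PAD_TOKEN_INDEX, PAD_TOKEN_INDEX]])
    print(pad_masking(x, target_len=3).shape)   # expected: torch.Size([2, 3, 3])
    print(subsequent_masking(x).shape)          # expected: torch.Size([2, 3, 3])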
| 32.904762
| 87
| 0.723589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.124457
|
9ac324779be3fdadd696253340d551fc8f9b954c
| 576
|
py
|
Python
|
jesse/modes/utils.py
|
julesGoullee/jesse
|
49a1ac46715682e8a30df133ce055bf2dfdedb7d
|
[
"MIT"
] | 4
|
2021-02-23T18:23:58.000Z
|
2021-10-10T07:32:41.000Z
|
jesse/modes/utils.py
|
ArdeshirV/jesse
|
2ff415f6768f9ef7cca3e86d8f2f87988d3e7129
|
[
"MIT"
] | null | null | null |
jesse/modes/utils.py
|
ArdeshirV/jesse
|
2ff415f6768f9ef7cca3e86d8f2f87988d3e7129
|
[
"MIT"
] | 2
|
2021-04-30T06:49:26.000Z
|
2022-01-24T09:24:35.000Z
|
from jesse.store import store
from jesse import helpers
from jesse.services import logger
def save_daily_portfolio_balance():
balances = []
# add exchange balances
for key, e in store.exchanges.storage.items():
balances.append(e.assets[helpers.app_currency()])
# add open position values
for key, pos in store.positions.storage.items():
if pos.is_open:
balances.append(pos.pnl)
total = sum(balances)
store.app.daily_balance.append(total)
logger.info('Saved daily portfolio balance: {}'.format(round(total, 2)))
| 27.428571
| 76
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.145833
|
9ac5612f4d7fef57c2d92d9c354db5aaef44d59e
| 1,020
|
py
|
Python
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 278
|
2017-04-27T18:44:06.000Z
|
2022-03-31T02:49:42.000Z
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 57
|
2017-05-01T11:58:41.000Z
|
2022-02-06T18:43:13.000Z
|
Modo/Kits/OD_ModoCopyPasteExternal/lxserv/cmd_copyToExternal.py
|
heimlich1024/OD_CopyPasteExternal
|
943b993198e16d19f1fb4ba44049e498abf1e993
|
[
"Apache-2.0"
] | 49
|
2017-04-28T19:24:14.000Z
|
2022-03-12T15:17:13.000Z
|
################################################################################
#
# cmd_copyToExternal.py
#
# Author: Oliver Hotz | Chris Sprance
#
# Description: Copies Geo/Weights/Morphs/UV's to External File
#
# Last Update:
#
################################################################################
import lx
import lxifc
import lxu.command
from od_copy_paste_external import copy_to_external
class ODCopyToExternal(lxu.command.BasicCommand):
def __init__(self):
lxu.command.BasicCommand.__init__(self)
def cmd_Flags(self):
return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO
def basic_Enable(self, msg):
return True
def cmd_Interact(self):
pass
def basic_Execute(self, msg, flags):
# TODO: Disable reload for release
reload(copy_to_external)
copy_to_external.execute()
def cmd_Query(self, index, vaQuery):
lx.notimpl()
lx.bless(ODCopyToExternal, "OD_CopyToExternal")
| 23.72093
| 81
| 0.560784
| 538
| 0.527451
| 0
| 0
| 0
| 0
| 0
| 0
| 366
| 0.358824
|
9ac6f272c7449b8674bd2e0ae76f212c2c1488d6
| 17,828
|
py
|
Python
|
iotest/case.py
|
gwk/iotest
|
bb5386c8d2e96cf99ca840fc512008ef786c4805
|
[
"CC0-1.0"
] | 1
|
2018-03-24T16:03:15.000Z
|
2018-03-24T16:03:15.000Z
|
iotest/case.py
|
gwk/iotest
|
bb5386c8d2e96cf99ca840fc512008ef786c4805
|
[
"CC0-1.0"
] | 1
|
2016-08-12T19:09:43.000Z
|
2016-08-12T19:09:43.000Z
|
iotest/case.py
|
gwk/iotest
|
bb5386c8d2e96cf99ca840fc512008ef786c4805
|
[
"CC0-1.0"
] | null | null | null |
# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.
import ast
import os
import re
import shlex
from itertools import zip_longest
from string import Template
from typing import *
from .pithy.fs import *
from .pithy.io import *
from .pithy.types import * # type: ignore
from .ctx import Ctx
coverage_name = '_.coven'
class TestCaseError(Exception): pass
class IotParseError(TestCaseError): pass
class FileExpectation:
def __init__(self, path: str, info: Dict[str, str], expand_str_fn: Callable) -> None:
if path.find('..') != -1:
raise TestCaseError(f"file expectation {path}: cannot contain '..'")
self.path = path
self.mode = info.get('mode', 'equal')
validate_exp_mode(path, self.mode)
try:
exp_path = info['path']
except KeyError:
val = info.get('val', '')
else:
if 'val' in info:
raise TestCaseError(f'file expectation {path}: cannot specify both `path` and `val` properties')
exp_path_expanded = expand_str_fn(exp_path)
val = read_from_path(exp_path_expanded)
self.val = expand_str_fn(val)
if self.mode == 'match':
self.match_pattern_pairs = self.compile_match_lines(self.val)
else:
self.match_pattern_pairs = []
self.match_error: Optional[Tuple[int, Optional[Pattern], str]] = None
def compile_match_lines(self, text: str) -> List[Tuple[str, Pattern]]:
return [self.compile_match_line(i, line) for i, line in enumerate(text.splitlines(True), 1)]
def compile_match_line(self, i: int, line: str) -> Tuple[str, Pattern]:
prefix = line[:2]
contents = line[2:]
valid_prefixes = ('|', '|\n', '| ', '~', '~\n', '~ ')
if prefix not in valid_prefixes:
raise TestCaseError("test expectation: {!r};\nmatch line {}: must begin with one of: {}\n{!r}".format(
self.path, i, ', '.join(repr(p) for p in valid_prefixes), line))
if prefix.endswith('\n'):
# these two cases exist to be lenient about empty lines,
# where otherwise the pattern line would consist of the symbol and a single space.
# since trailing space is highlighted by `git diff` and often considered bad style,
# we allow it to be omitted, since there is no loss of generality for the patterns.
contents = '\n'
try:
return (line, re.compile(contents if prefix == '~ ' else re.escape(contents)))
except Exception as e:
raise TestCaseError('test expectation: {!r};\nmatch line {}: pattern is invalid regex:\n{!r}\n{}'.format(
self.path, i, contents, e)) from e
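  # Example of the 'match' expectation syntax handled above (lines are hypothetical):
  #   | exactly this literal line
  #   ~ finished in \d+ ms
  # A '| ' prefix makes the rest of the line a literal match (via re.escape),
  # a '~ ' prefix treats it as a regular expression, and a bare '|' or '~'
  # matches an empty line.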
def __repr__(self) -> str:
return 'FileExpectation({!r}, {!r}, {!r})'.format(self.path, self.mode, self.val)
class ParConfig(NamedTuple):
'''
Parameterized case configuration data.
'''
stem: str
pattern: Pattern[str]
config: Dict
class Case:
'Case represents a single test case, or a default.'
def __init__(self, ctx:Ctx, proto: Optional['Case'], stem: str, config: Dict, par_configs: List[ParConfig],
par_stems_used: Set[str]) -> None:
self.stem: str = path_dir(stem) if path_name(stem) == '_' else stem # TODO: better naming for 'logical stem' (see code in main).
self.name: str = path_name(self.stem)
# derived properties.
self.multi_index: Optional[int] = None
self.test_info_paths: Set[str] = set() # the files that comprise the test case.
self.dflt_src_paths: List[str] = []
self.coverage_targets: List[str] = []
self.test_dir: str = ''
self.test_cmd: List[str] = []
self.test_env: Dict[str, str] = {}
self.test_in: Optional[str] = None
self.test_expectations: List[FileExpectation] = []
self.test_links: List[Tuple[str, str]] = [] # sequence of (orig-name, link-name) pairs.
self.test_par_args: Dict[str, Tuple[str, ...]] = {} # the match groups that resulted from applying the regex for the given parameterized stem.
# configurable properties.
self.args: Optional[List[str]] = None # arguments to follow the file under test.
self.cmd: Optional[List[str]] = None # command string/list with which to invoke the test.
self.coverage: Optional[List[str]] = None # list of names to include in code coverage analysis.
self.code: Optional[int] = None # the expected exit code.
self.compile: Optional[List[Any]] = None # the optional list of compile commands, each a string or list of strings.
self.compile_timeout: Optional[int] = None
self.desc: Optional[str] = None # description.
self.env: Optional[Dict[str, str]] = None # environment variables.
self.err_mode: Optional[str] = None # comparison mode for stderr expectation.
self.err_path: Optional[str] = None # file path for stderr expectation.
self.err_val: Optional[str] = None # stderr expectation value (mutually exclusive with err_path).
self.files: Optional[Dict[str, Dict[str, str]]] = None # additional file expectations.
self.in_: Optional[str] = None # stdin as text.
self.interpreter: Optional[str] = None # interpreter to prepend to cmd.
self.interpreter_args: Optional[List[str]] = None # interpreter args.
self.links: Union[None, Set[str], Dict[str, str]] = None # symlinks to be made into the test directory; written as a str, set or dict.
self.out_mode: Optional[str] = None # comparison mode for stdout expectation.
self.out_path: Optional[str] = None # file path for stdout expectation.
self.out_val: Optional[str] = None # stdout expectation value (mutually exclusive with out_path).
self.skip: Optional[str] = None
self.timeout: Optional[int] = None
try:
if proto is not None:
for key in case_key_validators:
setattr(self, key, getattr(proto, key))
for par_stem, par_re, par_config in par_configs:
m = par_re.fullmatch(stem)
if not m: continue
for key, val in par_config.items():
self.add_val_for_key(ctx, key, val)
self.test_par_args[par_stem] = cast(Tuple[str, ...], m.groups()) # Save the strings matching the parameters to use as arguments.
par_stems_used.add(par_stem) # Mark this parameterized config as used.
for key, val in config.items():
self.add_val_for_key(ctx, key, val)
# do all additional computations now, so as to fail as quickly as possible.
self.derive_info(ctx)
except Exception as e:
outL(f'iotest error: broken test case: {stem}')
outL(f' exception: {type(e).__name__}: {e}.')
# not sure if it makes sense to describe cases for some exceptions;
# for now, just carve out the ones for which it is definitely useless.
if not isinstance(e, IotParseError):
self.describe(stdout)
outL()
exit(1)
def __repr__(self) -> str: return f'Case(stem={self.stem!r}, ...)'
def __lt__(self, other: 'Case') -> bool: return self.stem < other.stem
@property
def coverage_path(self) -> str:
'Returned path is relative to self.test_dir.'
return self.std_name(coverage_name)
@property
def coven_cmd_prefix(self) -> List[str]:
coven_cmd = ['coven', '-output', self.coverage_path]
if self.coverage_targets:
coven_cmd += ['-targets'] + self.coverage_targets
coven_cmd.append('--')
return coven_cmd
def std_name(self, std: str) -> str: return f'{self.name}.{std}'
def describe(self, file: TextIO) -> None:
def stable_repr(val: Any) -> str:
if is_dict(val):
return '{{{}}}'.format(', '.join(f'{k!r}:{v!r}' for k, v in sorted(val.items()))) # sort dict representations. TODO: factor out.
return repr(val)
items = sorted(self.__dict__.items())
writeLSSL(file, 'Case:', *('{}: {}'.format(k, stable_repr(v)) for k, v in items))
def add_val_for_key(self, ctx:Ctx, key:str, val:Any) -> None:
try: name = iot_key_subs[key]
except KeyError: name = key.replace('-', '_')
try:
exp_desc, predicate, validator_fn = case_key_validators[name]
except KeyError as e:
raise TestCaseError(f'invalid config key: {key!r}') from e
if not predicate(val):
raise TestCaseError(f'key: {key!r}: expected value of type: {exp_desc}; received: {val!r}')
if validator_fn:
validator_fn(name, val)
if ctx.dbg:
existing = getattr(self, name)
if existing is not None and existing != val:
errL(f'note: {self.stem}: overriding value for key: {name!r};\n existing: {existing!r}\n incoming: {val!r}')
setattr(self, name, val)
def derive_info(self, ctx: Ctx) -> None:
if self.name == '_default': return # do not process prototype cases.
rel_dir, _, multi_index = self.stem.partition('.')
self.multi_index = int(multi_index) if multi_index else None
self.test_dir = path_join(ctx.build_dir, rel_dir)
env = self.test_env # local alias for convenience.
env['BUILD'] = ctx.build_dir
env['NAME'] = self.name
env['PROJ'] = abs_path(ctx.proj_dir)
env['SRC'] = self.dflt_src_paths[0] if len(self.dflt_src_paths) == 1 else 'NONE'
env['STEM'] = self.stem
env['DIR'] = path_dir(self.stem)
def default_to_env(key: str) -> None:
if key not in env and key in os.environ:
env[key] = os.environ[key]
default_to_env('HOME') # otherwise git fails with "error: Could not expand include path '~/.gitcinclude'".
default_to_env('LANG') # necessary to make std file handles unicode-aware.
default_to_env('NODE_PATH')
default_to_env('PATH')
default_to_env('PYTHONPATH')
default_to_env('SDKROOT')
def expand_str(val: Any) -> str:
t = Template(val)
return t.safe_substitute(env)
def expand(val: Any) -> List[str]:
if val is None:
return []
if is_str(val):
# note: plain strings are expanded first, then split.
# this behavior matches that of shell commands more closely than split-then-expand,
# but introduces all the confusion of shell quoting.
return shlex.split(expand_str(val))
if is_list(val):
return [expand_str(el) for el in val]
raise TestCaseError(f'expand received unexpected value: {val}')
# add the case env one item at a time.
# sorted because we want expansion to be deterministic;
# TODO: should probably expand everything with just the builtins;
# otherwise would need some dependency resolution between vars.
if self.env:
for key, val in sorted(self.env.items()):
if key in env:
raise TestCaseError(f'specified env contains reserved key: {key}')
env[key] = expand_str(val)
self.compile_cmds = [expand(cmd) for cmd in self.compile] if self.compile else []
cmd: List[str] = []
if self.interpreter:
cmd += expand(self.interpreter)
if self.interpreter_args:
if not self.interpreter: raise TestCaseError('interpreter_args specified without interpreter')
cmd += expand(self.interpreter_args)
if self.cmd is not None:
cmd += expand(self.cmd)
elif self.compile_cmds:
cmd += ['./' + self.name]
elif len(self.dflt_src_paths) > 1:
raise TestCaseError(f'no `cmd` specified and multiple default source paths found: {self.dflt_src_paths}')
elif len(self.dflt_src_paths) < 1:
raise TestCaseError('no `cmd` specified and no default source path found')
else:
dflt_path = self.dflt_src_paths[0]
dflt_name = path_name(dflt_path)
self.test_links.append((dflt_path, dflt_name))
prefix = '' if cmd else './'
cmd.append(prefix + dflt_name)
if self.args is None:
par_args = list(self.test_par_args.get(path_stem(dflt_path), ()))
cmd += par_args
if self.args:
cmd += expand(self.args) or []
self.test_cmd = cmd
if self.multi_index and self.links:
raise TestCaseError("non-lead subcase of a multicase cannot specify 'links'")
elif isinstance(self.links, str):
link = expand_str(self.links)
self.test_links += [(link, path_name(link))]
elif isinstance(self.links, set):
self.test_links += sorted((n, path_name(n)) for n in map(expand_str, self.links))
elif isinstance(self.links, dict):
self.test_links += sorted((expand_str(orig), expand_str(link)) for orig, link in self.links.items())
elif self.links is not None:
raise TestCaseError(self.links)
self.coverage_targets = expand(self.coverage)
self.test_in = expand_str(self.in_) if self.in_ is not None else None
def add_std_exp(name:str, mode:Optional[str], path:Optional[str], val:Optional[str]) -> None:
info = {}
if mode is not None: info['mode'] = mode
if path is not None: info['path'] = path
if val is not None: info['val'] = val
exp = FileExpectation(self.std_name(name), info, expand_str)
self.test_expectations.append(exp)
add_std_exp('err', self.err_mode, self.err_path, self.err_val)
add_std_exp('out', self.out_mode, self.out_path, self.out_val)
for path, info in (self.files or {}).items():
exp = FileExpectation(path, info, expand_str)
self.test_expectations.append(exp)
iot_key_subs = {
'.in' : 'in_',
'.err' : 'err_val',
'.out' : 'out_val',
'.dflt_src_paths' : 'dflt_src_paths',
'.test_info_paths' : 'test_info_paths',
'in' : 'in_',
}
def is_int_or_ellipsis(val: Any) -> bool:
return val is Ellipsis or is_int(val)
def is_compile_cmd(val: Any) -> bool:
return is_list(val) and all(is_str_or_list(el) for el in val)
def is_valid_links(val: Any) -> bool:
return is_str(val) or is_set_of_str(val) or is_dict_of_str(val)
def validate_path(key: str, path: Any) -> None:
if not path: raise TestCaseError(f'key: {key}: path is empty: {path!r}')
if '.' in path: raise TestCaseError(f"key: {key}: path cannot contain '.': {path!r}")
def validate_exp_mode(key: str, mode: str) -> None:
if mode not in file_expectation_fns:
raise TestCaseError(f'key: {key}: invalid file expectation mode: {mode}')
def validate_exp_dict(key: str, val: Any) -> None:
if not is_dict(val):
raise TestCaseError(f'file expectation: {key}: value must be a dictionary.')
for k in val:
if k not in ('mode', 'path', 'val'):
raise TestCaseError(f'file expectation: {key}: invalid expectation property: {k}')
def validate_files_dict(key: str, val: Any) -> None:
if not is_dict(val):
raise TestCaseError(f'file expectation: {key}: value must be a dictionary.')
for k, exp_dict in val.items():
if k in ('out', 'err'):
raise TestCaseError(f'key: {key}: {k}: use the standard properties instead ({k}_mode, {k}_path, {k}_val).')
validate_exp_dict(k, exp_dict)
def validate_links_dict(key: str, val: Any) -> None:
if is_str(val):
items = [(val, val)]
elif is_set(val):
items = [(p, p) for p in val]
elif is_dict(val):
items = val.items()
else: raise AssertionError('`validate_links_dict` types inconsistent with `is_valid_links`.')
for orig, link in items:
if orig.find('..') != -1: raise TestCaseError(f"key: {key}: link original contains '..': {orig}")
if link.find('..') != -1: raise TestCaseError(f"key: {key}: link location contains '..': {link}")
case_key_validators: Dict[str, Tuple[str, Callable[[Any], bool], Optional[Callable[[str, Any], None]]]] = {
# key => msg, validator_predicate, validator_fn.
'args': ('string or list of strings', is_str_or_list, None),
'cmd': ('string or list of strings', is_str_or_list, None),
'code': ('int or `...`', is_int_or_ellipsis, None),
'compile': ('list of (str | list of str)', is_compile_cmd, None),
'compile_timeout': ('positive int', is_pos_int, None),
'coverage': ('string or list of strings', is_str_or_list, None),
'desc': ('str', is_str, None),
'dflt_src_paths': ('list of str', is_list_of_str, None),
'env': ('dict of strings', is_dict_of_str, None),
'err_mode': ('str', is_str, validate_exp_mode),
'err_path': ('str', is_str, None),
'err_val': ('str', is_str, None),
'files': ('dict', is_dict, validate_files_dict),
'in_': ('str', is_str, None),
'interpreter': ('string or list of strings', is_str_or_list, None),
'interpreter_args': ('string or list of strings', is_str_or_list, None),
'links': ('string or (dict | set) of strings', is_valid_links, validate_links_dict),
'out_mode': ('str', is_str, validate_exp_mode),
'out_path': ('str', is_str, None),
'out_val': ('str', is_str, None),
'skip': ('bool', is_bool, None),
'test_info_paths': ('set of str', is_set_of_str, None),
'timeout': ('positive int', is_pos_int, None),
}
# file expectation functions.
def compare_equal(exp: FileExpectation, val: str) -> bool:
return exp.val == val # type: ignore
def compare_contain(exp: FileExpectation, val: str) -> bool:
return val.find(exp.val) != -1
def compare_match(exp: FileExpectation, val: str) -> bool:
lines: List[str] = val.splitlines(True)
for i, (pair, line) in enumerate(zip_longest(exp.match_pattern_pairs, lines), 1):
if pair is None:
exp.match_error = (i, None, line)
return False
(pattern, regex) = pair
if line is None or not regex.fullmatch(line):
exp.match_error = (i, pattern, line)
return False
return True
def compare_ignore(exp: FileExpectation, val: str) -> bool:
return True
file_expectation_fns = {
'equal' : compare_equal,
'contain' : compare_contain,
'match' : compare_match,
'ignore' : compare_ignore,
}
| 40.796339
| 146
| 0.648138
| 12,669
| 0.710624
| 0
| 0
| 376
| 0.02109
| 0
| 0
| 5,233
| 0.293527
|
9ac8a3896499bd8c6da3c5ab7c320fbd74dda4ff
| 111
|
py
|
Python
|
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | 4
|
2021-08-09T05:02:23.000Z
|
2022-01-30T03:04:29.000Z
|
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | null | null | null |
aiophotoprism/__init__.py
|
zhulik/aiophotoprism
|
91cc263ffbd85c7dc7ccef6d4cdafdfdaf2a4c85
|
[
"MIT"
] | null | null | null |
"""Asynchronous Python client for the Photoprism REST API."""
from .photoprism import API, Photoprism # noqa
| 27.75
| 61
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.603604
|
9ac8a6eee2b79ed601b853802a3795b71f290223
| 5,558
|
py
|
Python
|
xen/xen-4.2.2/tools/python/scripts/test_vm_create.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2018-02-02T00:15:26.000Z
|
2018-02-02T00:15:26.000Z
|
xen/xen-4.2.2/tools/python/scripts/test_vm_create.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | null | null | null |
xen/xen-4.2.2/tools/python/scripts/test_vm_create.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2019-05-27T09:47:18.000Z
|
2019-05-27T09:47:18.000Z
|
#!/usr/bin/python
vm_cfg = {
'name_label': 'APIVM',
'user_version': 1,
'is_a_template': False,
'auto_power_on': False, # TODO
'memory_static_min': 64,
'memory_static_max': 128,
#'memory_dynamic_min': 64,
#'memory_dynamic_max': 128,
'VCPUs_policy': 'credit',
'VCPUs_params': '',
'VCPUs_number': 2,
'actions_after_shutdown': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '/boot/vmlinuz-2.6.18-xenU',
'PV_ramdisk': '',
'PV_args': 'root=/dev/sda1 ro',
#'HVM_boot': '',
'platform_std_VGA': False,
'platform_serial': '',
'platform_localtime': False,
'platform_clock_offset': False,
'platform_enable_audio': False,
'PCI_bus': ''
}
vdi_cfg = {
'name_label': 'API_VDI',
'name_description': '',
'virtual_size': 100 * 1024 * 1024 * 1024,
'type': 'system',
'parent': '',
'SR_name': 'QCoW',
'sharable': False,
'read_only': False,
}
vbd_cfg = {
'VDI': '',
'VM': '',
'device': 'sda2',
'mode': 'RW',
'type': 'disk',
'driver': 'paravirtualised',
}
local_vdi_cfg = {
'name_label': 'gentoo.amd64.img',
'name_description': '',
'virtual_size': 0,
'type': 'system',
'parent': '',
'SR_name': 'Local',
'sharable': False,
'read_only': False,
'other_config': {'location': 'file:/root/gentoo.amd64.img'},
}
local_vbd_cfg = {
'VDI': '',
'VM': '',
'device': 'sda1',
'mode': 'RW',
'type': 'disk',
'driver': 'paravirtualised',
}
vif_cfg = {
'name': 'API_VIF',
'type': 'paravirtualised',
'device': '',
'network': '',
'MAC': '',
'MTU': 1500,
}
console_cfg = {
'protocol': 'rfb',
'other_config': {'vncunused': 1, 'vncpasswd': 'testing'},
}
import sys
import time
from xapi import connect, execute
def test_vm_create():
server, session = connect()
vm_uuid = None
vdi_uuid = None
local_vdi_uuid = None
local_vbd_uuid = None
vbd_uuid = None
vif_uuid = None
# List all VMs
vm_list = execute(server, 'VM.get_all', (session,))
vm_names = []
for vm_uuid in vm_list:
vm_record = execute(server, 'VM.get_record', (session, vm_uuid))
vm_names.append(vm_record['name_label'])
# Get default SR
sr_list = execute(server, 'SR.get_by_name_label', (session,
vdi_cfg['SR_name']))
sr_uuid = sr_list[0]
local_sr_list = execute(server, 'SR.get_by_name_label',
(session, local_vdi_cfg['SR_name']))
local_sr_uuid = local_sr_list[0]
# Get default network
net_list = execute(server, 'network.get_all', (session,))
net_uuid = net_list[0]
try:
# Create a new VM
vm_uuid = execute(server, 'VM.create', (session, vm_cfg))
# Create a new VDI
vdi_cfg['SR'] = sr_uuid
vdi_uuid = execute(server, 'VDI.create', (session, vdi_cfg))
# Create a VDI backed VBD
vbd_cfg['VM'] = vm_uuid
vbd_cfg['VDI'] = vdi_uuid
vbd_uuid = execute(server, 'VBD.create', (session, vbd_cfg))
# Create a new VDI (Local)
local_vdi_cfg['SR'] = local_sr_uuid
local_vdi_uuid = execute(server, 'VDI.create',
(session, local_vdi_cfg))
# Create a new VBD (Local)
local_vbd_cfg['VM'] = vm_uuid
local_vbd_cfg['VDI'] = local_vdi_uuid
local_vbd_uuid = execute(server, 'VBD.create',
(session, local_vbd_cfg))
# Create a new VIF
vif_cfg['network'] = net_uuid
vif_cfg['VM'] = vm_uuid
vif_uuid = execute(server, 'VIF.create', (session, vif_cfg))
# Create a console
console_cfg['VM'] = vm_uuid
console_uuid = execute(server, 'console.create',
(session, console_cfg))
print console_uuid
# Start the VM
execute(server, 'VM.start', (session, vm_uuid, False))
time.sleep(30)
test_suspend = False
if test_suspend:
print 'Suspending VM..'
execute(server, 'VM.suspend', (session, vm_uuid))
print 'Suspended VM.'
time.sleep(5)
print 'Resuming VM ...'
execute(server, 'VM.resume', (session, vm_uuid, False))
print 'Resumed VM.'
finally:
# Wait for user to say we're good to shut it down
while True:
destroy = raw_input('destroy VM? ')
if destroy[0] in ('y', 'Y'):
break
# Clean up
if vif_uuid:
execute(server, 'VIF.destroy', (session, vif_uuid))
if local_vbd_uuid:
execute(server, 'VBD.destroy', (session, local_vbd_uuid))
if local_vdi_uuid:
execute(server, 'VDI.destroy', (session, local_vdi_uuid))
if vbd_uuid:
execute(server, 'VBD.destroy', (session, vbd_uuid))
if vdi_uuid:
execute(server, 'VDI.destroy', (session, vdi_uuid))
if vm_uuid:
try:
execute(server, 'VM.hard_shutdown', (session, vm_uuid))
time.sleep(2)
except:
pass
execute(server, 'VM.destroy', (session, vm_uuid))
if __name__ == "__main__":
test_vm_create()
| 26.216981
| 75
| 0.542821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,884
| 0.338971
|
9ac8dc710710ba41c77dd17ed479decc6f7a00ea
| 6,171
|
py
|
Python
|
portfolyo/core/pfline/tests/test_single_helper.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
portfolyo/core/pfline/tests/test_single_helper.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
portfolyo/core/pfline/tests/test_single_helper.py
|
rwijtvliet/portfolyo
|
b22948fbc55264ec5d69824e791ca7ef45c6e49c
|
[
"BSD-3-Clause"
] | null | null | null |
from portfolyo import testing, dev
from portfolyo.core.pfline import single_helper
from portfolyo.tools.nits import Q_
from portfolyo.tools.stamps import FREQUENCIES
import pandas as pd
import pytest
def assert_w_q_compatible(freq, w, q):
if freq == "15T":
testing.assert_series_equal(q, w * Q_(0.25, "h"), check_names=False)
elif freq == "H":
testing.assert_series_equal(q, w * Q_(1, "h"), check_names=False)
elif freq == "D":
assert (q > w * Q_(22.99, "h")).all()
assert (q < w * Q_(25.01, "h")).all()
elif freq == "MS":
assert (q > w * 27 * Q_(24, "h")).all()
assert (q < w * 32 * Q_(24, "h")).all()
elif freq == "QS":
assert (q > w * 89 * Q_(24, "h")).all()
assert (q < w * 93 * Q_(24, "h")).all()
elif freq == "AS":
assert (q > w * Q_(8759.9, "h")).all()
assert (q < w * Q_(8784.1, "h")).all()
else:
raise ValueError("Uncaught value for freq: {freq}.")
def assert_p_q_r_compatible(r, p, q):
testing.assert_series_equal(r, q * p, check_names=False)
@pytest.mark.parametrize("tz", ["Europe/Berlin", None])
@pytest.mark.parametrize("freq", FREQUENCIES)
def test_makedataframe_freqtz(freq, tz):
"""Test if dataframe can made from data with various timezones and frequencies."""
i = dev.get_index(freq, tz)
q = dev.get_series(i, "q")
result1 = single_helper.make_dataframe({"q": q})
expected = pd.DataFrame({"q": q})
expected.index.freq = freq
testing.assert_frame_equal(result1, expected, check_names=False)
if tz:
w = q / q.index.duration
result2 = single_helper.make_dataframe({"w": w})
testing.assert_frame_equal(
result2, expected, check_names=False, check_dtype=False
)
@pytest.mark.parametrize("inputtype", ["dict", "df"])
@pytest.mark.parametrize("tz", ["Europe/Berlin", None])
@pytest.mark.parametrize("freq", ["MS", "D"])
@pytest.mark.parametrize(
"columns",
[
"r",
"p",
"q",
"w",
"wq",
"pr",
"wp",
"qp",
"qr",
"wr",
"wqp",
"qpr",
"wqr",
"wpr",
"wqpr",
],
)
def test_makedataframe_consistency(tz, freq, columns, inputtype):
"""Test if conversions are done correctly and inconsistent data raises error."""
i = dev.get_index(freq, tz)
df = dev.get_dataframe(i, columns)
dic = {key: df[key] for key in columns}
if columns in ["r", "wq", "wqp", "wqr", "wpr", "qpr", "wqpr"]: # error cases
with pytest.raises(ValueError):
if inputtype == "dict":
_ = single_helper.make_dataframe(dic)
else:
_ = single_helper.make_dataframe(df)
return
# Actual result.
if inputtype == "dict":
result = single_helper.make_dataframe(dic)
else:
result = single_helper.make_dataframe(df)
# Expected result.
expected = df.rename_axis("ts_left")
if columns == "p": # kind == "p"
expected = df[["p"]]
elif columns in ["q", "w"]: # kind == "q"
if columns == "w":
df["q"] = df.w * df.w.index.duration
expected = df[["q"]]
elif columns in ["pr", "qp", "wp", "qr", "wr"]: # kind == "all"
# fill dataframe first.
if columns == "wp":
df["q"] = df.w * df.w.index.duration
df["r"] = df.p * df.q
elif columns == "pr":
df["q"] = df.r / df.p
df["w"] = df.q / df.index.duration
elif columns == "qp":
df["r"] = df.p * df.q
df["w"] = df.q / df.index.duration
elif columns == "wr":
df["q"] = df.w * df.w.index.duration
df["p"] = df.r / df.q
else:
df["p"] = df.r / df.q
df["w"] = df.q / df.index.duration
assert_p_q_r_compatible(result.r, df.p, result.q)
assert_w_q_compatible(freq, df.w, result.q)
expected = df[["q", "r"]].dropna()
testing.assert_frame_equal(result, expected)
@pytest.mark.parametrize("freq1", ["15T", "D", "MS", "QS"]) # don't do all - many!
@pytest.mark.parametrize("freq2", ["15T", "H", "D", "MS", "QS"])
@pytest.mark.parametrize("columns", ["rp", "wp", "pq", "qr", "wr"])
def test_makedataframe_unequalfrequencies(freq1, freq2, columns):
"""Test if error is raised when creating a dataframe from series with unequal frequencies."""
if freq1 == freq2:
return
kwargs = {"start": "2020", "end": "2021", "closed": "left", "tz": "Europe/Berlin"}
i1 = pd.date_range(**kwargs, freq=freq1)
i2 = pd.date_range(**kwargs, freq=freq2)
s1 = dev.get_series(i1, columns[0])
s2 = dev.get_series(i2, columns[1])
dic = {columns[0]: s1, columns[1]: s2}
with pytest.raises(ValueError):
_ = single_helper.make_dataframe(dic)
@pytest.mark.parametrize("tz", [None, "Europe/Berlin"])
@pytest.mark.parametrize("freq", ["15T", "H", "D", "MS"])
@pytest.mark.parametrize("overlap", [True, False])
def test_makedataframe_unequaltimeperiods(freq, overlap, tz):
"""Test if only intersection is kept for overlapping series, and error is raised
for non-overlapping series."""
kwargs = {"freq": freq, "inclusive": "left", "tz": tz}
start2 = "2020-03-01" if overlap else "2020-07-01"
i1 = pd.date_range(start="2020-01-01", end="2020-06-01", **kwargs)
i2 = pd.date_range(start=start2, end="2020-09-01", **kwargs)
s1 = dev.get_series(i1, "q")
s2 = dev.get_series(i2, "r")
intersection_values = [i for i in s1.index if i in s2.index]
intersection = pd.DatetimeIndex(intersection_values, freq=freq, name="ts_left")
if not overlap:
# raise ValueError("The two timeseries do not have anything in common.")
with pytest.raises(ValueError):
result = single_helper.make_dataframe({"q": s1, "r": s2})
return
result = single_helper.make_dataframe({"q": s1, "r": s2})
testing.assert_index_equal(result.index, intersection)
testing.assert_series_equal(result.q, s1.loc[intersection])
testing.assert_series_equal(result.r, s2.loc[intersection])
| 33
| 97
| 0.580943
| 0
| 0
| 0
| 0
| 5,082
| 0.823529
| 0
| 0
| 1,268
| 0.205477
|
9ac99cea9babd92f880b3baa9bf72af575865d84
| 31,044
|
py
|
Python
|
gomill/mcts_tuners.py
|
BenisonSam/goprime
|
3613f643ee765b4ad48ebdc27bd9f1121b1c5298
|
[
"MIT"
] | null | null | null |
gomill/mcts_tuners.py
|
BenisonSam/goprime
|
3613f643ee765b4ad48ebdc27bd9f1121b1c5298
|
[
"MIT"
] | null | null | null |
gomill/mcts_tuners.py
|
BenisonSam/goprime
|
3613f643ee765b4ad48ebdc27bd9f1121b1c5298
|
[
"MIT"
] | null | null | null |
"""Competitions for parameter tuning using Monte-carlo tree search."""
from __future__ import division
import operator
import random
from heapq import nlargest
from math import exp, log, sqrt
from gomill import compact_tracebacks
from gomill import game_jobs
from gomill import competitions
from gomill import competition_schedulers
from gomill.competitions import (
Competition, NoGameAvailable, CompetitionError, ControlFileError,
Player_config)
from gomill.settings import *
class Node(object):
"""A MCTS node.
Public attributes:
children -- list of Nodes, or None for unexpanded
wins
visits
value -- wins / visits
rsqrt_visits -- 1 / sqrt(visits)
"""
def count_tree_size(self):
if self.children is None:
return 1
return sum(child.count_tree_size() for child in self.children) + 1
def recalculate(self):
"""Update value and rsqrt_visits from changed wins and visits."""
self.value = self.wins / self.visits
self.rsqrt_visits = sqrt(1 / self.visits)
def __getstate__(self):
return (self.children, self.wins, self.visits)
def __setstate__(self, state):
self.children, self.wins, self.visits = state
self.recalculate()
__slots__ = (
'children',
'wins',
'visits',
'value',
'rsqrt_visits',
)
def __repr__(self):
return "<Node:%.2f{%s}>" % (self.value, repr(self.children))
class Tree(object):
"""A tree of MCTS nodes representing N-dimensional parameter space.
Parameters (available as read-only attributes):
splits -- subdivisions of each dimension
(list of integers, one per dimension)
max_depth -- number of generations below the root
initial_visits -- visit count for newly-created nodes
initial_wins -- win count for newly-created nodes
exploration_coefficient -- constant for UCT formula (float)
Public attributes:
root -- Node
dimensions -- number of dimensions in the parameter space
All changing state is in the tree of Node objects started at 'root'.
References to 'optimiser_parameters' below mean a sequence of length
'dimensions', whose values are floats in the range 0.0..1.0 representing
a point in this space.
Each node in the tree represents an N-cuboid of parameter space. Each
expanded node has prod(splits) children, tiling its cuboid.
(The splits are the same in each generation.)
Instantiate with:
all parameters listed above
parameter_formatter -- function optimiser_parameters -> string
"""
def __init__(self, splits, max_depth,
exploration_coefficient,
initial_visits, initial_wins,
parameter_formatter):
self.splits = splits
self.dimensions = len(splits)
self.branching_factor = reduce(operator.mul, splits)
self.max_depth = max_depth
self.exploration_coefficient = exploration_coefficient
self.initial_visits = initial_visits
self.initial_wins = initial_wins
self._initial_value = initial_wins / initial_visits
self._initial_rsqrt_visits = 1 / sqrt(initial_visits)
self.format_parameters = parameter_formatter
# map child index -> coordinate vector
# coordinate vector -- tuple length 'dimensions' with values in
# range(splits[d])
# The first dimension changes most slowly.
self._cube_coordinates = []
for child_index in xrange(self.branching_factor):
v = []
i = child_index
for split in reversed(splits):
i, coord = divmod(i, split)
v.append(coord)
v.reverse()
self._cube_coordinates.append(tuple(v))
def new_root(self):
"""Initialise the tree with an expanded root node."""
self.node_count = 1 # For description only
self.root = Node()
self.root.children = None
self.root.wins = self.initial_wins
self.root.visits = self.initial_visits
self.root.value = self.initial_wins / self.initial_visits
self.root.rsqrt_visits = self._initial_rsqrt_visits
self.expand(self.root)
def set_root(self, node):
"""Use the specified node as the tree's root.
This is used when restoring serialised state.
Raises ValueError if the node doesn't have the expected number of
children.
"""
if not node.children or len(node.children) != self.branching_factor:
raise ValueError
self.root = node
self.node_count = node.count_tree_size()
def expand(self, node):
"""Add children to the specified node."""
assert node.children is None
node.children = []
child_count = self.branching_factor
for _ in xrange(child_count):
child = Node()
child.children = None
child.wins = self.initial_wins
child.visits = self.initial_visits
child.value = self._initial_value
child.rsqrt_visits = self._initial_rsqrt_visits
node.children.append(child)
self.node_count += child_count
def is_ripe(self, node):
"""Say whether a node has been visted enough times to be expanded."""
return node.visits != self.initial_visits
def parameters_for_path(self, choice_path):
"""Retrieve the point in parameter space given by a node.
choice_path -- sequence of child indices
Returns optimiser_parameters representing the centre of the region
of parameter space represented by the node of interest.
choice_path must represent a path from the root to the node of interest.
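        Worked example (hypothetical tree): with splits=[3] and choice_path=[1],
        the node covers the middle third of the unit interval, so this returns
        [0.5], the centre of the region 1/3..2/3.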
"""
lo = [0.0] * self.dimensions
breadths = [1.0] * self.dimensions
for child_index in choice_path:
cube_pos = self._cube_coordinates[child_index]
breadths = [f / split for (f, split) in zip(breadths, self.splits)]
for d, coord in enumerate(cube_pos):
lo[d] += breadths[d] * coord
return [f + .5 * breadth for (f, breadth) in zip(lo, breadths)]
def retrieve_best_parameters(self):
"""Find the parameters with the most promising simulation results.
Returns optimiser_parameters
This walks the tree from the root, at each point choosing the node with
most wins, and returns the parameters corresponding to the leaf node.
"""
simulation = self.retrieve_best_parameter_simulation()
return simulation.get_parameters()
def retrieve_best_parameter_simulation(self):
"""Return the Greedy_simulation used for retrieve_best_parameters."""
simulation = Greedy_simulation(self)
simulation.walk()
return simulation
def get_test_parameters(self):
"""Return a 'typical' optimiser_parameters."""
return self.parameters_for_path([0])
def describe_choice(self, choice):
"""Return a string describing a child's coordinates in its parent."""
return str(self._cube_coordinates[choice]).replace(" ", "")
def describe(self):
"""Return a text description of the current state of the tree.
This currently dumps the full tree to depth 2.
"""
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = self.describe_choice(choice_path[-1])
return "%s %s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
root = self.root
wins = root.wins - self.initial_wins
visits = root.visits - self.initial_visits
try:
win_rate = "%.3f" % (wins / visits)
except ZeroDivisionError:
win_rate = "--"
result = [
"%d nodes" % self.node_count,
"Win rate %d/%d = %s" % (wins, visits, win_rate)
]
for choice, node in enumerate(self.root.children):
result.append(" " + describe_node(node, [choice]))
if node.children is None:
continue
for choice2, node2 in enumerate(node.children):
result.append(" " + describe_node(node2, [choice, choice2]))
return "\n".join(result)
def summarise(self, out, summary_spec):
"""Write a summary of the most-visited parts of the tree.
out -- writeable file-like object
summary_spec -- list of ints
summary_spec says how many nodes to describe at each depth of the tree
(so to show only direct children of the root, pass a list of length 1).
"""
def p(s):
print >> out, s
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = " ".join(map(self.describe_choice, choice_path))
return "%s %-40s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
def most_visits((child_index, node)):
return node.visits
last_generation = [([], self.root)]
for i, n in enumerate(summary_spec):
depth = i + 1
p("most visited at depth %s" % (depth))
this_generation = []
for path, node in last_generation:
if node.children is not None:
this_generation += [
(path + [child_index], child)
for (child_index, child) in enumerate(node.children)]
for path, node in sorted(
nlargest(n, this_generation, key=most_visits)):
p(describe_node(node, path))
last_generation = this_generation
p("")
class Simulation(object):
"""A single monte-carlo simulation.
Instantiate with the Tree the simulation will run in.
Use the methods in the following order:
run()
get_parameters()
update_stats(b)
describe()
"""
def __init__(self, tree):
self.tree = tree
# list of Nodes
self.node_path = []
# corresponding list of child indices
self.choice_path = []
# bool
self.candidate_won = None
def _choose_action(self, node):
"""Choose the best action from the specified node.
Returns a pair (child index, node)
"""
uct_numerator = (self.tree.exploration_coefficient *
sqrt(log(node.visits)))
def urgency((i, child)):
return child.value + uct_numerator * child.rsqrt_visits
start = random.randrange(len(node.children))
children = list(enumerate(node.children))
return max(children[start:] + children[:start], key=urgency)
def walk(self):
"""Choose a node sequence, without expansion."""
node = self.tree.root
while node.children is not None:
choice, node = self._choose_action(node)
self.node_path.append(node)
self.choice_path.append(choice)
def run(self):
"""Choose the node sequence for this simulation.
This walks down from the root, using _choose_action() at each level,
until it reaches a leaf; if the leaf has already been visited, this
expands it and chooses one more action.
"""
self.walk()
node = self.node_path[-1]
if (len(self.node_path) < self.tree.max_depth and
self.tree.is_ripe(node)):
self.tree.expand(node)
choice, child = self._choose_action(node)
self.node_path.append(child)
self.choice_path.append(choice)
def get_parameters(self):
"""Retrieve the parameters corresponding to the simulation's leaf node.
Returns optimiser_parameters
"""
return self.tree.parameters_for_path(self.choice_path)
def update_stats(self, candidate_won):
"""Update the tree's node statistics with the simulation's results.
This updates visits (and wins, if appropriate) for each node in the
simulation's node sequence.
"""
self.candidate_won = candidate_won
for node in self.node_path:
node.visits += 1
if candidate_won:
node.wins += 1
node.recalculate()
self.tree.root.visits += 1
if candidate_won:
self.tree.root.wins += 1 # For description only
self.tree.root.recalculate()
def describe_steps(self):
"""Return a text description of the simulation's node sequence."""
return " ".join(map(self.tree.describe_choice, self.choice_path))
def describe(self):
"""Return a one-line-ish text description of the simulation."""
result = "%s [%s]" % (
self.tree.format_parameters(self.get_parameters()),
self.describe_steps())
if self.candidate_won is not None:
result += (" lost", " won")[self.candidate_won]
return result
def describe_briefly(self):
"""Return a shorter description of the simulation."""
return "%s %s" % (self.tree.format_parameters(self.get_parameters()),
("lost", "won")[self.candidate_won])
class Greedy_simulation(Simulation):
"""Variant of simulation that chooses the node with most wins.
This is used to pick the 'best' parameters from the current state of the
tree.
"""
def _choose_action(self, node):
def wins((i, node)):
return node.wins
return max(enumerate(node.children), key=wins)
parameter_settings = [
Setting('code', interpret_identifier),
Setting('scale', interpret_callable),
Setting('split', interpret_positive_int),
Setting('format', interpret_8bit_string, default=None),
]
class Parameter_config(Quiet_config):
"""Parameter (ie, dimension) description for use in control files."""
# positional or keyword
positional_arguments = ('code',)
# keyword-only
keyword_arguments = tuple(setting.name for setting in parameter_settings
if setting.name != 'code')
class Parameter_spec(object):
"""Internal description of a parameter spec from the configuration file.
Public attributes:
code -- identifier
split -- integer
scale -- function float(0.0..1.0) -> player parameter
format -- string for use with '%'
"""
class Scale_fn(object):
"""Callable implementing a scale function.
Scale_fn classes are used to provide a convenient way to describe scale
functions in the control file (LINEAR, LOG, ...).
"""
class Linear_scale_fn(Scale_fn):
"""Linear scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
self.lower_bound = float(lower_bound)
self.upper_bound = float(upper_bound)
self.range = float(upper_bound - lower_bound)
self.integer = bool(integer)
def __call__(self, f):
result = (f * self.range) + self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Log_scale_fn(Scale_fn):
"""Log scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
if lower_bound == 0.0:
raise ValueError("lower bound is zero")
self.rate = log(upper_bound / lower_bound)
self.lower_bound = lower_bound
self.integer = bool(integer)
def __call__(self, f):
result = exp(self.rate * f) * self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Explicit_scale_fn(Scale_fn):
"""Scale function that returns elements from a list.
Instantiate with the list of values to use.
Normally use this with 'split' equal to the length of the list
(more generally, split**max_depth equal to the length of the list).
"""
def __init__(self, values):
if not values:
raise ValueError("empty value list")
self.values = tuple(values)
self.n = len(values)
def __call__(self, f):
return self.values[int(self.n * f)]
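# Minimal illustration (not part of the original file):
#   Explicit_scale_fn([1, 2, 4, 8])(0.6) returns 4, because int(4 * 0.6) == 2
#   selects the third element of the value list.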
class LINEAR(Config_proxy):
underlying = Linear_scale_fn
class LOG(Config_proxy):
underlying = Log_scale_fn
class EXPLICIT(Config_proxy):
underlying = Explicit_scale_fn
def interpret_candidate_colour(v):
if v in ('r', 'random'):
return 'random'
else:
return interpret_colour(v)
class Mcts_tuner(Competition):
"""A Competition for parameter tuning using the Monte-carlo tree search.
The game ids are strings containing integers starting from zero.
"""
def __init__(self, competition_code, **kwargs):
Competition.__init__(self, competition_code, **kwargs)
self.outstanding_simulations = {}
self.halt_on_next_failure = True
def control_file_globals(self):
result = Competition.control_file_globals(self)
result.update({
'Parameter': Parameter_config,
'LINEAR': LINEAR,
'LOG': LOG,
'EXPLICIT': EXPLICIT,
})
return result
global_settings = (Competition.global_settings +
competitions.game_settings + [
Setting('number_of_games', allow_none(interpret_int), default=None),
Setting('candidate_colour', interpret_candidate_colour),
Setting('log_tree_to_history_period',
allow_none(interpret_positive_int), default=None),
Setting('summary_spec', interpret_sequence_of(interpret_int),
default=(30,)),
Setting('number_of_running_simulations_to_show', interpret_int,
default=12),
])
special_settings = [
Setting('opponent', interpret_identifier),
Setting('parameters',
interpret_sequence_of_quiet_configs(Parameter_config)),
Setting('make_candidate', interpret_callable),
]
# These are used to instantiate Tree; they don't turn into Mcts_tuner
# attributes.
tree_settings = [
Setting('max_depth', interpret_positive_int, default=1),
Setting('exploration_coefficient', interpret_float),
Setting('initial_visits', interpret_positive_int),
Setting('initial_wins', interpret_positive_int),
]
def parameter_spec_from_config(self, parameter_config):
"""Make a Parameter_spec from a Parameter_config.
Raises ControlFileError if there is an error in the configuration.
Returns a Parameter_spec with all attributes set.
"""
arguments = parameter_config.resolve_arguments()
interpreted = load_settings(parameter_settings, arguments)
pspec = Parameter_spec()
for name, value in interpreted.iteritems():
setattr(pspec, name, value)
optimiser_param = 1.0 / (pspec.split * 2)
try:
scaled = pspec.scale(optimiser_param)
except Exception:
raise ValueError(
"error from scale (applied to %s)\n%s" %
(optimiser_param, compact_tracebacks.format_traceback(skip=1)))
if pspec.format is None:
pspec.format = pspec.code + ":%s"
try:
pspec.format % scaled
except Exception:
raise ControlFileError("'format': invalid format string")
return pspec
def initialise_from_control_file(self, config):
Competition.initialise_from_control_file(self, config)
if self.komi == int(self.komi):
raise ControlFileError("komi: must be fractional to prevent jigos")
competitions.validate_handicap(
self.handicap, self.handicap_style, self.board_size)
try:
specials = load_settings(self.special_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
try:
self.opponent = self.players[specials['opponent']]
except KeyError:
raise ControlFileError(
"opponent: unknown player %s" % specials['opponent'])
self.parameter_specs = []
if not specials['parameters']:
raise ControlFileError("parameters: empty list")
seen_codes = set()
for i, parameter_spec in enumerate(specials['parameters']):
try:
pspec = self.parameter_spec_from_config(parameter_spec)
except StandardError, e:
code = parameter_spec.get_key()
if code is None:
code = i
raise ControlFileError("parameter %s: %s" % (code, e))
if pspec.code in seen_codes:
raise ControlFileError(
"duplicate parameter code: %s" % pspec.code)
seen_codes.add(pspec.code)
self.parameter_specs.append(pspec)
self.candidate_maker_fn = specials['make_candidate']
try:
tree_arguments = load_settings(self.tree_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
self.tree = Tree(splits=[pspec.split for pspec in self.parameter_specs],
parameter_formatter=self.format_optimiser_parameters,
**tree_arguments)
# State attributes (*: in persistent state):
# *scheduler -- Simple_scheduler
# *tree -- Tree (root node is persisted)
# outstanding_simulations -- map game_number -> Simulation
# halt_on_next_failure -- bool
# *opponent_description -- string (or None)
def set_clean_status(self):
self.scheduler = competition_schedulers.Simple_scheduler()
self.tree.new_root()
self.opponent_description = None
# Can bump this to prevent people loading incompatible .status files.
status_format_version = 0
def get_status(self):
# path0 is stored for consistency check
return {
'scheduler': self.scheduler,
'tree_root': self.tree.root,
'opponent_description': self.opponent_description,
'path0': self.scale_parameters(self.tree.parameters_for_path([0])),
}
def set_status(self, status):
root = status['tree_root']
try:
self.tree.set_root(root)
except ValueError:
raise CompetitionError(
"status file is inconsistent with control file")
expected_path0 = self.scale_parameters(
self.tree.parameters_for_path([0]))
if status['path0'] != expected_path0:
raise CompetitionError(
"status file is inconsistent with control file")
self.scheduler = status['scheduler']
self.scheduler.rollback()
self.opponent_description = status['opponent_description']
def scale_parameters(self, optimiser_parameters):
l = []
for pspec, v in zip(self.parameter_specs, optimiser_parameters):
try:
l.append(pspec.scale(v))
except Exception:
raise CompetitionError(
"error from scale for %s\n%s" %
(pspec.code, compact_tracebacks.format_traceback(skip=1)))
return tuple(l)
def format_engine_parameters(self, engine_parameters):
l = []
for pspec, v in zip(self.parameter_specs, engine_parameters):
try:
s = pspec.format % v
except Exception:
s = "[%s?%s]" % (pspec.code, v)
l.append(s)
return "; ".join(l)
def format_optimiser_parameters(self, optimiser_parameters):
return self.format_engine_parameters(self.scale_parameters(
optimiser_parameters))
def make_candidate(self, player_code, engine_parameters):
"""Make a player using the specified engine parameters.
Returns a game_jobs.Player.
"""
try:
candidate_config = self.candidate_maker_fn(*engine_parameters)
except Exception:
raise CompetitionError(
"error from make_candidate()\n%s" %
compact_tracebacks.format_traceback(skip=1))
if not isinstance(candidate_config, Player_config):
raise CompetitionError(
"make_candidate() returned %r, not Player" %
candidate_config)
try:
candidate = self.game_jobs_player_from_config(
player_code, candidate_config)
except Exception, e:
raise CompetitionError(
"bad player spec from make_candidate():\n"
"%s\nparameters were: %s" %
(e, self.format_engine_parameters(engine_parameters)))
return candidate
def get_player_checks(self):
test_parameters = self.tree.get_test_parameters()
engine_parameters = self.scale_parameters(test_parameters)
candidate = self.make_candidate('candidate', engine_parameters)
result = []
for player in [candidate, self.opponent]:
check = game_jobs.Player_check()
check.player = player
check.board_size = self.board_size
check.komi = self.komi
result.append(check)
return result
def choose_candidate_colour(self):
if self.candidate_colour == 'random':
return random.choice('bw')
else:
return self.candidate_colour
def get_game(self):
if (self.number_of_games is not None and
self.scheduler.issued >= self.number_of_games):
return NoGameAvailable
game_number = self.scheduler.issue()
simulation = Simulation(self.tree)
simulation.run()
optimiser_parameters = simulation.get_parameters()
engine_parameters = self.scale_parameters(optimiser_parameters)
candidate = self.make_candidate("#%d" % game_number, engine_parameters)
self.outstanding_simulations[game_number] = simulation
job = game_jobs.Game_job()
job.game_id = str(game_number)
job.game_data = game_number
if self.choose_candidate_colour() == 'b':
job.player_b = candidate
job.player_w = self.opponent
else:
job.player_b = self.opponent
job.player_w = candidate
job.board_size = self.board_size
job.komi = self.komi
job.move_limit = self.move_limit
job.handicap = self.handicap
job.handicap_is_free = (self.handicap_style == 'free')
job.use_internal_scorer = (self.scorer == 'internal')
job.internal_scorer_handicap_compensation = \
self.internal_scorer_handicap_compensation
job.sgf_event = self.competition_code
job.sgf_note = ("Candidate parameters: %s" %
self.format_engine_parameters(engine_parameters))
return job
def process_game_result(self, response):
self.halt_on_next_failure = False
self.opponent_description = response.engine_descriptions[
self.opponent.code].get_long_description()
game_number = response.game_data
self.scheduler.fix(game_number)
# Counting no-result as loss for the candidate
candidate_won = (
response.game_result.losing_player == self.opponent.code)
simulation = self.outstanding_simulations.pop(game_number)
simulation.update_stats(candidate_won)
self.log_history(simulation.describe())
if (self.log_tree_to_history_period is not None and
self.scheduler.fixed % self.log_tree_to_history_period == 0):
self.log_history(self.tree.describe())
return "%s %s" % (simulation.describe(),
response.game_result.sgf_result)
def process_game_error(self, job, previous_error_count):
## If the very first game to return a response gives an error, halt.
## If two games in a row give an error, halt.
## Otherwise, forget about the failed game
stop_competition = False
retry_game = False
game_number = job.game_data
del self.outstanding_simulations[game_number]
self.scheduler.fix(game_number)
if self.halt_on_next_failure:
stop_competition = True
else:
self.halt_on_next_failure = True
return stop_competition, retry_game
def write_static_description(self, out):
def p(s):
print >> out, s
p("MCTS tuning event: %s" % self.competition_code)
if self.description:
p(self.description)
p("board size: %s" % self.board_size)
p("komi: %s" % self.komi)
def _write_main_report(self, out):
games_played = self.scheduler.fixed
if self.number_of_games is None:
print >> out, "%d games played" % games_played
else:
print >> out, "%d/%d games played" % (
games_played, self.number_of_games)
print >> out
best_simulation = self.tree.retrieve_best_parameter_simulation()
print >> out, "Best parameters: %s" % best_simulation.describe()
print >> out
self.tree.summarise(out, self.summary_spec)
def write_screen_report(self, out):
self._write_main_report(out)
if self.outstanding_simulations:
print >> out, "In progress:"
to_show = sorted(self.outstanding_simulations.iteritems()) \
[:self.number_of_running_simulations_to_show]
for game_id, simulation in to_show:
print >> out, "game %s: %s" % (game_id, simulation.describe())
def write_short_report(self, out):
self.write_static_description(out)
self._write_main_report(out)
if self.opponent_description:
print >> out, "opponent (%s): %s" % (
self.opponent.code, self.opponent_description)
else:
print >> out, "opponent: %s" % self.opponent.code
print >> out
write_full_report = write_short_report
| 35.077966
| 95
| 0.616544
| 30,160
| 0.971524
| 0
| 0
| 0
| 0
| 0
| 0
| 8,512
| 0.274191
|
9aca58a06217030d4df687fba53565676f1f3f48
| 460
|
py
|
Python
|
Leetcoding-Actions/Explore-Monthly-Challenges/2021-02/25-shortestUnsortedContinuousSubarray.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | 1
|
2020-11-14T18:28:13.000Z
|
2020-11-14T18:28:13.000Z
|
Leetcoding-Actions/Explore-Monthly-Challenges/2021-02/25-shortestUnsortedContinuousSubarray.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | null | null | null |
Leetcoding-Actions/Explore-Monthly-Challenges/2021-02/25-shortestUnsortedContinuousSubarray.py
|
shoaibur/SWE
|
1e114a2750f2df5d6c50b48c8e439224894d65da
|
[
"MIT"
] | null | null | null |
class Solution:
def findUnsortedSubarray(self, nums: List[int]) -> int:
'''
        T: O(n log n) and S: O(n), since sorted(nums) builds a copy of the list
'''
n = len(nums)
sorted_nums = sorted(nums)
start, end = n + 1, -1
for i in range(n):
if nums[i] != sorted_nums[i]:
start = min(start, i)
end = max(end, i)
diff = end - start
return diff + 1 if diff > 0 else 0
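# Illustrative check (not part of the original solution):
#   Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]) == 5,
#   since sorting the slice nums[1:6] == [6, 4, 8, 10, 9] sorts the whole array.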
| 25.555556
| 59
| 0.428261
| 459
| 0.997826
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.106522
|
9acbd6e09016763ff8a75cf2e88c6a01d873ad9c
| 9,705
|
py
|
Python
|
endoscopic_ai.py
|
dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps
|
afbad36cb3fc2de31665fc3b0a7f065b7e6564a0
|
[
"MIT"
] | null | null | null |
endoscopic_ai.py
|
dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps
|
afbad36cb3fc2de31665fc3b0a7f065b7e6564a0
|
[
"MIT"
] | null | null | null |
endoscopic_ai.py
|
dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps
|
afbad36cb3fc2de31665fc3b0a7f065b7e6564a0
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: made to check the details (the name of each code, ‘name’)
g_dpc_r_1=dpc.loc[:, ['ID','code','name']]
# Dummy Encoding with ‘name’
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features= dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features.reset_index()
# Load a new file with ID and treatment availability
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
#Combine the new file containing ID and treatment status with the file after dummy encoding by the ‘name’
ID_treatment_medical_statement=pd.merge(ID_and_polyp_pn_data,total_patient_features,on=["ID"],how='outer')
ID_treatment_medical_statement_o= ID_treatment_medical_statement.fillna(0)
ID_treatment_medical_statement_p=ID_treatment_medical_statement_o.drop("ID", axis=1)
ID_treatment_medical_statement_rename= ID_treatment_medical_statement_p.rename(columns={'code':"Receipt type code"})
merge_data= ID_treatment_medical_statement_rename
# Split the training/validation set into 80% and the test set into 20%, with a constant proportion of cases with lesions
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=1)
# Create a function to divide data
def data_split(X,y):
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
X_train = pd.DataFrame(X_train, columns=columns_name)
X_test = pd.DataFrame(X_test, columns=columns_name)
return X_train, y_train, X_test, y_test
# Separate into training, validation, and test set
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
# Make test set into pandas
X_test_df = pd.DataFrame(X_test)
y_test_df = pd.DataFrame(y_test)
# Make test set into test_df to keep away for the final process
test_dfp = pd.concat([y_test_df,X_test_df], axis=1)
test_df=test_dfp.rename(columns={0:"target"})
# Make training/validation sets into pandas
y_trainp = pd.DataFrame(y_train)
X_trainp = pd.DataFrame(X_train)
train=pd.concat([y_trainp, X_trainp], axis=1)
y_valp = pd.DataFrame(y_val)
X_valp = pd.DataFrame(X_val)
val=pd.concat([y_valp, X_valp], axis=1)
test_vol=pd.concat([train, val])
training_validation_sets=test_vol.rename(columns={0:"target"})
# Create a function to save the results and feature importance after analysis with lightGBM
def reg_top10_lightGBM(merge_data,outname,no,random_state_number):
# Define the objective variable
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
# Define a function
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=random_state_number)
def data_split(X,y):
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
X_train = pd.DataFrame(X_train, columns=columns_name)
X_test = pd.DataFrame(X_test, columns=columns_name)
return X_train, y_train, X_test, y_test
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
y_test_df = pd.DataFrame(y_test)
# Prepare dataset: training data: X_train, label: y_train
train = lgb.Dataset(X_train, label=y_train)
valid = lgb.Dataset(X_val, label=y_val)
# Set the parameters
params = {'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'learning_rate': 0.1 }
# Train the model
model = lgb.train(params,
train,
valid_sets=valid,
num_boost_round=3000,
early_stopping_rounds=100)
# Prediction
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
# Display actual values and predicted values
df_pred = pd.DataFrame({'regression_y_test':y_test,'regression_y_pred':y_pred})
# Calculate MSE (Mean Square Error)
mse = mean_squared_error(y_test, y_pred)
# Calculate RSME = √MSE
rmse = np.sqrt(mse)
# r2 : Calculate the coefficient of determination
r2 = r2_score(y_test,y_pred)
df_Df = pd.DataFrame({'regression_y_test_'+no:y_test,'regression_y_pred_'+no:y_pred,'RMSE_'+no:rmse,'R2_'+no:r2})
df_Df.to_csv(r""+"./"+outname+no+'.csv', encoding = 'shift-jis')
importance = pd.DataFrame(model.feature_importance(), columns=['importance'])
column_list=merge_data.drop(["target"], axis=1)
importance["columns"] =list(column_list.columns)
return importance
# Find out Top 50 features procedure / Run the model once
importance = reg_top10_lightGBM(training_validation_sets,"check_data","_1",1)
# Create a function that sorts and stores the values of feature importance.
def after_imp_save_sort(importance,outname,no):
importance.sort_values(by='importance',ascending=False)
i_df=importance.sort_values(by='importance',ascending=False)
top50=i_df.iloc[0:51,:]
g_dpc_pre= g_dpc_r_1.drop(["ID"], axis=1)
g_dpc_Remove_duplicates=g_dpc_pre.drop_duplicates()
g_dpc_r_columns=g_dpc_Remove_duplicates.rename(columns={'code':"columns"})
importance_name=pd.merge(top50,g_dpc_r_columns)
importance_all=pd.merge(i_df,g_dpc_r_columns)
importance_all.to_csv(r""+"./"+outname+no+'importance_name_all'+'.csv', encoding = 'shift-jis')
return importance_all
# Run a function to sort and save the values of feature importance.
top50_importance_all = after_imp_save_sort(importance,"check_data","_1")
# 10 runs of this procedure
dict = {}
for num in range(10):
print(num+1)
importance = reg_top10_lightGBM(training_validation_sets,"check_data","_"+str(num+1),num+1)
top50_importance_all = after_imp_save_sort(importance,"check_data","_"+str(num+1))
dict[str(num)] = top50_importance_all
# Recall and merge the saved CSV files
def concat_importance(First_pd,Next_pd):
importance_1=pd.DataFrame(dict[First_pd])
importance_1d=importance_1.drop_duplicates(subset='columns')
importance_2=pd.DataFrame(dict[Next_pd])
importance_2d=importance_2.drop_duplicates(subset='columns')
importance_1_2=pd.concat([importance_1d, importance_2d])
return importance_1_2
importance_1_2 = concat_importance("0","1")
importance_3_4 = concat_importance("2","3")
importance_5_6 = concat_importance("4","5")
importance_7_8 = concat_importance("6","7")
importance_9_10 = concat_importance("8","9")
importance_1_4=pd.concat([importance_1_2, importance_3_4])
importance_1_6=pd.concat([importance_1_4, importance_5_6])
importance_1_8=pd.concat([importance_1_6, importance_7_8])
importance_1_10=pd.concat([importance_1_8, importance_9_10])
# Calculate the total value of the feature importance for each code
group_sum=importance_1_10.groupby(["columns"]).sum()
group_sum_s = group_sum.sort_values('importance', ascending=False)
importance_group_sum=group_sum_s.reset_index()
# Create train/validation test data with all features
merge_data_test=pd.concat([training_validation_sets, test_df])
# Make features in the order of highest total feature importance value
importance_top50_previous_data=importance_group_sum["columns"]
importance_top50_previous_data
# refine the data to top 50 features
dict_top50 = {}
pycaret_dict_top50 = {}
X = range(1, 51)
for i,v in enumerate(X):
dict_top50[str(i)] = importance_top50_previous_data.iloc[v]
pycaret_dict_top50[importance_top50_previous_data[i]] = merge_data_test[dict_top50[str(i)]]
pycaret_df_dict_top50=pd.DataFrame(pycaret_dict_top50)
# Add the value of target (: objective variable)
target_data=merge_data_test["target"]
target_top50_dataframe=pd.concat([target_data, pycaret_df_dict_top50], axis=1)
# Adjust pandas dtypes (pycaret needs integer values and string column names)
target_top50_dataframe_int=target_top50_dataframe.astype('int')
target_top50_dataframe_columns=target_top50_dataframe_int.columns.astype(str)
numpy_target_top50=target_top50_dataframe_int.to_numpy()
target_top50_dataframe_pycaret=pd.DataFrame(numpy_target_top50,columns=target_top50_dataframe_columns)
# compare the models
from pycaret.classification import *
clf1 = setup(target_top50_dataframe_pycaret, target ='target',train_size = 0.8,data_split_shuffle=False,fold=10,session_id=0)
best_model = compare_models()
| 48.525
| 165
| 0.757651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,631
| 0.270484
|
9acbf669f84ad525253b32c114c4e395b93adc19
| 3,488
|
py
|
Python
|
open-hackathon-tempUI/src/hackathon/config-sample.py
|
SpAiNiOr/LABOSS
|
32ad341821e9f30fecfa338b5669f574d32dd0fa
|
[
"Apache-2.0"
] | null | null | null |
open-hackathon-tempUI/src/hackathon/config-sample.py
|
SpAiNiOr/LABOSS
|
32ad341821e9f30fecfa338b5669f574d32dd0fa
|
[
"Apache-2.0"
] | null | null | null |
open-hackathon-tempUI/src/hackathon/config-sample.py
|
SpAiNiOr/LABOSS
|
32ad341821e9f30fecfa338b5669f574d32dd0fa
|
[
"Apache-2.0"
] | null | null | null |
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# oauth constants
HOSTNAME = "http://hackathon.chinacloudapp.cn" # host name of the UI site
QQ_OAUTH_STATE = "openhackathon" # TODO: state should not be a constant; it should be unguessable to prevent CSRF
HACkATHON_API_ENDPOINT = "http://hackathon.chinacloudapp.cn:15000"
Config = {
"environment": "local",
"login": {
"github": {
"access_token_url": 'https://github.com/login/oauth/access_token?client_id=a10e2290ed907918d5ab&client_secret=5b240a2a1bed6a6cf806fc2f34eb38a33ce03d75&redirect_uri=%s/github&code=' % HOSTNAME,
"user_info_url": 'https://api.github.com/user?access_token=',
"emails_info_url": 'https://api.github.com/user/emails?access_token='
},
"qq": {
"access_token_url": 'https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=101192358&client_secret=d94f8e7baee4f03371f52d21c4400cab&redirect_uri=%s/qq&code=' % HOSTNAME,
"openid_url": 'https://graph.qq.com/oauth2.0/me?access_token=',
"user_info_url": 'https://graph.qq.com/user/get_user_info?access_token=%s&oauth_consumer_key=%s&openid=%s'
},
"gitcafe": {
"access_token_url": 'https://api.gitcafe.com/oauth/token?client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8&client_secret=e3d821e82d15096054abbc7fbf41727d3650cab6404a242373f5c446c0918634&redirect_uri=%s/gitcafe&grant_type=authorization_code&code=' % HOSTNAME
},
"provider_enabled": ["github", "qq", "gitcafe"],
"session_minutes": 60,
"token_expiration_minutes": 60 * 24
},
"hackathon-api": {
"endpoint": HACkATHON_API_ENDPOINT
},
"javascript": {
"renren": {
"clientID": "client_id=7e0932f4c5b34176b0ca1881f5e88562",
"redirect_url": "redirect_uri=%s/renren" % HOSTNAME,
"scope": "scope=read_user_message+read_user_feed+read_user_photo",
"response_type": "response_type=token",
},
"github": {
"clientID": "client_id=a10e2290ed907918d5ab",
"redirect_uri": "redirect_uri=%s/github" % HOSTNAME,
"scope": "scope=user",
},
"google": {
"clientID": "client_id=304944766846-7jt8jbm39f1sj4kf4gtsqspsvtogdmem.apps.googleusercontent.com",
"redirect_url": "redirect_uri=%s/google" % HOSTNAME,
"scope": "scope=https://www.googleapis.com/auth/userinfo.profile+https://www.googleapis.com/auth/userinfo.email",
"response_type": "response_type=token",
},
"qq": {
"clientID": "client_id=101192358",
"redirect_uri": "redirect_uri=%s/qq" % HOSTNAME,
"scope": "scope=get_user_info",
"state": "state=%s" % QQ_OAUTH_STATE,
"response_type": "response_type=code",
},
"gitcafe": {
"clientID": "client_id=25ba4f6f90603bd2f3d310d11c0665d937db8971c8a5db00f6c9b9852547d6b8",
"clientSecret": "client_secret=e3d821e82d15096054abbc7fbf41727d3650cab6404a242373f5c446c0918634",
"redirect_uri": "redirect_uri=http://hackathon.chinacloudapp.cn/gitcafe",
"response_type": "response_type=code",
"scope": "scope=public"
},
"hackathon": {
"name": "open-xml-sdk",
"endpoint": HACkATHON_API_ENDPOINT
}
}
}
| 48.444444
| 294
| 0.648222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,476
| 0.709862
|
9acc78e7c1d68d1a67b2d32bd290cc493caa9d62
| 1,036
|
py
|
Python
|
marocco/first.py
|
panos1998/Thesis_Code
|
3f95730b1b2139011b060f002d5ce449a886079b
|
[
"Apache-2.0"
] | null | null | null |
marocco/first.py
|
panos1998/Thesis_Code
|
3f95730b1b2139011b060f002d5ce449a886079b
|
[
"Apache-2.0"
] | null | null | null |
marocco/first.py
|
panos1998/Thesis_Code
|
3f95730b1b2139011b060f002d5ce449a886079b
|
[
"Apache-2.0"
] | null | null | null |
#%%
import sys
import numpy as np
from typing import Any, List
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
sys.path.append('C:/Users/panos/Documents/Διπλωματική/code/fz')
from arfftocsv import function_labelize
import csv
colnames =['age', 'sex', 'cp', 'trestbps', 'chol',
'fbs', 'restecg', 'thalach','exang', 'oldpeak', 'slope',
'ca', 'thal', 'cvd']
# %%
df1 = function_labelize(dest = 'labeled_data1.txt',
labels=colnames, source = 'processed.hungarian.csv')
df2 = function_labelize(dest = 'labeled_data2.txt',
labels=colnames, source = 'processed.cleveland.data')
df3 = function_labelize(dest = 'labeled_data3.txt',
labels=colnames, source = 'processed.va.csv')
df4 =function_labelize(dest = 'labeled_data4.txt',
labels=colnames, source = 'processed.switzerland.csv')
df = pd.concat([df1,df2,df3,df4], axis=0)
print(df.isna().sum())
df['cvd'] = df['cvd'].replace([2,3,4], 1)
scaler = MinMaxScaler()
X = df[colnames[:-1]]
y = df[colnames[-1]]
X_norm = scaler.fit_transform(X)
print(X_norm)
print(y)
# %%
| 32.375
| 63
| 0.712355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 341
| 0.325692
|
9accd3c42fa9f549ce35aac4c4567cb2591c14a9
| 10,323
|
py
|
Python
|
matlab2cpp/datatype.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | 28
|
2017-04-25T10:06:38.000Z
|
2022-02-09T07:25:34.000Z
|
matlab2cpp/datatype.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | null | null | null |
matlab2cpp/datatype.py
|
emc2norway/m2cpp
|
81943057c184c539b409282cbbd47bbf933db04f
|
[
"BSD-3-Clause"
] | 5
|
2017-04-25T17:54:53.000Z
|
2022-03-21T20:15:15.000Z
|
"""
The following constructor classes exist here:
+------------------------------------------+---------------------------------------+
| Class | Description |
+==========================================+=======================================+
| :py:class:`~matlab2cpp.datatype.Type` | Frontend for the datatype string |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Dim` | Reference to the number of dimensions |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Mem` | Reference to the memory type |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Num` | Numerical value indicator |
+------------------------------------------+---------------------------------------+
| :py:class:`~matlab2cpp.datatype.Suggest` | Frontend for suggested datatype |
+------------------------------------------+---------------------------------------+
"""
import supplement
import matlab2cpp as mc
dim0 = {"int", "float", "uword", "double", "cx_double", "size_t"}
dim1 = {"ivec", "fvec", "uvec", "vec", "cx_vec"}
dim2 = {"irowvec", "frowvec", "urowvec", "rowvec", "cx_rowvec"}
dim3 = {"imat", "fmat", "umat", "mat", "cx_mat"}
dim4 = {"icube", "fcube", "ucube", "cube", "cx_cube"}
dims = [dim0, dim1, dim2, dim3, dim4]
mem0 = {"uword", "uvec", "urowvec", "umat", "ucube"}
mem1 = {"int", "ivec", "irowvec", "imat", "icube"}
mem2 = {"float", "fvec", "frowvec", "fmat", "fcube"}
mem3 = {"double", "vec", "rowvec", "mat", "cube"}
mem4 = {"cx_double", "cx_vec", "cx_rowvec", "cx_mat", "cx_cube"}
mems = [mem0, mem1, mem2, mem3, mem4]
others = {"char", "string", "TYPE", "func_lambda", "struct", "structs", "cell",
"wall_clock", "SPlot"}
def common_loose(vals):
"""Common denominator among several names.
    Loose enforcement"""
if not isinstance(vals, (tuple, list)) or \
isinstance(vals[0], int):
vals = [vals]
vals = list(vals)
for i in xrange(len(vals)):
if isinstance(vals[i], str):
continue
if isinstance(vals[i][0], int):
vals[i] = get_name(*vals[i])
vals = set(vals)
if len(vals) == 1:
return vals.pop()
vals.discard("TYPE")
if len(vals) == 1:
return vals.pop()
for other in others:
vals.discard(other)
if len(vals) == 0:
return "TYPE"
elif len(vals) == 1:
return vals.pop()
dims_ = map(get_dim, vals)
if dims_:
dim = max(*dims_)
else:
return "TYPE"
if dim == 2 and 1 in dims_:
dim = 3
types = map(get_mem, vals)
type = max(*types)
val = get_name(dim, type)
return val
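# Illustrative behaviour (assumed from the dims/mems tables above, not from the
# original file): common_loose(["int", "vec"]) returns "vec", since the largest
# dimensionality is 1 (column vector) and the widest memory type is 3 (double).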
def common_strict(vals):
"""Common denominator among several names.
    Strict enforcement"""
if not isinstance(vals, (tuple, list)) \
or isinstance(vals[0], int):
vals = [vals]
vals = list(vals)
for i in xrange(len(vals)):
if isinstance(vals[i], str):
continue
if isinstance(vals[i][0], int):
vals[i] = get_name(*vals[i])
vals = set(vals)
if len(vals) == 1:
return vals.pop()
for other in others:
if other in vals:
return "TYPE"
dims_ = map(get_dim, vals)
dim = max(*dims_)
if dim == 2 and 1 in dims_:
return "TYPE"
types = map(get_mem, vals)
type = max(*types)
val = get_name(dim, type)
return val
def pointer_split(name):
p = name.count("*")
if not p:
return 0, name
return p, name[:-p]
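# For example (illustrative, not from the original file):
#   pointer_split("mat**") -> (2, "mat")    pointer_split("int") -> (0, "int")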
def get_dim(val):
while val[-1] == "*":
val = val[:-1]
if val in dim0: dim = 0
elif val in dim1: dim = 1
elif val in dim2: dim = 2
elif val in dim3: dim = 3
elif val in dim4: dim = 4
elif val in others: dim = None
else:
raise ValueError("Datatype '%s' not recognized" % val)
return dim
def get_mem(val):
while val[-1] == "*":
val = val[:-1]
if val in mem0: mem = 0
elif val in mem1: mem = 1
elif val in mem2: mem = 2
elif val in mem3: mem = 3
elif val in mem4: mem = 4
elif val in others: mem = None
else:
raise ValueError("Datatype '%s' not recognized" % val)
return mem
def get_num(val):
while val[-1] == "*":
val = val[:-1]
if val in others: num = False
else: num = True
return num
def get_name(dim, mem):
return dims[dim].intersection(mems[mem]).pop()
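# Illustrative round trip (not from the original file): get_name(3, 2) picks the
# single name in dims[3] & mems[2], i.e. "fmat"; conversely get_dim("fmat") == 3
# and get_mem("fmat") == 2.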
def get_type(instance):
if instance.prop["type"] == "TYPE":
instance = instance.declare
return instance.prop["type"]
class Dim(object):
"""
The `node.dim` is a help variable for handling numerical datatype.
It represents the number of dimension a numerical object represents:
+-------+--------------+
| *dim* | Description |
+=======+==============+
| 0 | scalar |
+-------+--------------+
| 1 | (col-)vector |
+-------+--------------+
| 2 | row-vector |
+-------+--------------+
| 3 | matrix |
+-------+--------------+
| 4 | cube |
+-------+--------------+
| None | Other |
+-------+--------------+
The variable can be both read and set in real time:
>>> node = mc.Var(None, "name")
>>> node.type="float"
>>> print node.dim
0
>>> node.dim = 3
>>> print node.type
fmat
"""
def __get__(self, instance, owner):
if instance is None:
return self
return get_dim(get_type(instance))
def __set__(self, instance, value):
mem = get_mem(get_type(instance))
instance.prop["type"] = get_name(value, mem)
class Mem(object):
"""
The `node.mem` is a help variable for handling numerical datatype.
It represents the internal basic datatype represented in memory:
    +-------+--------------+
    | *mem* | Description  |
    +=======+==============+
    | 0     | unsigned int |
    +-------+--------------+
    | 1     | integer      |
    +-------+--------------+
    | 2     | float        |
    +-------+--------------+
    | 3     | double       |
    +-------+--------------+
    | 4     | complex      |
    +-------+--------------+
    | None  | Other        |
    +-------+--------------+
The variable can be both read and set in real time:
>>> node = mc.Var(None, "name")
>>> node.type="float"
>>> print node.mem
2
>>> node.mem = 3
>>> print node.type
double
"""
def __get__(self, instance, owner):
if instance is None:
return self
return get_mem(get_type(instance))
def __set__(self, instance, value):
dim = get_dim(get_type(instance))
instance.prop["type"] = get_name(dim, value)
class Num(object):
"""
    The `node.num` is a help variable for handling numerical datatypes. It is
    a boolean value which is true when the datatype is a numerical type.
"""
def __get__(self, instance, owner):
if instance is None:
return self
return get_num(get_type(instance))
def __set__(self, instance, value):
if not value:
instance.prop["type"] = "TYPE"
else:
raise AttributeError("num can not be set True consistently")
class Type(object):
"""
Datatypes can be roughly split into two groups: **numerical** and
**non-numerical** types. The numerical types are as follows:
+-------------+--------------+-----------+-----------+----------+-------------+
| | unsigned int | int | float | double | complex |
+=============+==============+===========+===========+==========+=============+
| scalar | *uword* | *int* | *float* | *double* | *cx_double* |
+-------------+--------------+-----------+-----------+----------+-------------+
| vector | *uvec* | *ivec* | *fvec* | *vec* | *cx_vec* |
+-------------+--------------+-----------+-----------+----------+-------------+
| row\-vector | *urowvec* | *irowvec* | *frowvec* | *rowvec* | *cx_rowvec* |
+-------------+--------------+-----------+-----------+----------+-------------+
| matrix | *umat* | *imat* | *fmat* | *mat* | *cx_mat* |
+-------------+--------------+-----------+-----------+----------+-------------+
| cube | *ucube* | *icube* | *fcube* | *cube* | *cx_cube* |
+-------------+--------------+-----------+-----------+----------+-------------+
    Values along the horizontal axis represent the amount of memory reserved per
    element, and values along the vertical axis represent the number of
    dimensions. The names are equivalent to the ones in the Armadillo package.
The non-numerical types are as follows:
+---------------+------------------------+
| Name | Description |
+===============+========================+
| *char* | Single text character |
+---------------+------------------------+
| *string* | Text string |
+---------------+------------------------+
| *struct* | Struct container |
+---------------+------------------------+
| *structs* | Struct array container |
+---------------+------------------------+
| *func_lambda* | Anonymous function |
+---------------+------------------------+
The node datatype can be referenced by any node through `node.type` and can be
inserted as placeholder through `%(type)s`.
"""
def __get__(self, instance, owner):
if instance is None:
return self
return get_type(instance)
def __set__(self, instance, value):
value = value or "TYPE"
if isinstance(value, str):
p, value = pointer_split(value)
instance.pointer = p
else:
value = common_strict(value)
instance.prop["type"] = value
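# Illustrative behaviour of the Type descriptor (assumed, not from the original
# file): assigning a single name stores it directly (trailing '*' pointers are
# counted and stripped), while assigning a list is collapsed via common_strict,
# so a node given ["int", "float"] ends up with type "float".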
class Suggest(object):
"""Same as Type, but for suggested value.
"""
def __set__(self, instance, value):
if value == "TYPE":
return
instance.declare.prop["suggest"] = value
def __get__(self, instance, owner):
return supplement.suggests.get(instance)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28.675
| 84
| 0.465272
| 5,400
| 0.523104
| 0
| 0
| 0
| 0
| 0
| 0
| 5,722
| 0.554296
|
9acd3d20a14d9e96bec466426e861a98197f22b0
| 330
|
py
|
Python
|
src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py
|
micha31r/The-Impossible
|
7a79dea3169907eb93107107f4003c5813de58dc
|
[
"MIT"
] | null | null | null |
src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py
|
micha31r/The-Impossible
|
7a79dea3169907eb93107107f4003c5813de58dc
|
[
"MIT"
] | 2
|
2020-04-15T03:57:42.000Z
|
2020-06-06T01:43:34.000Z
|
src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py
|
micha31r/The-Impossible
|
7a79dea3169907eb93107107f4003c5813de58dc
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2020-05-14 03:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Newsletter',
new_name='Subscriber',
),
]
| 18.333333
| 47
| 0.593939
| 245
| 0.742424
| 0
| 0
| 0
| 0
| 0
| 0
| 97
| 0.293939
|
9acd4db9f55911f16eb79b057e6fc8abf0b3c6d4
| 210
|
py
|
Python
|
resident/views.py
|
felipeue/SmartBuilding
|
57d904c6166c87f836bc8fada9eb5a2bc82069b8
|
[
"MIT"
] | null | null | null |
resident/views.py
|
felipeue/SmartBuilding
|
57d904c6166c87f836bc8fada9eb5a2bc82069b8
|
[
"MIT"
] | null | null | null |
resident/views.py
|
felipeue/SmartBuilding
|
57d904c6166c87f836bc8fada9eb5a2bc82069b8
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
from main.permissions import ResidentLoginRequiredMixin
class DashboardView(ResidentLoginRequiredMixin, TemplateView):
template_name = "index_dashboard.html"
| 30
| 62
| 0.852381
| 105
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.104762
|
9acff9f4ad0162148d8ed69428c049eb258f8169
| 9,179
|
py
|
Python
|
src/awspfx/awspfx.py
|
exfi/awspfx
|
118d2f83a365e1cd37da0b0689e6d5ff527e0f64
|
[
"MIT"
] | 1
|
2021-08-10T23:17:07.000Z
|
2021-08-10T23:17:07.000Z
|
src/awspfx/awspfx.py
|
exfi/awspfx
|
118d2f83a365e1cd37da0b0689e6d5ff527e0f64
|
[
"MIT"
] | 2
|
2021-09-22T03:59:52.000Z
|
2021-12-22T22:48:18.000Z
|
src/awspfx/awspfx.py
|
exfi/awspfx
|
118d2f83a365e1cd37da0b0689e6d5ff527e0f64
|
[
"MIT"
] | 1
|
2022-03-29T15:14:22.000Z
|
2022-03-29T15:14:22.000Z
|
#!/usr/bin/env python3
"""awspfx
Usage:
awspfx.py <profile>
awspfx.py [(-c | --current) | (-l | --list) | (-s | --swap)]
awspfx.py token [(-p | --profile) <profile>]
awspfx.py sso [(login | token)] [(-p | --profile) <profile>]
awspfx.py -h | --help
awspfx.py --version
Examples:
awspfx.py default # Change profile to 'default'
awspfx.py token # Token from current profile, default from SSO
awspfx.py token -p default # Token from profile 'default'
awspfx.py (-c | -l | -s)
SubCommands:
token Generate credentials
-p --profile Select profile
Options:
    -c --current     Show the current profile
-l --list List profiles
    -s --swap        Swap to the previous profile
-h --help Show this screen.
--version Show version.
WIP:
sso Option to login
sts Option to assume-role
"""
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from configparser import ConfigParser as cfgParser
import boto3
from colorlog import ColoredFormatter
from docopt import docopt
from iterfzf import iterfzf
def setup_logging():
log_level = logging.INFO
log_format = "\n%(log_color)s%(levelname)s%(reset)s => %(log_color)s%(message)s%(reset)s"
logging.root.setLevel(log_level)
formatter = ColoredFormatter(log_format)
stream_ = logging.StreamHandler()
stream_.setLevel(log_level)
stream_.setFormatter(formatter)
log_ = logging.getLogger("pythonConfig")
log_.setLevel(log_level)
log_.addHandler(stream_)
return log_
def exit_err(msg):
log.error(msg)
sys.exit()
def has_which(command, err=True):
cmd = shutil.which(command) is not None
if cmd:
return command
else:
if err:
exit_err(f"Command not installed: {command}")
else:
return False
def has_file(file, create=False):
f = os.path.isfile(file) or False
if not f:
if create:
f_ = open(file, "w+")
f_.close()
else:
exit_err(f"File not exist: {file}")
return file
def run_cmd(command):
rc, out = subprocess.getstatusoutput(command)
if rc != 0:
err = "Occurred: ", out
exit_err(err)
return out
def fzf(data: list, current: str = None):
cmd = has_which("fzf", err=False)
if not cmd:
print(*data, sep="\n")
exit_err("Not installed 'fzf'")
return iterfzf(data) or exit_err("you did not choose any of the options")
def sed_inplace(filename, pattern, repl):
p = re.compile(pattern, re.MULTILINE)
with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmp_file:
with open(filename, "r") as file:
text = file.read()
if "AWS_PROFILE" in text:
new = p.sub(repl, text)
tmp_file.write(new)
else:
print("No exist profile")
tmp_file.write(text)
tmp_file.write(f"export {repl}")
shutil.copystat(filename, tmp_file.name)
shutil.move(tmp_file.name, filename)
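# Illustrative call (mirroring how switch_profile uses it below, not an addition
# to the CLI): sed_inplace(envrc_file, 'AWS_PROFILE="old"', 'AWS_PROFILE="new"')
# rewrites the exported profile line in ~/.envrc in place.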
def setup_aws(ctx: str = None):
try:
if ctx is None:
# if aws_profile_env is None:
# del os.environ['AWS_PROFILE']
aws_session = boto3.session.Session()
else:
aws_session = boto3.session.Session(profile_name=ctx)
return aws_session
except Exception as e:
exit_err(e)
def current_profile(err=True):
ctx = aws.profile_name
if err:
return ctx or exit_err("Getting current profile")
return ctx
def get_profiles(err=True):
try:
ctx_ls = aws.available_profiles
ctx = sorted(ctx_ls, reverse=True)
if err:
return ctx or exit_err("Getting profile list")
return ctx
except Exception as e:
log.error(e)
def list_profiles(lst=False):
ctx_current = current_profile(err=False)
ctx_list = get_profiles()
if lst:
ctx = reversed(ctx_list)
print(*ctx, sep="\n")
else:
p = fzf(data=ctx_list, current=ctx_current)
return p
def read_profile():
with open(awspfx_cache, 'r') as file:
r = file.read()
return r
def save_profile(ctx_current):
ctx = ctx_current if ctx_current else ""
with open(awspfx_cache, "w") as file:
file.write(ctx)
def switch_profile(ctx, ctx_current):
ctx_old = f'AWS_PROFILE="{ctx_current}"'
ctx_repl = f'AWS_PROFILE="{ctx}"'
sed_inplace(envrc_file, ctx_old, ctx_repl)
save_profile(ctx_current)
run_cmd("direnv allow && direnv reload")
def set_profile(ctx, ctx_current=None, sms=None):
if not ctx_current:
ctx_current = current_profile(err=False)
if ctx == ctx_current:
log.warning(f"The profile is not changed: {ctx_current}")
else:
switch_profile(ctx, ctx_current)
sms_text = sms or f"Switched to profile: {ctx}"
log.info(sms_text)
def swap_profile():
ctx = read_profile()
if ctx:
sms_text = f"Switched to previous profile: {ctx}"
set_profile(ctx=ctx, sms=sms_text)
def exist_profile(ctx):
if ctx in get_profiles():
return True
else:
exit_err(f"Profile does not exist: {ctx}")
def sso(account_id, role_name):
client = aws.client("sso", region_name="us-east-1")
aws_sso_cache = os.path.expanduser("~/.aws/sso/cache")
json_files = [
pos_json for pos_json in os.listdir(
aws_sso_cache
) if pos_json.endswith(
".json"
)
]
for json_file in json_files:
path = f"{aws_sso_cache}/{json_file}"
with open(path) as file:
data = json.load(file)
if "accessToken" in data:
access_token = data['accessToken']
try:
cred = client.get_role_credentials(
accountId=account_id,
roleName=role_name,
accessToken=access_token
)
return cred
except Exception as e:
log.error(e)
log.warning("The SSO session associated with this profile has expired "
"or is otherwise invalid. To refresh this SSO session run "
"aws sso login with the corresponding profile.")
sys.exit(2)
def sts(account_id, role, region):
role_info = {
"RoleArn": f"arn:aws:iam::{account_id}:role/{role}",
"RoleSessionName": "session01"
}
client = aws.client("sts", region_name=region)
cred = client.assume_role(**role_info)
return cred
def get_token(ctx, sso_=True, sts_=False):
aws_cred = cfgParser()
aws_cred.read(creds_file)
act_id = os.getenv("AWS_ACCOUNT_ID") or aws_cred.get(ctx, "account_id")
act_role = os.getenv("AWS_ROLE_NAME") or aws_cred.get(ctx, "role_name")
act_region = os.getenv("AWS_REGION") or aws_cred.get(ctx, "region")
if sso_:
cred = sso(account_id=act_id, role_name=act_role)
elif sts_:
cred = sts(account_id=act_id, role=act_role, region=act_region)
else:
cred = {}
exit_err("Not select option from token")
aws_access_key_id = cred['roleCredentials']['accessKeyId']
aws_secret_access_key = cred['roleCredentials']['secretAccessKey']
aws_session_token = cred['roleCredentials']['sessionToken']
# print('Save Credentials in ~/.aws/credentials ...')
aws_cred.set(ctx, "aws_access_key_id", aws_access_key_id)
aws_cred.set(ctx, "aws_secret_access_key", aws_secret_access_key)
aws_cred.set(ctx, "aws_session_token", aws_session_token)
with open(creds_file, "w") as f:
aws_cred.write(f)
def main(argv):
ctx = argv['<profile>']
if ctx == "token" or argv['token']:
if argv['--profile']:
if exist_profile(ctx):
get_token(ctx)
log.info(f"Generate token to: {ctx}")
else:
ctx = current_profile()
get_token(ctx)
log.info(f"Generate token to: {ctx}")
sys.exit()
if ctx == "sso" or argv['sso']:
print("sso")
sys.exit()
if argv['--current']:
log.info(f"The current profile is: '{current_profile()}'")
sys.exit()
if argv['--list']:
list_profiles(lst=True)
sys.exit()
if argv['--swap']:
swap_profile()
sys.exit()
if ctx or ctx is None:
if ctx is None:
ctx_profile = list_profiles()
else:
ctx_profile = ctx if exist_profile(ctx) else sys.exit()
set_profile(ctx_profile)
sys.exit()
if __name__ == "__main__":
log = setup_logging()
home_path = os.getenv('HOME') or exit_err("Home directory does not exist?")
# aws_profile_env = os.getenv("AWS_PROFILE")
aws = setup_aws()
awspfx_cache = has_file(f"{home_path}/.aws/awspfx", create=True)
direnv = has_which("direnv")
envrc_file = has_file(f"{home_path}/.envrc")
creds_file = has_file(f"{home_path}/.aws/credentials")
arguments = docopt(__doc__, version=f'awspfx 0.1.6 - python {sys.version}')
main(arguments)
| 26.002833
| 93
| 0.610742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,602
| 0.283473
|
9ad11bb35b11a89ca5873c299ffa8f65fee28a06
| 3,694
|
py
|
Python
|
test/test_contacts_info_from_main_page.py
|
OlgaZtv/python_training
|
661165613ef4b9545345a8a2c61a894571ded703
|
[
"Apache-2.0"
] | null | null | null |
test/test_contacts_info_from_main_page.py
|
OlgaZtv/python_training
|
661165613ef4b9545345a8a2c61a894571ded703
|
[
"Apache-2.0"
] | null | null | null |
test/test_contacts_info_from_main_page.py
|
OlgaZtv/python_training
|
661165613ef4b9545345a8a2c61a894571ded703
|
[
"Apache-2.0"
] | null | null | null |
import re
from model.contact import Contact
def test_contact_info_from_home_page(app, db):
app.navigation.open_home_page()
contact_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip(),
address=contact.address.strip(),
home=contact.home, mobile=contact.mobile, phone2=contact.phone2,
email=contact.email, email2=contact.email2, email3=contact.email3)
contact_from_db_list = list(map(clean, db.get_contact_list()))
print("Contacts_from_home_page>>>>", contact_from_home_page)
print("Contacts_from_DB>>>>", contact_from_db_list)
i = 0
for item in contact_from_home_page:
assert item.address == contact_from_db_list[i].address
assert item.lastname == contact_from_db_list[i].lastname.strip()
assert item.firstname == contact_from_db_list[i].firstname.strip()
assert item.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_db_list[i])
assert item.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_db_list[i])
i += 1
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work, contact.phone2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3]))))
# def test_contacts(app, ormdb):
# random_index = randrange(app.contact.count())
#     # get all contacts from the home page
# contact_from_home_page = app.contact.get_contact_list()
#     # get all contact records from the database
# contact_from_db = ormdb.get_contact_list()
#     # compare the lists after sorting
# assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)
# def test_contact_info_on_main_page(app):
# if app.contact.amount() == 0:
# app.contact.create(
# Contact(firstname="TestTest", middlename="Test", lastname="Testing", nickname="testing",
# title="test", company="Test test", address="Spb", home="000222111",
# mobile="444555222", work="99966655", fax="11122255", email="test@tesr.ru",
# email2="test2@test.ru", email3="test3@test.ru", homepage="www.test.ru", bday="15",
# bmonth="May", byear="1985", aday="14", amonth="June", ayear="1985",
# address2="Spb", phone2="111111", notes="Friend"))
# random_index = randrange(app.contact.amount())
# contact_from_home_page = app.contact.get_contact_list()[random_index]
# contact_from_edit_page = app.contact.get_contact_info_from_edit_page(random_index)
# assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
# assert contact_from_home_page.firstname == contact_from_edit_page.firstname
# assert contact_from_home_page.lastname == contact_from_edit_page.lastname
# assert contact_from_home_page.address == contact_from_edit_page.address
# assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
| 52.028169
| 119
| 0.67542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,931
| 0.511252
|
9ad1371d592dd9a07aabbaf79a51d2d1c5de33e5
| 628
|
py
|
Python
|
Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/1379. Find a Corresponding Node of a Binary Tree in a Clone of That Tree/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:
if not target or not original or not cloned: return None
if target.val == original.val == cloned.val: return cloned
node = self.getTargetCopy(original.left, cloned.left, target)
if node: return node
node = self.getTargetCopy(original.right, cloned.right, target)
if node: return node
return None
| 36.941176
| 96
| 0.644904
| 464
| 0.738854
| 0
| 0
| 0
| 0
| 0
| 0
| 156
| 0.248408
|
9ad242baf7204452ac38c08eb06958775483a1b5
| 1,790
|
py
|
Python
|
benchmark.py
|
raonyguimaraes/machinelearning
|
03b18e5c69931c4ee2ea4803de72c846aba97bce
|
[
"MIT"
] | 1
|
2016-10-23T19:45:12.000Z
|
2016-10-23T19:45:12.000Z
|
benchmark.py
|
raonyguimaraes/machinelearning
|
03b18e5c69931c4ee2ea4803de72c846aba97bce
|
[
"MIT"
] | null | null | null |
benchmark.py
|
raonyguimaraes/machinelearning
|
03b18e5c69931c4ee2ea4803de72c846aba97bce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Writing Our First Classifier - Machine Learning Recipes #5
#https://www.youtube.com/watch?v=AoeEHqVSNOw&list=PLOU2XLYxmsIIuiBfYad6rFYQU_jL2ryal&index=1
from scipy.spatial import distance
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn import datasets
from sklearn.cross_validation import train_test_split
import numpy as np
def euc(a,b):
return distance.euclidean(a,b)
class ScrappyKNN():
def fit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self, X_test):
predictions = []
for row in X_test:
label = self.closest(row)
predictions.append(label)
return predictions
def closest(self, row):
best_dist = euc(row, self.X_train[0])
best_index = 0
for i in range(1,len(self.X_train)):
dist = euc(row, self.X_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .5)
# from sklearn.neighbors import KNeighborsClassifier
my_classifier = ScrappyKNN()
my_classifier_sklearn = KNeighborsClassifier()
accuracies = []
for i in range (0,1000):
my_classifier.fit(X_train, y_train)
predictions = my_classifier.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
accuracies.append(accuracy)
print 'ScrappyKNN accuracy mean:', np.mean(accuracies)
accuracies = []
for i in range (0,1000):
my_classifier_sklearn.fit(X_train, y_train)
predictions = my_classifier_sklearn.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
accuracies.append(accuracy)
print 'sklearn accuracy mean:', np.mean(accuracies)
| 24.189189
| 92
| 0.754749
| 507
| 0.28324
| 0
| 0
| 0
| 0
| 0
| 0
| 299
| 0.167039
|
9ad3c6eb1d3fc248c366e0859044b8671327d992
| 2,323
|
py
|
Python
|
process_frames.py
|
w-garcia/video-caption.pytorch
|
ef3766b093815b7cfd48d29b2af880c05b45ddbe
|
[
"MIT"
] | 4
|
2019-03-27T11:37:44.000Z
|
2021-01-07T02:10:46.000Z
|
process_frames.py
|
w-garcia/video-caption.pytorch
|
ef3766b093815b7cfd48d29b2af880c05b45ddbe
|
[
"MIT"
] | 2
|
2019-07-11T20:34:19.000Z
|
2019-08-19T13:21:52.000Z
|
process_frames.py
|
w-garcia/video-caption.pytorch
|
ef3766b093815b7cfd48d29b2af880c05b45ddbe
|
[
"MIT"
] | 3
|
2020-02-12T02:31:58.000Z
|
2021-02-07T06:17:48.000Z
|
"""
Re-tooled version of the script found on VideoToTextDNN:
https://github.com/OSUPCVLab/VideoToTextDNN/blob/master/data/process_frames.py
"""
import sys
import os
import argparse
import time
from multiprocessing import Pool
def main(args):
src_dir = args.src_dir
dst_dir = args.dst_dir
start = int(args.start)
end = int(args.end)
PREPEND = args.prepend
src_files = os.listdir(src_dir)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
tuple_list = []
for video_file in src_files[start:end]:
src_path = os.path.join(src_dir, video_file)
dst_path = os.path.join(dst_dir, video_file)
tuple_list.append((PREPEND, video_file, src_path, dst_path))
pool = Pool() # Default to number cores
pool.map(process_vid, tuple_list)
pool.close()
pool.join()
def process_vid(args):
(PREPEND, video_file, src_path, dst_path) = args
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
# command = 'ffmpeg -i '+ src_path+' -s 256x256 '+ dst_path + '/%5d.jpg' #with resize
command = PREPEND + 'ffmpeg -i '+ src_path+' -r 20 '+ dst_path + '/%6d.jpg > /dev/null 2>&1' #6 is to be in accordance with C3D features.
print(command)
os.system(command)
else:
print("Frames directory already found at {}".format(dst_path))
if __name__=='__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'src_dir',
help='directory where videos are'
)
arg_parser.add_argument(
'dst_dir',
help='directory where to store frames'
)
arg_parser.add_argument(
'start',
help='start index (inclusive)'
)
arg_parser.add_argument(
'end',
help='end index (noninclusive)'
)
arg_parser.add_argument(
'--prepend',
default='',
help='optional prepend to start of ffmpeg command (in case you want to use a non-system wide version of ffmpeg)'
'For example: --prepend ~/anaconda2/bin/ will use ffmpeg installed in anaconda2'
)
if not len(sys.argv) > 1:
print(arg_parser.print_help())
sys.exit(0)
args = arg_parser.parse_args()
start_time = time.time()
main(args)
print("Job took %s mins" % ((time.time() - start_time)/60))
| 27.329412
| 145
| 0.635385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 753
| 0.32415
|
9ad3d0b300ea5b2d36712d2ed1f19a77b925f25f
| 383
|
py
|
Python
|
plaintext_password/checks.py
|
bryanwills/django-plaintext-password
|
752cf0316cdc45dc9bed5f9107614881d613647f
|
[
"MIT"
] | null | null | null |
plaintext_password/checks.py
|
bryanwills/django-plaintext-password
|
752cf0316cdc45dc9bed5f9107614881d613647f
|
[
"MIT"
] | null | null | null |
plaintext_password/checks.py
|
bryanwills/django-plaintext-password
|
752cf0316cdc45dc9bed5f9107614881d613647f
|
[
"MIT"
] | 2
|
2021-04-23T08:24:08.000Z
|
2022-03-01T06:56:33.000Z
|
from django.contrib.auth.hashers import get_hashers_by_algorithm
from django.core import checks
@checks.register(checks.Tags.security, deploy=True)
def check_for_plaintext_passwords(app_configs, **kwargs):
if "plaintext" in get_hashers_by_algorithm():
yield checks.Critical(
"Plaintext module should not be used in production.", hint="Remove it."
)
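# Hedged illustration (not part of the original module): this check fires only when a hasher
# whose algorithm is "plaintext" appears in settings.PASSWORD_HASHERS, e.g. via a hypothetical
# "plaintext_password.hashers.PlainTextPasswordHasher" entry. With Django settings configured
# it is surfaced by `python manage.py check --deploy`, or can be called directly:
#   errors = list(check_for_plaintext_passwords(None))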
| 34.818182
| 83
| 0.744125
| 0
| 0
| 232
| 0.605744
| 284
| 0.741514
| 0
| 0
| 75
| 0.195822
|
9ad4238b4ae5b1bf04e852349b10a5a6489f5283
| 105
|
py
|
Python
|
city.py
|
cromermw/gen_pop
|
74541590b0142fac5178e7db25b068d967618dfb
|
[
"CC0-1.0"
] | null | null | null |
city.py
|
cromermw/gen_pop
|
74541590b0142fac5178e7db25b068d967618dfb
|
[
"CC0-1.0"
] | null | null | null |
city.py
|
cromermw/gen_pop
|
74541590b0142fac5178e7db25b068d967618dfb
|
[
"CC0-1.0"
] | null | null | null |
class City:
name = "city"
size = "default"
draw = -1
danger = -1
population = []
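    # Note: the attributes above are class-level defaults; `population = []` is a single list
    # shared by every City instance unless it is reassigned per instance.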
| 17.5
| 21
| 0.47619
| 105
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.142857
|
9ad5dd0d9bd8fbcbf6eef199aef2d2ca49925d18
| 9,340
|
py
|
Python
|
code/preprocess/data_generation.py
|
hms-dbmi/VarPPUD
|
316a45f33c12dfecadb17fa41b699ef95096a623
|
[
"Apache-2.0"
] | null | null | null |
code/preprocess/data_generation.py
|
hms-dbmi/VarPPUD
|
316a45f33c12dfecadb17fa41b699ef95096a623
|
[
"Apache-2.0"
] | null | null | null |
code/preprocess/data_generation.py
|
hms-dbmi/VarPPUD
|
316a45f33c12dfecadb17fa41b699ef95096a623
|
[
"Apache-2.0"
] | 1
|
2022-01-18T17:14:31.000Z
|
2022-01-18T17:14:31.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 17:19:39 2021
@author: rayin
"""
import os, sys
import numpy as np
import pandas as pd
import torch
import warnings
import random
import torchvision.models as models
from sdv.tabular import CTGAN
from sdv.evaluation import evaluate
from sdv.metrics.tabular import CSTest, KSTest
from sdv.metrics.tabular import MulticlassDecisionTreeClassifier
from sdv.metrics.tabular import LogisticDetection, SVCDetection
from ctgan import CTGANSynthesizer
from feature_data_imputation import data_imputation
from sdv.constraints import GreaterThan
warnings.filterwarnings("ignore")
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work/")
feature = pd.read_csv('data/feature/feature.csv', index_col=0)
feature_imputation = data_imputation(feature, 'MICE')
case_gene_update = pd.read_csv('data/processed/variant_clean.csv', index_col=0)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('pathogenic', 1, inplace=True)
case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].replace('less_pathogenic', 0, inplace=True)
label = case_gene_update['\\12_Candidate variants\\03 Interpretation\\'].reset_index()
label = label['\\12_Candidate variants\\03 Interpretation\\']
#Generating synthetic data based on raw data with/without imputation respectively
real_data_raw = pd.concat([feature, label], axis=1)
real_data_impu = pd.concat([feature_imputation, label], axis=1)
real_data_raw = real_data_raw.rename(columns={"\\12_Candidate variants\\03 Interpretation\\": "label"})
real_data_impu = real_data_impu.rename(columns={"\\12_Candidate variants\\03 Interpretation\\": "label"})
#splitting for imputation real data
feature_real_impu = real_data_impu[real_data_impu.columns[0:-1]]
label_real_impu = real_data_impu[real_data_impu.columns[-1]]
real_data_impu_zero = real_data_impu.loc[real_data_impu[real_data_impu.columns[-1]] == 0]
real_data_impu_one = real_data_impu.loc[real_data_impu[real_data_impu.columns[-1]] == 1]
#splitting for raw real data
feature_real_raw = real_data_raw[real_data_raw.columns[0:-1]]
label_real_raw = real_data_raw[real_data_raw.columns[-1]]
real_data_raw_zero = real_data_raw.loc[real_data_raw[real_data_raw.columns[-1]] == 0]
real_data_raw_one = real_data_raw.loc[real_data_raw[real_data_raw.columns[-1]] == 1]
#############################################################################################################################
#ctgan based on sdv
range_min = pd.DataFrame(index=range(0,500), columns=['range_min'])
range_min = range_min.fillna(0)
range_max = pd.DataFrame(index=range(0,500), columns=['range_max'])
range_max = range_max.fillna(1)
real_data_raw = pd.concat([real_data_raw, range_min.iloc[0:474], range_max.iloc[0:474]], axis=1)
real_data_raw_zero = pd.concat([real_data_raw_zero.reset_index(), range_min.iloc[0:252], range_max.iloc[0:252]], axis=1)
real_data_raw_zero.drop(['index'], axis=1, inplace=True)
real_data_raw_one = pd.concat([real_data_raw_one.reset_index(), range_min.iloc[0:222], range_max.iloc[0:222]], axis=1)
real_data_raw_one.drop(['index'], axis=1, inplace=True)
field_transformers = {'evolutionary age': 'float',
'dN/dS': 'float',
'gene essentiality': 'one_hot_encoding',
'number of chem interaction action': 'one_hot_encoding',
'number of chem interaction': 'one_hot_encoding',
'number of chem': 'one_hot_encoding',
'number of pathway': 'one_hot_encoding',
'number of phenotype': 'one_hot_encoding',
'number of rare diseases': 'one_hot_encoding',
'number of total diseases': 'one_hot_encoding',
'phylogenetic number': 'one_hot_encoding',
'net charge value diff': 'one_hot_encoding',
'secondary structure value diff': 'one_hot_encoding',
'number of hydrogen bond value diff': 'one_hot_encoding',
'number of vertices value diff': 'one_hot_encoding',
'number of edges value diff': 'one_hot_encoding',
'diameter value diff': 'one_hot_encoding'}
#constraints settings for GAN
rare_total_disease_constraint = GreaterThan(
low='number of rare diseases',
high='number of total diseases',
handling_strategy='reject_sampling')
evolutionary_age_constraint = GreaterThan(
low = 'range_max',
high = 'evolutionary age',
handling_strategy='reject_sampling')
dnds_constraint = GreaterThan(
low = 'range_min',
high = 'dN/dS',
handling_strategy='reject_sampling')
gene_haplo_min_constraint = GreaterThan(
low = 'range_min',
high = 'haploinsufficiency',
handling_strategy='reject_sampling')
gene_haplo_max_constraint = GreaterThan(
low = 'haploinsufficiency',
high = 'range_max',
handling_strategy='reject_sampling')
fathmm_min_constraint = GreaterThan(
low = 'range_min',
high = 'FATHMM',
handling_strategy='reject_sampling')
fathmm_max_constraint = GreaterThan(
low = 'FATHMM',
high = 'range_max',
handling_strategy='reject_sampling')
vest_min_constraint = GreaterThan(
low = 'range_min',
high = 'VEST',
handling_strategy='reject_sampling')
vest_max_constraint = GreaterThan(
low = 'VEST',
high = 'range_max',
handling_strategy='reject_sampling')
proven_constraint = GreaterThan(
low = 'PROVEN',
high = 'range_min',
handling_strategy='reject_sampling')
sift_min_constraint = GreaterThan(
low = 'range_min',
high = 'SIFT',
handling_strategy='reject_sampling')
sift_max_constraint = GreaterThan(
low = 'SIFT',
high = 'range_max',
handling_strategy='reject_sampling')
constraints = [rare_total_disease_constraint, evolutionary_age_constraint, dnds_constraint, gene_haplo_min_constraint,
gene_haplo_max_constraint, fathmm_min_constraint, fathmm_max_constraint, vest_min_constraint,
vest_max_constraint, proven_constraint, sift_min_constraint, sift_max_constraint]
#build the model
model = CTGAN(epochs=300, batch_size=100, field_transformers=field_transformers, constraints=constraints) #field_distributions=field_distributions
# #Mode 1: generate all samples together (not work well)
# #generate all labels data
# model.fit(real_data_raw)
# sample = model.sample(500)
# sample.drop(['range_min', 'range_max'], axis=1, inplace=True)
# feature_syn_raw = sample[sample.columns[0:-1]]
# label_syn_raw = sample[sample.columns[-1]]
# feature_syn_raw = data_imputation(feature_syn_raw, 'MICE')
# ss = ShuffleSplit(n_splits=3, test_size=0.33, random_state=0)
# for train_index, test_index in ss.split(real_data_raw):
# train_x = feature_real_impu.iloc[train_index]
# train_y = label_real_impu.iloc[train_index]
# test_x = feature_real_impu.iloc[test_index]
# test_y = label_real_impu.iloc[test_index]
# feature_combine, label_combine = merge_data(train_x, train_y, feature_syn_raw, label_syn_raw)
# rf_baseline(feature_combine, label_combine, test_x, test_y)
# #xgb_baseline(feature_syn_raw, label_syn_raw, test_x, test_y)
#Mode 2: negative and positive resampling, respectively
#generate label '0' data of 50000 cases
real_data_raw_zero.drop(['label'], axis=1, inplace=True)
model.fit(real_data_raw_zero) #model fitting
sample_zero = model.sample(50000) #generate samples with label '0'
sample_zero.drop(['range_min', 'range_max'], axis=1, inplace=True) #drop 'range_min' and 'range_max' columns
sample_zero['label'] = 0 #add the labels
#generate label '1' data of 50000 cases
real_data_raw_one.drop(['label'], axis=1, inplace=True)
model.fit(real_data_raw_one)
sample_one = model.sample(50000)
sample_one.drop(['range_min', 'range_max'], axis=1, inplace=True)
sample_one['label'] = 1
#concatenate positive and negative synthetic samples
sample_all = pd.concat([sample_zero, sample_one], axis=0)
#sample_all.to_csv('data/synthetic/syn_data_raw.csv')
#remove samples with 'NA' in any of the columns
sample_syn = sample_all.dropna(axis=0,how='any')
#sample_syn.to_csv('data/synthetic/syn_test_raw.csv')
#select 500 synthetic test samples to keep a size similar to the raw data
syn_test_raw = pd.read_csv('data/synthetic/syn_test_raw.csv', index_col=0)
syn_test_raw = syn_test_raw.sample(frac=1)
flag0 = 0
flag1 = 0
count_zero = 0
count_one = 0
syn_test_data = []
for i in range(0, len(syn_test_raw)):
if syn_test_raw['label'].iloc[i] == int(0):
if count_zero == 250:
flag0 = 1
else:
count_zero = count_zero + 1
syn_test_data.append(syn_test_raw.iloc[i])
elif syn_test_raw['label'].iloc[i] == int(1):
if count_one == 250:
flag1 = 1
else:
count_one = count_one + 1
syn_test_data.append(syn_test_raw.iloc[i])
if flag0 == 1 and flag1 == 1:
        break
syn_test_data = pd.DataFrame(syn_test_data)
syn_test_data['label'] = syn_test_data['label'].astype(int)
syn_test_data.reset_index(inplace=True)
syn_test_data = syn_test_data[syn_test_data.columns[1:40]]
#export synthetic data for external evaluation
syn_test_data.to_csv('data/synthetic/syn_test.csv')
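# Optional, hedged check (not in the original pipeline): the sdv helpers imported above are
# never called here; assuming the sdv 0.x API they come from, the synthetic table could be
# scored against the raw data (columns must match and missing values are dropped first):
# real_for_eval = real_data_raw.drop(['range_min', 'range_max'], axis=1).dropna()
# syn_for_eval = sample_syn[real_for_eval.columns]
# print('SDV similarity score:', evaluate(syn_for_eval, real_for_eval))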
| 37.51004
| 147
| 0.713169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,768
| 0.403426
|
9ad633a8b545c9fd60433dd7e1485b51abf58bfc
| 1,265
|
py
|
Python
|
app/user/models.py
|
briankaemingk/streaks-with-todoist
|
c6cbc982fbedafce04e9f23af7422e996513c8bb
|
[
"MIT"
] | 3
|
2019-08-06T19:04:32.000Z
|
2022-01-19T14:00:12.000Z
|
app/user/models.py
|
briankaemingk/streaks-with-todoist
|
c6cbc982fbedafce04e9f23af7422e996513c8bb
|
[
"MIT"
] | 6
|
2018-10-14T21:32:58.000Z
|
2021-03-20T00:07:56.000Z
|
app/user/models.py
|
briankaemingk/streaks-with-todoist
|
c6cbc982fbedafce04e9f23af7422e996513c8bb
|
[
"MIT"
] | null | null | null |
from app.extensions import db
from flask import current_app
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
access_token = db.Column(db.String())
jit_feature = db.Column(db.Boolean())
recurrence_resch_feature = db.Column(db.Boolean())
streaks_feature = db.Column(db.Boolean())
in_line_comment_feature = db.Column(db.Boolean())
def __init__(self, id, access_token, jit_feature, recurrence_resch_feature, streaks_feature, in_line_comment_feature):
self.id = id
self.access_token = access_token
self.jit_feature = jit_feature
self.recurrence_resch_feature = recurrence_resch_feature
self.streaks_feature = streaks_feature
self.in_line_comment_feature = in_line_comment_feature
def __repr__(self):
return '<id {}, access token {}, jit feature {}, recurrence resch feature {}, streaks feature {}, in-line comment feature {}>'.\
format(self.id, self.access_token, self.jit_feature, self.recurrence_resch_feature, self.streaks_feature, self.in_line_comment_feature)
def launch_task(self, name, description, *args, **kwargs):
current_app.task_queue.enqueue('app.tasks.' + name, self.id, *args, **kwargs)
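# Hedged usage sketch (illustrative only; requires a Flask application context, and the task
# name "update_streaks" is an assumption, not confirmed by this file):
# user = User(id=123, access_token='abc123', jit_feature=True,
#             recurrence_resch_feature=False, streaks_feature=True,
#             in_line_comment_feature=False)
# db.session.add(user)
# db.session.commit()
# user.launch_task('update_streaks', 'Recompute streak labels')  # enqueues app.tasks.update_streaks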
| 38.333333
| 147
| 0.714625
| 1,198
| 0.947036
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.109091
|
9ad63695127b031d5978acb9042f9c3b9cb8c5de
| 1,240
|
py
|
Python
|
output/models/boeing_data/ipo4/ipo_xsd/address.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/boeing_data/ipo4/ipo_xsd/address.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/boeing_data/ipo4/ipo_xsd/address.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from output.models.boeing_data.ipo4.ipo_xsd.ipo import AddressType
__NAMESPACE__ = "http://www.example.com/IPO"
class Usstate(Enum):
AK = "AK"
AL = "AL"
AR = "AR"
CA = "CA"
PA = "PA"
@dataclass
class Ukaddress(AddressType):
class Meta:
name = "UKAddress"
postcode: Optional[str] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"[A-Z]{2}\d\s\d[A-Z]{2}",
}
)
export_code: int = field(
init=False,
default=1,
metadata={
"name": "exportCode",
"type": "Attribute",
}
)
@dataclass
class Usaddress(AddressType):
class Meta:
name = "USAddress"
state: Optional[Usstate] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
zip: Optional[int] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
}
)
| 20
| 66
| 0.504032
| 1,005
| 0.810484
| 0
| 0
| 937
| 0.755645
| 0
| 0
| 253
| 0.204032
|
9ad672b90b5e5960648f597358159ab9f9c375ec
| 5,060
|
py
|
Python
|
Invaders/Displays/animation_display.py
|
JaredsGames/SpaceInvaders
|
8a0da236c97340c4a8a06e7dd68e4672f885d9e0
|
[
"MIT"
] | null | null | null |
Invaders/Displays/animation_display.py
|
JaredsGames/SpaceInvaders
|
8a0da236c97340c4a8a06e7dd68e4672f885d9e0
|
[
"MIT"
] | null | null | null |
Invaders/Displays/animation_display.py
|
JaredsGames/SpaceInvaders
|
8a0da236c97340c4a8a06e7dd68e4672f885d9e0
|
[
"MIT"
] | null | null | null |
# Jared Dyreson
# CPSC 386-01
# 2021-11-29
# jareddyreson@csu.fullerton.edu
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains the Intro display class
"""
import pygame
import functools
import sys
import pathlib
import typing
import os
import dataclasses
import random
from pprint import pprint as pp
import time
from Invaders.Dataclasses.point import Point
from Invaders.Displays.display import Display
from Invaders.UI.button import Button
# from Invaders.Entities.cacodemon import Cacodemon
# from Invaders.Entities.Entity import Entity
from Invaders.Entities.enemy_matrix import EnemyMatrix
# from Invaders.Entities.Player import Player
from Invaders.Entities.Entity import Entity
from Invaders.Dataclasses.direction import Direction
# TODO : move this to its own respective module or something like that
def absolute_file_paths(directory: pathlib.Path) -> typing.List[pathlib.Path]:
"""
List the contents of a directory with their absolute path
@param directory: path where to look
@return: typing.List[pathlib.Path]
"""
return [
pathlib.Path(os.path.abspath(os.path.join(dirpath, f)))
for dirpath, _, filenames in os.walk(directory)
for f in filenames
]
class AnimationDisplay(Display):
def __init__(self):
super().__init__()
self.break_from_draw = False
self.entities = EnemyMatrix(5, 5, self._display_surface)
self.main_player = Entity(
self._display_surface, ["assets/rocket.png"], Point(550, 700)
)
# self.main_player = Player(self._display_surface, [
# "assets/rocket.png"], Point(550, 700))
self.DRAW_NEXT_ENTITY = pygame.USEREVENT + 1
self.ENEMY_FIRE_INTERVAL = pygame.USEREVENT + 2
self.score, self.lives = 0, 3
self.score_label_position = Point(775, 20)
self.lives_label_position = Point(775, 60)
def draw(self) -> None:
draw_loop = True
pygame.time.set_timer(self.DRAW_NEXT_ENTITY, 300)
pygame.time.set_timer(self.ENEMY_FIRE_INTERVAL, 2000)
will_move = False
enemy_group = pygame.sprite.Group()
player_group = pygame.sprite.Group()
enemy_laser_group = pygame.sprite.Group()
player_group.add(self.main_player)
# print(player_group)
for x, row in enumerate(self.entities.matrix):
for y, column in enumerate(row):
enemy_group.add(column.entity)
# FIXME
while draw_loop and not self.break_from_draw:
positions = self.entities.scan_column() # FIXME: this code is not working
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == self.DRAW_NEXT_ENTITY:
self._display_surface.fill(pygame.Color("black"))
enemy_group.update(1)
elif event.type == self.ENEMY_FIRE_INTERVAL:
for position in random.choices(positions, k=2):
x, y = position.container
__laser = self.entities.matrix[x][y].entity.fire(
Direction.SOUTH.value, True
)
enemy_laser_group.add(__laser)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.main_player.fire(Direction.NORTH.value)
if event.key == pygame.K_LEFT:
self.main_player.position.x -= 20
if event.key == pygame.K_RIGHT:
self.main_player.position.x += 20
will_move = True
elif event.type != pygame.KEYDOWN:
will_move = False
if pygame.sprite.groupcollide(
self.main_player.lasers, enemy_group, True, True
):
self.score += 20
if pygame.sprite.groupcollide(
enemy_laser_group, player_group, False, False
):
print("hit the player!")
self.lives -= 1
self._display_surface.fill(self.background_color)
enemy_group.draw(self._display_surface)
self.main_player.draw()
self.main_player.lasers.draw(self._display_surface)
enemy_laser_group.draw(self._display_surface)
enemy_laser_group.update()
if not enemy_group:
draw_loop = False
self.write_text(
f"Score: {self.score}",
self.score_label_position,
pygame.font.SysFont(None, 30),
)
self.write_text(
f"Lives: {self.lives}",
self.lives_label_position,
pygame.font.SysFont(None, 30),
)
self.main_player.update(1)
pygame.display.flip()
self.fps_meter.tick(60)
| 32.025316
| 86
| 0.594862
| 3,809
| 0.752767
| 0
| 0
| 0
| 0
| 0
| 0
| 775
| 0.153162
|
9ad73e40610067893659f1466d9493e1d1fdb576
| 49
|
py
|
Python
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 59
|
2015-08-29T10:51:34.000Z
|
2021-11-03T10:00:25.000Z
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 162
|
2018-02-16T05:13:03.000Z
|
2021-05-14T02:47:37.000Z
|
ledger/checkout/models.py
|
jawaidm/ledger
|
7094f3320d6a409a2a0080e70fa7c2b9dba4a715
|
[
"Apache-2.0"
] | 22
|
2015-08-10T10:46:18.000Z
|
2020-04-04T07:11:55.000Z
|
from oscar.apps.checkout.models import * # noqa
| 24.5
| 48
| 0.755102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.122449
|
9ad97cd25d6ffe7ca83c1fced680d4dc39e56290
| 1,642
|
py
|
Python
|
api/serializers.py
|
mariomtzjr/podemos_test
|
5efaf02a19aa8c4849e3ad0108546e95af524126
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
mariomtzjr/podemos_test
|
5efaf02a19aa8c4849e3ad0108546e95af524126
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
mariomtzjr/podemos_test
|
5efaf02a19aa8c4849e3ad0108546e95af524126
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from apps.calendarioPago.models import CalendarioPago
from apps.cliente.models import Cliente
from apps.cuenta.models import Cuenta
from apps.grupo.models import Grupo
from apps.miembro.models import Miembro
from apps.transaccion.models import Transaccion
# Serializers define the API representation.
class CalendarioPagoSerializer(serializers.ModelSerializer):
class Meta:
model = CalendarioPago
fields = ['id', 'cuenta_id', 'num_pago', 'monto', 'fecha_pago', 'estatus', ]
class ClienteSerializer(serializers.ModelSerializer):
class Meta:
model = Cliente
fields = ['id', 'nombre', ]
class MiembroSerializer(serializers.ModelSerializer):
cliente = ClienteSerializer(source='cliente_id', read_only=True)
class Meta:
model = Miembro
fields = ['cliente']
class MiembrosSerializer(serializers.ModelSerializer):
class Meta:
model = Miembro
fields = ['id', 'grupo_id', 'cliente_id']
class GrupoSerializer(serializers.ModelSerializer):
miembros = MiembroSerializer(many=True)
class Meta:
model = Grupo
fields = ['id', 'nombre', 'miembros']
class GruposSerializer(serializers.ModelSerializer):
class Meta:
model = Grupo
fields = ['id', 'nombre', ]
class TransaccionSerializer(serializers.ModelSerializer):
class Meta:
model = Transaccion
fields = ['id', 'cuenta_id', 'fecha', 'monto', ]
class CuentaSerializer(serializers.ModelSerializer):
class Meta:
model = Cuenta
fields = ['id', 'grupo_id', 'estatus', 'monto', 'saldo']
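# Hedged usage sketch (requires the Django project and data; the nested `miembros` field
# assumes Grupo exposes a reverse relation with that name):
# grupo = Grupo.objects.prefetch_related('miembros').first()
# GrupoSerializer(grupo).data
# # -> {'id': ..., 'nombre': ..., 'miembros': [{'cliente': {'id': ..., 'nombre': ...}}, ...]}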
| 26.483871
| 84
| 0.694275
| 1,277
| 0.77771
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.155907
|
9ada5e1bb0d72f096389f3d35f059bd13ec5be47
| 8,194
|
py
|
Python
|
emmet/markup/format/html.py
|
emmetio/py-emmet
|
9cbb42f482526d7df18ba632b3b3f2ed3b7653a5
|
[
"MIT"
] | 29
|
2019-11-12T16:15:15.000Z
|
2022-02-06T10:51:25.000Z
|
emmet/markup/format/html.py
|
emmetio/py-emmet
|
9cbb42f482526d7df18ba632b3b3f2ed3b7653a5
|
[
"MIT"
] | 3
|
2020-04-25T11:02:53.000Z
|
2021-11-25T10:39:09.000Z
|
emmet/markup/format/html.py
|
emmetio/py-emmet
|
9cbb42f482526d7df18ba632b3b3f2ed3b7653a5
|
[
"MIT"
] | 7
|
2020-04-25T09:42:54.000Z
|
2021-02-16T20:29:41.000Z
|
import re
from .walk import walk, WalkState
from .utils import caret, is_inline_element, is_snippet, push_tokens, should_output_attribute
from .comment import comment_node_before, comment_node_after, CommentWalkState
from ...abbreviation import Abbreviation, AbbreviationNode, AbbreviationAttribute
from ...abbreviation.tokenizer.tokens import Field
from ...config import Config
from ...output_stream import tag_name, self_close, attr_name, is_boolean_attribute, attr_quote, is_inline
from ...list_utils import some, find_index, get_item
re_html_tag = re.compile(r'<([\w\-:]+)[\s>]')
class HTMLWalkState(WalkState):
__slots__ = ('comment')
def html(abbr: Abbreviation, config: Config):
state = HTMLWalkState(config)
state.comment = CommentWalkState(config)
walk(abbr, element, state)
return state.out.value
def element(node: AbbreviationNode, index: int, items: list, state: HTMLWalkState, walk_next: callable):
out = state.out
config = state.config
fmt = should_format(node, index, items, state)
# Pick offset level for current node
level = get_indent(state)
out.level += level
if fmt: out.push_newline(True)
if node.name:
name = tag_name(node.name, config)
comment_node_before(node, state)
out.push_string('<%s' % name)
if node.attributes:
for attr in node.attributes:
if should_output_attribute(attr):
push_attribute(attr, state)
if node.self_closing and not node.children and not node.value:
out.push_string('%s>' % self_close(config))
else:
out.push_string('>')
if not push_snippet(node, state, walk_next):
if node.value:
inner_format = some(has_newline, node.value) or starts_with_block_tag(node.value, config)
if inner_format:
out.level += 1
out.push_newline(out.level)
push_tokens(node.value, state)
if inner_format:
out.level -= 1
out.push_newline(out.level)
_next(node.children, walk_next)
if not node.value and not node.children:
inner_format = config.options.get('output.formatLeafNode') or \
node.name in config.options.get('output.formatForce', [])
if inner_format:
out.level += 1
out.push_newline(out.level)
push_tokens(caret, state)
if inner_format:
out.level -= 1
out.push_newline(out.level)
out.push_string('</%s>' % name)
comment_node_after(node, state)
elif not push_snippet(node, state, walk_next) and node.value:
# A text-only node (snippet)
push_tokens(node.value, state)
_next(node.children, walk_next)
if fmt and index == len(items) - 1 and state.parent:
offset = 0 if is_snippet(state.parent) else 1
out.push_newline(out.level - offset)
out.level -= level
def push_attribute(attr: AbbreviationAttribute, state: WalkState):
"Outputs given attribute’s content into output stream"
out = state.out
config = state.config
if attr.name:
name = attr_name(attr.name, config)
l_quote = attr_quote(attr, config, True)
r_quote = attr_quote(attr, config, False)
value = attr.value
if is_boolean_attribute(attr, config) and not value:
# If attribute value is omitted and it’s a boolean value, check for
# `compactBoolean` option: if it’s disabled, set value to attribute name
# (XML style)
if not config.options.get('output.compactBoolean'):
value = [name]
elif not value:
value = caret
out.push_string(' %s' % name)
if value:
out.push_string('=%s' % l_quote)
push_tokens(value, state)
out.push_string(r_quote)
elif config.options.get('output.selfClosingStyle') != 'html':
out.push_string('=%s%s' % (l_quote, r_quote))
def push_snippet(node: AbbreviationNode, state: WalkState, walk_next: callable):
if node.value and node.children:
# We have a value and child nodes. In case if value contains fields,
# we should output children as a content of first field
field_ix = find_index(is_field, node.value)
if field_ix != -1:
push_tokens(node.value[0:field_ix], state)
line = state.out.line
pos = field_ix + 1
_next(node.children, walk_next)
# If there was a line change, trim leading whitespace for better result
if state.out.line != line and isinstance(get_item(node.value, pos), str):
state.out.push_string(get_item(node.value, pos).lstrip())
pos += 1
push_tokens(node.value[pos:], state)
return True
return False
def should_format(node: AbbreviationNode, index: int, items: list, state: WalkState):
"Check if given node should be formatted in its parent context"
parent = state.parent
config = state.config
if not config.options.get('output.format'):
return False
if index == 0 and not parent:
# Do not format very first node
return False
# Do not format single child of snippet
if parent and is_snippet(parent) and len(items) == 1:
return False
if is_snippet(node):
# Adjacent text-only/snippet nodes
fmt = is_snippet(get_item(items, index - 1)) or is_snippet(get_item(items, index + 1)) or \
some(has_newline, node.value) or \
(some(is_field, node.value) and node.children)
if fmt:
return True
if is_inline(node, config):
# Check if inline node is the next sibling of block-level node
if index == 0:
# First node in parent: format if it’s followed by a block-level element
for item in items:
if not is_inline(item, config):
return True
elif not is_inline(items[index - 1], config):
# Node is right after block-level element
return True
if config.options.get('output.inlineBreak'):
# check for adjacent inline elements before and after current element
adjacent_inline = 1
before = index - 1
after = index + 1
while before >= 0 and is_inline_element(items[before], config):
adjacent_inline += 1
before -= 1
while after < len(items) and is_inline_element(items[after], config):
adjacent_inline += 1
after += 1
if adjacent_inline >= config.options.get('output.inlineBreak'):
return True
# Edge case: inline node contains node that should receive formatting
for i, child in enumerate(node.children):
if should_format(child, i, node.children, state):
return True
return False
return True
def get_indent(state: WalkState):
"Returns indentation offset for given node"
parent = state.parent
if not parent or is_snippet(parent) or (parent.name and parent.name in state.config.options.get('output.formatSkip')):
return 0
return 1
def has_newline(value):
"Check if given node value contains newlines"
return '\r' in value or '\n' in value if isinstance(value, str) else False
def starts_with_block_tag(value: list, config: Config) -> bool:
"Check if given node value starts with block-level tag"
if value and isinstance(value[0], str):
m = re_html_tag.match(value[0])
if m and m.group(1).lower() not in config.options.get('inlineElements'):
return True
return False
def _next(items: list, walk_next: callable):
for i, item in enumerate(items):
walk_next(item, i, items)
def is_field(item):
return isinstance(item, Field)
| 34.868085
| 122
| 0.611667
| 59
| 0.007193
| 0
| 0
| 0
| 0
| 0
| 0
| 1,355
| 0.165204
|
9adc3fed9b6a076b0f178e8d91edfcd0fe2b0e5f
| 2,584
|
py
|
Python
|
secant_method.py
|
FixingMind5/proyecto_metodos_I
|
4eaed1991ad18574984bcc0010394ecb9c4a620e
|
[
"MIT"
] | null | null | null |
secant_method.py
|
FixingMind5/proyecto_metodos_I
|
4eaed1991ad18574984bcc0010394ecb9c4a620e
|
[
"MIT"
] | null | null | null |
secant_method.py
|
FixingMind5/proyecto_metodos_I
|
4eaed1991ad18574984bcc0010394ecb9c4a620e
|
[
"MIT"
] | null | null | null |
"""Secant Method module"""
from numeric_method import NumericMethod
class SecantMethod(NumericMethod):
"""Secant method class"""
def secant_method(self, previous_value, value):
"""The secant method itself
        @param previous_value: first value of the interval
        @param value: second value of the interval
        @returns the result of the evaluation
"""
result = value - previous_value
result /= self.function(value) - self.function(previous_value)
result *= self.function(value)
return value - result
def solve(self):
"""Solves the problem
@raise ValueError if the number of iterations isn't at
        least 2
"""
iteration = 1
(previous_n, n, next_n) = (self.x_0, self.x, 0.0)
(f_previous_x, f_x, f_next_x) = (0.0, 0.0, 0.0)
(error, converge) = (0.0, False)
MAX_ITERATIONS = int(input("Número de iteraciones a realizar: "))
if MAX_ITERATIONS <= 1:
raise ValueError("Asegurate de usar al menos 2 iteraciones")
print("Comienza el metodo")
print(f"Iteracion | Xi | Xi+1 | f(Xi) | f(Xi+1) | error absoluto")
print(
f"{iteration} \t | {n} | {next_n} | {f_previous_x} | {f_x} | {error if error else '' }")
while iteration <= MAX_ITERATIONS:
f_previous_x = self.function(previous_n)
f_x = self.function(n)
root_in_interval = self.function(
previous_n) * self.function(n) == 0
if root_in_interval and iteration == 1:
print((
"Una raiz a la ecuacion dada es "
"es uno de los extremos del intervalo"
))
break
next_n = self.secant_method(previous_n, n)
f_next_x = self.function(next_n)
if f_next_x == 0:
converge = True
print(f"La raiz del intervalo es {next_n}")
break
if iteration > 1:
error = self.absolute_error(n, next_n)
row = f"{iteration} \t | {n} | {next_n} | {f_x} | {f_next_x} | {error if error else '' }"
print(row)
if error <= self.TOLERANCE and abs(f_next_x) <= self.TOLERANCE:
print(f"Una raiz aproximada de la ecuación es {next_n}")
converge = True
break
n = next_n
iteration += 1
if not converge:
print("El método no converge a una raiz")
| 32.708861
| 101
| 0.540635
| 2,516
| 0.972555
| 0
| 0
| 0
| 0
| 0
| 0
| 891
| 0.344414
|
9add394027ddb25c4a3c822d581f2bbeacc67447
| 245
|
py
|
Python
|
variables.py
|
bestend/korquad
|
3b92fffcc950ff584e0f9755ea9b04f8bece7a31
|
[
"MIT"
] | 1
|
2019-09-06T04:47:14.000Z
|
2019-09-06T04:47:14.000Z
|
variables.py
|
bestend/korquad
|
3b92fffcc950ff584e0f9755ea9b04f8bece7a31
|
[
"MIT"
] | 6
|
2020-01-28T22:12:50.000Z
|
2022-02-09T23:30:45.000Z
|
variables.py
|
bestend/korquad
|
3b92fffcc950ff584e0f9755ea9b04f8bece7a31
|
[
"MIT"
] | null | null | null |
import os
import re
MODEL_FILE_FORMAT = 'weights.{epoch:02d}-{val_loss:.2f}.h5'
MODEL_REGEX_PATTERN = re.compile(r'^.*weights\.(\d+)\-\d+\.\d+\.h5$')
LAST_MODEL_FILE_FORMAT = 'last.h5'
TEAMS_WEBHOOK_URL = os.environ.get('TEAMS_WEBHOOK_URL', '')
| 35
| 69
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.42449
|
9ade61531561b4025a09449d1265b8472b175b17
| 977
|
py
|
Python
|
svm.py
|
sciencementors2019/Image-Processer
|
a1b036f38166722d2bb0ee44de1f3558880312c5
|
[
"MIT"
] | null | null | null |
svm.py
|
sciencementors2019/Image-Processer
|
a1b036f38166722d2bb0ee44de1f3558880312c5
|
[
"MIT"
] | null | null | null |
svm.py
|
sciencementors2019/Image-Processer
|
a1b036f38166722d2bb0ee44de1f3558880312c5
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn import svm
from mlxtend.plotting import plot_decision_regions
import matplotlib.pyplot as plt
# Create arbitrary dataset for example
df = pd.DataFrame({'Planned_End': np.random.uniform(low=-5, high=5, size=50),
'Actual_End': np.random.uniform(low=-1, high=1, size=50),
                   'Late': np.random.randint(low=0, high=3, size=50)}
)
# Fit Support Vector Machine Classifier
X = df[['Planned_End', 'Actual_End']]
y = df['Late']
clf = svm.SVC(decision_function_shape='ovo')
clf.fit(X.values, y.values)
# Plot Decision Region using mlxtend's awesome plotting function
plot_decision_regions(X=X.values,
y=y.values,
clf=clf,
legend=2)
# Update plot object with X/Y axis labels and Figure Title
plt.xlabel(X.columns[0], size=14)
plt.ylabel(X.columns[1], size=14)
plt.title('SVM Decision Region Boundary', size=16)
plt.show()  # render the figure when run as a script
| 32.566667
| 85
| 0.663255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 296
| 0.302968
|
9ae1bc0d9c8249afc93cd2e786ee58fa70373ce4
| 2,544
|
py
|
Python
|
tests/importing/test_read_genes.py
|
EKingma/Transposonmapper
|
1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b
|
[
"Apache-2.0"
] | 2
|
2021-11-23T09:39:35.000Z
|
2022-01-25T15:49:45.000Z
|
tests/importing/test_read_genes.py
|
EKingma/Transposonmapper
|
1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b
|
[
"Apache-2.0"
] | 76
|
2021-07-07T18:31:44.000Z
|
2022-03-22T10:04:40.000Z
|
tests/importing/test_read_genes.py
|
EKingma/Transposonmapper
|
1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b
|
[
"Apache-2.0"
] | 2
|
2021-09-16T10:56:20.000Z
|
2022-01-25T12:33:25.000Z
|
from transposonmapper.importing import (
load_default_files,read_genes
)
def test_output_format():
a,b,c=load_default_files(gff_file=None,essentials_file=None,gene_names_file=None)
a_0,b_0,c_0=read_genes(gff_file=a,essentials_file=b,gene_names_file=c)
assert type(a_0)==dict, "the gene coordinates have to be a dict"
    assert type(b_0)==dict, "the essential genes have to be a dict"
    assert type(c_0)==dict, "the gene names have to be a dict"
def test_output_length():
a,b,c=load_default_files(gff_file=None,essentials_file=None,gene_names_file=None)
a_0,b_0,c_0=read_genes(gff_file=a,essentials_file=b,gene_names_file=c)
assert len(a_0)>=6600, "the total number of genes should not be less than 6600"
assert len(b_0)<6600, "the total number of essential genes should not be more than the number of genes"
assert len(c_0)>=6600, "the total number of genes should not be less than 6600"
def test_output_content_gff():
a,b,c=load_default_files(gff_file=None,essentials_file=None,gene_names_file=None)
a_0,b_0,c_0=read_genes(gff_file=a,essentials_file=b,gene_names_file=c)
#read the first value of the dict
first_value=next(iter(a_0.values()))
# read the first key
first_key=next(iter(a_0))
assert first_value==['I', 335, 649, '+'], "The first value of the gene coordinates is wrong"
assert first_key== 'YAL069W', "The first gene in the array should be YAL069W"
def test_output_content_essentials():
a,b,c=load_default_files(gff_file=None,essentials_file=None,gene_names_file=None)
a_0,b_0,c_0=read_genes(gff_file=a,essentials_file=b,gene_names_file=c)
#read the first value of the dict
first_value=next(iter(b_0.values()))
# read the first key
first_key=next(iter(b_0))
assert first_value==['I', 147594, 151166, '-'], "The first value of the gene coordinates is wrong"
assert first_key== 'YAL001C', "The first gene in the array should be YAL001C"
def test_output_content_names():
a,b,c=load_default_files(gff_file=None,essentials_file=None,gene_names_file=None)
a_0,b_0,c_0=read_genes(gff_file=a,essentials_file=b,gene_names_file=c)
#read the first value of the dict
first_value=next(iter(c_0.values()))
# read the first key
first_key=next(iter(c_0))
assert first_value==['AAC1'], "The first value of the gene names is wrong"
assert first_key== 'YMR056C', "The first gene in the array should be YMR056C"
| 39.138462
| 107
| 0.717374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 802
| 0.315252
|
9ae33df6172e3d387be468447aa95067143972f3
| 4,477
|
py
|
Python
|
src/apps/tractatusapp/views_spacetree.py
|
lambdamusic/wittgensteiniana
|
f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4
|
[
"MIT"
] | 1
|
2018-04-24T09:55:40.000Z
|
2018-04-24T09:55:40.000Z
|
src/apps/tractatusapp/views_spacetree.py
|
lambdamusic/wittgensteiniana
|
f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4
|
[
"MIT"
] | null | null | null |
src/apps/tractatusapp/views_spacetree.py
|
lambdamusic/wittgensteiniana
|
f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4
|
[
"MIT"
] | 1
|
2020-11-25T08:53:49.000Z
|
2020-11-25T08:53:49.000Z
|
"""
Using
http://thejit.org/static/v20/Docs/files/Options/Options-Canvas-js.html#Options.Canvas
"""
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
import json
import os
from libs.myutils.myutils import printDebug
from tractatusapp.models import *
def spacetree(request):
"""
Visualizes a space tree - ORIGINAL VIEW (USED TO GENERATE HTML VERSION)
"""
# DEFAULT JSON FOR TESTING THE APP
to_json = {
'id': "190_0",
'name': "Pearl Jam",
'children': [
{
'id': "306208_1",
'name': "Pearl Jam & Cypress Hill",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},},
{ 'id': "191_0",
'name': "Pink Floyd",
'children': [{
'id': "306209_1",
'name': "Guns and Roses",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},
}],
}]}
# reconstruct the tree as a nested dictionary
TESTING = False
def nav_tree(el):
d = {}
d['id'] = el.name
d['name'] = el.name
full_ogden = generate_text(el)
preview_ogden = "%s .." % ' '.join(el.textOgden().split()[:10]).replace("div", "span")
d['data'] = {'preview_ogden' : preview_ogden, 'full_ogden' : full_ogden}
if el.get_children() and not TESTING:
d['children'] = [nav_tree(x) for x in el.get_children()]
else:
d['children'] = []
return d
treeroot = {'id': "root", 'name': "TLP", 'children': [],
'data': {'preview_ogden' : "root node", 'full_ogden' : generate_text("root")}}
# level0 = TextUnit.tree.root_nodes()
# TODO - make this a mptt tree function
level0 = TextUnit.tree_top()
for x in level0:
treeroot['children'] += [nav_tree(x)]
context = {
'json': json.dumps(treeroot),
'experiment_description': """
The Space Tree Tractatus is an experimental visualization of the <br />
<a target='_blank' href="http://en.wikipedia.org/wiki/Tractatus_Logico-Philosophicus">Tractatus Logico-Philosophicus</a>, a philosophical text by Ludwig Wittgenstein.
<br /><br />
<b>Click</b> on a node to move the tree and center that node. The text contents of the node are displayed at the bottom of the page. <b>Use the mouse wheel</b> to zoom and <b>drag and drop the canvas</b> to pan.
<br /><br />
<small>Made with <a target='_blank' href="http://www.python.org/">Python</a> and the <a target='_blank' href="http://thejit.org/">JavaScript InfoVis Toolkit</a>. More info on this <a href="http://www.michelepasin.org/blog/2012/07/08/wittgenstein-and-the-javascript-infovis-toolkit/">blog post</a></small>
"""
}
return render(request,
'tractatusapp/spacetree/spacetree.html',
context)
def generate_text(instance, expression="ogden"):
""" creates the html needed for the full text representation of the tractatus
includes the number-title, and small links to next and prev satz
# TODO: add cases for different expressions
"""
if instance == "root":
return """<div class='tnum'>Tractatus Logico-Philosophicus<span class='smalllinks'></small></div>
<div>Ludwig Wittgenstein, 1921.<br />
Translated from the German by C.K. Ogden in 1922<br />
Original title: Logisch-Philosophische Abhandlung, Wilhelm Ostwald (ed.), Annalen der Naturphilosophie, 14 (1921)</div>
"""
else:
next, prev = "", ""
next_satz = instance.tractatus_next()
prev_satz = instance.tractatus_prev()
if next_satz:
next = "<a title='Next Sentence' href='javascript:focus_node(%s);'>→ %s</a>" % (next_satz.name, next_satz.name)
if prev_satz:
prev = "<a title='Previous Sentence' href='javascript:focus_node(%s);'>%s ←</a>" % (prev_satz.name, prev_satz.name)
# HACK src images rendered via JS in the template cause WGET errors
# hence they are hidden away in this visualization
# TODO find a more elegant solution
text_js_ready = instance.textOgden().replace('src="', '-src=\"src image omitted ')
t = "<div class='tnum'><span class='smalllinks'>%s</span>%s<span class='smalllinks'>%s</span></div>%s" % (prev, instance.name, next, text_js_ready)
return t
| 33.916667
| 309
| 0.663837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,957
| 0.660487
|
9ae3c34cb81d8405b95cc94d6b0a73cbfa7be42a
| 14,772
|
py
|
Python
|
vumi/blinkenlights/metrics_workers.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/blinkenlights/metrics_workers.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | null | null | null |
vumi/blinkenlights/metrics_workers.py
|
apopheniac/vumi
|
e04bf32a0cf09292f03dfe8628798adff512b709
|
[
"BSD-3-Clause"
] | 2
|
2018-03-05T18:01:45.000Z
|
2019-11-02T19:34:18.000Z
|
# -*- test-case-name: vumi.blinkenlights.tests.test_metrics_workers -*-
import time
import random
import hashlib
from datetime import datetime
from twisted.python import log
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.protocol import DatagramProtocol
from vumi.service import Consumer, Publisher, Worker
from vumi.blinkenlights.metrics import (MetricsConsumer, MetricManager, Count,
Metric, Timer, Aggregator)
from vumi.blinkenlights.message20110818 import MetricMessage
class AggregatedMetricConsumer(Consumer):
"""Consumer for aggregate metrics.
Parameters
----------
callback : function (metric_name, values)
Called for each metric datapoint as it arrives. The
parameters are metric_name (str) and values (a list of
timestamp and value pairs).
"""
exchange_name = "vumi.metrics.aggregates"
exchange_type = "direct"
durable = True
routing_key = "vumi.metrics.aggregates"
def __init__(self, callback):
self.queue_name = self.routing_key
self.callback = callback
def consume_message(self, vumi_message):
msg = MetricMessage.from_dict(vumi_message.payload)
for metric_name, _aggregators, values in msg.datapoints():
self.callback(metric_name, values)
class AggregatedMetricPublisher(Publisher):
"""Publishes aggregated metrics.
"""
exchange_name = "vumi.metrics.aggregates"
exchange_type = "direct"
durable = True
routing_key = "vumi.metrics.aggregates"
def publish_aggregate(self, metric_name, timestamp, value):
# TODO: perhaps change interface to publish multiple metrics?
msg = MetricMessage()
msg.append((metric_name, (), [(timestamp, value)]))
self.publish_message(msg)
class TimeBucketConsumer(Consumer):
"""Consume time bucketed metric messages.
Parameters
----------
bucket : int
Bucket to consume time buckets from.
callback : function, f(metric_name, aggregators, values)
Called for each metric datapoint as it arrives.
The parameters are metric_name (str),
aggregator (list of aggregator names) and values (a
list of timestamp and value pairs).
"""
exchange_name = "vumi.metrics.buckets"
exchange_type = "direct"
durable = True
ROUTING_KEY_TEMPLATE = "bucket.%d"
def __init__(self, bucket, callback):
self.queue_name = self.ROUTING_KEY_TEMPLATE % bucket
self.routing_key = self.queue_name
self.callback = callback
def consume_message(self, vumi_message):
msg = MetricMessage.from_dict(vumi_message.payload)
for metric_name, aggregators, values in msg.datapoints():
self.callback(metric_name, aggregators, values)
class TimeBucketPublisher(Publisher):
"""Publish time bucketed metric messages.
Parameters
----------
buckets : int
Total number of buckets messages are being
distributed to.
bucket_size : int, in seconds
Size of each time bucket in seconds.
"""
exchange_name = "vumi.metrics.buckets"
exchange_type = "direct"
durable = True
ROUTING_KEY_TEMPLATE = "bucket.%d"
def __init__(self, buckets, bucket_size):
self.buckets = buckets
self.bucket_size = bucket_size
def find_bucket(self, metric_name, ts_key):
md5 = hashlib.md5("%s:%d" % (metric_name, ts_key))
return int(md5.hexdigest(), 16) % self.buckets
def publish_metric(self, metric_name, aggregates, values):
timestamp_buckets = {}
for timestamp, value in values:
ts_key = int(timestamp) / self.bucket_size
ts_bucket = timestamp_buckets.get(ts_key)
if ts_bucket is None:
ts_bucket = timestamp_buckets[ts_key] = []
ts_bucket.append((timestamp, value))
for ts_key, ts_bucket in timestamp_buckets.iteritems():
bucket = self.find_bucket(metric_name, ts_key)
routing_key = self.ROUTING_KEY_TEMPLATE % bucket
msg = MetricMessage()
msg.append((metric_name, aggregates, ts_bucket))
self.publish_message(msg, routing_key=routing_key)
class MetricTimeBucket(Worker):
"""Gathers metrics messages and redistributes them to aggregators.
:class:`MetricTimeBuckets` take metrics from the vumi.metrics
exchange and redistribute them to one of N :class:`MetricAggregator`
workers.
There can be any number of :class:`MetricTimeBucket` workers.
Configuration Values
--------------------
buckets : int (N)
The total number of aggregator workers. :class:`MetricAggregator`
workers must be started with bucket numbers 0 to N-1 otherwise
metric data will go missing (or at best be stuck in a queue
somewhere).
bucket_size : int, in seconds
The amount of time each time bucket represents.
"""
@inlineCallbacks
def startWorker(self):
log.msg("Starting a MetricTimeBucket with config: %s" % self.config)
buckets = int(self.config.get("buckets"))
log.msg("Total number of buckets %d" % buckets)
bucket_size = int(self.config.get("bucket_size"))
log.msg("Bucket size is %d seconds" % bucket_size)
self.publisher = yield self.start_publisher(TimeBucketPublisher,
buckets, bucket_size)
self.consumer = yield self.start_consumer(MetricsConsumer,
self.publisher.publish_metric)
class DiscardedMetricError(Exception):
pass
class MetricAggregator(Worker):
"""Gathers a subset of metrics and aggregates them.
:class:`MetricAggregators` work in sets of N.
Configuration Values
--------------------
bucket : int, 0 to N-1
An aggregator needs to know which number out of N it is. This is
its bucket number.
bucket_size : int, in seconds
The amount of time each time bucket represents.
lag : int, seconds, optional
The number of seconds after a bucket's time ends to wait
before processing the bucket. Default is 5s.
"""
_time = time.time # hook for faking time in tests
def _ts_key(self, time):
return int(time) / self.bucket_size
@inlineCallbacks
def startWorker(self):
log.msg("Starting a MetricAggregator with config: %s" % self.config)
bucket = int(self.config.get("bucket"))
log.msg("MetricAggregator bucket %d" % bucket)
self.bucket_size = int(self.config.get("bucket_size"))
log.msg("Bucket size is %d seconds" % self.bucket_size)
self.lag = float(self.config.get("lag", 5.0))
# ts_key -> { metric_name -> (aggregate_set, values) }
# values is a list of (timestamp, value) pairs
self.buckets = {}
# initialize last processed bucket
self._last_ts_key = self._ts_key(self._time() - self.lag) - 2
self.publisher = yield self.start_publisher(AggregatedMetricPublisher)
self.consumer = yield self.start_consumer(TimeBucketConsumer,
bucket, self.consume_metric)
self._task = LoopingCall(self.check_buckets)
done = self._task.start(self.bucket_size, False)
done.addErrback(lambda failure: log.err(failure,
"MetricAggregator bucket checking task died"))
def check_buckets(self):
"""Periodically clean out old buckets and calculate aggregates."""
# key for previous bucket
current_ts_key = self._ts_key(self._time() - self.lag) - 1
for ts_key in self.buckets.keys():
if ts_key <= self._last_ts_key:
log.err(DiscardedMetricError("Throwing way old metric data: %r"
% self.buckets[ts_key]))
del self.buckets[ts_key]
elif ts_key <= current_ts_key:
aggregates = []
ts = ts_key * self.bucket_size
items = self.buckets[ts_key].iteritems()
for metric_name, (agg_set, values) in items:
for agg_name in agg_set:
agg_metric = "%s.%s" % (metric_name, agg_name)
agg_func = Aggregator.from_name(agg_name)
agg_value = agg_func([v[1] for v in values])
aggregates.append((agg_metric, agg_value))
for agg_metric, agg_value in aggregates:
self.publisher.publish_aggregate(agg_metric, ts,
agg_value)
del self.buckets[ts_key]
self._last_ts_key = current_ts_key
def consume_metric(self, metric_name, aggregates, values):
if not values:
return
ts_key = self._ts_key(values[0][0])
metrics = self.buckets.get(ts_key, None)
if metrics is None:
metrics = self.buckets[ts_key] = {}
metric = metrics.get(metric_name)
if metric is None:
metric = metrics[metric_name] = (set(), [])
existing_aggregates, existing_values = metric
existing_aggregates.update(aggregates)
existing_values.extend(values)
def stopWorker(self):
self._task.stop()
self.check_buckets()
class MetricsCollectorWorker(Worker):
@inlineCallbacks
def startWorker(self):
log.msg("Starting %s with config: %s" % (
type(self).__name__, self.config))
yield self.setup_worker()
self.consumer = yield self.start_consumer(
AggregatedMetricConsumer, self.consume_metrics)
def stopWorker(self):
log.msg("Stopping %s" % (type(self).__name__,))
return self.teardown_worker()
def setup_worker(self):
pass
def teardown_worker(self):
pass
def consume_metrics(self, metric_name, values):
raise NotImplementedError()
class GraphitePublisher(Publisher):
"""Publisher for sending messages to Graphite."""
exchange_name = "graphite"
exchange_type = "topic"
durable = True
auto_delete = False
delivery_mode = 2
require_bind = False # Graphite uses a topic exchange
def publish_metric(self, metric, value, timestamp):
self.publish_raw("%f %d" % (value, timestamp), routing_key=metric)
class GraphiteMetricsCollector(MetricsCollectorWorker):
"""Worker that collects Vumi metrics and publishes them to Graphite."""
@inlineCallbacks
def setup_worker(self):
self.graphite_publisher = yield self.start_publisher(GraphitePublisher)
def consume_metrics(self, metric_name, values):
for timestamp, value in values:
self.graphite_publisher.publish_metric(
metric_name, value, timestamp)
class UDPMetricsProtocol(DatagramProtocol):
def __init__(self, ip, port):
# NOTE: `host` must be an IP, not a hostname.
self._ip = ip
self._port = port
def startProtocol(self):
self.transport.connect(self._ip, self._port)
def send_metric(self, metric_string):
return self.transport.write(metric_string)
class UDPMetricsCollector(MetricsCollectorWorker):
"""Worker that collects Vumi metrics and publishes them over UDP."""
DEFAULT_FORMAT_STRING = '%(timestamp)s %(metric_name)s %(value)s\n'
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S%z'
@inlineCallbacks
def setup_worker(self):
self.format_string = self.config.get(
'format_string', self.DEFAULT_FORMAT_STRING)
self.timestamp_format = self.config.get(
'timestamp_format', self.DEFAULT_TIMESTAMP_FORMAT)
self.metrics_ip = yield reactor.resolve(self.config['metrics_host'])
self.metrics_port = int(self.config['metrics_port'])
self.metrics_protocol = UDPMetricsProtocol(
self.metrics_ip, self.metrics_port)
self.listener = yield reactor.listenUDP(0, self.metrics_protocol)
def teardown_worker(self):
return self.listener.stopListening()
def consume_metrics(self, metric_name, values):
for timestamp, value in values:
timestamp = datetime.utcfromtimestamp(timestamp)
metric_string = self.format_string % {
'timestamp': timestamp.strftime(self.timestamp_format),
'metric_name': metric_name,
'value': value,
}
self.metrics_protocol.send_metric(metric_string)
class RandomMetricsGenerator(Worker):
"""Worker that publishes a set of random metrics.
Useful for tests and demonstrations.
Configuration Values
--------------------
manager_period : float in seconds, optional
How often to have the internal metric manager send metrics
messages. Default is 5s.
generator_period: float in seconds, optional
How often the random metric loop should send values to the
metric manager. Default is 1s.
"""
# callback for tests, f(worker)
# (or anyone else that wants to be notified when metrics are generated)
on_run = None
@inlineCallbacks
def startWorker(self):
log.msg("Starting the MetricsGenerator with config: %s" % self.config)
manager_period = float(self.config.get("manager_period", 5.0))
log.msg("MetricManager will sent metrics every %s seconds" %
manager_period)
generator_period = float(self.config.get("generator_period", 1.0))
log.msg("Random metrics values will be generated every %s seconds" %
generator_period)
self.mm = yield self.start_publisher(MetricManager, "vumi.random.",
manager_period)
self.counter = self.mm.register(Count("count"))
self.value = self.mm.register(Metric("value"))
self.timer = self.mm.register(Timer("timer"))
self.next = Deferred()
self.task = LoopingCall(self.run)
self.task.start(generator_period)
@inlineCallbacks
def run(self):
if random.choice([True, False]):
self.counter.inc()
self.value.set(random.normalvariate(2.0, 0.1))
with self.timer:
d = Deferred()
wait = random.uniform(0.0, 0.1)
reactor.callLater(wait, lambda: d.callback(None))
yield d
if self.on_run is not None:
self.on_run(self)
def stopWorker(self):
self.mm.stop()
self.task.stop()
log.msg("Stopping the MetricsGenerator")
| 36.384236
| 79
| 0.641822
| 14,097
| 0.954305
| 3,875
| 0.262321
| 4,022
| 0.272272
| 0
| 0
| 4,422
| 0.29935
|
9ae436efa8485153023aeda553abb0051a92e57f
| 1,401
|
py
|
Python
|
src/sentry/web/forms/base_organization_member.py
|
JannKleen/sentry
|
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
|
[
"BSD-3-Clause"
] | 1
|
2019-02-27T15:13:06.000Z
|
2019-02-27T15:13:06.000Z
|
src/sentry/web/forms/base_organization_member.py
|
rmax/sentry
|
8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88
|
[
"BSD-3-Clause"
] | 5
|
2020-07-17T11:20:41.000Z
|
2021-05-09T12:16:53.000Z
|
src/sentry/web/forms/base_organization_member.py
|
zaasmi/codeerrorhelp
|
1ab8d3e314386b9b2d58dad9df45355bf6014ac9
|
[
"BSD-3-Clause"
] | 2
|
2021-01-26T09:53:39.000Z
|
2022-03-22T09:01:47.000Z
|
from __future__ import absolute_import
from django import forms
from django.db import transaction
from sentry.models import (
OrganizationMember,
OrganizationMemberTeam,
Team,
)
class BaseOrganizationMemberForm(forms.ModelForm):
"""
Base form used by AddOrganizationMemberForm, InviteOrganizationMemberForm,
and EditOrganizationMemberForm
"""
teams = forms.ModelMultipleChoiceField(
queryset=Team.objects.none(),
widget=forms.CheckboxSelectMultiple(),
required=False,
)
role = forms.ChoiceField()
class Meta:
fields = ('role', )
model = OrganizationMember
def __init__(self, *args, **kwargs):
allowed_roles = kwargs.pop('allowed_roles')
all_teams = kwargs.pop('all_teams')
super(BaseOrganizationMemberForm, self).__init__(*args, **kwargs)
self.fields['role'].choices = ((r.id, r.name) for r in allowed_roles)
self.fields['teams'].queryset = all_teams
@transaction.atomic
def save_team_assignments(self, organization_member):
OrganizationMemberTeam.objects.filter(organizationmember=organization_member).delete()
OrganizationMemberTeam.objects.bulk_create(
[
OrganizationMemberTeam(team=team, organizationmember=organization_member)
for team in self.cleaned_data['teams']
]
)
| 29.1875
| 94
| 0.68237
| 1,206
| 0.860814
| 0
| 0
| 407
| 0.290507
| 0
| 0
| 177
| 0.126338
|
9ae66ae64bed27a4c419e21d360710c58e9c3114
| 1,589
|
py
|
Python
|
turbinia/workers/fsstat.py
|
dfjxs/turbinia
|
23a97d9d826cbcc51e6b5dfd50d85251506bf242
|
[
"Apache-2.0"
] | 1
|
2021-05-31T19:44:50.000Z
|
2021-05-31T19:44:50.000Z
|
turbinia/workers/fsstat.py
|
dfjxs/turbinia
|
23a97d9d826cbcc51e6b5dfd50d85251506bf242
|
[
"Apache-2.0"
] | null | null | null |
turbinia/workers/fsstat.py
|
dfjxs/turbinia
|
23a97d9d826cbcc51e6b5dfd50d85251506bf242
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Task to run fsstat on disk partitions."""
from __future__ import unicode_literals
import os
from turbinia import TurbiniaException
from turbinia.workers import TurbiniaTask
from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
class FsstatTask(TurbiniaTask):
REQUIRED_STATES = [state.ATTACHED]
def run(self, evidence, result):
"""Task to execute fsstat.
Args:
evidence (Evidence object): The evidence we will process.
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
fsstat_output = os.path.join(self.output_dir, 'fsstat.txt')
output_evidence = ReportText(source_path=fsstat_output)
cmd = ['sudo', 'fsstat', evidence.device_path]
result.log('Running fsstat as [{0!s}]'.format(cmd))
self.execute(
cmd, result, stdout_file=fsstat_output, new_evidence=[output_evidence],
close=True)
return result
| 33.104167
| 79
| 0.733166
| 715
| 0.449969
| 0
| 0
| 0
| 0
| 0
| 0
| 920
| 0.57898
|
9ae7351fe81fa3901619faf1757d1f1b2dffbe49
| 401
|
py
|
Python
|
app/django-doubtfire-api/endpoint/urls.py
|
JiatengTao/speaker-verification-api
|
89c0b82c49498426c4d35104e0e4935c193a3cb1
|
[
"MIT"
] | null | null | null |
app/django-doubtfire-api/endpoint/urls.py
|
JiatengTao/speaker-verification-api
|
89c0b82c49498426c4d35104e0e4935c193a3cb1
|
[
"MIT"
] | null | null | null |
app/django-doubtfire-api/endpoint/urls.py
|
JiatengTao/speaker-verification-api
|
89c0b82c49498426c4d35104e0e4935c193a3cb1
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from django.conf.urls import url
from endpoint.views import (
enroll_user,
validate_recording,
check_redis_health,
redirect_flower_dashboard,
)
urlpatterns = [
path("enroll", enroll_user),
path("validate", validate_recording),
path("redis-healthcheck", check_redis_health, name="up"),
path("flower", redirect_flower_dashboard),
]
| 25.0625
| 61
| 0.733167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.122195
|
9ae9da1c04d49fc47628f3418837d002feeee3c7
| 3,096
|
py
|
Python
|
back/src/crud.py
|
Celeo/wiki_elm
|
620caf74b4cc17d3ffe3231493df15e84bfcf67f
|
[
"MIT"
] | null | null | null |
back/src/crud.py
|
Celeo/wiki_elm
|
620caf74b4cc17d3ffe3231493df15e84bfcf67f
|
[
"MIT"
] | null | null | null |
back/src/crud.py
|
Celeo/wiki_elm
|
620caf74b4cc17d3ffe3231493df15e84bfcf67f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import List, Optional
import bcrypt
from sqlalchemy.orm import Session
from . import models, schemas
def get_user(db: Session, id: int) -> models.User:
"""Return a single user by id.
Args:
db (Session): database connection
id (int): id of the user
Returns:
models.User: user
"""
return db.query(models.User).filter(models.User.id == id).first()
def get_user_by_name(db: Session, name: str) -> models.User:
"""Return a single user by name.
Args:
db (Session): database connection
name (str): name of the user
Returns:
models.User: user
"""
return db.query(models.User).filter(models.User.name == name).first()
def get_all_articles(db: Session) -> List[models.Article]:
"""Return all articles.
Args:
db (Session): database connection
Returns:
List[models.Article]: list of articles
"""
return db.query(models.Article).all()
def get_article(db: Session, id: int) -> models.Article:
"""Return a single article by id.
Args:
db (Session): database connection
id (int): id of the article
Returns:
models.Article: article
"""
return db.query(models.Article).filter(models.Article.id == id).first()
def create_user(db: Session, user: schemas.UserCreate) -> None:
"""Create a new user.
Args:
db (Session): database connection
user: (schemas.UserCreate): creation data
"""
new_user = models.User(name=user.name)
# bcrypt operates on bytes; decode so the stored hash matches check_user's str handling
new_user.password = bcrypt.hashpw(user.password.encode('UTF-8'), bcrypt.gensalt()).decode('UTF-8')
db.add(new_user)
db.commit()
def check_user(db: Session, name: str, password: str) -> Optional[models.User]:
"""Return true if the name and password match.
Args:
db (Session): database connection
name (str): name of the user to check
password (str): password to check against
Returns:
Optional[models.User]: user if the password matches, otherwise None
"""
from_db = get_user_by_name(db, name)
if not from_db:
return None
if bcrypt.checkpw(password.encode('UTF-8'), from_db.password.encode('UTF-8')):
return from_db
return None
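# Illustrative usage sketch (not part of the original module; `db` is assumed
# to be a Session obtained from the application's sessionmaker, and the
# credentials are hypothetical):
#
#     user = check_user(db, "alice", "s3cret")
#     if user is None:
#         ...  # wrong name or password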
def create_article(db: Session, article: schemas.ArticleCreate, creator_id: int) -> None:
"""Create a new article.
Args:
db (Session): database connection
article (schemas.ArticleCreate): article creation data
creator_id (int): user id of the creator
"""
new_article = models.Article(**article.dict(), created_by=creator_id, time_created=datetime.utcnow())
db.add(new_article)
db.commit()
def update_article(db: Session, article: schemas.ArticleUpdate) -> None:
"""Update an article.
Args:
db (Session): database connection
article (schemas.ArticleUpdate): article update data
"""
from_db = get_article(db, article.id)
if article.title:
from_db.title = article.title
if article.content:
from_db.content = article.content
db.commit()
| 26.016807
| 105
| 0.648256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,428
| 0.46124
|
9ae9dc9146555c9b41506690dc497c2bf3438943
| 170
|
py
|
Python
|
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | null | null | null |
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | 4
|
2021-06-08T22:27:42.000Z
|
2022-03-12T00:51:07.000Z
|
commands/cmd_invite.py
|
cygnus-dev/python01
|
e0111ef7031f2c931d433d3dc6449c6740a7880e
|
[
"MIT"
] | null | null | null |
async def run(ctx):
await ctx.send(''' `bot invite link:`
<https://discord.com/api/oauth2/authorize?client_id=732933945057869867&permissions=538569921&scope=bot>''')
| 42.5
| 107
| 0.747059
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.994118
| 129
| 0.758824
|
9aea27159d7833c105fb4af0a9c01c188110c93d
| 2,693
|
py
|
Python
|
polymorphic/tests/test_utils.py
|
likeanaxon/django-polymorphic
|
ad4e6e90c82f897300c1c135bd7a95e4b2d802a3
|
[
"BSD-3-Clause"
] | 1
|
2021-03-12T17:42:37.000Z
|
2021-03-12T17:42:37.000Z
|
polymorphic/tests/test_utils.py
|
likeanaxon/django-polymorphic
|
ad4e6e90c82f897300c1c135bd7a95e4b2d802a3
|
[
"BSD-3-Clause"
] | 10
|
2020-02-12T01:46:41.000Z
|
2022-02-10T09:00:03.000Z
|
polymorphic/tests/test_utils.py
|
likeanaxon/django-polymorphic
|
ad4e6e90c82f897300c1c135bd7a95e4b2d802a3
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T15:14:47.000Z
|
2020-04-18T15:14:47.000Z
|
from django.test import TransactionTestCase
from polymorphic.models import PolymorphicModel, PolymorphicTypeUndefined
from polymorphic.tests.models import (
Enhance_Base,
Enhance_Inherit,
Model2A,
Model2B,
Model2C,
Model2D,
)
from polymorphic.utils import (
get_base_polymorphic_model,
reset_polymorphic_ctype,
sort_by_subclass,
)
class UtilsTests(TransactionTestCase):
def test_sort_by_subclass(self):
self.assertEqual(
sort_by_subclass(Model2D, Model2B, Model2D, Model2A, Model2C),
[Model2A, Model2B, Model2C, Model2D, Model2D],
)
def test_reset_polymorphic_ctype(self):
"""
Test that the polymorphic_ctype_id can be restored.
"""
Model2A.objects.create(field1="A1")
Model2D.objects.create(field1="A1", field2="B2", field3="C3", field4="D4")
Model2B.objects.create(field1="A1", field2="B2")
Model2B.objects.create(field1="A1", field2="B2")
Model2A.objects.all().update(polymorphic_ctype_id=None)
with self.assertRaises(PolymorphicTypeUndefined):
list(Model2A.objects.all())
reset_polymorphic_ctype(Model2D, Model2B, Model2D, Model2A, Model2C)
self.assertQuerysetEqual(
Model2A.objects.order_by("pk"),
[Model2A, Model2D, Model2B, Model2B],
transform=lambda o: o.__class__,
)
def test_get_base_polymorphic_model(self):
"""
Test that finding the base polymorphic model works.
"""
# Finds the base from every level (including lowest)
self.assertIs(get_base_polymorphic_model(Model2D), Model2A)
self.assertIs(get_base_polymorphic_model(Model2C), Model2A)
self.assertIs(get_base_polymorphic_model(Model2B), Model2A)
self.assertIs(get_base_polymorphic_model(Model2A), Model2A)
# Properly handles multiple inheritance
self.assertIs(get_base_polymorphic_model(Enhance_Inherit), Enhance_Base)
# Ignores PolymorphicModel itself.
self.assertIs(get_base_polymorphic_model(PolymorphicModel), None)
def test_get_base_polymorphic_model_skip_abstract(self):
"""
Skipping abstract models that can't be used for querying.
"""
class A(PolymorphicModel):
class Meta:
abstract = True
class B(A):
pass
class C(B):
pass
self.assertIs(get_base_polymorphic_model(A), None)
self.assertIs(get_base_polymorphic_model(B), B)
self.assertIs(get_base_polymorphic_model(C), B)
self.assertIs(get_base_polymorphic_model(C, allow_abstract=True), A)
| 32.445783
| 82
| 0.671742
| 2,322
| 0.862235
| 0
| 0
| 0
| 0
| 0
| 0
| 395
| 0.146677
|
9aeae4d01c050a9274a24e3e6c5783d7fc583318
| 2,098
|
py
|
Python
|
blockchain/utils.py
|
TheEdgeOfRage/blockchain
|
f75764b5a5a87337200b14d1909077c31e2dbdc1
|
[
"MIT"
] | null | null | null |
blockchain/utils.py
|
TheEdgeOfRage/blockchain
|
f75764b5a5a87337200b14d1909077c31e2dbdc1
|
[
"MIT"
] | null | null | null |
blockchain/utils.py
|
TheEdgeOfRage/blockchain
|
f75764b5a5a87337200b14d1909077c31e2dbdc1
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 <pavle.portic@tilda.center>
#
# Distributed under terms of the BSD 3-Clause license.
import hashlib
import itertools
import json
from decimal import Decimal
from multiprocessing import (
cpu_count,
Pool,
Process,
Queue
)
class DecimalJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return float(obj)
return super(DecimalJsonEncoder, self).default(obj)
def dumps(*data, **kwargs):
return json.dumps(
data,
cls=DecimalJsonEncoder,
**kwargs,
)
def do_pooled_pow(last_proof, last_hash, difficulty):
queue = Queue()
with Pool(1) as p:
result = p.starmap_async(pool_worker, ((
queue,
i,
last_proof,
last_hash,
difficulty,
) for i in itertools.count()), chunksize=100)
proof = queue.get()
result.wait()
p.terminate()
return proof
def pool_worker(queue, proof, last_proof, last_hash, difficulty):
if valid_proof(last_proof, proof, last_hash, difficulty):
queue.put(proof)
return proof
return None
def do_process_pow(last_proof, last_hash, difficulty):
queue = Queue()
processes = [
Process(
target=process_worker,
args=(
queue,
last_proof,
last_hash,
difficulty,
step,
)
) for step in range(cpu_count())
]
for p in processes:
p.start()
proof = queue.get()
for p in processes:
p.terminate()
return proof
def process_worker(queue, last_proof, last_hash, difficulty, step):
proof = step
while not valid_proof(last_proof, proof, last_hash, difficulty):
proof += step
queue.put(proof)
return
def valid_proof(last_proof, proof, last_hash, difficulty):
"""
Validates the Proof
:param last_proof: <int> Previous Proof
:param proof: <int> Current Proof
:param last_hash: <str> The hash of the Previous Block
:return: <bool> True if correct, False if not.
"""
guess = f'{last_proof}{proof}{last_hash}'.encode()
guess_hash = hashlib.sha256(guess)
binary_hash = ''.join(format(n, '08b') for n in guess_hash.digest())
return binary_hash[:difficulty] == '0' * difficulty
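# Worked example (illustrative values, not from the original module): with
# last_proof=1, proof=2 and last_hash='abc', the guess is b'12abc'; its
# SHA-256 digest is expanded to a 256-character binary string, and the proof
# is accepted only when the first `difficulty` characters are all '0'.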
| 18.900901
| 69
| 0.702574
| 174
| 0.082897
| 0
| 0
| 0
| 0
| 0
| 0
| 417
| 0.198666
|
9aebd92051cfcf6d0045079f9f922a518fd301b8
| 5,317
|
py
|
Python
|
myfunds/web/views/joint_limits/limit/views/participants.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
myfunds/web/views/joint_limits/limit/views/participants.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
myfunds/web/views/joint_limits/limit/views/participants.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
import peewee as pw
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from myfunds.core.models import Account
from myfunds.core.models import Category
from myfunds.core.models import JointLimitParticipant
from myfunds.web import auth
from myfunds.web import notify
from myfunds.web import utils
from myfunds.web.constants import FundsDirection
from myfunds.web.forms import AddJointLimitParticipantStep1Form
from myfunds.web.forms import AddJointLimitParticipantStep2Form
from myfunds.web.forms import DeleteJointLimitParticipantForm
from myfunds.web.forms import JointLimitParticipantGetStepForm
from myfunds.web.views.joint_limits.limit.views import bp
from myfunds.web.views.joint_limits.limit.views import verify_limit
@bp.route("/participants")
@auth.login_required
@auth.superuser_required
@verify_limit
def participants():
participants = (
JointLimitParticipant.select()
.join(Category)
.join(Account)
.where(JointLimitParticipant.limit == g.limit)
)
return render_template("limit/participants.html", participants=participants)
@bp.route("/participants/new", methods=["GET", "POST"])
@auth.login_required
@auth.superuser_required
@verify_limit
def participants_new():
# fmt: off
current_participants_query = (
JointLimitParticipant
.select()
.join(Category)
.where(
(JointLimitParticipant.limit == g.limit.id)
)
)
# fmt: on
current_participants_account_ids = [
i.category.account_id for i in current_participants_query
]
if request.method == "GET":
# fmt: off
accounts = (
Account
.select(Account, pw.fn.COUNT(Category).alias("categories_count"))
.join(Category)
.where(
(Account.id.not_in(current_participants_account_ids))
& (pw.Value("categories_count") > 0)
)
.order_by(Account.username)
.group_by(Account.id)
)
# fmt: on
return render_template("limit/new_participant.html", step=1, accounts=accounts)
redirect_url = url_for("joint_limits.i.participants_new", limit_id=g.limit.id)
step_form = JointLimitParticipantGetStepForm(request.form)
utils.validate_form(step_form, redirect_url)
step = step_form.step.data
if step == 1:
form = AddJointLimitParticipantStep1Form(request.form)
utils.validate_form(form, redirect_url)
account_id = form.account_id.data
account = Account.get_or_none(id=account_id)
if account is None:
notify.error("Account not found.")
return redirect(redirect_url)
if account.id in current_participants_account_ids:
notify.error("Account is participated already.")
return redirect(redirect_url)
# fmt: off
categories = (
Category
.select(Category)
.where(
(Category.account_id == account.id)
& (Category.direction == FundsDirection.EXPENSE.value)
)
.order_by(Category.name)
)
# fmt: on
return render_template(
"limit/new_participant.html", step=2, account=account, categories=categories
)
elif step == 2:
# fmt: off
current_categories_query = (
JointLimitParticipant
.select()
.join(Category)
.where(
(JointLimitParticipant.limit == g.limit.id)
)
)
# fmt: on
current_categories_ids = [i.category_id for i in current_categories_query]
form = AddJointLimitParticipantStep2Form(request.form)
utils.validate_form(form, redirect_url)
account_id = form.account_id.data
category_id = form.category_id.data
# fmt: off
category = (
Category
.select()
.where(
(Category.id == category_id)
& (Category.account_id.not_in(current_participants_account_ids))
& (Category.id.not_in(current_categories_ids))
)
.first()
)
# fmt: on
if category is None:
notify.error("Category not found.")
return redirect(redirect_url)
JointLimitParticipant.create(limit=g.limit, category=category)
notify.info("New participant was added successfully.")
return redirect(url_for("joint_limits.i.participants", limit_id=g.limit.id))
@bp.route("/participants/delete", methods=["POST"])
@auth.login_required
@auth.superuser_required
@verify_limit
def delete_participant():
redirect_url = url_for("joint_limits.i.participants", limit_id=g.limit.id)
form = DeleteJointLimitParticipantForm(request.form)
utils.validate_form(form, redirect_url)
participant_id = form.participant_id.data
participant = JointLimitParticipant.get_or_none(id=participant_id, limit=g.limit)
if participant is None:
notify.error("Participant not found.")
return redirect(redirect_url)
participant.delete_instance()
notify.info("Participant was deleted.")
return redirect(redirect_url)
| 30.912791
| 88
| 0.658454
| 0
| 0
| 0
| 0
| 4,501
| 0.84653
| 0
| 0
| 547
| 0.102878
|
9aec3cbbdf80ed6024cc8bfdc62a6afaf2fdc1c4
| 6,854
|
py
|
Python
|
elyra/pipeline/component_parser_kfp.py
|
rachaelhouse/elyra
|
e2f474f26f65fd7c5ec5602f6e40a229dda0a081
|
[
"Apache-2.0"
] | null | null | null |
elyra/pipeline/component_parser_kfp.py
|
rachaelhouse/elyra
|
e2f474f26f65fd7c5ec5602f6e40a229dda0a081
|
[
"Apache-2.0"
] | null | null | null |
elyra/pipeline/component_parser_kfp.py
|
rachaelhouse/elyra
|
e2f474f26f65fd7c5ec5602f6e40a229dda0a081
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import SimpleNamespace
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import yaml
from elyra.pipeline.component import Component
from elyra.pipeline.component import ComponentParameter
from elyra.pipeline.component import ComponentParser
class KfpComponentParser(ComponentParser):
_component_platform = "kfp"
_file_types = [".yaml"]
def parse(self, registry_entry: SimpleNamespace) -> Optional[List[Component]]:
# Get YAML object from component definition
component_yaml = self._read_component_yaml(registry_entry)
if not component_yaml:
return None
# Assign component_id and description
component_id = self.get_component_id(registry_entry.location, component_yaml.get('name', ''))
description = ""
if component_yaml.get('description'):
# Remove whitespace characters and replace with spaces
description = ' '.join(component_yaml.get('description').split())
component_properties = self._parse_properties(component_yaml)
component = Component(id=component_id,
name=component_yaml.get('name'),
description=description,
runtime=self.component_platform,
location_type=registry_entry.location_type,
location=registry_entry.location,
properties=component_properties,
categories=registry_entry.categories)
return [component]
def _parse_properties(self, component_yaml: Dict[str, Any]) -> List[ComponentParameter]:
properties: List[ComponentParameter] = list()
# NOTE: Currently no runtime-specific properties are needed
# properties.extend(self.get_runtime_specific_properties())
# Then loop through and create custom properties
input_params = component_yaml.get('inputs', [])
for param in input_params:
# KFP components default to being required unless otherwise stated.
# Reference: https://www.kubeflow.org/docs/components/pipelines/reference/component-spec/#interface
required = True
if "optional" in param and param.get('optional') is True:
required = False
# Assign type, default to string
data_type = param.get('type', 'string')
# Set description and include parsed type information
description = self._format_description(description=param.get('description', ''),
data_type=data_type)
# Change type to reflect the type of input (inputValue vs inputPath)
data_type = self._get_adjusted_parameter_fields(component_body=component_yaml,
io_object_name=param.get('name'),
io_object_type="input",
parameter_type=data_type)
data_type, control_id, default_value = self.determine_type_information(data_type)
# Get value if provided
value = param.get('default', '')
ref = param.get('name').lower().replace(' ', '_')
properties.append(ComponentParameter(id=ref,
name=param.get('name'),
data_type=data_type,
value=(value or default_value),
description=description,
control_id=control_id,
required=required))
return properties
def get_runtime_specific_properties(self) -> List[ComponentParameter]:
"""
Define properties that are common to the KFP runtime.
"""
return [
ComponentParameter(
id="runtime_image",
name="Runtime Image",
data_type="string",
value="",
description="Docker image used as execution environment.",
control="readonly",
required=True,
)
]
def _read_component_yaml(self, registry_entry: SimpleNamespace) -> Optional[Dict[str, Any]]:
"""
Convert component_definition string to YAML object
"""
try:
return yaml.safe_load(registry_entry.component_definition)
except Exception as e:
self.log.debug(f"Could not read definition for component at "
f"location: '{registry_entry.location}' -> {str(e)}")
return None
def _get_adjusted_parameter_fields(self,
component_body: Dict[str, Any],
io_object_name: str,
io_object_type: str,
parameter_type: str) -> str:
"""
Change the parameter type according to whether it is a KFP path parameter (as opposed to a value parameter)
"""
adjusted_type = parameter_type
if "implementation" in component_body and "container" in component_body['implementation']:
if "command" in component_body['implementation']['container']:
for command in component_body['implementation']['container']['command']:
if isinstance(command, dict) and list(command.values())[0] == io_object_name and \
list(command.keys())[0] == f"{io_object_type}Path":
adjusted_type = "file"
if "args" in component_body['implementation']['container']:
for arg in component_body['implementation']['container']['args']:
if isinstance(arg, dict) and list(arg.values())[0] == io_object_name and \
list(arg.keys())[0] == f"{io_object_type}Path":
adjusted_type = "file"
return adjusted_type
| 45.390728
| 111
| 0.579224
| 5,966
| 0.870441
| 0
| 0
| 0
| 0
| 0
| 0
| 2,070
| 0.302013
|
9aedf1a23d553278d5b929adc837502da68eda10
| 356
|
py
|
Python
|
mayan/apps/mimetype/apps.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/mimetype/apps.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/mimetype/apps.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig
from .licenses import * # NOQA
class MIMETypesApp(MayanAppConfig):
name = 'mimetype'
verbose_name = _('MIME types')
def ready(self, *args, **kwargs):
super(MIMETypesApp, self).ready(*args, **kwargs)
| 22.25
| 56
| 0.727528
| 188
| 0.52809
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.078652
|
9aefb8bc9120b71f8727047442cac13c02b21950
| 388
|
py
|
Python
|
test/level.py
|
Matt-London/command-line-tutorial
|
5b6afeedb4075de114e8c91756ecf3a03645fde7
|
[
"MIT"
] | 1
|
2020-07-11T06:29:25.000Z
|
2020-07-11T06:29:25.000Z
|
test/level.py
|
Matt-London/Command-Line-Tutorial
|
5b6afeedb4075de114e8c91756ecf3a03645fde7
|
[
"MIT"
] | 15
|
2020-07-10T20:01:51.000Z
|
2020-08-10T05:23:47.000Z
|
test/level.py
|
Matt-London/command-line-tutorial
|
5b6afeedb4075de114e8c91756ecf3a03645fde7
|
[
"MIT"
] | null | null | null |
from packages.levels.Level import Level
import packages.levels.levels as Levels
import packages.resources.functions as function
import packages.resources.variables as var
from packages.filesystem.Directory import Directory
from packages.filesystem.File import File
var.bash_history = ("Check")
test = Level("Instruct", "Help", ("Check"))
test.instruct()
test.help()
print(test.check())
| 27.714286
| 51
| 0.796392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.07732
|
9af07d32c8be1202f3730dbd2847cb3a451513ad
| 1,235
|
py
|
Python
|
tests/test_buffers.py
|
TheCharmingCthulhu/cython-vst-loader
|
2d2d358515f24f4846ca664e5a9b366a207207a6
|
[
"MIT"
] | 23
|
2020-07-29T14:44:29.000Z
|
2022-01-07T05:29:16.000Z
|
tests/test_buffers.py
|
TheCharmingCthulhu/cython-vst-loader
|
2d2d358515f24f4846ca664e5a9b366a207207a6
|
[
"MIT"
] | 14
|
2020-09-09T02:38:24.000Z
|
2022-03-04T05:19:25.000Z
|
tests/test_buffers.py
|
TheCharmingCthulhu/cython-vst-loader
|
2d2d358515f24f4846ca664e5a9b366a207207a6
|
[
"MIT"
] | 2
|
2021-06-05T23:30:08.000Z
|
2021-06-06T19:58:59.000Z
|
# noinspection PyUnresolvedReferences
import unittest
from cython_vst_loader.vst_loader_wrapper import allocate_float_buffer, get_float_buffer_as_list, \
free_buffer, \
allocate_double_buffer, get_double_buffer_as_list
class TestBuffers(unittest.TestCase):
def test_float_buffer(self):
pointer = allocate_float_buffer(10, 12.345)
assert (pointer > 1000) # something like a pointer
list_object = get_float_buffer_as_list(pointer, 10)
assert (isinstance(list_object, list))
assert (len(list_object) == 10)
for element in list_object:
assert (self.roughly_equals(element, 12.345))
free_buffer(pointer)
def test_double_buffer(self):
pointer = allocate_double_buffer(10, 12.345)
assert (pointer > 1000) # something like a pointer
list_object = get_double_buffer_as_list(pointer, 10)
assert (isinstance(list_object, list))
assert (len(list_object) == 10)
for element in list_object:
assert (self.roughly_equals(element, 12.345))
free_buffer(pointer)
def roughly_equals(self, a: float, b: float) -> bool:
tolerance: float = 0.00001
return abs(a - b) < tolerance
| 36.323529
| 99
| 0.688259
| 1,004
| 0.812955
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.072065
|
9af148fc623927e65f3f0abe332698d9eddb80f8
| 1,520
|
py
|
Python
|
samples/17.multilingual-bot/translation/microsoft_translator.py
|
hangdong/botbuilder-python
|
8ff979a58fadc4356d76b9ce577f94da3245f664
|
[
"MIT"
] | null | null | null |
samples/17.multilingual-bot/translation/microsoft_translator.py
|
hangdong/botbuilder-python
|
8ff979a58fadc4356d76b9ce577f94da3245f664
|
[
"MIT"
] | null | null | null |
samples/17.multilingual-bot/translation/microsoft_translator.py
|
hangdong/botbuilder-python
|
8ff979a58fadc4356d76b9ce577f94da3245f664
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import uuid
import requests
class MicrosoftTranslator:
def __init__(self, subscription_key: str, subscription_region: str):
self.subscription_key = subscription_key
self.subscription_region = subscription_region
# Don't forget to replace with your Cog Services location!
# Our Flask route will supply two arguments: text_input and language_output.
# When the translate text button is pressed in our Flask app, the Ajax request
# will grab these values from our web app, and use them in the request.
# See main.js for Ajax calls.
async def translate(self, text_input, language_output):
base_url = "https://api.cognitive.microsofttranslator.com"
path = "/translate?api-version=3.0"
params = "&to=" + language_output
constructed_url = base_url + path + params
headers = {
"Ocp-Apim-Subscription-Key": self.subscription_key,
"Ocp-Apim-Subscription-Region": self.subscription_region,
"Content-type": "application/json",
"X-ClientTraceId": str(uuid.uuid4()),
}
# You can pass more than one object in body.
body = [{"text": text_input}]
response = requests.post(constructed_url, headers=headers, json=body)
json_response = response.json()
# for this sample, return the first translation
return json_response[0]["translations"][0]["text"]
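# Usage sketch (hypothetical key/region placeholders; a valid Azure Translator
# subscription is assumed):
#
#     import asyncio
#     translator = MicrosoftTranslator("<subscription-key>", "<region>")
#     french = asyncio.run(translator.translate("Hello, world!", "fr"))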
| 40
| 82
| 0.678947
| 1,394
| 0.917105
| 0
| 0
| 0
| 0
| 848
| 0.557895
| 708
| 0.465789
|
9af29a94a64ce15c2f18ac01d5658596e67aa248
| 48
|
py
|
Python
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | 2
|
2020-05-01T11:17:06.000Z
|
2020-11-23T10:37:24.000Z
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | 69
|
2020-03-26T15:39:26.000Z
|
2022-01-14T14:34:39.000Z
|
dachar/utils/__init__.py
|
roocs/dachar
|
687b6acb535f634791d13a435cded5f97cae8e76
|
[
"BSD-3-Clause"
] | null | null | null |
from .common import *
from .json_store import *
| 16
| 25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9af36b234d70f262e1618ab3933e4d7b9aedd9f4
| 2,760
|
py
|
Python
|
scraper/models.py
|
mrcnc/assessor-scraper
|
b502ebb157048d20294ca44ab0d30e3a44d86c08
|
[
"MIT"
] | null | null | null |
scraper/models.py
|
mrcnc/assessor-scraper
|
b502ebb157048d20294ca44ab0d30e3a44d86c08
|
[
"MIT"
] | null | null | null |
scraper/models.py
|
mrcnc/assessor-scraper
|
b502ebb157048d20294ca44ab0d30e3a44d86c08
|
[
"MIT"
] | 1
|
2019-02-14T04:01:40.000Z
|
2019-02-14T04:01:40.000Z
|
# -*- coding: utf-8 -*-
import os
import logging
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.engine.url import URL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from scraper import settings
Base = declarative_base()
def db_connect():
"""
Returns sqlalchemy engine instance
"""
if 'DATABASE_URL' in os.environ:
DATABASE_URL = os.environ['DATABASE_URL']
logging.debug("Connecting to %s", URL)
else:
DATABASE_URL = URL(**settings.DATABASE)
logging.debug("Connecting with settings %s", DATABASE_URL)
return create_engine(DATABASE_URL)
def create_tables(engine):
Base.metadata.create_all(engine)
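# Typical usage sketch (not part of the original module; assumes DATABASE_URL
# or settings.DATABASE is configured):
#
#     engine = db_connect()
#     create_tables(engine)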
class Property(Base):
__tablename__ = 'properties'
id = Column(Integer, primary_key=True)
property_key = Column(String, nullable=False)
todays_date = Column(String)
location = Column(String)
owner_name = Column(String)
mailing_address = Column(String)
municipal_district = Column(String)
location_address = Column(String)
tax_bill_number = Column(String)
property_class = Column(String)
special_tax_district = Column(String)
subdivision_name = Column(String)
land_area_sq_ft = Column(String)
zoning_district = Column(String)
building_area_sq_ft = Column(String)
square = Column(String)
lot = Column(String)
book = Column(String)
folio = Column(String)
line = Column(String)
parcel_map = Column(String)
legal_description = Column(String)
assessment_area = Column(String)
values = relationship('PropertyValue')
transfers = relationship('PropertyTransfer')
class PropertyValue(Base):
__tablename__ = 'property_values'
id = Column(Integer, primary_key=True)
property_id = Column(Integer, ForeignKey('properties.id'))
year = Column(String)
land_value = Column(String)
building_value = Column(String)
total_value = Column(String)
assessed_land_value = Column(String)
assessed_building_value = Column(String)
total_assessed_value = Column(String)
homestead_exemption_value = Column(String)
taxable_assessment = Column(String)
age_freeze = Column(String)
disability_freeze = Column(String)
assmnt_change = Column(String)
tax_contract = Column(String)
class PropertyTransfer(Base):
__tablename__ = 'property_transfers'
id = Column(Integer, primary_key=True)
property_id = Column(Integer, ForeignKey('properties.id'))
sale_transfer_date = Column(String)
price = Column(String)
grantor = Column(String)
grantee = Column(String)
notarial_archive_number = Column(String)
instrument_number = Column(String)
| 29.677419
| 73
| 0.721014
| 1,997
| 0.723551
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.094203
|
9af3a835ffd32ad662ca751cd48d5f535bf94f5d
| 487
|
py
|
Python
|
WeIrD-StRiNg-CaSe.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
WeIrD-StRiNg-CaSe.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | null | null | null |
WeIrD-StRiNg-CaSe.py
|
lovefov/Python
|
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
|
[
"MIT"
] | 1
|
2021-02-08T08:48:44.000Z
|
2021-02-08T08:48:44.000Z
|
def to_weird_case(string):
arr=string.split()
count=0
for i in arr:
tmp=list(i)
for j in range(len(tmp)):
if j%2==0:
tmp[j]=tmp[j].upper()
arr[count] = ''.join(tmp)
count+=1
return ' '.join(arr)
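# Example (illustrative input): every word gets its even-indexed characters
# upper-cased, e.g. to_weird_case("this is a test") returns "ThIs Is A TeSt".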
'''
A fairly nice alternative version
def to_weird_case(string):
recase = lambda s: "".join([c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s)])
return " ".join([recase(word) for word in string.split(" ")])
'''
| 23.190476
| 97
| 0.521561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.469307
|
9af63c97cc5b9b0bb2ddfde6ccac394409cbd012
| 1,573
|
py
|
Python
|
FTP_client/LHYlearning/Entry.py
|
welles2000/CCNProject
|
0f20718aa171571a952343d7a07c2f1c0f953a6e
|
[
"MulanPSL-1.0"
] | 2
|
2022-03-29T05:43:09.000Z
|
2022-03-29T14:29:46.000Z
|
FTP_client/LHYlearning/Entry.py
|
welles2000/CCNProject
|
0f20718aa171571a952343d7a07c2f1c0f953a6e
|
[
"MulanPSL-1.0"
] | null | null | null |
FTP_client/LHYlearning/Entry.py
|
welles2000/CCNProject
|
0f20718aa171571a952343d7a07c2f1c0f953a6e
|
[
"MulanPSL-1.0"
] | null | null | null |
# A classic object-oriented way to write a GUI
from tkinter import *
from tkinter import messagebox
class Application(Frame):
"""一个经典的GUI程序"""
def __init__(self,master=None):
super().__init__(master)
self.master = master
self.pack()
self.createWidget()
def createWidget(self):
"""创建组件"""
self.label01 = Label(self, text="用户名")
self.label01.pack()
# A StringVar is bound to the widget, giving two-way data binding
v1 = StringVar() # StringVar DoubleVar IntVar BooleanVar
self.entry01 = Entry(self, textvariable=v1)
self.entry01.pack()
v1.set("admin")
# Create the password field
self.label02 = Label(self, text="密码")
self.label02.pack()
# A StringVar is bound to the widget, giving two-way data binding
v2 = StringVar() # StringVar DoubleVar IntVar BooleanVar
self.entry02 = Entry(self, textvariable=v2, show="*")
self.entry02.pack()
self.btn01 = Button(self, text="登录", command=self.login)
self.btn01.pack()
# Create a quit button
self.btnQuit = Button(self, text="退出", command=self.master.destroy)
self.btnQuit.pack()
def login(self):
username = self.entry01.get()
pwd = self.entry02.get()
print("用户名:"+username)
print("密码:"+pwd)
if username == "lhy" and pwd == "whl":
messagebox.showinfo("登录界面", "您已登录,欢迎")
else:
messagebox.showinfo("登录界面", "密码错误")
if __name__ == '__main__':
root = Tk()
root.geometry("1280x720+200+300")
root.title("")
app = Application(master=root)
root.mainloop()
| 24.2
| 75
| 0.577241
| 1,514
| 0.85779
| 0
| 0
| 0
| 0
| 0
| 0
| 494
| 0.279887
|
9af728f0342a41c7e42c05bfe4ce250d82a4e42b
| 839
|
py
|
Python
|
curso-em-video/ex054.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
curso-em-video/ex054.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
curso-em-video/ex054.py
|
joseluizbrits/sobre-python
|
316143c341e5a44070a3b13877419082774bd730
|
[
"MIT"
] | null | null | null |
# Age of Majority Group
'''Write a program that reads the YEAR OF BIRTH of
SEVEN PEOPLE. At the end, show how many people have not
yet reached the age of majority and how many already have'''
from datetime import date
anoatual = date.today().year # Gets the current year configured on the machine
totalmaior = 0
totalmenor = 0
for pessoas in range(1, 8):
anonasc = int(input('Digite o ano de nascimento da {}ª pessoa: '.format(pessoas)))
if 1900 < anonasc < anoatual:
idade = anoatual - anonasc
if idade >= 21:
totalmaior += 1
else:
totalmenor += 1
else:
print('\033[31m''Ocorreu um erro no ano em que você digitou! Tente novamente.')
print('Há {} pessoas neste grupo que estão na maioridade'.format(totalmaior))
print('E há {} pessoas que ainda são menor de idade'.format(totalmenor))
| 38.136364
| 87
| 0.682956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 447
| 0.525882
|
9af8cf4aed2f78a490c8a32e60b1aabe24f15e72
| 2,160
|
py
|
Python
|
stellar/simulation/data.py
|
strfx/stellar
|
41b190eed016d2d6ad8548490a0c9620a02d711e
|
[
"MIT"
] | null | null | null |
stellar/simulation/data.py
|
strfx/stellar
|
41b190eed016d2d6ad8548490a0c9620a02d711e
|
[
"MIT"
] | null | null | null |
stellar/simulation/data.py
|
strfx/stellar
|
41b190eed016d2d6ad8548490a0c9620a02d711e
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import numpy as np
import png
from skimage.transform import resize
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
"""Load a preconstructred track to initialize world.
Args:
filename: Full path to the track file (png).
size: Width and height of the map
resolution: Resolution of the grid map (i.e. into how many cells)
one meter is divided into.
Returns:
An initialized gridmap based on the preconstructed track as
an n x m dimensional numpy array, where n is the width (num cells)
and m the height (num cells) - (after applying resolution).
"""
width_in_cells, height_in_cells = np.multiply(size, resolution)
world = np.array(png_to_ogm(
filename, normalized=True, origin='lower'))
# If the image is already in our desired shape, no need to rescale it
if world.shape == (height_in_cells, width_in_cells):
return world
# Otherwise, scale the image to our desired size.
resized_world = resize(world, (width_in_cells, height_in_cells))
return resized_world
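# Usage sketch (hypothetical file name and dimensions, not from the original
# module): load_world("track.png", size=(10, 5), resolution=100) returns a
# 1000 x 500 cell occupancy grid scaled from the PNG track image.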
def png_to_ogm(filename, normalized=False, origin='lower'):
"""Convert a png image to occupancy grid map.
Inspired by https://github.com/richardos/occupancy-grid-a-star
Args:
filename: Path to the png file.
normalized: Whether to normalize the data, i.e. to be in value range [0, 1]
origin: Point of origin (0,0)
Returns:
2D Array
"""
r = png.Reader(filename)
img = r.read()
img_data = list(img[2])
out_img = []
bitdepth = img[3]['bitdepth']
for i in range(len(img_data)):
out_img_row = []
for j in range(len(img_data[0])):
if j % img[3]['planes'] == 0:
if normalized:
out_img_row.append(img_data[i][j]*1.0/(2**bitdepth))
else:
out_img_row.append(img_data[i][j])
out_img.append(out_img_row)
if origin == 'lower':
out_img.reverse()
return out_img
| 29.189189
| 83
| 0.611574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,046
| 0.484259
|
9af8e51dd66ea49555fb4a24794f6c9c1dc7752a
| 885
|
py
|
Python
|
apps/user/serializers.py
|
major-hub/soil_app
|
ddd250161ad496afd4c8484f79500ff2657b51df
|
[
"MIT"
] | null | null | null |
apps/user/serializers.py
|
major-hub/soil_app
|
ddd250161ad496afd4c8484f79500ff2657b51df
|
[
"MIT"
] | null | null | null |
apps/user/serializers.py
|
major-hub/soil_app
|
ddd250161ad496afd4c8484f79500ff2657b51df
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from user.models import User
from main.exceptions.user_exceptions import UserException
user_exception = UserException
class UserRegisterSerializer(serializers.ModelSerializer):
password_confirmation = serializers.CharField(max_length=128)
class Meta:
model = User
fields = ['email', 'phone_number', 'first_name', 'last_name', 'password', 'password_confirmation']
def validate(self, attrs):
password_confirmation = attrs.pop('password_confirmation')
if password_confirmation != attrs.get('password'):
raise serializers.ValidationError({'non_field_errors': user_exception("NOT_MATCHED_PASSWORDS").message})
return attrs
class UserLoginSerializer(serializers.Serializer):
email = serializers.EmailField(max_length=255)
password = serializers.CharField(max_length=128)
| 32.777778
| 116
| 0.754802
| 719
| 0.812429
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.170621
|
9af8e62cf5607d29f1d31c790e20bc86925e4fe4
| 7,332
|
py
|
Python
|
bf_compiler.py
|
PurpleMyst/bf_compiler
|
51832ac9bb493b478c88f68798e99727cf43e180
|
[
"MIT"
] | 31
|
2018-03-09T15:40:46.000Z
|
2021-01-15T10:03:40.000Z
|
bf_compiler.py
|
PurpleMyst/bf_compiler
|
51832ac9bb493b478c88f68798e99727cf43e180
|
[
"MIT"
] | null | null | null |
bf_compiler.py
|
PurpleMyst/bf_compiler
|
51832ac9bb493b478c88f68798e99727cf43e180
|
[
"MIT"
] | 2
|
2018-03-09T23:59:28.000Z
|
2021-01-15T10:05:00.000Z
|
#!/usr/bin/env python3
import argparse
import ctypes
import os
import sys
from llvmlite import ir, binding as llvm
INDEX_BIT_SIZE = 16
def parse(bf):
bf = iter(bf)
result = []
for c in bf:
if c == "[":
result.append(parse(bf))
elif c == "]":
break
else:
result.append(c)
return result
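# For illustration (hypothetical input): parse("+[->+<]") returns
# ['+', ['-', '>', '+', '<']] -- each loop body becomes a nested list.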
def bf_to_ir(bf):
ast = parse(bf)
byte = ir.IntType(8)
int32 = ir.IntType(32)
size_t = ir.IntType(64)
void = ir.VoidType()
module = ir.Module(name=__file__)
main_type = ir.FunctionType(int32, ())
main_func = ir.Function(module, main_type, name="main")
entry = main_func.append_basic_block(name="entry")
builder = ir.IRBuilder(entry)
putchar_type = ir.FunctionType(int32, (int32,))
putchar = ir.Function(module, putchar_type, name="putchar")
getchar_type = ir.FunctionType(int32, ())
getchar = ir.Function(module, getchar_type, name="getchar")
bzero_type = ir.FunctionType(void, (byte.as_pointer(), size_t))
bzero = ir.Function(module, bzero_type, name="bzero")
index_type = ir.IntType(INDEX_BIT_SIZE)
index = builder.alloca(index_type)
builder.store(ir.Constant(index_type, 0), index)
tape_type = byte
tape = builder.alloca(tape_type, size=2 ** INDEX_BIT_SIZE)
builder.call(bzero, (tape, size_t(2 ** INDEX_BIT_SIZE)))
zero8 = byte(0)
one8 = byte(1)
eof = int32(-1)
def get_tape_location():
index_value = builder.load(index)
index_value = builder.zext(index_value, int32)
location = builder.gep(tape, (index_value,), inbounds=True)
return location
def compile_instruction(instruction):
if isinstance(instruction, list):
# You may initially analyze this code and think that it'll error
# due to there being multiple blocks with the same name (e.g. if we
# have two loops, there are two "preloop" blocks), but llvmlite
# handles that for us.
preloop = builder.append_basic_block(name="preloop")
# In the LLVM IR, every block needs to be terminated. Our builder
# is still at the end of the previous block, so we can just insert
# an unconditional branching to the preloop branch.
builder.branch(preloop)
builder.position_at_start(preloop)
# load tape value
location = get_tape_location()
tape_value = builder.load(location)
# check tape value
is_zero = builder.icmp_unsigned("==", tape_value, zero8)
# We'll now create *another* block, but we won't terminate the
# "preloop" block until later. This is because we need a reference
# to both the "body" and the "postloop" block to know where to
# jump.
body = builder.append_basic_block(name="body")
builder.position_at_start(body)
for inner_instruction in instruction:
compile_instruction(inner_instruction)
builder.branch(preloop)
postloop = builder.append_basic_block(name="postloop")
builder.position_at_end(preloop)
builder.cbranch(is_zero, postloop, body)
builder.position_at_start(postloop)
elif instruction == "+" or instruction == "-":
location = get_tape_location()
value = builder.load(location)
if instruction == "+":
new_value = builder.add(value, one8)
else:
new_value = builder.sub(value, one8)
builder.store(new_value, location)
elif instruction == ">" or instruction == "<":
index_value = builder.load(index)
if instruction == ">":
index_value = builder.add(index_value, index_type(1))
else:
index_value = builder.sub(index_value, index_type(1))
builder.store(index_value, index)
elif instruction == ".":
location = get_tape_location()
tape_value = builder.load(location)
tape_value = builder.zext(tape_value, int32)
builder.call(putchar, (tape_value,))
elif instruction == ",":
location = get_tape_location()
char = builder.call(getchar, ())
is_eof = builder.icmp_unsigned("==", char, eof)
with builder.if_else(is_eof) as (then, otherwise):
with then:
builder.store(zero8, location)
with otherwise:
char = builder.trunc(char, tape_type)
builder.store(char, location)
for instruction in ast:
compile_instruction(instruction)
builder.ret(int32(0))
return module
# courtesy of the llvmlite docs
def create_execution_engine():
"""
Create an ExecutionEngine suitable for JIT code generation on
the host CPU. The engine is reusable for an arbitrary number of
modules.
"""
# Create a target machine representing the host
target = llvm.Target.from_default_triple()
target_machine = target.create_target_machine()
# And an execution engine with an empty backing module
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
return engine
def main():
argp = argparse.ArgumentParser()
argp.add_argument("filename",
help="The brainfuck code file.")
argp.add_argument("-i", "--ir", action="store_true",
help="Print out the human-readable LLVM IR to stderr")
argp.add_argument('-r', '--run', action="store_true",
help="Run the brainfuck code with McJIT.")
argp.add_argument('-c', '--bitcode', action="store_true",
help="Emit a bitcode file.")
argp.add_argument('-o', '--optimize', action="store_true",
help="Optimize the bitcode.")
argv = argp.parse_args()
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
with open(argv.filename) as bf_file:
ir_module = bf_to_ir(bf_file.read())
basename = os.path.basename(argv.filename)
basename = os.path.splitext(basename)[0]
if argv.ir:
with open(basename + ".ll", "w") as f:
f.write(str(ir_module))
print("Wrote IR to", basename + ".ll")
binding_module = llvm.parse_assembly(str(ir_module))
binding_module.verify()
if argv.optimize:
# TODO: We should define our own pass order.
llvm.ModulePassManager().run(binding_module)
if argv.bitcode:
bitcode = binding_module.as_bitcode()
with open(basename + ".bc", "wb") as output_file:
output_file.write(bitcode)
print("Wrote bitcode to", basename + ".bc")
if argv.run:
with create_execution_engine() as engine:
engine.add_module(binding_module)
engine.finalize_object()
engine.run_static_constructors()
func_ptr = engine.get_function_address("main")
asm_main = ctypes.CFUNCTYPE(ctypes.c_int)(func_ptr)
result = asm_main()
sys.exit(result)
if __name__ == "__main__":
main()
| 31.2
| 79
| 0.610475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,429
| 0.194899
|
9afad36409d9c59fa007a59c5630a3d8610a0ebd
| 4,715
|
py
|
Python
|
dapbench/record_dap.py
|
cedadev/dapbench
|
e722c52f1d38d0ea008e177a1d68adff0a5daecc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
dapbench/record_dap.py
|
cedadev/dapbench
|
e722c52f1d38d0ea008e177a1d68adff0a5daecc
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
dapbench/record_dap.py
|
cedadev/dapbench
|
e722c52f1d38d0ea008e177a1d68adff0a5daecc
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-08-05T20:01:23.000Z
|
2019-08-05T20:01:23.000Z
|
#!/usr/bin/env python
# BSD Licence
# Copyright (c) 2011, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
"""
Execute a programme that makes NetCDF-API OPeNDAP calls, capturing
request events and timings.
This script uses 2 methods of capturing OPeNDAP requests:
1. It assumes CURL.VERBOSE=1 in ~/.dodsrc
2. It runs the command through "strace" to capture request timings
The result is a dapbench.dap_stats.DapStats object containing all OPeNDAP
requests made.
WARNING: It is possible to fool record_dap if the wrapped script
writes to stderr lines beginning with "* Connected to" or "> GET"
"""
import tempfile
import os, sys
from subprocess import Popen, PIPE
import re
import urllib
from dapbench.dap_request import DapRequest
from dapbench.dap_stats import DapStats, SingleTimestampRecorder, echofilter_to_stats
import logging
log = logging.getLogger(__name__)
TMP_PREFIX='record_dap-'
DODSRC = '.dodsrc'
class Wrapper(object):
def __init__(self, tmpdir=None):
if tmpdir is None:
tmpdir = tempfile.mkdtemp(prefix=TMP_PREFIX)
self.tmpdir = tmpdir
def check_dodsrc(self):
try:
rcpath = os.path.join(os.environ['HOME'], DODSRC)
assert os.path.exists(rcpath)
rcdata = open(rcpath).read()
mo = re.search(r'^\s*CURL.VERBOSE\s*=\s*1', rcdata, re.M)
assert mo
log.debug('CURL.VERBOSE=1 confirmed')
except AssertionError:
raise Exception("~/.dodsrc doesn't have CURL.VERBOSE defined")
def call(self, command):
self.check_dodsrc()
os.chdir(self.tmpdir)
cmd = 'strace -ttt -f -e trace=network %s' % command
log.info('Executing traced command: %s' % command)
log.debug('Full command: %s' % cmd)
pipe = Popen(cmd, shell=True, stderr=PIPE).stderr
recorder = SingleTimestampRecorder(self.iter_requests(pipe))
return recorder.stats
def iter_requests(self, pipe):
timestamp = None
host = 'unknown'
for line in pipe:
mo = re.match('\* Connected to ([^\s]+)', line)
if mo:
host = mo.group(1)
log.info('New Connection: %s' % host)
elif re.match('> GET ', line):
#!TODO: handle other stderr output from wrapped tool
req = urllib.unquote(line.strip()[2:])
request = DapRequest.from_get(host, req)
log.info('Request: %s %s' % (timestamp, request))
assert timestamp is not None
yield (timestamp, request)
timestamp = None
else:
mo = re.match('(?:\[pid\s*(\d+)\])?\s*(\d+\.\d+)\s+(send|recv)', line)
if mo:
pid, timestamp, syscall = mo.groups()
timestamp = float(timestamp)
#!TODO: track pids
# Mark terminal event
log.info('End: %s' % timestamp)
yield (timestamp, None)
def make_parser():
import optparse
usage = "%prog [options] [--] command"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-s', '--stats', action="store",
help="Store stats in the pickle file STATS")
parser.add_option('-d', '--dir', action='store',
default='.',
help="Execute in directory DIR")
parser.add_option('-l', '--loglevel', action='store',
default='INFO',
help="Set logging level")
parser.add_option('-p', '--proxy', action="store", metavar='PROXY_OUTPUT',
help="Record via grinder TCPProxy output file PROXY_OUTPUT. Command is ignored")
return parser
def record_curl(opts, args):
if not args:
sys.exit("No command specified")  # the option parser is local to main(), so exit directly
w = Wrapper(opts.dir)
command = ' '.join(args)
stats = w.call(command)
return stats
def record_proxy(opts, args):
echofile = open(opts.proxy)
return echofilter_to_stats(echofile)
def main(argv=sys.argv):
import pickle
parser = make_parser()
opts, args = parser.parse_args()
loglevel = getattr(logging, opts.loglevel)
logging.basicConfig(level=loglevel)
if opts.proxy:
stats = record_proxy(opts, args)
else:
stats = record_curl(opts, args)
stats.print_summary()
if opts.stats:
statfile = open(opts.stats, 'w')
pickle.dump(stats, statfile)
statfile.close()
if __name__ == '__main__':
main()
| 30.419355
| 103
| 0.599152
| 2,111
| 0.44772
| 1,078
| 0.228632
| 0
| 0
| 0
| 0
| 1,486
| 0.315164
|