import requests
import json
from io import BytesIO
from lxml import etree
from django.conf import settings
from bs4 import BeautifulSoup
api_key = settings.WORLDCAT_API
def search_worldcat(query:str):
url = f"http://www.worldcat.org/webservices/catalog/search/worldcat/opensearch?q={query}&wskey={api_key}"
response = requests.get(url)
tree = etree.parse(BytesIO(response.content))
#print(etree.tostring(tree, encoding='utf8', method='xml'))
data = []
for element in tree.findall("{http://www.w3.org/2005/Atom}entry"):
row= {}
for this in element.iter():
if this.tag.endswith("author"):
row['author'] =''.join(this.itertext()).replace('\n ','').replace('\n ','').replace(', author.','')
if this.tag.endswith("title"):
row['title'] =''.join(this.itertext())
if this.tag.endswith("id"):
row['id'] =''.join(this.itertext())
if this.tag.endswith("summary"):
row['summary'] =''.join(this.itertext())
data.append(row)
return data
def worldcat_soup(url:str):
result = {}
soup = BeautifulSoup(requests.get(url).text, features="lxml")
result['thumbnail'] = soup.findAll("img", {"class": "cover"})[0]['src']
result['title'] = soup.findAll("h1", {"class": "title"})[0].text
authors = soup.findAll("td", {"id": "bib-author-cell"})
authors = authors[0].findChildren("a" , recursive=False)
result['authors'] = [author.text for author in authors]
subjects = soup.findAll("li", {"class": "subject-term"})
data = []
for subject in subjects:
subject = subject.findChildren("a" , recursive=False)
subject = subject[0].text
data.append(subject)
result['subjects'] = data
return result
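# A minimal usage sketch (not part of the original module). It assumes Django settings are
# configured with a valid WORLDCAT_API key; the search term and record URL are placeholders.
if __name__ == "__main__":
    for record in search_worldcat("the hobbit"):
        print(record.get("title"), "-", record.get("id"))
    # details = worldcat_soup("https://www.worldcat.org/oclc/0000000")  # hypothetical record page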
|
#!/usr/bin/env python
# coding=utf-8
"""WTF OTP Fields."""
from .secret_key import OTPSecretKeyField
__all__ = ("OTPSecretKeyField", )
|
'''A module for computing A_lambda given the Ia_A_poly.pickle
file'''
import pickle
from numpy import power
f = open('Ia_A_poly.pickle', 'rb')  # pickle files must be opened in binary mode under Python 3
d = pickle.load(f)
f.close()
def A_lamb(f, EBV, Rv, redlaw='ccm'):
global d
order = d[redlaw]['order'][f]
coefs = d[redlaw][f]
Al = EBV*0
id = 0
for j in range(order+1):
for i in range(order+1):
if i + j <= order:
Al = Al + coefs[id]*power(Rv,i)*power(EBV,j+1)
id += 1
return Al
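# A minimal usage sketch (not from the original source): extinction for a hypothetical filter
# key 'B' at E(B-V) = 0.1 and Rv = 3.1, assuming 'B' exists in the loaded polynomial table.
if __name__ == '__main__':
    print(A_lamb('B', 0.1, 3.1, redlaw='ccm'))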
|
from kgx import LogicTermTransformer, ObanRdfTransformer, RdfOwlTransformer
from rdflib import Namespace
from rdflib.namespace import RDF
import rdflib
import gzip
# TODO: make this a proper test with assertions
def save(g, outfile):
w = LogicTermTransformer(g)
w.save('target/' + outfile + '.sxpr')
w1 = LogicTermTransformer(g, 'prolog')
w1.save("target/" + outfile + ".pl")
|
import argparse
import mxnet as mx
import logging
# import os, sys
# import numpy as np
from fa_dataset import get_dataIter_from_rec
from fa_config import fa_config as fc
from fa_eighth_mobile_net import get_symbol
from fa_eval_metric import ClassAccMetric
def get_fine_tune_model(sym, arg_params, num_classes, layer_name):
all_layers = sym.get_internals()
net = all_layers[layer_name+'_output']
net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
net = mx.symbol.sigmoid(data=net, name='sig')
net = mx.symbol.Custom(data=net, name='softmax', op_type='CrossEntropyLoss')
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
def multi_factor_scheduler(begin_epoch, epoch_size, lr_steps, factor=0.1):
steps = [int(x) for x in lr_steps.split(',')]
step_ = [epoch_size * (x-begin_epoch) for x in steps if x-begin_epoch > 0]
return mx.lr_scheduler.MultiFactorScheduler(step=step_, factor=factor) if len(step_) else None
def train_model(args, net):
kv = mx.kvstore.create(args.kv_store)
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
if len(ctx) == 0:
ctx = [mx.cpu()]
logger.warning('Use cpu for train')
else:
logger.info('Use %s for train' % ctx )
if net.startswith('legacy'):
prefix = net
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, args.begin_epoch)
(new_sym, new_args) = get_fine_tune_model(
sym, arg_params, args.num_classes, 'flatten0')
else:
new_sym = get_symbol(args)
aux_params = None
new_args = None
args.batch_size = args.batch_size * len(ctx)
epoch_size = max(int(args.num_examples / args.batch_size / kv.num_workers), args.num_epoch)
lr_scheduler = multi_factor_scheduler(args.begin_epoch, epoch_size, lr_steps=args.lr_steps)
optimizer_params = {
'learning_rate': args.lr,
'momentum': args.mom,
'wd': args.wd,
'lr_scheduler': lr_scheduler}
initializer = mx.init.Xavier(
rnd_type='gaussian', factor_type="in", magnitude=2)
model = mx.mod.Module(
context = ctx,
symbol = new_sym
)
train, val = get_dataIter_from_rec(args=args)
# eval_metric = list()
# eval_metric.append(mx.metric.np(acc))
# eval_metric.append(mx.metric.np(loss))
labels = {
'Gender' : 2,
'Mask' : 2,
'Glass' : 3,
'MouthOpen' : 3,
'EyesOpen' : 3
}
# add acc metric for every class
index = 0
pred_offset = 0
metric_list = []
for key in labels:
metric_list.append(ClassAccMetric(name=key, label_index=index, pred_offset=pred_offset, class_num=labels[key]))
index += 1
pred_offset += labels[key]
eval_metric = mx.metric.CompositeEvalMetric(metric_list)
def _epoch_callback(epoch, symbol, arg, aux):
mx.model.save_checkpoint(args.save_result, 1, symbol, arg, aux)
# checkpoint = mx.callback.do_checkpoint(args.save_result)
model.fit(train,
begin_epoch=args.begin_epoch,
num_epoch=args.num_epoch,
eval_data=val,
eval_metric=eval_metric,
validation_metric=eval_metric,
kvstore=kv,
optimizer='sgd',
optimizer_params=optimizer_params,
arg_params=new_args,
aux_params=aux_params,
initializer=initializer,
allow_missing=True,
batch_end_callback=mx.callback.Speedometer(args.batch_size, 20),
epoch_end_callback=_epoch_callback)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='train a model on a dataset')
parser.add_argument('--model', type=str, default= None)
parser.add_argument('--gpus', type=str, default= fc.gpus)
parser.add_argument('--batch-size', type=int, default= fc.batch_size)
parser.add_argument('--begin-epoch', type=int, default=0)
parser.add_argument('--image-shape', type=str, default='3,224,224')
parser.add_argument('--imgrec-train', type=str, default=fc.imgrec_train)
parser.add_argument('--imgrec-val', type=str, default=fc.imgrec_val)
parser.add_argument('--imgidx-train', type=str, default=fc.imgidx_train)
parser.add_argument('--imgidx-val', type=str, default=fc.imgidx_val)
parser.add_argument('--num-classes', type=int, default=fc.num_class)
parser.add_argument('--lr', type=float, default=fc.base_lr)
parser.add_argument('--lr-steps', type=str, default='50, 100, 200', help='steps of lr changing')
parser.add_argument('--num-epoch', type=int, default=fc.num_epoch)
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--save-result', type = str, default=fc.model_path, help='the save path')
parser.add_argument('--num-examples', type=int, default=fc.num_examples)
parser.add_argument('--mom', type=float, default=0.9, help='momentum for sgd')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay for sgd')
parser.add_argument('--save-name', type=str, help='the save name of model')
parser.add_argument('--rand-mirror', type=int, default=True, help='if do random mirror in training')
parser.add_argument('--cutoff', type=int, default=0, help='cut off aug')
parser.add_argument('--color', type=int, default=0, help='color jittering aug')
parser.add_argument('--ce-loss', default=False, action='store_true', help='if output ce loss')
args = parser.parse_args()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
kv = mx.kvstore.create(args.kv_store)
logging.info(args)
train_model(args=args, net="mobilenet")
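# Example invocation (hypothetical script name; real defaults come from fa_config):
#   python train_fa_attributes.py --gpus 0 --batch-size 64 --lr-steps 50,100,200 --num-epoch 200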
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#About Data
"""
Acknowledgements
Ahmed H, Traore I, Saad S. “Detecting opinion spams and fake news using text classification”, Journal of Security and Privacy, Volume 1, Issue 1, Wiley, January/February 2018.
Ahmed H, Traore I, Saad S. (2017) “Detection of Online Fake News Using N-Gram Analysis and Machine Learning Techniques. In: Traore I., Woungang I., Awad A. (eds) Intelligent, Secure, and Dependable Systems in Distributed and Cloud Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer, Cham (pp. 127-138).
Inspiration
Can you use this data set to make an algorithm able to determine if an article is fake news or not ?
source: Kaggle
"""
#Read the Data
df_True = pd.read_csv('data/True.csv')
df_Fake = pd.read_csv('data/Fake.csv')
# Pipeline
"""
Import
Data EDA
Train an LSTM Model
Evaluate trained model performance"""
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
import nltk
import re
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
#Now keras libraries
from tensorflow.keras.preprocessing.text import one_hot, Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional
from tensorflow.keras.models import Model
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score
import seaborn as sns
plt.style.use('ggplot')
print(df_Fake.head(5))
print(df_Fake.columns)
print(df_True.head())
print(df_True.columns)
#check null values
print(df_Fake.isnull().sum())
print(df_True.isnull().sum())
#Check Data type
print(df_Fake.subject.unique())
print(df_True.subject.unique())
#Get info
print(df_Fake.info())
print(df_True.info())
# Exploratory Data Analysis (EDA) and Feature Engineering
# A word of advice: before cleaning or deleting anything from a DataFrame, make a copy of the data
Fake_df = df_Fake.copy()
True_df = df_True.copy()
# Drop columns we do not need
df_Fake.drop(['date', 'subject'], axis=1, inplace=True)
df_True.drop(['date', 'subject'], axis=1, inplace=True)
#add a label/tag to data
df_Fake['class']=0
df_True['class']=1
# Plot the class distribution
plt.figure(figsize=(10,5))
plt.bar('Fake News', len(df_Fake), color='red')
plt.bar('True News', len(df_True), color='green')
plt.title('Distribution of Fake and True News', size=15)
plt.xlabel('News Type', size=15)
plt.ylabel('# of News Articles', size=15)
print('Difference in news articles:',len(df_Fake)-len(df_True))
# Concatenate both DataFrames
news_df = pd.concat([df_Fake, df_True], ignore_index=True, sort=False)
print(news_df)
# Merge the title into the body text so the model sees both (separated by a space)
news_df['text'] = news_df['title'] + ' ' + news_df['text']
news_df.drop('title', axis=1, inplace=True)
#define target and features
features = news_df['text']
targets = news_df['class']
#Split data as X_train and X_test
X_train, X_test, y_train, y_test = train_test_split(features, targets, test_size=0.20, random_state=18)
# NLP Engineering : cleaning texts
def normalize(data):
normalized = []
for i in data:
i = i.lower()
        # get rid of urls (raw strings avoid invalid-escape warnings)
        i = re.sub(r'https?://\S+|www\.\S+', '', i)
        # get rid of non-word characters and extra spaces
        i = re.sub(r'\W', ' ', i)
i = re.sub('\n', '', i)
i = re.sub(' +', ' ', i)
i = re.sub('^ ', '', i)
i = re.sub(' $', '', i)
normalized.append(i)
return normalized
X_train = normalize(X_train)
X_test = normalize(X_test)
#define Tokenizer numbers
max_vocab = 10000
tokenizer = Tokenizer(num_words=max_vocab)
#fit train data to Tokenizer
tokenizer.fit_on_texts(X_train)
# tokenize the text into vectors
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
# Build the RNN (LSTM) model
model = tf.keras.Sequential([
tf.keras.layers.Embedding(max_vocab, 32),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16)),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1)
])
print(model.summary())
# Preprocess (pad) the data for training
X_train = tf.keras.preprocessing.sequence.pad_sequences(X_train, padding='post', maxlen=256)
X_test = tf.keras.preprocessing.sequence.pad_sequences(X_test, padding='post', maxlen=256)
# Train an LSTM Model
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(1e-4),metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10,validation_split=0.1, batch_size=30, shuffle=True, callbacks=[early_stop])
history_dict = history.history
#get accuracy
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = history.epoch
#plot loss
plt.figure(figsize=(12,9))
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Loss', size=20)
plt.legend(prop={'size': 20})
plt.show()
#plot accuracy
plt.figure(figsize=(12,9))
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy', size=20)
plt.xlabel('Epochs', size=20)
plt.ylabel('Accuracy', size=20)
plt.legend(prop={'size': 20})
plt.ylim((0.5,1))
plt.show()
# Evaluate trained model performance on the test dataset
model.evaluate(X_test, y_test)
# Predict on the test dataset
pred = model.predict(X_test)
# The final Dense(1) layer has no activation, so convert the logits to probabilities first
pred = tf.sigmoid(pred).numpy()
binary_predictions = []
for i in pred:
    if i >= 0.5:
        binary_predictions.append(1)
    else:
        binary_predictions.append(0)
# Print evaluation results (true labels first, predictions second)
print('Accuracy on testing set:', accuracy_score(y_test, binary_predictions))
print('Precision on testing set:', precision_score(y_test, binary_predictions))
print('Recall on testing set:', recall_score(y_test, binary_predictions))
# Get and plot the confusion matrix (useful for understanding where the model makes mistakes)
matrix = confusion_matrix(y_test, binary_predictions, normalize='all')
plt.figure(figsize=(16, 9))
ax= plt.subplot()
sns.heatmap(matrix, annot=True, ax = ax)
# labels, title and ticks
ax.set_xlabel('Predicted Labels', size=20)
ax.set_ylabel('True Labels', size=20)
ax.set_title('Confusion Matrix', size=20)
ax.xaxis.set_ticklabels([0,1], size=15)
ax.yaxis.set_ticklabels([0,1], size=15)
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
word_index = list(tokenizer.word_index.keys())
word_index = word_index[:max_vocab-1]
# Write out embedding metadata and vector files that can be reused later (e.g. for visualisation)
import io
out_v = io.open('fakenews_vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('fakenews_meta.tsv', 'w', encoding='utf-8')
for num, word in enumerate(word_index):
vec = weights[num+1] # skip 0, it's padding.
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_v.close()
out_m.close()
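# A minimal inference sketch (not part of the original script): classify a single made-up
# headline with the tokenizer and model trained above; the text below is only a placeholder.
sample_text = normalize(["breaking example headline used only as a sanity check"])
sample_seq = tf.keras.preprocessing.sequence.pad_sequences(
    tokenizer.texts_to_sequences(sample_text), padding='post', maxlen=256)
sample_prob = tf.sigmoid(model.predict(sample_seq))[0][0]  # the model outputs logits
print('Predicted probability that the sample is true news:', float(sample_prob))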
|
from rest_framework import serializers
from api.model.ingredient import Ingredient
from api.model.ateIngredient import AteIngredient
from api.serializer.ingredient import IngredientSerializer
from django.contrib.auth.models import User
class AteIngredientSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
ingredient = serializers.PrimaryKeyRelatedField(queryset=Ingredient.objects.all())
value = serializers.FloatField(min_value=0)
unit = serializers.CharField(default='g')
class Meta:
model = AteIngredient
fields = ('user', 'ingredient', 'value', 'unit')
class AteIngredientDetailSerializer(serializers.ModelSerializer):
ingredient = IngredientSerializer()
class Meta:
model = AteIngredient
fields = ('ingredient', 'value', 'unit', 'created')
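# A rough usage sketch (commented out; assumes existing User/Ingredient rows with ids 1 and 2):
# serializer = AteIngredientSerializer(data={'user': 1, 'ingredient': 2, 'value': 100.0, 'unit': 'g'})
# if serializer.is_valid(raise_exception=True):
#     serializer.save()
# detail = AteIngredientDetailSerializer(ate_ingredient_instance).data  # hypothetical instance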
|
class Interval(object):
def __init__(self, bits, low=None, high=None):
assert bits > 0
self.bits = bits
self.max = (2 << (bits - 1)) - 1
self.low = low if low is not None else 0
self.high = high if high is not None else self.max
self.low = self.low & self.max
self.high = self.high & self.max
assert self.high >= self.low
def __str__(self):
return "<Interval%d [%s -> %s]>" % (
self.bits, hex(self.low), hex(self.high)
)
def __repr__(self):
return self.__str__()
@property
def is_top(self):
return self.low == 0 and self.high == self.max
def __add__(self, other):
assert other.bits == self.bits
new_low = self.low + other.low
new_high = self.high + other.high
if new_high > self.max:
new_high = self.max
new_low = 0
return Interval(
self.bits,
new_low,
new_high
)
def __sub__(self, other):
assert other.bits == self.bits
new_low = self.low - other.high
new_high = self.high - other.low
if new_low < 0:
new_high = self.max
new_low = 0
return Interval(
self.bits,
new_low,
new_high
)
def __mul__(self, other):
assert other.bits == self.bits
new_low = self.low * other.low
new_high = self.high * other.high
if new_high > self.max:
new_high = self.max
new_low = 0
return Interval(
self.bits,
new_low,
new_high
)
def __truediv__(self, other):
assert other.bits == self.bits
return Interval(self.bits)
def __mod__(self, other):
assert other.bits == self.bits
new_low = 0
new_high = other.high
return Interval(
self.bits,
new_low,
new_high
)
def __xor__(self, other):
assert other.bits == self.bits
new_low = 0
new_high = max(self.high, other.high)
return Interval(
self.bits,
new_low,
new_high
)
def __and__(self, other):
assert other.bits == self.bits
new_low = 0
new_high = self.high & other.high
return Interval(
self.bits,
new_low,
new_high
)
def __or__(self, other):
assert other.bits == self.bits
new_low = min(self.low, other.low)
new_high = max(self.high, other.high, self.high | other.high)
return Interval(
self.bits,
new_low,
new_high
)
def __lshift__(self, other):
# arithmetic/logical left shift
assert other.bits == self.bits
new_low = self.low << other.low
new_high = self.high << other.high
if new_high > self.max:
new_high = self.max
new_low = 0
return Interval(
self.bits,
new_low,
new_high
)
def __rshift__(self, other):
# arithmetic right shift
assert other.bits == self.bits
new_low = self.low >> other.high
# check sign
if self.high >> (self.bits-1) == 1:
new_high = self.high
else:
new_high = self.high >> other.low
return Interval(
self.bits,
new_low,
new_high
)
def __invert__(self):
return Interval(self.bits)
def __neg__(self):
return Interval(self.bits)
def UDiv(self, other):
assert other.bits == self.bits
if other.low == 0 or other.high == 0:
return Interval(self.bits)
new_low = self.low // other.high
new_high = self.high // other.low
return Interval(
self.bits,
new_low,
new_high
)
def SDiv(self, other):
return self.__truediv__(other)
def URem(self, other):
assert other.bits == self.bits
return Interval(
self.bits,
0,
min(self.high, other.high - 1)
)
def SRem(self, other):
assert other.bits == self.bits
return Interval(self.bits)
def LShL(self, other):
return self.__lshift__(other)
def AShL(self, other):
# arithmetic and logical left shift are identical
return self.__lshift__(other)
def LShR(self, other):
assert other.bits == self.bits
new_low = self.low >> other.high
new_high = self.high >> other.low
return Interval(
self.bits,
new_low,
new_high
)
def AShR(self, other):
return self.__rshift__(other)
def RotateLeft(self, other):
assert self.bits == other.bits
return Interval(self.bits)
def RotateRight(self, other):
assert self.bits == other.bits
return Interval(self.bits)
def Concat(self, other):
new_low = (self.low << other.bits) + other.low
new_high = (self.high << other.bits) + other.high
return Interval(
self.bits + other.bits,
new_low,
new_high
)
def Extract(self, high: int, low: int):
mask = (2 << (high - low)) - 1
new_low = (self.low >> low) & mask
new_high = (((self.high - self.low) >> low) & mask) + new_low
if new_high > mask:
new_high = mask
return Interval(
high - low + 1,
new_low,
new_high
)
def SignExt(self, n: int):
assert n >= 0
new_low = self.low
if self.high >> (self.bits-1) == 1:
new_high = (2 << (self.bits + n - 1)) - 1
else:
new_high = self.high
return Interval(
self.bits + n,
new_low,
new_high
)
def ZeroExt(self, n: int):
assert n >= 0
return Interval(
self.bits + n,
self.low,
self.high
)
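# A minimal, hypothetical usage sketch (not part of the original module) exercising the
# interval arithmetic above on 8-bit values.
if __name__ == "__main__":
    a = Interval(8, 10, 20)      # [0x0a, 0x14] over 8 bits
    b = Interval(8, 1, 5)        # [0x01, 0x05] over 8 bits
    print(a + b)                 # <Interval8 [0xb -> 0x19]>
    print(a * b)                 # <Interval8 [0xa -> 0x64]>
    print(a.Extract(3, 0))       # low nibble of a, as a 4-bit interval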
|
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 9/08/2016
# - (Ref.) Appendix D. Statistical Errors
# in the book Understanding Molecular Simulation
# by Daan Frenkel
# - (Ref.) Chapter2 in annual Reports in Computational Chemistry,
# Vol 5, 2009, doi: 10.1016/S1574-1400(09)00502-7
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='autocorrelation functions for each column (molecule)')
# args
parser.add_argument('-i', '--input', default='acf', nargs='?',
    help='input .npy file (without the .npy extension)')
parser.add_argument('-o', '--output', default='.acf', nargs='?',
    help='suffix of the output file')
parser.add_argument('-b', '--begin', default=-1, nargs='?', type=int,
help='beginning time iframe')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
args.output = args.input + args.output
## import modules
import numpy as np
import math
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
## timer
start_proc, start_prof = hjung.time.init()
## load rdf files
data = np.load(args.input+str('.npy'))
n_frames = data.shape[0]
print("read {} frames".format(n_frames))
if args.begin == -1:
data = data[int(n_frames/2):]
else:
data = data[args.begin:]
n_frames = data.shape[0]
n_species = data.shape[1] # number of independent molecules to average
print("refine as total {} frames and {} species".format(n_frames,n_species))
# autocorrelation function
acf_data = hjung.analyze.autocorr_1d_t(np.transpose(data),'constant') # assume no correlation between start- and end-points
acf_out = np.transpose(acf_data)
acf_out = acf_out[int(n_frames/2):] # remove half due to symmetry
#acf_data = hjung.analyze.autocorr_signal_1d_t(np.transpose(data),'full')
#acf_out = np.transpose(acf_data)
#acf_out2 = acf_out2[int(n_frames/2):] # remove half due to symmetry
#print(acf_out2.shape)
#print(acf_out2)
# save data and avg
np.savetxt(args.output, acf_out,
header='acfs for {} species with {} frames'.format(n_species,n_frames), fmt='%f', comments='# ')
np.save(args.output, acf_out)
acf_out_avg = np.mean(acf_out,axis=1)
acf_out_std = np.std(acf_out,axis=1)
avg_data = np.column_stack((acf_out_avg,acf_out_std))
np.savetxt(args.output+str('.avg'), avg_data,
header='averaged acf (mean, std)', fmt='%f', comments='# ')
np.save(args.output+str('.avg'), avg_data)
## timer
hjung.time.end_print(start_proc, start_prof)
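## Example invocation (hypothetical script name; expects acf.npy in the working directory):
##   python calc_acf.py -i acf -o .acf -b 1000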
|
from .client import Team
|
'''
PROBLEM STATEMENT:
~~~~~~~~~~~~~~~~~~
Design a program to take a word as an input, and then encode it into Pig Latin.
'''
'''
DESIGN:
~~~~~~
if the first letter is a vowel:
    append 'yay' to the end of the word and return that
otherwise:
iterate over all chars that are not vowels, keep track on index
slice where the first vowel is found, rearrange, and put back together
append 'ay' to the end of the word and return that
'''
def toPigLatin(_str: str) -> str:
vowels = ['a', 'e', 'i', 'o', 'u']
firstLetter = _str[0]
if (firstLetter in vowels): # words that start with a vowel
return f"{_str}yay"
else: # words that start with a consonant
index = 0
for char in _str:
if (char in vowels):
break
index += 1
return f"{_str[index:]}{_str[:index]}ay"
print(toPigLatin("yello"))
|
#!/usr/bin/env python3
# See: https://github.com/VirusTrack/COVIDvu/blob/master/LICENSE
# vim: set fileencoding=utf-8:
from covidvu.config import MASTER_DATABASE
from covidvu.config import SITE_DATA
from covidvu.cryostation import Cryostation
import json
import os
# --- constants ---
DEFAULT_OUTPUT_JSON_FILE_NAME = 'bundle-continental-regions.json'
# +++ functions +++
def _applyCountFor(bundle, country, casesType = 'confirmed'):
region = country['info'].get('region', None)
if region not in bundle[casesType]:
bundle[casesType][region] = dict()
for date in country[casesType].keys():
if date not in bundle[casesType][region]:
bundle[casesType][region][date] = float(country[casesType][date])
else:
bundle[casesType][region][date] += float(country[casesType][date])
# *** main ***
def main(database = MASTER_DATABASE, siteData = SITE_DATA, bundleOutputFileName = DEFAULT_OUTPUT_JSON_FILE_NAME):
bundle = {
'confirmed': { },
'deaths': { },
}
casesType = ( 'confirmed', 'deaths', )
requiredAttributes = ('info', )+casesType
cryostation = Cryostation(database)
for element in cryostation.items():
country = element[1]
if not all((r in country.keys() for r in requiredAttributes)):
continue
if not country['info'].get('region', None):
continue
for caseType in casesType:
_applyCountFor(bundle, country, caseType)
cryostation.close()
bundleFileName = os.path.join(siteData, bundleOutputFileName)
with open(bundleFileName, 'w') as outputStream:
json.dump(bundle, outputStream)
return bundle, bundleFileName
if '__main__' == __name__:
main()
|
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.services import EEService
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
class EEStackStatusController(CementBaseController):
class Meta:
label = 'stack_services'
stacked_on = 'stack'
stacked_type = 'embedded'
description = 'Get status of stack'
arguments = [
(['--memcache'],
dict(help='start/stop/restart memcache', action='store_true')),
(['--dovecot'],
dict(help='start/stop/restart dovecot', action='store_true')),
]
@expose(help="Start stack services")
def start(self):
"""Start services"""
services = []
if not (self.app.pargs.nginx or self.app.pargs.php or self.app.pargs.php7
or self.app.pargs.mysql or self.app.pargs.postfix
or self.app.pargs.hhvm or self.app.pargs.memcache
or self.app.pargs.dovecot or self.app.pargs.redis):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
if self.app.pargs.nginx:
if EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self,'nginx-mainline'):
services = services + ['nginx']
else:
Log.info(self, "Nginx is not installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
services = services + ['php5-fpm']
else:
Log.info(self, "PHP5-FPM is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
services = services + ['php5.6-fpm']
else:
Log.info(self, "PHP5.6-FPM is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
if self.app.pargs.php7:
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'):
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
else:
Log.info(self, "Your platform does not support PHP 7")
if self.app.pargs.mysql:
            if ((EEVariables.ee_mysql_host == "localhost") or
                    (EEVariables.ee_mysql_host == "127.0.0.1")):
if (EEAptGet.is_installed(self, 'mysql-server') or
EEAptGet.is_installed(self, 'percona-server-server-5.6') or
EEAptGet.is_installed(self, 'mariadb-server')):
services = services + ['mysql']
else:
Log.info(self, "MySQL is not installed")
else:
Log.warn(self, "Remote MySQL found, "
"Unable to check MySQL service status")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
services = services + ['postfix']
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
services = services + ['hhvm']
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.memcache:
if EEAptGet.is_installed(self, 'memcached'):
services = services + ['memcached']
else:
Log.info(self, "Memcache is not installed")
if self.app.pargs.dovecot:
if EEAptGet.is_installed(self, 'dovecot-core'):
services = services + ['dovecot']
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
services = services + ['redis-server']
else:
Log.info(self, "Redis server is not installed")
for service in services:
Log.debug(self, "Starting service: {0}".format(service))
EEService.start_service(self, service)
@expose(help="Stop stack services")
def stop(self):
"""Stop services"""
services = []
if not (self.app.pargs.nginx or self.app.pargs.php or self.app.pargs.php7
or self.app.pargs.mysql or self.app.pargs.postfix
or self.app.pargs.hhvm or self.app.pargs.memcache
or self.app.pargs.dovecot or self.app.pargs.redis):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
if self.app.pargs.nginx:
if EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self,'nginx-mainline'):
services = services + ['nginx']
else:
Log.info(self, "Nginx is not installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
services = services + ['php5-fpm']
else:
Log.info(self, "PHP5-FPM is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
services = services + ['php5.6-fpm']
else:
Log.info(self, "PHP5.6-FPM is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
if self.app.pargs.php7:
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'):
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
else:
Log.info(self, "Your platform does not support PHP 7")
if self.app.pargs.mysql:
            if ((EEVariables.ee_mysql_host == "localhost") or
                    (EEVariables.ee_mysql_host == "127.0.0.1")):
if (EEAptGet.is_installed(self, 'mysql-server') or
EEAptGet.is_installed(self, 'percona-server-server-5.6') or
EEAptGet.is_installed(self, 'mariadb-server')):
services = services + ['mysql']
else:
Log.info(self, "MySQL is not installed")
else:
Log.warn(self, "Remote MySQL found, "
"Unable to check MySQL service status")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
services = services + ['postfix']
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
services = services + ['hhvm']
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.memcache:
if EEAptGet.is_installed(self, 'memcached'):
services = services + ['memcached']
else:
Log.info(self, "Memcache is not installed")
if self.app.pargs.dovecot:
if EEAptGet.is_installed(self, 'dovecot-core'):
services = services + ['dovecot']
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
services = services + ['redis-server']
else:
Log.info(self, "Redis server is not installed")
for service in services:
Log.debug(self, "Stopping service: {0}".format(service))
EEService.stop_service(self, service)
@expose(help="Restart stack services")
def restart(self):
"""Restart services"""
services = []
if not (self.app.pargs.nginx or self.app.pargs.php or self.app.pargs.php7
or self.app.pargs.mysql or self.app.pargs.postfix
or self.app.pargs.hhvm or self.app.pargs.memcache
or self.app.pargs.dovecot or self.app.pargs.redis):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
if self.app.pargs.nginx:
if EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self,'nginx-mainline'):
services = services + ['nginx']
else:
Log.info(self, "Nginx is not installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
services = services + ['php5-fpm']
else:
Log.info(self, "PHP5-FPM is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
services = services + ['php5.6-fpm']
else:
Log.info(self, "PHP5.6-FPM is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
if self.app.pargs.php7:
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'):
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
else:
Log.info(self, "Your platform does not support PHP 7")
if self.app.pargs.mysql:
            if ((EEVariables.ee_mysql_host == "localhost") or
                    (EEVariables.ee_mysql_host == "127.0.0.1")):
if (EEAptGet.is_installed(self, 'mysql-server') or
EEAptGet.is_installed(self, 'percona-server-server-5.6') or
EEAptGet.is_installed(self, 'mariadb-server')):
services = services + ['mysql']
else:
Log.info(self, "MySQL is not installed")
else:
Log.warn(self, "Remote MySQL found, "
"Unable to check MySQL service status")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
services = services + ['postfix']
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
services = services + ['hhvm']
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.memcache:
if EEAptGet.is_installed(self, 'memcached'):
services = services + ['memcached']
else:
Log.info(self, "Memcache is not installed")
if self.app.pargs.dovecot:
if EEAptGet.is_installed(self, 'dovecot-core'):
services = services + ['dovecot']
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
services = services + ['redis-server']
else:
Log.info(self, "Redis server is not installed")
for service in services:
Log.debug(self, "Restarting service: {0}".format(service))
EEService.restart_service(self, service)
@expose(help="Get stack status")
def status(self):
"""Status of services"""
services = []
if not (self.app.pargs.nginx or self.app.pargs.php or self.app.pargs.php7
or self.app.pargs.mysql or self.app.pargs.postfix
or self.app.pargs.hhvm or self.app.pargs.memcache
or self.app.pargs.dovecot or self.app.pargs.redis):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
self.app.pargs.hhvm = True
if self.app.pargs.nginx:
if EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self,'nginx-mainline'):
services = services + ['nginx']
else:
Log.info(self, "Nginx is not installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
services = services + ['php5-fpm']
else:
Log.info(self, "PHP5-FPM is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
services = services + ['php5.6-fpm']
else:
Log.info(self, "PHP5.6-FPM is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
if self.app.pargs.php7:
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'):
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
else:
Log.info(self, "Your platform does not support PHP 7")
if self.app.pargs.mysql:
            if ((EEVariables.ee_mysql_host == "localhost") or
                    (EEVariables.ee_mysql_host == "127.0.0.1")):
if (EEAptGet.is_installed(self, 'mysql-server') or
EEAptGet.is_installed(self, 'percona-server-server-5.6') or
EEAptGet.is_installed(self, 'mariadb-server')):
services = services + ['mysql']
else:
Log.info(self, "MySQL is not installed")
else:
Log.warn(self, "Remote MySQL found, "
"Unable to check MySQL service status")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
services = services + ['postfix']
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.hhvm:
if EEAptGet.is_installed(self, 'hhvm'):
services = services + ['hhvm']
else:
Log.info(self, "HHVM is not installed")
if self.app.pargs.memcache:
if EEAptGet.is_installed(self, 'memcached'):
services = services + ['memcached']
else:
Log.info(self, "Memcache is not installed")
if self.app.pargs.dovecot:
if EEAptGet.is_installed(self, 'dovecot-core'):
services = services + ['dovecot']
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
services = services + ['redis-server']
else:
Log.info(self, "Redis server is not installed")
for service in services:
if EEService.get_service_status(self, service):
Log.info(self, "{0:10}: {1}".format(service, "Running"))
@expose(help="Reload stack services")
def reload(self):
"""Reload service"""
services = []
if not (self.app.pargs.nginx or self.app.pargs.php or self.app.pargs.php7
or self.app.pargs.mysql or self.app.pargs.postfix
or self.app.pargs.hhvm or self.app.pargs.memcache
or self.app.pargs.dovecot or self.app.pargs.redis):
self.app.pargs.nginx = True
self.app.pargs.php = True
self.app.pargs.mysql = True
self.app.pargs.postfix = True
if self.app.pargs.nginx:
if EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self,'nginx-mainline'):
services = services + ['nginx']
else:
Log.info(self, "Nginx is not installed")
if self.app.pargs.php:
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
if EEAptGet.is_installed(self, 'php5-fpm'):
services = services + ['php5-fpm']
else:
Log.info(self, "PHP5-FPM is not installed")
else:
if EEAptGet.is_installed(self, 'php5.6-fpm'):
services = services + ['php5.6-fpm']
else:
Log.info(self, "PHP5.6-FPM is not installed")
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
if self.app.pargs.php7:
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'):
if EEAptGet.is_installed(self, 'php7.0-fpm'):
services = services + ['php7.0-fpm']
else:
Log.info(self, "PHP7.0-FPM is not installed")
else:
Log.info(self, "Your platform does not support PHP 7")
if self.app.pargs.mysql:
            if ((EEVariables.ee_mysql_host == "localhost") or
                    (EEVariables.ee_mysql_host == "127.0.0.1")):
if (EEAptGet.is_installed(self, 'mysql-server') or
EEAptGet.is_installed(self, 'percona-server-server-5.6') or
EEAptGet.is_installed(self, 'mariadb-server')):
services = services + ['mysql']
else:
Log.info(self, "MySQL is not installed")
else:
Log.warn(self, "Remote MySQL found, "
"Unable to check MySQL service status")
if self.app.pargs.postfix:
if EEAptGet.is_installed(self, 'postfix'):
services = services + ['postfix']
else:
Log.info(self, "Postfix is not installed")
if self.app.pargs.hhvm:
Log.info(self, "HHVM does not support to reload")
if self.app.pargs.memcache:
if EEAptGet.is_installed(self, 'memcached'):
services = services + ['memcached']
else:
Log.info(self, "Memcache is not installed")
if self.app.pargs.dovecot:
if EEAptGet.is_installed(self, 'dovecot-core'):
services = services + ['dovecot']
else:
Log.info(self, "Mail server is not installed")
if self.app.pargs.redis:
if EEAptGet.is_installed(self, 'redis-server'):
services = services + ['redis-server']
else:
Log.info(self, "Redis server is not installed")
for service in services:
Log.debug(self, "Reloading service: {0}".format(service))
EEService.reload_service(self, service)
|
from .Scene import Scene
|
from .encoders import create_encoder_factory, EncoderFactory
from .preprocessing.scalers import create_scaler, Scaler
from .augmentation import create_augmentation, AugmentationPipeline
from .augmentation.base import Augmentation
from .gpu import Device
def check_encoder(value):
""" Checks value and returns EncoderFactory object.
Returns:
d3rlpy.encoders.EncoderFactory: encoder factory object.
"""
if isinstance(value, EncoderFactory):
return value
if isinstance(value, str):
return create_encoder_factory(value)
raise ValueError('This argument must be str or EncoderFactory object.')
def check_scaler(value):
""" Checks value and returns Scaler object.
Returns:
d3rlpy.preprocessing.scalers.Scaler: scaler object.
"""
if isinstance(value, Scaler):
return value
if isinstance(value, str):
return create_scaler(value)
if value is None:
return None
raise ValueError('This argument must be str or Scaler object.')
def check_augmentation(value):
""" Checks value and returns AugmentationPipeline object.
Returns:
d3rlpy.augmentation.AugmentationPipeline: augmentation pipeline object.
"""
if isinstance(value, AugmentationPipeline):
return value
if isinstance(value, list):
augmentations = []
for v in value:
if isinstance(v, str):
v = create_augmentation(v)
elif not isinstance(v, Augmentation):
raise ValueError('str or Augmentation is expected.')
augmentations.append(v)
return AugmentationPipeline(augmentations)
if value is None:
return AugmentationPipeline([])
raise ValueError('This argument must be list or AugmentationPipeline.')
def check_use_gpu(value):
""" Checks value and returns Device object.
Returns:
d3rlpy.gpu.Device: device object.
"""
# isinstance cannot tell difference between bool and int
if type(value) == bool:
if value:
return Device(0)
return None
if type(value) == int:
return Device(value)
if isinstance(value, Device):
return value
if value is None:
return None
raise ValueError('This argument must be bool, int or Device.')
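# Rough behaviour sketch, inferred from the checks above (not from d3rlpy documentation):
#   check_use_gpu(True)  -> Device(0);  check_use_gpu(None) -> None
#   check_encoder('default') / check_scaler('standard') build objects from registered names,
#   assuming those names exist in this d3rlpy version.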
|
###
# Python xz compress/uncompress test.
#
# License - MIT.
###
import os
import lzma
# xz_uncompress - Uncompress xz file.
def xz_uncompress(islines):
# {
with lzma.open(xzip_name, 'rb') as fd_xz:
with open('xz_' + file_name, 'wb') as fd:
if (True == islines):
fd.writelines(fd_xz)
else:
fd.write(fd_xz.read())
# }
# xz_compress - Compress xz file.
def xz_compress(islines):
# {
with lzma.open(xzip_name, 'wb') as fd_xz:
with open(file_name, 'rb') as fd:
if (True == islines):
fd_xz.writelines(fd)
else:
fd_xz.write(fd.read())
# }
# Main function.
def main():
# {
# Example 1: Compress xz file.
xz_compress(False)
# Example 2: Uncompress xz file.
xz_uncompress(True)
return 0
# }
# Program entry.
if '__main__' == __name__:
xzip_name = 'README.md.xz'
file_name = 'README.md'
extract_path = 'et_test'
main()
|
#!/usr/bin/env python
import os
from dotenv import load_dotenv
from flask import Flask, request, abort
from func import menu, user
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.models.events import PostbackEvent
from func import menu, amount
import math
load_dotenv()
app = Flask(__name__)
line_bot_api = LineBotApi(os.getenv("CHANNEL_TOKEN"))
handler = WebhookHandler(os.getenv("CHANNEL_SECRET"))
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
userId = event.source.user_id
profile = line_bot_api.get_profile(userId)
if user.checkUserExist(profile) == "NewUser":
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text="歡迎使用本程式"))
elif(event.message.text == "開啟選單"):
menu.welcomeMenu(event)
# Add food amount step 2
elif(user.checkUserStatus(userId) == "AddFoodAmount"):
user.updateTempData(userId, event.message.text)
user.changeUserStatus(userId, "AddFoodAmountMoney")
message = "請輸入" + event.message.text + "的金額"
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=message)
)
    # Add food amount step 3 (confirm the food amount is correct)
elif(user.checkUserStatus(userId) == "AddFoodAmountMoney"):
menu.confirm(event)
# Add amount
elif(user.checkUserStatus(userId) == "AddAmount"):
user.updateTempData(userId, event.message.text)
user.changeUserStatus(userId, "AddAmountMoney")
message = "請輸入" + event.message.text + "的金額"
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=message)
)
elif(user.checkUserStatus(userId) == "AddAmountMoney"):
menu.confirm(event)
@handler.add(PostbackEvent)
def postback_message(event):
userId = event.source.user_id
# Force Quit (if anything wrong)
if(event.postback.data == "forceQuit"):
user.clearDataToDefault(userId)
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "您已退出操作,請重新開始")
)
# Open Amount Menu
if(event.postback.data == "Amount"):
menu.amountMenu(event)
# Add Food Amount Step 1
elif(event.postback.data == "addFoodAmount"):
user.changeUserStatus(userId, "AddFoodAmount")
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "請輸入食物")
)
# Add Food Amount to database
elif(user.checkUserStatus(userId) == "AddFoodAmountMoney"):
try:
data = event.postback.data
data = data.split()
food = ""
for i in range(0, len(data)-1):
food += data[i]
foodAmount = float(data[-1])
amount.insertFoodData(food, foodAmount)
user.deleteTempData(userId)
user.changeUserStatus(userId, "free")
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "新增成功")
)
except TypeError:
user.clearDataToDefault(userId)
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "輸入格式有誤,請重新操作")
)
# Add Amount Step 1
elif(event.postback.data == "addAmount"):
user.changeUserStatus(userId, "AddAmount")
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "請輸入項目")
)
# Add Amount to database
if(user.checkUserStatus(userId) == "AddAmountMoney"):
data = event.postback.data
data = data.split()
subject = ""
for i in range(0, len(data)-1):
subject += data[i]
subjectAmount = float(data[-1])
amount.insertData(subject, subjectAmount)
user.deleteTempData(userId)
user.changeUserStatus(userId, "free")
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "新增成功")
)
    # Get the total amount, excluding entries that were already checked out
if(event.postback.data == "totalAmount"):
total = amount.getTotalAmount()
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = total)
)
if(event.postback.data == "giveAmount"):
user.changeUserStatus(userId, "giveAmount")
menu.giveAmountConfirm(event)
if(user.checkUserStatus(userId) == "giveAmount"):
total = event.postback.data
total = math.floor(float(total))
amount.giveAmount(float(total))
user.changeUserStatus(userId, "free")
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = "已完成結帳,金額 " + str(total) + " 元")
)
if(event.postback.data == "getHistoryAmount"):
history = amount.getHistory()
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text = history)
)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# __author__ = Lyon
from base.db.mysql import MySQLModel
from peewee import IntegerField, CharField
class FundOptionalRecord(MySQLModel):
"""基金自选记录"""
class Meta:
table_name = 'fund_optional_record'
user_id = IntegerField(verbose_name='用户id', index=True)
code = CharField(max_length=6, verbose_name='基金代码', index=True)
fund_name = CharField(max_length=128, null=True, verbose_name='基金名称')
@classmethod
async def async_create(cls, user_id, code, fund_name, **kwargs):
_obj = await super(FundOptionalRecord, cls).async_create(user_id=user_id, code=code, fund_name=fund_name)
return _obj
async def normal_info(self):
data = await super(FundOptionalRecord, self).normal_info()
data.update({
'code': self.code,
'fund_name': self.fund_name
})
return data
|
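# Start from "1" followed by eighty "8"s and repeatedly apply the first applicable rewrite rule
# (18 -> 2, then 288 -> 3, then 3888 -> 1), one replacement per pass, until none applies; then print.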
res = "1" + "8" * 80
while "18" in res or "288" in res or "3888" in res:
if "18" in res:
res = res.replace("18", "2", 1)
elif "288" in res:
res = res.replace("288", "3", 1)
else:
res = res.replace("3888", "1", 1)
print(res)
|
import cv2
import os
import numpy as np
def convert_format(dir_folder, destination_folder):
    imgs_folder_dir = dir_folder
    list_images = sorted(os.listdir(imgs_folder_dir))
    for image in list_images:
        print(dir_folder, image)
        img = cv2.imread(''.join([dir_folder, image]))
        # pass the interpolation flag as a keyword: the third positional argument of cv2.resize is dst
        img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA)
        cv2.imwrite(''.join([destination_folder, image[:-4], '.png']), img)
def main():
    base_dir = '/home/nearlab/Jorge/data/polyp_sundataset/case100/image/rgb/'
    destination_folder = '/home/nearlab/Jorge/data/polyp_sundataset/case100/image/grayscale/'
    convert_format(base_dir, destination_folder)
if __name__ == '__main__':
    main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
"""
super(Network, self).__init__()
self.seed = torch.manual_seed(seed)
"*** YOUR CODE HERE ***"
feature_size = 64
self.feature_layer = nn.Sequential(
nn.Linear(state_size, feature_size),
nn.ReLU())
value_size = 64
self.value_layer = nn.Sequential(
nn.Linear(feature_size, value_size),
nn.ReLU(),
nn.Linear(value_size, 1))
advantage_size = 64
self.advantage_layer = nn.Sequential(
nn.Linear(feature_size, advantage_size),
nn.ReLU(),
nn.Linear(advantage_size, action_size))
def forward(self, state):
"""Build a network that maps state -> action values."""
x = state
feature = self.feature_layer(x)
        state_value = self.value_layer(feature)        # V(s): scalar value of the state
        advantage = self.advantage_layer(feature)      # A(s, a): per-action advantages
        # dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        q_value = state_value + (advantage - advantage.mean(dim=1, keepdim=True))
return q_value
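# A minimal sketch (not from the original project): instantiate the dueling network and run
# a forward pass on a random state to check the output shape.
if __name__ == "__main__":
    net = Network(state_size=8, action_size=4, seed=0)
    dummy_state = torch.rand(1, 8)     # batch of one 8-dimensional state
    print(net(dummy_state).shape)      # expected: torch.Size([1, 4]), one Q-value per action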
|
from django.shortcuts import render,HttpResponse,redirect
from .forms import RegisterForm,LoginForm,UserProfileUpdateForm,UserPasswordChangeForm
from django.contrib.auth import authenticate, login ,logout,update_session_auth_hash
from django.contrib import messages
def user_register(request):
form = RegisterForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
user = form.save(commit = False)
password = form.cleaned_data.get('password')
user.set_password(password)
user.save()
login(request,user)
messages.success(request,'Ugurla qeydiyyatdan keçdiniz')
return redirect('post:main-page')
return render(request,'user/register.html',{'form':form})
def user_login(request):
form = LoginForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username= username,password= password)
if user is None:
print('Username ve ya password yalnisdir')
return render(request,'user/login.html',{'form':form})
messages.success(request,'Ugurla daxil oldunuz ')
return redirect('post:main-page')
return render(request,'user/login.html',{'form':form})
def user_settings(request):
about = request.user.userprofile.about
avatar = request.user.userprofile.avatar
initial = {'about': about , 'avatar': avatar}
form = UserProfileUpdateForm(initial= initial,instance=request.user ,data=request.POST or None,files=request.FILES or None)
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=True)
about = form.cleaned_data.get('about',None)
avatar = form.cleaned_data.get('avatar',None)
user.userprofile.about = about
user.userprofile.avatar = avatar
user.userprofile.save()
print('Burada mesaj gonderilecek')
return HttpResponse('Burada yonlendirilecek link bilinecek')
else:
print('Melumatlarin dogru olduguna emin olun')
return render(request,'user/settings.html',{'form':form})
def user_password_change(request):
form = UserPasswordChangeForm(user= request.user,data= request.POST or None)
if form.is_valid():
new_password = form.cleaned_data.get('new_password')
request.user.set_password(new_password)
request.user.save()
update_session_auth_hash(request,request.user)
print('Burada sizin mesajiniz')
return HttpResponse('burada kecid linki')
return render(request,'user/password_change.html',{'form':form})
def user_logout(request):
logout(request)
print('burada sizin mesajiniz')
return redirect('post:main-page')
|
"""empty message
Revision ID: 742c6fcc49cc
Revises:
Create Date: 2020-07-22 04:41:25.722811
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '742c6fcc49cc'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=20), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_role_default'), 'role', ['default'], unique=False)
op.create_table('doctor',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('doc_id', sa.String(length=200), nullable=True),
sa.Column('doc_pass', sa.String(length=10), nullable=True),
sa.Column('first_name', sa.String(length=50), nullable=True),
sa.Column('last_name', sa.String(length=50), nullable=True),
sa.Column('qualification', sa.String(length=200), nullable=True),
sa.Column('docs', sa.String(length=500), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('guides',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=True),
sa.Column('done', sa.Boolean(), nullable=True),
sa.Column('info', sa.PickleType(), nullable=True),
sa.Column('time_lapse', sa.String(length=50), nullable=True),
sa.Column('doc_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['doc_id'], ['doctor.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_guides_done'), 'guides', ['done'], unique=False)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first_name', sa.String(length=100), nullable=True),
sa.Column('last_name', sa.String(length=100), nullable=True),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('profile_pic', sa.String(length=200), nullable=True),
sa.Column('tel', sa.String(length=50), nullable=True),
sa.Column('country', sa.String(length=50), nullable=True),
sa.Column('countryVisited', sa.String(length=50), nullable=True),
sa.Column('state', sa.String(length=50), nullable=True),
sa.Column('address', sa.String(length=200), nullable=True),
sa.Column('travel_history', sa.Boolean(), nullable=True),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('user_id', sa.String(length=100), nullable=True),
sa.Column('sign_up_date', sa.DateTime(), nullable=True),
sa.Column('sign_up_method', sa.String(length=100), nullable=True),
sa.Column('med_state', sa.String(length=50), nullable=True),
sa.Column('days_left', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('guide_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['guide_id'], ['guides.id'], ),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('doctor_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['doctor_id'], ['doctor.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('patients',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('guide_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['guide_id'], ['guides.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
op.create_table('symptoms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('cough', sa.Boolean(), nullable=True),
sa.Column('resp', sa.Boolean(), nullable=True),
sa.Column('fever', sa.Boolean(), nullable=True),
sa.Column('fatigue', sa.Boolean(), nullable=True),
sa.Column('other', sa.String(length=100), nullable=True),
sa.Column('date_added', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('specifics',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('cough_degree', sa.String(length=50), nullable=True),
sa.Column('fever_degree', sa.String(length=50), nullable=True),
sa.Column('fatigue_degree', sa.String(length=50), nullable=True),
sa.Column('other_degree', sa.String(length=50), nullable=True),
sa.Column('symptom_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['symptom_id'], ['symptoms.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('specifics')
op.drop_table('symptoms')
op.drop_table('patients')
op.drop_table('comments')
op.drop_table('user')
op.drop_index(op.f('ix_guides_done'), table_name='guides')
op.drop_table('guides')
op.drop_table('doctor')
op.drop_index(op.f('ix_role_default'), table_name='role')
op.drop_table('role')
# ### end Alembic commands ###
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import NonUniformImage
import matplotlib
from matplotlib import cm
from brainbox.core import Bunch
axis_dict = {'x': 0, 'y': 1, 'z': 2}
class DefaultPlot(object):
def __init__(self, plot_type, data):
"""
Base class for organising data into a structure that can be easily used to create plots.
The idea is that the dictionary is independent of plotting method and so can be fed into
matplotlib, pyqtgraph, datoviz (or any plotting method of choice).
:param plot_type: type of plot (just for reference)
:type plot_type: string
        :param data: dict of data containing at least 'x' and 'y'; may additionally contain 'z'
        for 3D plots and 'c' for 2D plots (image or scatter) where a third variable is represented by colour
:type data: dict
"""
self.plot_type = plot_type
self.data = data
self.hlines = []
self.vlines = []
self.set_labels()
def add_lines(self, pos, orientation, lim=None, style='--', width=3, color='k'):
"""
Method to specify position and style of horizontal or vertical reference lines
:param pos: position of line
:param orientation: either 'v' for vertical line or 'h' for horizontal line
:param lim: extent of lines
:param style: line style
:param width: line width
:param color: line colour
:return:
"""
if orientation == 'v':
lim = self._set_default(lim, self.ylim)
self.vlines.append(Bunch({'pos': pos, 'lim': lim, 'style': style, 'width': width,
'color': color}))
if orientation == 'h':
lim = self._set_default(lim, self.xlim)
self.hlines.append(Bunch({'pos': pos, 'lim': lim, 'style': style, 'width': width,
'color': color}))
def set_labels(self, title=None, xlabel=None, ylabel=None, zlabel=None, clabel=None):
"""
Set labels for plot
:param title: title
:param xlabel: x axis label
:param ylabel: y axis label
:param zlabel: z axis label
:param clabel: cbar label
:return:
"""
self.labels = Bunch({'title': title, 'xlabel': xlabel, 'ylabel': ylabel, 'zlabel': zlabel,
'clabel': clabel})
def set_xlim(self, xlim=None):
"""
Set xlim values
        :param xlim: xlim values (min, max) supports tuple, list or np.array of len(2). If not
         specified will compute as min, max of x data
"""
self.xlim = self._set_lim('x', lim=xlim)
def set_ylim(self, ylim=None):
"""
Set ylim values
:param ylim: ylim values (min, max) supports tuple, list or np.array of len(2). If not
specified will compute as min, max of y data
"""
self.ylim = self._set_lim('y', lim=ylim)
def set_zlim(self, zlim=None):
"""
Set zlim values
:param zlim: zlim values (min, max) supports tuple, list or np.array of len(2). If not
specified will compute as min, max of z data
"""
self.zlim = self._set_lim('z', lim=zlim)
def set_clim(self, clim=None):
"""
Set clim values
:param clim: clim values (min, max) supports tuple, list or np.array of len(2). If not
specified will compute as min, max of c data
"""
self.clim = self._set_lim('c', lim=clim)
def _set_lim(self, axis, lim=None):
"""
        General function to set limits to either the specified value if lim is not None or to the
        nanmin, nanmax of the data
:param axis: x, y, z or c
:param lim: lim values (min, max) supports tuple, list or np.array of len(2)
:return:
"""
if lim is not None:
assert(len(lim) == 2)
else:
lim = (np.nanmin(self.data[axis]), np.nanmax(self.data[axis]))
return lim
def _set_default(self, val, default):
"""
General function to set value of attribute. If val is not None, the value of val will be
returned otherwise default value will be returned
:param val: non-default value to set attribute to
:param default: default value of attribute
:return:
"""
if val is None:
return default
else:
return val
def convert2dict(self):
"""
Convert class object to dictionary
:return: dict with variables needed for plotting
"""
return vars(self)
class ImagePlot(DefaultPlot):
def __init__(self, img, x=None, y=None, cmap=None):
"""
Class for organising data that will be used to create 2D image plots
:param img: 2D image data
:param x: x coordinate of each image voxel in x dimension
:param y: y coordinate of each image voxel in y dimension
:param cmap: name of colormap to use
"""
data = Bunch({'x': self._set_default(x, np.arange(img.shape[0])),
'y': self._set_default(y, np.arange(img.shape[1])), 'c': img})
# Make sure dimensions agree
assert data['c'].shape[0] == data['x'].shape[0], 'dimensions must agree'
assert data['c'].shape[1] == data['y'].shape[0], 'dimensions must agree'
# Initialise default plot class with data
super().__init__('image', data)
self.scale = None
self.offset = None
self.cmap = self._set_default(cmap, 'viridis')
self.set_xlim()
self.set_ylim()
self.set_clim()
def set_scale(self, scale=None):
"""
Set the scaling factor to apply to image (mainly for pyqtgraph implementation)
:param scale: scale values (xscale, yscale), supports tuple, list or np.array of len(2).
If not specified will automatically compute from xlims/ylims and shape of data
:return:
"""
# For pyqtgraph implementation
if scale is not None:
assert(len(scale) == 2)
self.scale = self._set_default(scale, (self._get_scale('x'), self._get_scale('y')))
def _get_scale(self, axis):
"""
Calculate scaling factor to apply along axis. Don't use directly, use set_scale() method
:param axis: 'x' or 'y'
:return:
"""
lim = self._set_lim(axis)
scale = (lim[1] - lim[0]) / self.data['c'].shape[axis_dict[axis]]
return scale
def set_offset(self, offset=None):
"""
Set the offset to apply to the image (mainly for pyqtgraph implementation)
:param offset: offset values (xoffset, yoffset), supports tuple, list or np.array of len(2)
If not specified will automatically compute from minimum of xlim and ylim
:return:
"""
# For pyqtgraph implementation
if offset is not None:
assert(len(offset) == 2)
self.offset = self._set_default(offset, (self._get_offset('x'), self._get_offset('y')))
def _get_offset(self, axis):
"""
Calculate offset to apply to axis. Don't use directly, use set_offset() method
:param axis: 'x' or 'y'
:return:
"""
offset = np.nanmin(self.data[axis])
return offset
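# Illustrative usage sketch (not part of the original module): build an ImagePlot from a
# synthetic 2D array and render it with plot_image() defined further below. The array,
# coordinates and labels are invented purely for demonstration.
def _example_image_plot():
    img = np.random.rand(40, 20)
    data = ImagePlot(img, x=np.arange(40), y=np.arange(20), cmap='plasma')
    data.set_labels(title='example image', xlabel='x (a.u.)', ylabel='y (a.u.)', clabel='value')
    data.add_lines(pos=10, orientation='v')
    return plot_image(data)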
class ProbePlot(DefaultPlot):
def __init__(self, img, x, y, cmap=None):
"""
        Class for organising data that will be used to create 2D probe plots. Use the function
        plot_base.arrange_channels2banks to prepare data in the correct format before using this class
:param img: list of image data for each bank of probe
:param x: list of x coordinate for each bank of probe
        :param y: list of y coordinate for each bank of probe
:param cmap: name of cmap
"""
# Make sure we have inputs as lists, can get input from arrange_channels2banks
assert(type(img) == list)
assert(type(x) == list)
assert(type(y) == list)
data = Bunch({'x': x, 'y': y, 'c': img})
super().__init__('probe', data)
self.cmap = self._set_default(cmap, 'viridis')
self.set_xlim()
self.set_ylim()
self.set_clim()
self.set_scale()
self.set_offset()
def set_scale(self, idx=None, scale=None):
if scale is not None:
self.scale[idx] = scale
else:
self.scale = [(self._get_scale(i, 'x'), self._get_scale(i, 'y'))
for i in range(len(self.data['x']))]
def _get_scale(self, idx, axis):
lim = self._set_lim_list(axis, idx)
scale = (lim[1] - lim[0]) / self.data['c'][idx].shape[axis_dict[axis]]
return scale
def _set_lim_list(self, axis, idx, lim=None):
if lim is not None:
assert(len(lim) == 2)
else:
lim = (np.nanmin(self.data[axis][idx]), np.nanmax(self.data[axis][idx]))
return lim
def set_offset(self, idx=None, offset=None):
if offset is not None:
self.offset[idx] = offset
else:
self.offset = [(np.min(self.data['x'][i]), np.min(self.data['y'][i]))
for i in range(len(self.data['x']))]
def _set_lim(self, axis, lim=None):
if lim is not None:
assert (len(lim) == 2)
else:
data = np.concatenate([np.squeeze(np.ravel(d)) for d in self.data[axis]]).ravel()
lim = (np.nanmin(data), np.nanmax(data))
return lim
class ScatterPlot(DefaultPlot):
def __init__(self, x, y, z=None, c=None, cmap=None, plot_type='scatter'):
"""
Class for organising data that will be used to create scatter plots. Can be 2D or 3D (if
z given). Can also represent variable through color by specifying c
:param x: x values for data
:param y: y values for data
:param z: z values for data
:param c: values to use to represent color of scatter points
:param cmap: name of colormap to use if c is given
:param plot_type:
"""
data = Bunch({'x': x, 'y': y, 'z': z, 'c': c})
assert len(data['x']) == len(data['y']), 'dimensions must agree'
if data['z'] is not None:
assert len(data['z']) == len(data['x']), 'dimensions must agree'
if data['c'] is not None:
assert len(data['c']) == len(data['x']), 'dimensions must agree'
super().__init__(plot_type, data)
self._set_init_style()
self.set_xlim()
self.set_ylim()
# If we have 3D data
if data['z'] is not None:
self.set_zlim()
        # If we want colorbar associated with scatter plot
        if data['c'] is not None:
            self.set_clim()
self.cmap = self._set_default(cmap, 'viridis')
def _set_init_style(self):
"""
Initialise defaults
:return:
"""
self.set_color()
self.set_marker_size()
self.set_marker_type('o')
self.set_opacity()
self.set_line_color()
self.set_line_width()
self.set_line_style()
def set_color(self, color=None):
"""
Color of scatter points.
        :param color: string e.g. 'k', single RGB e.g. [0, 0, 0] or np.array of RGB. In the latter case
must give same no. of colours as datapoints i.e. len(np.array(RGB)) == len(data['x'])
:return:
"""
self.color = self._set_default(color, 'b')
def set_marker_size(self, marker_size=None):
"""
Size of each scatter point
:param marker_size: int or np.array of int. In the latter case must give same no. of
marker_size as datapoints i.e len(np.array(marker_size)) == len(data['x'])
:return:
"""
self.marker_size = self._set_default(marker_size, None)
def set_marker_type(self, marker_type=None):
"""
Shape of each scatter point
:param marker_type:
:return:
"""
self.marker_type = self._set_default(marker_type, None)
def set_opacity(self, opacity=None):
"""
Opacity of each scatter point
:param opacity:
:return:
"""
self.opacity = self._set_default(opacity, 1)
def set_line_color(self, line_color=None):
"""
Colour of edge of scatter point
:param line_color: string e.g 'k' or RGB e.g [0,0,0]
:return:
"""
self.line_color = self._set_default(line_color, None)
def set_line_width(self, line_width=None):
"""
Width of line on edge of scatter point
:param line_width: int
:return:
"""
self.line_width = self._set_default(line_width, None)
def set_line_style(self, line_style=None):
"""
Style of line on edge of scatter point
:param line_style:
:return:
"""
self.line_style = self._set_default(line_style, '-')
class LinePlot(ScatterPlot):
def __init__(self, x, y):
"""
Class for organising data that will be used to create line plots.
:param x: x values for data
:param y: y values for data
"""
super().__init__(x, y, plot_type='line')
self._set_init_style()
self.set_xlim()
self.set_ylim()
def _set_init_style(self):
self.set_line_color('k')
self.set_line_width(2)
self.set_line_style()
self.set_marker_size()
self.set_marker_type()
def add_lines(ax, data, **kwargs):
"""
Function to add vertical and horizontal reference lines to matplotlib axis
:param ax: matplotlib axis
:param data: dict of plot data
:param kwargs: matplotlib keywords arguments associated with vlines/hlines
:return:
"""
for vline in data['vlines']:
ax.vlines(vline['pos'], ymin=vline['lim'][0], ymax=vline['lim'][1],
linestyles=vline['style'], linewidth=vline['width'], colors=vline['color'],
**kwargs)
for hline in data['hlines']:
ax.hlines(hline['pos'], xmin=hline['lim'][0], xmax=hline['lim'][1],
linestyles=hline['style'], linewidth=hline['width'], colors=hline['color'],
**kwargs)
return ax
def plot_image(data, ax=None, show_cbar=True, fig_kwargs=dict(), line_kwargs=dict(),
img_kwargs=dict()):
"""
Function to create matplotlib plot from ImagePlot object
:param data: ImagePlot object, either class or dict
:param ax: matplotlib axis to plot on, if None, will create figure
:param show_cbar: whether or not to display colour bar
    :param fig_kwargs: dict of matplotlib keywords associated with plt.subplots, e.g. fig size,
        tight layout etc.
:param line_kwargs: dict of matplotlib keywords associated with ax.hlines/ax.vlines
:param img_kwargs: dict of matplotlib keywords associated with matplotlib.imshow
:return: matplotlib axis and figure handles
"""
if not isinstance(data, dict):
data = data.convert2dict()
if not ax:
fig, ax = plt.subplots(**fig_kwargs)
else:
fig = plt.gcf()
img = ax.imshow(data['data']['c'].T, extent=np.r_[data['xlim'], data['ylim']],
cmap=data['cmap'], vmin=data['clim'][0], vmax=data['clim'][1], origin='lower',
aspect='auto', **img_kwargs)
ax.set_xlim(data['xlim'][0], data['xlim'][1])
ax.set_ylim(data['ylim'][0], data['ylim'][1])
ax.set_xlabel(data['labels']['xlabel'])
ax.set_ylabel(data['labels']['ylabel'])
ax.set_title(data['labels']['title'])
if show_cbar:
cbar = fig.colorbar(img, ax=ax)
cbar.set_label(data['labels']['clabel'])
ax = add_lines(ax, data, **line_kwargs)
plt.show()
return ax, fig
def plot_scatter(data, ax=None, show_cbar=True, fig_kwargs=dict(), line_kwargs=dict(),
scat_kwargs=dict()):
"""
    Function to create matplotlib plot from ScatterPlot object. If data['color'] is given for each
    data point it will override the automatic colours that would be generated from data['data']['c']
:param data: ScatterPlot object, either class or dict
:param ax: matplotlib axis to plot on, if None, will create figure
:param show_cbar: whether or not to display colour bar
    :param fig_kwargs: dict of matplotlib keywords associated with plt.subplots, e.g. fig size,
        tight layout etc.
:param line_kwargs: dict of matplotlib keywords associated with ax.hlines/ax.vlines
:param scat_kwargs: dict of matplotlib keywords associated with matplotlib.scatter
:return: matplotlib axis and figure handles
"""
if not isinstance(data, dict):
data = data.convert2dict()
if not ax:
fig, ax = plt.subplots(**fig_kwargs)
else:
fig = plt.gcf()
# Single color for all points
if data['data']['c'] is None:
scat = ax.scatter(x=data['data']['x'], y=data['data']['y'], c=data['color'],
s=data['marker_size'], marker=data['marker_type'],
edgecolors=data['line_color'], linewidths=data['line_width'],
**scat_kwargs)
else:
# Colour for each point specified
if len(data['color']) == len(data['data']['x']):
if np.max(data['color']) > 1:
data['color'] = data['color'] / 255
scat = ax.scatter(x=data['data']['x'], y=data['data']['y'], c=data['color'],
s=data['marker_size'], marker=data['marker_type'],
edgecolors=data['line_color'], linewidths=data['line_width'],
**scat_kwargs)
if show_cbar:
norm = matplotlib.colors.Normalize(vmin=data['clim'][0], vmax=data['clim'][1],
clip=True)
cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=data['cmap']), ax=ax)
cbar.set_label(data['labels']['clabel'])
# Automatically generate from c data
else:
scat = ax.scatter(x=data['data']['x'], y=data['data']['y'], c=data['data']['c'],
s=data['marker_size'], marker=data['marker_type'], cmap=data['cmap'],
vmin=data['clim'][0], vmax=data['clim'][1],
edgecolors=data['line_color'], linewidths=data['line_width'],
**scat_kwargs)
if show_cbar:
cbar = fig.colorbar(scat, ax=ax)
cbar.set_label(data['labels']['clabel'])
ax = add_lines(ax, data, **line_kwargs)
ax.set_xlim(data['xlim'][0], data['xlim'][1])
ax.set_ylim(data['ylim'][0], data['ylim'][1])
ax.set_xlabel(data['labels']['xlabel'])
ax.set_ylabel(data['labels']['ylabel'])
ax.set_title(data['labels']['title'])
plt.show()
return ax, fig
def plot_probe(data, ax=None, show_cbar=True, make_pretty=True, fig_kwargs=dict(),
line_kwargs=dict()):
"""
Function to create matplotlib plot from ProbePlot object
:param data: ProbePlot object, either class or dict
:param ax: matplotlib axis to plot on, if None, will create figure
:param show_cbar: whether or not to display colour bar
:param make_pretty: get rid of spines on axis
    :param fig_kwargs: dict of matplotlib keywords associated with plt.subplots, e.g. fig size,
        tight layout etc.
:param line_kwargs: dict of matplotlib keywords associated with ax.hlines/ax.vlines
:return: matplotlib axis and figure handles
"""
if not isinstance(data, dict):
data = data.convert2dict()
if not ax:
fig, ax = plt.subplots(figsize=(2, 8), **fig_kwargs)
else:
fig = plt.gcf()
for (x, y, dat) in zip(data['data']['x'], data['data']['y'], data['data']['c']):
im = NonUniformImage(ax, interpolation='nearest', cmap=data['cmap'])
im.set_clim(data['clim'][0], data['clim'][1])
im.set_data(x, y, dat.T)
ax.images.append(im)
ax.set_xlim(data['xlim'][0], data['xlim'][1])
ax.set_ylim(data['ylim'][0], data['ylim'][1])
ax.set_xlabel(data['labels']['xlabel'])
ax.set_ylabel(data['labels']['ylabel'])
ax.set_title(data['labels']['title'])
if make_pretty:
ax.get_xaxis().set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
if show_cbar:
cbar = fig.colorbar(im, orientation="horizontal", pad=0.02, ax=ax)
cbar.set_label(data['labels']['clabel'])
ax = add_lines(ax, data, **line_kwargs)
plt.show()
return ax, fig
def plot_line(data, ax=None, fig_kwargs=dict(), line_kwargs=dict()):
"""
Function to create matplotlib plot from LinePlot object
:param data: LinePlot object either class or dict
:param ax: matplotlib axis to plot on
    :param fig_kwargs: dict of matplotlib keywords associated with plt.subplots, e.g. fig size,
        tight layout etc.
:param line_kwargs: dict of matplotlib keywords associated with ax.hlines/ax.vlines
:return: matplotlib axis and figure handles
"""
if not isinstance(data, dict):
data = data.convert2dict()
if not ax:
fig, ax = plt.subplots(**fig_kwargs)
else:
fig = plt.gcf()
ax.plot(data['data']['x'], data['data']['y'], color=data['line_color'],
linestyle=data['line_style'], linewidth=data['line_width'], marker=data['marker_type'],
markersize=data['marker_size'])
ax = add_lines(ax, data, **line_kwargs)
ax.set_xlim(data['xlim'][0], data['xlim'][1])
ax.set_ylim(data['ylim'][0], data['ylim'][1])
ax.set_xlabel(data['labels']['xlabel'])
ax.set_ylabel(data['labels']['ylabel'])
ax.set_title(data['labels']['title'])
plt.show()
return ax, fig
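# Illustrative usage sketch (not part of the original module): a LinePlot of a synthetic
# sine wave rendered with plot_line() defined above. Values are for demonstration only.
def _example_line_plot():
    t = np.linspace(0, 1, 100)
    data = LinePlot(t, np.sin(2 * np.pi * t))
    data.set_labels(title='example line', xlabel='time (s)', ylabel='amplitude')
    return plot_line(data)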
def scatter_xyc_plot(x, y, c, cmap=None, clim=None, rgb=False):
"""
General function for preparing x y scatter plot with third variable encoded by colour of points
:param x:
:param y:
:param c:
:param cmap:
:param clim:
:param rgb: Whether to compute rgb (set True when preparing pyqtgraph data)
:return:
"""
data = ScatterPlot(x=x, y=y, c=c, cmap=cmap)
data.set_clim(clim=clim)
if rgb:
norm = matplotlib.colors.Normalize(vmin=data.clim[0], vmax=data.clim[1], clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap(cmap))
cluster_color = np.array([mapper.to_rgba(col) for col in c])
data.set_color(color=cluster_color)
return data
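# Illustrative usage sketch (not part of the original module): colour-coded scatter data
# prepared with scatter_xyc_plot() and drawn with plot_scatter() defined above. All values
# are synthetic and purely for demonstration.
def _example_scatter_xyc():
    x = np.random.rand(50)
    y = np.random.rand(50)
    c = np.random.rand(50)
    data = scatter_xyc_plot(x, y, c, cmap='viridis')
    data.set_labels(title='example scatter', xlabel='x', ylabel='y', clabel='c')
    return plot_scatter(data)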
def arrange_channels2banks(data, chn_coords, depth=None, pad=True, x_offset=1):
"""
    Rearranges data on channels so it matches the geometry of the probe, e.g. for Neuropixels 2.0
    rearranges channels into 4 banks with a checkerboard pattern
:param data: data on channels
:param chn_coords: local coordinates of channels on probe
:param depth: depth location of electrode (for example could be relative to bregma). If none
given will stay in probe local coordinates
:param pad: for matplotlib implementation with NonUniformImage we need to surround our data
with nans so that it shows as finite display
:param x_offset: spacing between banks in x direction
:return: list, data, x position and y position for each bank
"""
data_bank = []
x_bank = []
y_bank = []
if depth is None:
depth = chn_coords[:, 1]
for iX, x in enumerate(np.unique(chn_coords[:, 0])):
bnk_idx = np.where(chn_coords[:, 0] == x)[0]
bnk_data = data[bnk_idx, np.newaxis].T
# This is a hack! Although data is 1D we give it two x coords so we can correctly set
# scale and extent (compatible with pyqtgraph and matplotlib.imshow)
        # For matplotlib.image.NonUniformImage the pad=True option must be used
bnk_x = np.array((iX * x_offset, (iX + 1) * x_offset))
bnk_y = depth[bnk_idx]
if pad:
# pad data in y direction
bnk_data = np.insert(bnk_data, 0, np.nan)
bnk_data = np.append(bnk_data, np.nan)
# pad data in x direction
bnk_data = bnk_data[:, np.newaxis].T
bnk_data = np.insert(bnk_data, 0, np.full(bnk_data.shape[1], np.nan), axis=0)
bnk_data = np.append(bnk_data, np.full((1, bnk_data.shape[1]), np.nan), axis=0)
# pad the x values
bnk_x = np.arange(iX * x_offset, (iX + 3) * x_offset, x_offset)
# pad the y values
diff = np.diff(bnk_y)
diff = diff[np.nonzero(diff)]
bnk_y = np.insert(bnk_y, 0, bnk_y[0] - np.abs(diff[0]))
bnk_y = np.append(bnk_y, bnk_y[-1] + np.abs(diff[-1]))
data_bank.append(bnk_data)
x_bank.append(bnk_x)
y_bank.append(bnk_y)
return data_bank, x_bank, y_bank
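# Illustrative usage sketch (not part of the original module): synthetic channel data
# rearranged into banks with arrange_channels2banks() and wrapped in a ProbePlot. The
# channel geometry below is invented for demonstration only.
def _example_probe_plot():
    chn_coords = np.array([[x, y] for y in range(0, 200, 20) for x in (0, 32)])
    chn_data = np.random.rand(chn_coords.shape[0])
    data_bank, x_bank, y_bank = arrange_channels2banks(chn_data, chn_coords, pad=True)
    probe = ProbePlot(data_bank, x=x_bank, y=y_bank)
    probe.set_labels(title='example probe', ylabel='depth (um)', clabel='value')
    # the prepared object can then be rendered with plot_probe(probe)
    return probe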
|
import time
import logging
import yaml
# dynamic, float; set by with_relative_time()
relative_time_origin = None
def read_config(filename):
with open(filename, 'r', encoding='utf-8') as f:
result = yaml.load(f.read(), Loader=yaml.FullLoader)
return result
def with_relative_time(func, *args, **kwargs):
global relative_time_origin
relative_time_origin = time.time()
logging.info("Relative time begins now: {}".format(relative_time_origin))
return func(*args, **kwargs)
def compute_relative_time() -> float:
"""
    Compute the time elapsed since relative_time_origin, in floating-point seconds.
"""
global relative_time_origin
    assert relative_time_origin  # must be called only after with_relative_time() has set the origin time
return time.time() - relative_time_origin
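# Illustrative usage sketch (not part of this module's original code): run a function under
# a fresh relative-time origin and report how long it took. The sleep duration is arbitrary.
def _example_relative_time():
    def work():
        time.sleep(0.1)
        return compute_relative_time()
    elapsed = with_relative_time(work)
    logging.info("work() took %.3f s", elapsed)
    return elapsed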
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import fixtures
import testtools
from testtools import matchers
from openstack_requirements.cmds import generate
class TestFreeze(testtools.TestCase):
def test_freeze_smoke(self):
        # Use an arbitrary python. The installation of virtualenv system-wide
        # is presumed.
versions = ['/usr/bin/python%(v)s' % dict(v=v) for v in
["2.7", "3.4"]]
found = [v for v in versions if os.path.exists(v)][0]
req = self.useFixture(fixtures.TempDir()).path + '/r.txt'
with open(req, 'wt') as output:
output.write('fixtures==1.2.0')
frozen = generate._freeze(req, found)
expected_version = found[-3:]
self.expectThat(frozen, matchers.HasLength(2))
self.expectThat(frozen[0], matchers.Equals(expected_version))
# There are multiple items in the dependency tree of fixtures.
# Since this is a smoke test, just ensure fixtures is there.
self.expectThat(frozen[1], matchers.Contains(('fixtures', '1.2.0')))
class TestParse(testtools.TestCase):
def test_parse(self):
text = "linecache2==1.0.0\nargparse==1.2\n\n# fred\n"
parsed = generate._parse_freeze(text)
self.assertEqual(
[('linecache2', '1.0.0'), ('argparse', '1.2')], parsed)
def test_editable_banned(self):
text = "-e git:..."
self.assertRaises(Exception, generate._parse_freeze, text) # noqa
class TestCombine(testtools.TestCase):
def test_same_items(self):
fixtures = [('fixtures', '1.2.0')]
freeze_27 = ('2.7', fixtures)
freeze_34 = ('3.4', fixtures)
self.assertEqual(
['fixtures===1.2.0\n'],
list(generate._combine_freezes([freeze_27, freeze_34])))
def test_distinct_items(self):
freeze_27 = ('2.7', [('fixtures', '1.2.0')])
freeze_34 = ('3.4', [('fixtures', '1.2.0'), ('enum', '1.5.0')])
self.assertEqual(
["enum===1.5.0;python_version=='3.4'\n", 'fixtures===1.2.0\n'],
list(generate._combine_freezes([freeze_27, freeze_34])))
def test_different_versions(self):
freeze_27 = ('2.7', [('fixtures', '1.2.0')])
freeze_34 = ('3.4', [('fixtures', '1.5.0')])
self.assertEqual(
["fixtures===1.2.0;python_version=='2.7'\n",
"fixtures===1.5.0;python_version=='3.4'\n"],
list(generate._combine_freezes([freeze_27, freeze_34])))
def test_duplicate_pythons(self):
with testtools.ExpectedException(Exception):
list(generate._combine_freezes([('2.7', []), ('2.7', [])]))
def test_blacklist(self):
blacklist = ['Fixtures']
freeze_27 = ('2.7', [('fixtures', '1.2.0')])
freeze_34 = ('3.4', [('fixtures', '1.2.0'), ('enum', '1.5.0')])
self.assertEqual(
["enum===1.5.0;python_version=='3.4'\n"],
list(generate._combine_freezes(
[freeze_27, freeze_34], blacklist=blacklist)))
def test_blacklist_with_safe_name(self):
blacklist = ['flake8_docstrings']
freeze_27 = ('2.7', [('flake8-docstrings', '0.2.1.post1'),
('enum', '1.5.0')])
self.assertEqual(
['enum===1.5.0\n'],
list(generate._combine_freezes(
[freeze_27], blacklist=blacklist)))
|
from django.contrib import admin
from django.urls import path, include
# for viewing the file in the browser
# these lines are required for viewing, together
# with the concatenation of the '+ static()' line
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('app.core.urls')),
path('funcionarios/', include('app.funcionarios.urls')),
path('documentos/', include('app.documentos.urls')),
path('departamentos/', include('app.departamentos.urls')),
path('empresas/', include('app.empresas.urls')),
path('horas-extras/', include('app.registro_hora_extra.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from __future__ import annotations
from typing import Tuple, final
import functools
import numpy as np
import pyccl
from .statistic import Statistic
from ....parameters import ParamsMap, RequiredParameters
# only supported types are listed here; anything else
# will raise a ValueError
SACC_DATA_TYPE_TO_CCL_KIND = {"supernova": "sn"}
Z_FOR_MU_DEFAULTS = dict(min=0, max=2, n=100)
def _z_for_mu(*, min, max, n):
"""Build an array of z to sample the distance modulus
predictions.
"""
return np.linspace(min, max, n)
@functools.lru_cache(maxsize=128)
def _cached_distmod(cosmo, tracers, z):
a = 1.0 / (1 + z)
return pyccl.background.distance_modulus(cosmo, *tracers, np.array(a))
class Supernova(Statistic):
def __init__(self, sacc_tracer):
self.sacc_tracer = sacc_tracer
self.data_vector = None
def read(self, sacc_data):
"""Read the data for this statistic from the SACC file.
Parameters
----------
sacc_data : sacc.Sacc
The data in the sacc format.
"""
data_points = sacc_data.get_data_points(
data_type="supernova_distance_mu", tracers=(self.sacc_tracer,)
)
self.z = np.array([dp.get_tag("z") for dp in data_points])
self.a = 1.0 / (1.0 + self.z)
self.data_vector = np.array([dp.value for dp in data_points])
self.sacc_inds = list(range(0, len(self.data_vector)))
@final
def _update(self, params: ParamsMap):
self.M = params["m"] # CosmoSIS makes everything lowercase
@final
def required_parameters(self) -> RequiredParameters:
return RequiredParameters(["m"])
def compute(
self, cosmo: pyccl.Cosmology, params: ParamsMap
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute a two-point statistic from sources.
Parameters
----------
cosmo : pyccl.Cosmology
A pyccl.Cosmology object.
params : dict
A dictionary mapping parameter names to their current values.
sources : dict
A dictionary mapping sources to their objects. The sources must
already have been rendered by calling `render` on them.
systematics : dict, optional
A dictionary mapping systematic names to their objects. The
default of `None` corresponds to no systematics.
"""
theory_vector = self.M + pyccl.distance_modulus(cosmo, self.a)
assert self.data_vector is not None
return np.array(self.data_vector), np.array(theory_vector)
|
import unittest
import os
import sys
sys.path.insert(0, os.path.abspath("."))
from ease4lmp import __version__
print(__version__)
from io import StringIO
io = StringIO()
sys.stdout = io
from test_bonded_atoms import suite as suite_bonded_atoms
from test_lammps_reader import suite as suite_lammps_reader
from test_lammps_cycle import suite as suite_lammps_cycle
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite_bonded_atoms())
runner.run(suite_lammps_reader())
runner.run(suite_lammps_cycle())
|
import logging
import os
import random
import sys
from time import sleep
import pymysql
import redis
from sqlalchemy import BIGINT, Column, String, UniqueConstraint, create_engine
from sqlalchemy.orm import sessionmaker
from telstar import config as tlconfig
from telstar.com import Message
from telstar.com.sqla import Base, StagedMessageRepository
from telstar.consumer import MultiConsumer
logger = logging.getLogger('telstar')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
pymysql.install_as_MySQLdb()
link = redis.from_url(os.environ["REDIS"])
engine = create_engine(os.environ["DATABASE"])
Session = sessionmaker(bind=engine)
session = Session(autocommit=True)
tlconfig.staging.repository = StagedMessageRepository
tlconfig.staging.repository.setup(session)
class Test(Base):
__tablename__ = "test"
id = Column(BIGINT(), primary_key=True)
number = Column(BIGINT)
group_name = Column(String(length=80))
topic = Column(String(length=80))
__table_args__ = (UniqueConstraint('number', 'group_name', 'topic', name='all_together'),)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "setup":
print("Recreating table in order to start from scratch")
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
def simple(consumer, record: Message, done):
with session.begin():
t = Test(number=int(record.data["value"]), group_name=consumer.group_name, topic=record.stream)
session.add(t)
sleep(random.randrange(int(os.environ.get("SLEEPINESS"))) / 100)
done()
MultiConsumer(link=link,
group_name=os.environ.get("GROUP_NAME"),
consumer_name=os.environ.get("CONSUMER_NAME"),
config={
os.environ["STREAM_NAME"]: simple,
os.environ["STREAM_NAME_TWO"]: simple
}).run()
|
# -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.shortcuts import ( # noqa
render, get_object_or_404, redirect)
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import transaction
from django import forms
from crispy_forms.layout import Submit
from course.models import (
get_user_status, user_status,
Course,
Participation, ParticipationPreapproval,
participation_role, participation_status,
PARTICIPATION_ROLE_CHOICES)
from course.views import get_role_and_participation
from course.utils import course_view, render_course_page
from courseflow.utils import StyledForm
# {{{ enrollment
@login_required
@transaction.atomic
def enroll(request, course_identifier):
if request.method != "POST":
raise SuspiciousOperation("can only enroll using POST request")
course = get_object_or_404(Course, identifier=course_identifier)
role, participation = get_role_and_participation(request, course)
if role != participation_role.unenrolled:
messages.add_message(request, messages.ERROR,
"Already enrolled. Cannot re-renroll.")
return redirect("course.views.course_page", course_identifier)
user = request.user
ustatus = get_user_status(user)
if (course.enrollment_required_email_suffix
and ustatus.status != user_status.active):
messages.add_message(request, messages.ERROR,
"Your email address is not yet confirmed. "
"Confirm your email to continue.")
return redirect("course.views.course_page", course_identifier)
if (course.enrollment_required_email_suffix
and not user.email.endswith(course.enrollment_required_email_suffix)):
messages.add_message(request, messages.ERROR,
"Enrollment not allowed. Please use your '%s' email to "
"enroll." % course.enrollment_required_email_suffix)
return redirect("course.views.course_page", course_identifier)
def enroll(status, role):
participations = Participation.objects.filter(course=course, user=user)
assert participations.count() <= 1
if participations.count() == 0:
participation = Participation()
participation.user = user
participation.course = course
participation.role = role
participation.status = status
participation.save()
else:
(participation,) = participations
participation.status = status
participation.save()
return participation
preapproval = None
if request.user.email:
try:
preapproval = ParticipationPreapproval.objects.get(
course=course, email__iexact=request.user.email)
except ParticipationPreapproval.DoesNotExist:
pass
role = participation_role.student
if preapproval is not None:
role = preapproval.role
if course.enrollment_approval_required and preapproval is None:
enroll(participation_status.requested, role)
from django.template.loader import render_to_string
message = render_to_string("course/enrollment-request-email.txt", {
"user": user,
"course": course,
"admin_uri": request.build_absolute_uri(
reverse("admin:course_participation_changelist"))
})
from django.core.mail import send_mail
send_mail("[%s] New enrollment request" % course_identifier,
message,
settings.ROBOT_EMAIL_FROM,
recipient_list=[course.email])
messages.add_message(request, messages.INFO,
"Enrollment request sent. You will receive notifcation "
"by email once your request has been acted upon.")
else:
enroll(participation_status.active, role)
messages.add_message(request, messages.SUCCESS,
"Successfully enrolled.")
return redirect("course.views.course_page", course_identifier)
# }}}
# {{{ admin actions
def decide_enrollment(approved, modeladmin, request, queryset):
count = 0
for participation in queryset:
if participation.status != participation_status.requested:
continue
if approved:
participation.status = participation_status.active
else:
participation.status = participation_status.denied
participation.save()
course = participation.course
from django.template.loader import render_to_string
message = render_to_string("course/enrollment-decision-email.txt", {
"user": participation.user,
"approved": approved,
"course": course,
"course_uri": request.build_absolute_uri(
reverse("course.views.course_page",
args=(course.identifier,)))
})
from django.core.mail import EmailMessage
msg = EmailMessage("[%s] Your enrollment request" % course.identifier,
message,
course.email,
[participation.user.email])
msg.cc = [course.email]
msg.send()
count += 1
messages.add_message(request, messages.INFO,
"%d requests processed." % count)
def approve_enrollment(modeladmin, request, queryset):
decide_enrollment(True, modeladmin, request, queryset)
approve_enrollment.short_description = "Approve enrollment"
def deny_enrollment(modeladmin, request, queryset):
decide_enrollment(False, modeladmin, request, queryset)
deny_enrollment.short_description = "Deny enrollment"
# }}}
# {{{ preapprovals
class BulkPreapprovalsForm(StyledForm):
role = forms.ChoiceField(
choices=PARTICIPATION_ROLE_CHOICES,
initial=participation_role.student)
emails = forms.CharField(required=True, widget=forms.Textarea,
help_text="Enter fully qualified email addresses, one per line.")
def __init__(self, *args, **kwargs):
super(BulkPreapprovalsForm, self).__init__(*args, **kwargs)
self.helper.add_input(
Submit("submit", "Preapprove",
css_class="col-lg-offset-2"))
@login_required
@transaction.atomic
@course_view
def create_preapprovals(pctx):
if pctx.role != participation_role.instructor:
raise PermissionDenied("only instructors may do that")
request = pctx.request
if request.method == "POST":
form = BulkPreapprovalsForm(request.POST)
if form.is_valid():
created_count = 0
exist_count = 0
role = form.cleaned_data["role"]
for l in form.cleaned_data["emails"].split("\n"):
l = l.strip()
if not l:
continue
try:
preapproval = ParticipationPreapproval.objects.get(
email__iexact=l,
course=pctx.course)
except ParticipationPreapproval.DoesNotExist:
pass
else:
exist_count += 1
continue
preapproval = ParticipationPreapproval()
preapproval.email = l
preapproval.course = pctx.course
preapproval.role = role
preapproval.creator = request.user
preapproval.save()
created_count += 1
messages.add_message(request, messages.INFO,
"%d preapprovals created, %d already existed."
% (created_count, exist_count))
return redirect("course.views.home")
else:
form = BulkPreapprovalsForm()
return render_course_page(pctx, "course/generic-course-form.html", {
"form": form,
"form_description": "Create Participation Preapprovals",
})
# }}}
# vim: foldmethod=marker
|
from openpack.basepack import Part
class SamplePart(Part):
content_type = "text/pmxtest+xml"
rel_type = "http://polimetrix.com/relationships/test"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:27:43 2021
Contains functions to make plots of ScS, SKS and SKKS waveforms prior to inversion
and after corrections for the predicted splitting have been made.
i.e if obs(x,t) = ani*u(x,t) we are plotting obs and u (assuming our model of ani is correct)
@author: ja17375
"""
import obspy
from obspy import UTCDateTime
from obspy.taup import TauPyModel
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import splitwavepy as sw
import pandas as pd
import numpy as np
from correct_waveforms import event_relative_time
MODEL = 'EllipTI'
#DATA_DIR = f'/Users/ja17375/SWSTomo/Inversions/Epac_fast_anom/{MODEL}/wavecorr/corrs'
DATA_DIR = '/Users/ja17375/SWSTomo/data'
FIG_DIR = '/Users/ja17375/SWSTomo/Figures/Waveforms'
def read_path_as_pair(fstem, full_path=False):
'''
Reads the 3-component waveform (SAC) data
'''
if not full_path:
st = obspy.read(f'{DATA_DIR}/{fstem}.BH?')
else:
st = obspy.read(f'{fstem}.BH?')
north = st[1].data
east = st[0].data
metadata = st[0].stats
pair = sw.Pair(north, east, delta=metadata.delta)
return pair, metadata
def add_waveforms(pair, ert, ax):
'''function to draw the raw input waveforms'''
time = pair.t() + ert
r = pair.data()[0]
t = pair.data()[1]
ax.plot(time, r, "-", label='BHN')
ax.plot(time, t, "-", label='BHE')
def set_yrange(ydata, axs):
ymin = rounddown(ydata.min())
ymax = roundup(ydata.max())
yrange = [ymin - abs(ymin)*0.1,
ymax + abs(ymax)*0.1]
for ax in axs:
ax.set_ylim(yrange)
return yrange
def roundup(x):
return np.ceil(x /100) * 100
def rounddown(x):
return np.floor(x /100) * 100
def predict_tt(evdp, dist, phase='ScS'):
model = TauPyModel()
# uses IASP91 by default
arrivals = model.get_travel_times(source_depth_in_km=evdp,
distance_in_degree=dist,
phase_list=[phase])
tt = arrivals[0].time
return tt
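# Illustrative usage sketch (not part of the original module): predicted ScS travel time for
# an arbitrary 500 km deep event observed at 60 degrees epicentral distance (obspy's default
# IASP91 model). The depth and distance are example values only.
def _example_predict_tt():
    return predict_tt(evdp=500, dist=60, phase='ScS')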
def plot_individual_input_waveforms(paths):
for i, path in paths.iterrows():
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
filename = f'{path.STAT}_{path.DATE}_*_{path.PHASE}'
#filename = f'{path.STAT}_{path.DATE}_*corr.00000002'
pair, metadata = read_path_as_pair(filename)
station = metadata.station
ert, evt = event_relative_time(metadata)
ax.plot(pair.t()+ert, pair.x, label= 'BHN')
ax.plot(pair.t()+ert, pair.y, label= 'BHE')
ax.axvline(path.WBEG, linewidth=1, color='k')
ax.axvline(path.WEND, linewidth=1, color='k')
datestring = evt.strftime('%Y-%m-%d %H:%M:%S')
ax.set_title(f'Event {datestring}. Station {path.STAT}. Phase {path.PHASE}')
tt = predict_tt(path.EVDP, path.DIST, path.PHASE)
ax.set_xlim([path.WBEG - 20, path.WEND + 20])
ax.legend(framealpha=0.75, loc=1)
ax.set_xlabel('Time (relative to event time) (s)')
plt.tight_layout(w_pad=1.25)
fig.savefig(f'{FIG_DIR}/{station}_input_waveform.png')
def plot_all_particle_motions(paths, bw=True):
corr_stem = '/Users/ja17375/SWSTomo/Inversions/Epac_fast_anom/'
models = ['Input','EllipTI', 'pv_100_001', 'ppv_001_100',
'ppv_010_100']
fig, axs = plt.subplots(nrows=11, ncols=5, figsize=(12, 20))
for i, path in paths.iterrows():
ax_row = axs[i, :]
for i, model in enumerate(models):
if model == 'Input':
ddir = '/Users/ja17375/SWSTomo/data'
filename = f'{ddir}/{path.STAT}_{path.DATE}_*_{path.PHASE}'
else:
ddir = f'{corr_stem}/{model}/wavecorr/corrs'
filename = f'{ddir}/{path.STAT}_{path.DATE}_*corr.00000002'
pair, metadata = read_path_as_pair(filename, full_path=True)
ert = event_relative_time(metadata)[0]
pair.set_window(path.WBEG - ert, path.WEND - ert)
ax = ax_row[i]
if bw:
x, y = pair.chop().x, pair.chop().y
# x is North and y is East Component
lim = np.abs(pair.data()).max() * 1.1
ax.plot(y, x, 'k')
ax.set_aspect('equal')
ax.set_xlim([-lim, lim])
ax.set_ylim([-lim, lim])
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ext = '_BW'
else:
pair._ppm(ax)
ext = ''
ax.set_xlabel(None)
ax.set_ylabel(None)
plt.tight_layout(pad=1.05)
fig.savefig(f'{FIG_DIR}/ParticleMotions/All_particle_motions{ext}.png',dpi=500)
if __name__ == '__main__':
#~/SWSTomo/Inversions/HQ_phases_NoIRON_fa_anom.sdb
paths = pd.read_csv('/Users/ja17375/SWSTomo/Inversions/HQ_phases_on_fast_anom.sdb', delim_whitespace=True)
plot_all_particle_motions(paths)
# plot_individual_input_waveforms(paths)
#
plt.show()
|
import numpy as np
import pandas as pd
import argparse
import glob
import os
import time
import re
from multiprocessing import Pool
'''
****************************************************************
GLOBAL VARIABLES
****************************************************************
'''
MAX_ENTITY_LENGTH = 20
MAX_ENTITY_WORD_LENGTH = 8
NUM_FEATURES = 20
globalVerbSet = set()
with open('../data/verbs.txt', 'r') as f:
for line in f:
globalVerbSet.add(line.strip())
instituteKeywords = re.compile(r'\b(Inc|Incorporation|Corp|Corporation|Institute|'
                               r'University|School|College|Department|Org|Organization|Times|Committee|Foundation|'
                               r'Party|Agency|Council|News)\b', re.I)
badKeywords = re.compile(r'\b(the|an|as|be|am|is|are|was|were|has|have|had|'
                         r'at|from|to|in|under|before|after|near|far|away|'
                         r'that|them|there|their|they|his|her|hers|him|it|you|your|yours|'
                         r'ceo|chairman|founder|head|director|'
                         r'email)\b', re.I)
#allow . - _ & : ' ` and " inside entity words. As these are there while marking up
badpunc = re.compile(r'\~|\!|\@|\#|\$|\%|\^|\*|\(|\)|\+|\=|\{|\}|\[|\]|\;|\<|\>|\,|\?|\/|\\')
endSentence = re.compile(r'(\d\s*\.\s*\D)|([a-z]\s*\.)')
domain = re.compile(r'\.\s*(com|org)\b')
globalCount = 0
missedTuples = []
'''
****************************************************************
PROGRAM FUNCTION SCRIPTS
****************************************************************
'''
'''
This function accepts a list of strings, where each
string represents a single line in a text file (which has
been pre-processed with at most 1 word on each
line).
@:param fileContents List with every line of the file contents.
@:param fileName Name of the file the lines were read from (stored with each tuple).
@:return A pandas dataframe of candidate entity tuples with columns
rawString, file, start, end, string, wordCount and label.
'''
def generateStringTuples(fileContents, fileName):
global globalCount
# Create initial pandas dataframe for data objects.
    # rawString: as read from the file after removing entity markers
# string: after stripping punctuations from inside rawString
# wordCount: number of words in 'string' field
# start, end: index in file
# class: class label if marked entity
#tupleDF = pd.DataFrame(columns=['rawString', 'file', 'start', 'end', 'string', 'wordCount' 'class'])
# Create native python list for appending to, which is faster than pandas DF append or concat.
tupleList = []
reg = re.compile(r'[a-zA-Z0-9_\’\']+')# use to strip inner punctuations, except _ and \’
tupleColumns=['rawString', 'file', 'start', 'end', 'string', 'wordCount', 'label']
global missedTuples
for entityLength in range(1, MAX_ENTITY_LENGTH):
for i in range(len(fileContents)-entityLength):#reversed order here to prevent i+entityLength overflow
# For each possible entityLength, generate string from each index.
# Strip punctuations in order to get wordCount
# make tuples only from those whose word count is <= MAX_ENTITY_WORD_LENGTH, >=0 and unique
try:
tuple = ['', fileName, i, i+entityLength, '', 0, '-']
entityList = list(map(lambda item: str(item).strip(), fileContents[i:i+entityLength]))
# Set class to positive if '<[>' in first list word, and '<]>' in last word in list.
if '<[>' in entityList[0].strip() and '<]>' in entityList[-1].strip():
# If '<[>' and '<]>' appear in any other places internally in the string, then the
# string isn't a single entity, and is actually two entities that have been grouped
# together. Ex '<[>Project Veritas<]> shows how the <[>Clinton campaign<]>'.
# Count the number of times left and right tags occur in the string.
lCount = 0#sum(map(lambda item: 1 if '<[>' in item else 0, entityList))
rCount = 0#sum(map(lambda item: 1 if '<]>' in item else 0, entityList))
for cStr in entityList:
if '<[>' in cStr:
lCount += 1
if '<]>' in cStr:
rCount += 1
if lCount + rCount == 2:
tuple[-1] = '+'
globalCount += 1
else:
tuple[-1] = '-'
# Remove any entity tags from the string.
entityList = list(map(lambda item: item.replace('<[>', ''), entityList))
entityList = list(map(lambda item: item.replace('<]>', ''), entityList))
# Update the rest of the tuple information.
tuple[0] = ' '.join(entityList).strip()#rawString
#groups of only continuous alpha numeric characters. Not including '.' as a separate group.
words = re.findall(reg, tuple[0])
tuple[4] = ' '.join(words).strip()# string after stripping inner punctuations
tuple[5] = len(words)# wordCount
#################################
# PRE-PROCESSING RULES
#################################
#if ',' in tuple[0].strip().split()[0] or ',' in tuple[0].strip().split()[-1]:
# continue
# #if ('.' in tuple[0].strip().split()[0] or '.' in tuple[0].strip().split()[-1]) and len(entityList):
# # continue
# if ('-' in tuple[0].strip()):
# continue
#if ('(' in tuple[0].strip() or ')' in tuple[0].strip()):
# continue
# if 'as' in tuple[0].lower() or 'a' in tuple[0].lower() or 'an' in tuple[0].lower():
# continue
failed = False# use this to remove negative entries
#empty or too long remaining string
failed = failed or tuple[5]==0 or tuple[5]>MAX_ENTITY_WORD_LENGTH
#begins with a .
failed = failed or tuple[0][0]=='.'
#full tuple contains any unwanted punctuations
failed = failed or len(re.findall(badpunc, tuple[0]))>0
                #Want at least 2 English chars. Removes number-only cases
failed = failed or len(re.findall(r'[a-zA-Z]', tuple[4]))<2
#Looks like end of a sentence, except when a domain name
failed = failed or len(re.findall(endSentence, tuple[0])) - len(re.findall(domain, tuple[0]))>0
#contains a bad keyword
failed = failed or len(re.findall(badKeywords, tuple[4]))
if failed:
if tuple[-1] == '+': missedTuples.append(tuple)
continue
tupleList.append(tuple)
except IndexError:
continue
return pd.DataFrame(tupleList, columns=tupleColumns)
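# Illustrative usage sketch (not part of the original program): run the tuple generator on a
# tiny in-memory "file" where each list entry is one token line and an entity is marked with
# <[> ... <]>. The tokens and file name are invented for demonstration only.
def _example_generate_tuples():
    fake_lines = ['The', '<[>White', 'House<]>', 'said', 'on', 'Monday', '.']
    return generateStringTuples(fake_lines, 'example.txt')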
def F0(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'the' or fileContents[tuple.start - 2].strip().lower() == 'the':
return 1
else:
return 0
except IndexError:
return 0
def F1(tuple, fileContents):
return sum(1 for char in tuple.string if char.isupper())
def F2(tuple, fileContents):
try:
if fileContents[tuple.end].strip().lower() in globalVerbSet:
return 1
else:
return 0
except IndexError:
return 0
def F3(tuple, fileContents):
return len(tuple.string.strip())
def F4(tuple, fileContents):
return tuple.wordCount#len(tuple.string.strip().split())
def F5(tuple, fileContents):
try:
return sum(1 for char in fileContents[tuple.start - 1] if char.isupper())
except:
return -1
def F6(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'on':
return 1
else:
return 0
except IndexError:
return 0
def F7(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'called':
return 1
else:
return 0
except IndexError:
return 0
def F8(tuple, fileContents):
try:
if fileContents[tuple.end].strip().lower() == 'they':
return 1
else:
return 0
except IndexError:
return 0
def F9(tuple, fileContents):
try:
if "." in tuple.rawString.split()[1:-1] or "!" in tuple.rawString.split()[1:-1] or "?" in tuple.rawString.split()[1:-1]:
return 1
else:
return 0
except IndexError:
return 0
def F10(tuple, fileContents):
return tuple.rawString.count('.')
def F11(tuple, fileContents):
if ',' in tuple.rawString:
return 1
else:
return 0
def F12(tuple, fileContents):
if ',' in tuple.rawString.strip().split()[0] or ',' in tuple.rawString.strip().split()[-1]:
return 1
else:
return 0
def F13(tuple, fileContents):
if '.' in tuple.rawString.strip().split()[0] or '.' in tuple.rawString.strip().split()[-1]:
return 1
else:
return 0
def F14(tuple, fileContents):
if 'as' in tuple.rawString.lower() or 'a' in tuple.rawString.lower() or 'an' in tuple.rawString.lower():
return 1
else:
return 0
def F15(tuple, fileContents):
count = 0
for word in tuple.rawString.strip().split():
if word[0].isupper() and word[1:] == word[1:].lower():
count += 1
return count / len(tuple.rawString.strip().split())
def F16(tuple, fileContents):
try:
if fileContents[tuple.end][0].isupper() and fileContents[tuple.end][1:] == fileContents[tuple.end][1:].lower():
return 1
else:
return 0
except:
return 0
def F17(tuple, fileContents):
return 1 if len(re.findall(instituteKeywords, tuple.string))>0 else 0#case ignoring search criteria
def F18(tuple, fileContents):
try:
return sum(1 for char in tuple.string if char.isupper())*1.0/tuple.wordCount
except:
return -1
def F19(tuple, fileContents):
if ":" in tuple.rawString or "-" in tuple.rawString or '"' in tuple.rawString or "&" in tuple.rawString:
return 1
else:
return 0
'''
Feature list:
F0: "[The]" occurs 1 or two lines before string.
F1: Number of capitol Letters.
F2: Verb occurs 1 or two lines after the string.
F3: Total character length
F4: Total number of words
F5: Number of capitol letters before the string.
F5: Number of capitol letters in line after this string.
F6: "on" comes before
F7: "called" comes before # shouldn't the verb have taken care of it?
F8: "they" comes after
F9: .?! comes in the middle of and entry# should no longer be true ever
F10: Number of "."s
F11: "," is in the raw string "NOTE: This feature reliably improves precision!", #should no longer be True ever
F12: "," is in the first or last raw string position "NOTE: This feature reliably improves precision!", #should no lenger be True ever
F13: "." is in the first or last raw string position.
F14: "as", "a", "an" is in the raw string., # Invalid as discussed, to be removed
F15: The faction of the number of words where only the first character is capitalized to all words.
F16: The rawString has a Single capitalized word after it.
F17: Contains a keyword
F18: fraction of capital letters to wordCount
F19: Contains bad punctuation in raw string.
Each "tuple" object is a Pandas series with first entry tuple[0] the index, and
all following entries the entries of each row from the string tuples dataframe.
'''
def generateFeaturesFromFile(fileContents, fileName):
tuplesDF = generateStringTuples(fileContents, fileName)
allFeaturesList = []
# Call each feature generation function on each dataframe tuple.
for i in range(0, NUM_FEATURES):
featureList = []
for tuple in tuplesDF.itertuples():
featureList.append(eval('F' + str(i) + '(tuple, fileContents)'))
allFeaturesList.append(featureList)
allFeaturesList.append(tuplesDF['label'].tolist())
# TODO: write to a csv file the entire matrix of examples and features. Randomize. Remove some to ensure almost even split b/w + and -
return pd.DataFrame(np.array(allFeaturesList).T, columns=['F' + str(i) for i in range(NUM_FEATURES)] + ['label']), tuplesDF
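# Illustrative usage sketch (not part of the original program): featurize the same kind of
# tiny in-memory "file" end to end. The result has one row per surviving candidate tuple with
# columns F0..F19 plus the label. Tokens are invented for demonstration only.
def _example_generate_features():
    fake_lines = ['The', '<[>White', 'House<]>', 'said', 'on', 'Monday', '.']
    features_df, tuples_df = generateFeaturesFromFile(fake_lines, 'example.txt')
    return features_df, tuples_df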
def updateFeaturesFromFile(fileContents, fileName, functionName):
tuplesDF = generateStringTuples(fileContents, fileName)
featureList = []
for tuple in tuplesDF.itertuples():
featureList.append(eval(functionName + '(tuple, fileContents)'))
return featureList
'''
****************************************************************
PROGRAM RUNNING AND MANAGEMENT SCRIPTS
****************************************************************
'''
'''
For each file in the directory provided to the program, generate all of the
possible feature sets.
'''
def main(args):
if args.Mode == "C":
# Get sorted file list names from the given directory.
fileList = sorted(filter(lambda item: '.txt' in str(item), os.listdir(args.FileFolder)), key=lambda item: int(item.split('_')[0]))
startTime = time.time()
global missedTuples
missedTuples = []
global globalCount
globalCount = 0
fullDF = pd.DataFrame(columns=['F' + str(i) for i in range(NUM_FEATURES)] + ['label'])
tuplesDF = pd.DataFrame(columns=['rawString', 'file', 'start', 'end', 'string', 'wordCount', 'label'])
# For each file, parse into tuples, then parse into features, and create a full pandas data frame object.
print('Performing featurization...')
for file in fileList:
if '.txt' in file:
with open(args.FileFolder + file, "r", encoding="ISO-8859-1") as f:
print(file)
fileDF, fileTuplesDF = generateFeaturesFromFile(f.readlines(), file)
fullDF = pd.concat([fullDF, fileDF])
tuplesDF = pd.concat([tuplesDF, fileTuplesDF])
endTime = time.time()
print(fullDF.shape)
print('Done!')
print("Total time to run: %s seconds." %str(endTime-startTime))
# Save the entire pandas data frame object of features and classes.
print('Saving the full dataframe...')
fullDF.to_csv('../data/featurized_instances.csv')
# Update tuples index to full data set.
tuplesDF.index = pd.Series(list(range(0, fullDF.shape[0])))
tuplesDF.to_csv('../data/tuples_instances.csv')
print('Done!')
print(globalCount)
if len(missedTuples)>0:
print("Missed", len(missedTuples), "items overall")
elif args.Mode == "U":
fullDF = pd.read_csv('../data/featurized_instances.csv', index_col=0)
tuplesDF = pd.read_csv('../data/tuples_instances.csv', index_col=0)
fileList = sorted(filter(lambda item: '.txt' in str(item), os.listdir(args.FileFolder)), key=lambda item: int(item.split('_')[0]))
# For each file, parse into tuples, then parse into features, and create a full pandas data frame object.
print('Performing featurization...')
startTime = time.time()
for functionName in args.UpdateListString.strip().split():
print(functionName)
featureList = []
for file in fileList:
if '.txt' in file:
print(file)
with open(args.FileFolder + file, "r", encoding="ISO-8859-1") as f:
newList = updateFeaturesFromFile(f.readlines(), file, functionName)
featureList.extend(newList)
# All features for current function have been generated, so update the full data frame.
fullDF.loc[:, functionName] = pd.Series(featureList, index=fullDF.index)
endTime = time.time()
print('Done!')
print("Total time to run: %s seconds." % str(endTime - startTime))
columnsList = list(fullDF.columns)
columnsList.remove('label')
fullDF = fullDF[columnsList + ['label']]
# Save the entire pandas data frame object of features and classes.
print('Saving the full dataframe...')
fullDF.to_csv('../data/featurized_instances.csv')
tuplesDF.to_csv('../data/tuples_instances.csv')
if __name__ == '__main__':
#Parse command line arguments
parser = argparse.ArgumentParser(description="""Fake news feature generator. Generates features from files
    whose words have been split to multiple lines. It also handles files where entities have been pre-marked.""")
parser.add_argument('FileFolder', metavar='f', type=str)
parser.add_argument('Mode', metavar='m', type=str, help="""U is for update, and C is for create""")
parser.add_argument('UpdateListString', metavar='--l', type=str, default="", help="""Use a string 'F0 F12 F13' of features, or '' empty string if no features. """)
args = parser.parse_args()
main(args)
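# Illustrative invocations (script name and paths are hypothetical, not taken from the original project):
#   Create the full feature set from a folder of pre-split text files:
#       python featurize.py ../data/split_articles/ C ''
#   Re-generate only features F0 and F12 against the existing featurized_instances.csv:
#       python featurize.py ../data/split_articles/ U 'F0 F12'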
|
import h2o
import os, sys
sys.path.insert(1, os.path.join("..", ".."))
from h2o.tree import H2OTree
from h2o.estimators import H2OXGBoostEstimator
from tests import pyunit_utils
# PUBDEV-7267
def test_terminal_xgboost_nodes():
df = h2o.import_file(pyunit_utils.locate("smalldata/demos/bank-additional-full.csv"))
xgboost = H2OXGBoostEstimator(max_depth=1, ntrees=1)
model = xgboost.train(y="y", training_frame=df)
tree = H2OTree(xgboost, 0)
assert len(tree.node_ids) == 3
# tree.descriptions is deprecated
# Depth is 1 - last two nodes should be described as terminal
assert "terminal node" in tree.descriptions[1]
assert "terminal node" in tree.descriptions[2]
# Prediction is part of the description for terminal nodes
assert "Prediction: " in tree.descriptions[1]
assert "Prediction: " in tree.descriptions[2]
if __name__ == "__main__":
pyunit_utils.standalone_test(test_terminal_xgboost_nodes)
else:
test_terminal_xgboost_nodes()
|
import torch
import torch.nn as nn
from typing import Callable, Union, Tuple
class Conv2dPolicy(nn.Module):
"""
    A standard convolutional policy
"""
def __init__(self,
channel_sizes: Tuple[int, ...],
kernel_sizes: Tuple[Union[int, Tuple[int, int]], ...],
strides: Tuple[Union[int, Tuple[int, int]], ...],
paddings: Tuple[Union[int, Tuple[int, int]], ...],
activation_fn: nn.Module):
"""
Creates a network of layers of the given sizes with the activation
function for each layer. The output layer has no activation function.
Args:
            channel_sizes (Tuple[int, ...]): The size of each channel from
                input to output.
            kernel_sizes (Tuple[Union[int, Tuple[int, int]], ...]): The size of
                each convolution kernel.
            strides (Tuple[Union[int, Tuple[int, int]], ...]): The size of each
                stride.
            paddings (Tuple[Union[int, Tuple[int, int]], ...]): The size of each
                padding.
activation_fn (nn.Module): The activation function in
between each layer.
"""
super().__init__()
assert (
len(channel_sizes) - 1 == len(kernel_sizes)
== len(strides) == len(paddings)
)
layers = []
for i in range(len(kernel_sizes) - 1):
block = self._conv2d_block(
channel_sizes[i], channel_sizes[i + 1], kernel_sizes[i],
strides[i], paddings[i], activation_fn
)
layers.append(block)
last_layer = (
nn.Conv2d(
channel_sizes[-2], channel_sizes[-1], kernel_sizes[-1],
strides[-1], paddings[-1]
)
if len(kernel_sizes) > 0 else nn.Identity()
)
self.conv = nn.Sequential(*layers, last_layer)
def _conv2d_block(self, inp_channels, out_channels, kernel_size, stride,
padding, activation_fn):
"""
        Creates a convolutional block consisting of a Conv2d layer and the activation
function.
Args:
inp_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
kernel_size (Union[int, Tuple[int, int]]): The size of the
convolution kernel.
stride (Union[int, Tuple[int, int]]): The size of the stride.
padding (Union[int, Tuple[int, int]]): The size of the padding.
activation_fn (nn.Module): The activation function in
between each layer.
"""
return nn.Sequential(
nn.Conv2d(
inp_channels, out_channels, kernel_size, stride, padding
),
activation_fn()
)
def forward(self, inp):
"""
Returns the policy output for the input
"""
return self.conv(inp)
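# Illustrative usage sketch (not part of the original module): builds a small
# two-block policy and runs a forward pass on a random image-shaped tensor.
# All sizes below are made-up example values.
if __name__ == "__main__":
    policy = Conv2dPolicy(
        channel_sizes=(3, 16, 32),  # RGB input -> 16 -> 32 feature maps
        kernel_sizes=(3, 3),
        strides=(2, 2),
        paddings=(1, 1),
        activation_fn=nn.ReLU,
    )
    dummy = torch.randn(1, 3, 64, 64)
    out = policy(dummy)
    # Two stride-2 convolutions downsample the 64x64 input to 16x16.
    print(out.shape)  # torch.Size([1, 32, 16, 16])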
|
__package__ = "blackhat.bin"
from ..helpers import Result
from ..lib.input import ArgParser
from ..lib.output import output
from ..lib.unistd import get_sessions, get_user
import datetime
__COMMAND__ = "who"
__DESCRIPTION__ = "show who is logged on"
__DESCRIPTION_LONG__ = "Print information about users who are currently logged in."
__VERSION__ = "1.2"
from ..lib.unistd import read
def parse_args(args=[], doc=False):
"""
Handle parsing of arguments and flags. Generates docs using help from `ArgParser`
Args:
args (list): argv passed to the binary
doc (bool): If the function should generate and return manpage
Returns:
Processed args and a copy of the `ArgParser` object if not `doc` else a `string` containing the generated manpage
"""
parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
parser.add_argument("-b", "--boot", action="store_true", help="time of last system boot")
parser.add_argument("-H", "--heading", action="store_true", help="print line of column headings")
parser.add_argument("-q", "--count", action="store_true", help="all login names and number of users logged on")
parser.add_argument("--version", action="store_true", help=f"print program version")
args = parser.parse_args(args)
arg_helps_with_dups = parser._actions
arg_helps = []
    for x in arg_helps_with_dups:
        if x not in arg_helps:
            arg_helps.append(x)
NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"
for item in arg_helps:
# Its a positional argument
if len(item.option_strings) == 0:
# If the argument is optional:
if item.nargs == "?":
SYNOPSIS += f"[{item.dest.upper()}] "
elif item.nargs == "+":
SYNOPSIS += f"[{item.dest.upper()}]... "
else:
SYNOPSIS += f"{item.dest.upper()} "
else:
# Boolean flag
if item.nargs == 0:
if len(item.option_strings) == 1:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
elif item.nargs == "+":
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
else:
DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"
if doc:
return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
else:
return args, parser
def main(args: list, pipe: bool) -> Result:
args, parser = parse_args(args)
if parser.error_message:
if not args.version:
return output(f"{__COMMAND__}: {parser.error_message}", pipe, success=False)
    # If we specified -h/--help, args will be empty, so exit gracefully
if not args:
return output("", pipe)
else:
if args.version:
return output(f"{__COMMAND__} (blackhat coreutils) {__VERSION__}", pipe)
# TODO: Create special attribute for files in /proc so that they can't be modified
# For now, just trust that they'll exist
read_uptime = read("/proc/uptime")
if not read_uptime.success:
raise Exception
total_seconds = int(float(read_uptime.data))
if args.boot:
return output((datetime.datetime.now() - datetime.timedelta(seconds=total_seconds)).strftime("%Y-%m-%d %H:%M"), pipe)
if args.count:
usernames = []
for session in get_sessions().data:
username_result = get_user(session.real_uid)
if username_result.success:
username = username_result.data.username
else:
username = "?"
if username not in usernames:
usernames.append(username)
return output(f"{' '.join(usernames)}\n# users={len(usernames)}", pipe)
output_text = ""
if args.heading:
output_text += "NAME\tLINE\n"
for session in get_sessions().data:
username_result = get_user(session.real_uid)
if username_result.success:
username = username_result.data.username
else:
username = "?"
output_text += f"{username}\tpts/{session.id}\n"
return output(output_text, pipe)
|
from .chamfer_metric import *
from .compactness_metric import *
from .CustomLoss import *
from .edge_metric import *
from .GlobalLoss import *
from .hausdorff_metric import *
from .HausdorffLoss import *
from .KLDivergenceLoss import *
from .L2_metric import *
from .L21_metric import *
from .Loss import *
from .LossList import *
from .normal_metric import *
from .PixelLoss import *
from .ReconstructionLoss import *
from .so3_metric import *
from .SO3Loss import *
|
""" Use YQL to set industries to share companies through fondout CLI """
""" This is a very slow script. new ways found. do not use."""
import stockretriever
from subprocess import call
import sys
sectors = stockretriever.get_industry_ids()
for sector in sectors:
for industry in sector['industry']:
try:
print "\nProcessing", industry['name'], industry['id']
except TypeError as E:
print E
continue
industry_index = stockretriever.get_industry_index(industry['id'])
try:
industry_name = industry_index['name']
industry_companies = industry_index['company']
industry_id = industry_index['id']
except Exception, e:
print e
continue
for company in industry_companies:
try:
if(call(["php", "/Users/petter/projects/fondout2/public/index.php", "add", "industry-by-symbol", "--symbol=", company['symbol'], "--industry=", industry_name])):
try:
print "\nSuccess adding", company['name'], "(", company['symbol'], ") to", industry_name
except UnicodeEncodeError as e:
print e
else:
sys.stdout.write('.')
sys.stdout.flush()
except OSError as err:
print(err)
except TypeError as err:
print(err)
except:
print "Unknown error, error cought."
continue
|
"""
Author: https://github.com/CharlieZhao95
"""
|
#! /usr/bin/python
import ply.lex as lex
tokens = (
'COMMENT',
'BINARY',
'ATOM',
'STRING',
'INTEGER',
'FLOAT',
'MAP_ASSIGN',
)
literals = '[]{},.#'
t_ignore = ' \t'
def t_COMMENT(t):
r'%.*'
def t_BINARY(t):
r'<<\s*\"[^\"]*\"\s*>>'
start = t.value.find('"')
end = t.value.rfind('"')
t.value = t.value[start + 1:end]
return t
def t_ATOM(t):
r'([a-z][a-zA-Z0-9_]*)|(\'[^\']+\')'
return t
def t_STRING(t):
r'\"[^\"]*\"'
t.value = t.value[1:-1]
return t
def t_FLOAT(t):
r'[\+\-]?[0-9]+\.[0-9]+'
# print 'float %s' % t.value
t.value = float(t.value)
return t
def t_INTEGER(t):
r'[\+\-]?[0-9]+'
# print 'integer %s' % t.value
t.value = int(t.value)
return t
def t_MAP_ASSIGN(t):
r'=>'
return t
def t_newline(t):
r'\r?\n'
t.lexer.lineno += 1
def t_error(t):
print "Illegal character '%s'" % t.value[0]
t.lexer.skip(1)
lexer = lex.lex()
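# Illustrative usage sketch (not part of the original module; kept in the same
# Python 2 print style as the rest of the file). Tokenizes a small Erlang-style
# term and prints each recognized token.
if __name__ == '__main__':
    sample = '#{<<"name">> => \'bob\', "age" => 42, pi => 3.14}. % trailing comment'
    lexer.input(sample)
    for tok in lexer:
        # Each LexToken carries .type and .value, e.g. ATOM 'pi' or FLOAT 3.14.
        print tok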
|
import os
import collections
import queue
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union, cast
import numpy as np
from loguru import logger
from typing_extensions import Literal
import carla
from .. import physics, process
from ..utils.carla_image import carla_image_to_np
from .agents.navigation.local_planner import LocalPlannerNew, RoadOption
from .map_utils import Renderer
#: The number of ticks without movement after which we consider the vehicle to be stuck
STUCK_TICKS: int = 90 * 10
#: The distance in meters to the next checkpoint at which we consider the vehicle to be
#: off course.
OFF_COURSE_CHECKPOINT_DISTANCE: float = 15.0
CarlaWeather = Literal[
"Default",
"ClearNoon",
"CloudyNoon",
"WetNoon",
"WetCloudyNoon",
"SoftRainNoon",
"MidRainyNoon",
"HardRainNoon",
"ClearSunset",
"CloudySunset",
"WetSunset",
"WetCloudySunset",
"SoftRainSunset",
"MidRainSunset",
"HardRainSunset",
]
CarlaTown = Literal[
"Town01",
"Town02",
"Town03",
"Town04",
"Town05",
]
CARLA_WEATHERS = CarlaWeather.__args__ # type: ignore
CARLA_TOWNS = CarlaTown.__args__ # type: ignore
@dataclass
class TickState:
location: carla.Location
rotation: carla.Rotation
distance_travelled: float
distance_to_next_checkpoint: float
distance_to_goal: float
distance_travelled_along_route: float
rgb: np.ndarray
high_resolution_rgb: Optional[np.ndarray]
lane_invasion: Optional[carla.LaneInvasionEvent]
collision: Optional[carla.CollisionEvent]
speed: float
velocity: carla.Vector3D
command: RoadOption
birdview: np.ndarray
route_completed: bool
probably_stuck: bool
probably_off_course: bool
class EgoVehicle:
def __init__(self, vehicle: carla.Vehicle):
self.vehicle: carla.Vehicle = vehicle
self._rgb_queues: Dict[carla.Sensor, queue.Queue[np.ndarray]] = {}
self._lane_invasion_queue: queue.Queue[carla.LaneInvasionEvent] = queue.Queue()
self._collision_queue: queue.Queue[carla.CollisionEvent] = queue.Queue()
def apply_control(self, control: carla.VehicleControl) -> None:
self.vehicle.apply_control(control)
def current_speed_and_velocity(self) -> Tuple[float, carla.Vector3D]:
velocity = self.vehicle.get_velocity()
return (np.linalg.norm([velocity.x, velocity.y, velocity.z]), velocity)
def current_location(self) -> carla.Location:
return self.vehicle.get_transform().location
def current_rotation(self) -> carla.Rotation:
return self.vehicle.get_transform().rotation
def latest_rgb(self, camera_handle: carla.Sensor) -> np.ndarray:
"""
Blocks until an image is available for the given camera handle.
:param camera_handle: MUST be a carla `Sensor` object, as returned by
`EgoVehicle.add_rgb_camera`.
"""
rgb = None
while rgb is None or not self._rgb_queues[camera_handle].empty():
rgb = self._rgb_queues[camera_handle].get()
return carla_image_to_np(rgb)
def latest_lane_invasion(self) -> Optional[carla.LaneInvasionEvent]:
event = None
while not self._lane_invasion_queue.empty():
event = self._lane_invasion_queue.get()
return event
def latest_collision(self) -> Optional[carla.CollisionEvent]:
event = None
while not self._collision_queue.empty():
event = self._collision_queue.get()
return event
def add_rgb_camera(
self,
carla_world: carla.World,
image_size_x: int = 384,
image_size_y: int = 160,
effects: bool = True,
) -> carla.Sensor:
"""
:param effects: if false, disables some camera effects
"""
def _create_listener(rgb_camera: carla.Sensor):
def _enqueue_image(image):
logger.trace("Received image: {}", image)
self._rgb_queues[rgb_camera].put(image)
return _enqueue_image
blueprints = carla_world.get_blueprint_library()
rgb_camera_bp = blueprints.find("sensor.camera.rgb")
rgb_camera_bp.set_attribute("image_size_x", f"{image_size_x}")
rgb_camera_bp.set_attribute("image_size_y", f"{image_size_y}")
rgb_camera_bp.set_attribute("fov", "90")
if not effects:
rgb_camera_bp.set_attribute("motion_blur_intensity", "0.0")
rgb_camera = carla_world.spawn_actor(
rgb_camera_bp,
carla.Transform(carla.Location(x=2.0, z=1.4), carla.Rotation(pitch=0)),
attach_to=self.vehicle,
)
assert isinstance(rgb_camera, carla.Sensor)
self._rgb_queues[rgb_camera] = queue.Queue()
rgb_camera.listen(_create_listener(rgb_camera))
return rgb_camera
def add_lane_invasion_detector(self, carla_world: carla.World) -> carla.Sensor:
def _lane_invasion(event: carla.LaneInvasionEvent) -> None:
logger.warning("Lane invasion: {}", event)
self._lane_invasion_queue.put(event)
blueprints = carla_world.get_blueprint_library()
lane_invasion_detector_bp = blueprints.find("sensor.other.lane_invasion")
lane_invasion_detector = carla_world.spawn_actor(
lane_invasion_detector_bp,
carla.Transform(),
attach_to=self.vehicle,
)
assert isinstance(lane_invasion_detector, carla.Sensor)
lane_invasion_detector.listen(_lane_invasion) # type: ignore
return lane_invasion_detector
def add_collision_detector(self, carla_world: carla.World) -> carla.Sensor:
def _collision(event: carla.CollisionEvent) -> None:
logger.warning("Collision: {}", event)
self._collision_queue.put(event)
blueprints = carla_world.get_blueprint_library()
collision_detector_bp = blueprints.find("sensor.other.collision")
collision_detector = carla_world.spawn_actor(
collision_detector_bp,
carla.Transform(),
attach_to=self.vehicle,
)
assert isinstance(collision_detector, carla.Sensor)
collision_detector.listen(_collision) # type: ignore
return collision_detector
def get_vehicle_geometry(self) -> physics.VehicleGeometry:
vehicle_location = self.vehicle.get_location()
vehicle_physics = self.vehicle.get_physics_control()
wheel_front_left = vehicle_physics.wheels[0]
wheel_front_right = vehicle_physics.wheels[1]
wheel_rear_left = vehicle_physics.wheels[2]
wheel_rear_right = vehicle_physics.wheels[3]
vehicle_pos = np.array(
[vehicle_location.x, vehicle_location.y, vehicle_location.z]
)
# The physics positions are world coordinates in centimeters, we convert to
# meters.
wheel_front_left_pos = (
np.array(
[
wheel_front_left.position.x,
wheel_front_left.position.y,
wheel_front_left.position.z,
]
)
/ 100.0
)
wheel_front_right_pos = (
np.array(
[
wheel_front_right.position.x,
wheel_front_right.position.y,
wheel_front_right.position.z,
]
)
/ 100.0
)
wheel_rear_left_pos = (
np.array(
[
wheel_rear_left.position.x,
wheel_rear_left.position.y,
wheel_rear_left.position.z,
]
)
/ 100.0
)
wheel_rear_right_pos = (
np.array(
[
wheel_rear_right.position.x,
wheel_rear_right.position.y,
wheel_rear_right.position.z,
]
)
/ 100.0
)
# Calculate the points determining the longitudinal axis
front = (wheel_front_left_pos + wheel_front_right_pos) / 2.0
rear = (wheel_rear_left_pos + wheel_rear_right_pos) / 2.0
# Calculate offsets from the rear
rear_vehicle_offset = vehicle_pos - rear
rear_front_offset = front - rear
# Project the vehicle world location onto the longitudinal axis
# (the vehicle might be rotated somewhat during spawning)
vehicle_pos_projected_onto_axis = (
rear
+ np.dot(rear_vehicle_offset, rear_front_offset)
/ np.dot(rear_front_offset, rear_front_offset)
* rear_front_offset
)
wheel_base = np.linalg.norm(rear_front_offset)
wheel_track = np.linalg.norm(wheel_front_left_pos - wheel_front_right_pos)
rear_axle_longitudinal_offset = np.linalg.norm(
vehicle_pos_projected_onto_axis - rear
)
max_inner_wheel_angle = np.radians(wheel_front_left.max_steer_angle)
return physics.VehicleGeometry(
wheel_base=float(wheel_base),
wheel_track=float(wheel_track),
max_inner_wheel_angle=float(max_inner_wheel_angle),
rear_axle_longitudinal_offset=rear_axle_longitudinal_offset,
)
class Episode:
def __init__(
self,
carla_world: carla.World,
start_location: carla.Location,
ego_vehicle: EgoVehicle,
rgb_camera: carla.Sensor,
local_planner: LocalPlannerNew,
renderer: Renderer,
high_resolution_rgb_camera: Optional[carla.Sensor] = None,
):
self._carla_world: carla.World = carla_world
self._location: carla.Location = start_location
self._ego_vehicle: EgoVehicle = ego_vehicle
self._rgb_camera: carla.Sensor = rgb_camera
self._high_res_rgb_camera: Optional[carla.Sensor] = high_resolution_rgb_camera
self._local_planner: LocalPlannerNew = local_planner
self._renderer: Renderer = renderer
self._route_completed: bool = False
self._unmoved_ticks: int = 0
self._distance_travelled: float = 0.0
self.route_length: float = local_planner.distance_to_goal
def get_vehicle_geometry(self) -> physics.VehicleGeometry:
return self._ego_vehicle.get_vehicle_geometry()
def apply_control(self, control: carla.VehicleControl):
"""Apply control on the ego vehicle."""
self._ego_vehicle.apply_control(control)
def restore(self):
"""Restore to N seconds ago."""
# TODO
raise NotImplementedError
def tick(self) -> TickState:
self._carla_world.tick()
self._local_planner.run_step()
if self._local_planner.is_done():
self._route_completed = True
assert self._local_planner.checkpoint[0] is not None
checkpoint_location = self._local_planner.checkpoint[0].transform.location
assert self._local_planner.checkpoint[1] is not None
command = self._local_planner.checkpoint[1]
# node = self._local_planner.checkpoint[0].transform.location
# next = self._local_planner.target[0].transform.location
# logger.trace("start {}", self._start_pose.location)
# logger.trace("end {}", self._end_pose.location)
# logger.trace("node {}", node)
# logger.trace("next {}", next)
(speed, velocity) = self._ego_vehicle.current_speed_and_velocity()
location = self._ego_vehicle.current_location()
rotation = self._ego_vehicle.current_rotation()
rgb = self._ego_vehicle.latest_rgb(self._rgb_camera)
high_resolution_rgb = (
None
if self._high_res_rgb_camera is None
else self._ego_vehicle.latest_rgb(self._high_res_rgb_camera)
)
lane_invasion = self._ego_vehicle.latest_lane_invasion()
collision = self._ego_vehicle.latest_collision()
distance_to_next_checkpoint = location.distance(checkpoint_location)
self._distance_travelled += self._location.distance(location)
self._location = location
if speed > 0.0001:
self._unmoved_ticks = 0
else:
self._unmoved_ticks += 1
return TickState(
location=location,
rotation=rotation,
distance_travelled=self._distance_travelled,
distance_to_next_checkpoint=distance_to_next_checkpoint,
distance_to_goal=self._local_planner.distance_to_goal,
distance_travelled_along_route=(
self.route_length - self._local_planner.distance_to_goal
),
rgb=rgb,
high_resolution_rgb=high_resolution_rgb,
lane_invasion=lane_invasion,
collision=collision,
speed=speed,
velocity=velocity,
command=command,
birdview=self.get_birdview(),
route_completed=self._route_completed,
probably_stuck=self._unmoved_ticks > STUCK_TICKS,
probably_off_course=distance_to_next_checkpoint
> OFF_COURSE_CHECKPOINT_DISTANCE,
)
def render_birdview(self):
return self._renderer.get_render()
def get_birdview(self):
self._renderer.update()
self._renderer.render()
result = self._renderer.get_result()
birdview = [
result["road"],
result["lane"],
result["traffic"],
result["vehicle"],
result["pedestrian"],
]
birdview = [x if x.ndim == 3 else x[..., None] for x in birdview]
birdview = np.concatenate(birdview, 2)
return birdview
@dataclass
class ManagedEpisode:
"""
This class is an Episode context manager. It is used to configure and set up CARLA
episodes.
Configure the desired episode by changing the properties of this class. For example,
to change the default town and attach a high resolution camera, run:
```
managed_episode = ManagedEpisode(carla_client)
managed_episode.town = "Town02"
managed_episode.attach_high_resolution_rgb_camera = True
with managed_episode as episode:
while True:
state = episode.tick()
# ...
episode.apply_control(...)
```
"""
town: CarlaTown = "Town01"
weather: CarlaWeather = "Default"
vehicle_name: str = "vehicle.mustang.mustang"
target_route_length: float = 500.0
attach_high_resolution_rgb_camera: bool = False
def __init__(self, carla_client: carla.Client):
self._client = carla_client
self._carla_world: Optional[carla.World] = None
self._traffic_manager: Optional[carla.TrafficManager] = None
self._traffic_manager_port: Optional[int] = None
self._sensors: List[carla.Sensor] = []
self._pedestrian_controllers: List[carla.WalkerAIController] = []
self._actor_dict: Dict[str, List[carla.Actor]] = collections.defaultdict(list)
def _set_up_world_settings(self, world: carla.World):
logger.trace("Set simulation to synchronous mode.")
settings = world.get_settings()
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.1
world.apply_settings(settings)
def _set_up_traffic_manager(self) -> int:
if "CARLA_TRAFFIC_MANAGER_PORT" in os.environ:
port = int(os.environ["CARLA_TRAFFIC_MANAGER_PORT"])
else:
port = 8000
logger.trace(f"Setting up/connecting to traffic manager on port {port}.")
self._traffic_manager = self._client.get_trafficmanager(port)
self._traffic_manager.set_synchronous_mode(True)
self._traffic_manager.set_hybrid_physics_mode(True)
self._traffic_manager_port = port
return port
def _generate_route(
self, carla_map: carla.Map
) -> Tuple[LocalPlannerNew, carla.Transform, carla.Transform]:
spawn_points = carla_map.get_spawn_points()
while True:
# We generate a hopeful start and end pose, but the end pose might be
# somewhere else (we try to generate a route of length
# `self.target_route_length`).
start_pose = process.rng.choice(spawn_points)
end_pose = process.rng.choice(spawn_points)
local_planner = LocalPlannerNew(carla_map, 2.5, 9.0, 1.5)
local_planner.set_route(
start_pose.location,
end_pose.location,
max_length=self.target_route_length,
)
# ... and we generate a different route if the distance is too short.
if local_planner.distance_to_goal >= self.target_route_length - 2.0:
logger.debug(
f"Generated route length: {local_planner.distance_to_goal:.2f} m."
)
return local_planner, start_pose, end_pose
else:
logger.trace(
"Generated route length too short: "
f"{local_planner.distance_to_goal:.2f} m. Regenerating."
)
def _set_up(self) -> Episode:
logger.trace("Loading world.")
self._carla_world = self._client.load_world(self.town)
traffic_manager_port = self._set_up_traffic_manager()
weather = getattr(carla.WeatherParameters, self.weather)
logger.trace(f"Setting world weather to {weather}.")
self._carla_world.set_weather(weather)
self._set_up_world_settings(self._carla_world)
carla_map = self._carla_world.get_map()
logger.debug("Generating route.")
(local_planner, start_pose, _) = self._generate_route(carla_map)
logger.debug("Spawning ego vehicle.")
ego_vehicle, rgb_camera, high_resolution_rgb_camera = self._spawn_ego_vehicle(
self._carla_world, traffic_manager_port, start_pose
)
local_planner.set_vehicle(ego_vehicle.vehicle)
logger.debug("Spawning vehicles.")
self._spawn_vehicles(
self._carla_world,
carla_map,
100,
[start_pose.location],
)
logger.debug("Spawning pedestrians.")
self._spawn_pedestrians(self._carla_world, 250)
for controller in self._pedestrian_controllers:
controller.start()
controller.go_to_location(
self._carla_world.get_random_location_from_navigation()
)
# Set walking speed between 2 and 5 km/h
controller.set_max_speed(process.rng.uniform(2.0, 5.0) * 1000 / 60 / 60)
renderer = Renderer(
"placeholder",
self._client,
self._carla_world,
carla_map,
ego_vehicle.vehicle,
)
renderer.start()
return Episode(
self._carla_world,
start_pose.location,
ego_vehicle,
rgb_camera,
local_planner,
renderer,
high_resolution_rgb_camera=high_resolution_rgb_camera,
)
def _destroy_actors(self) -> None:
logger.debug("Destroying actors.")
# In addition to being destroyed, sensors and pedestrians must be told to stop.
for sensor in self._sensors:
sensor.stop()
for controller in self._pedestrian_controllers:
controller.stop()
actors = [actor for actors in self._actor_dict.values() for actor in actors]
self._client.apply_batch_sync(
[carla.command.DestroyActor(actor.id) for actor in actors]
)
        # Reset to a defaultdict so the episode context can be re-entered safely.
        self._actor_dict = collections.defaultdict(list)
self._sensors = []
self._pedestrian_controllers = []
logger.debug("Destroyed actors.")
def _clean_up(self) -> None:
assert self._carla_world is not None
assert self._traffic_manager is not None
self._destroy_actors()
self._traffic_manager.set_synchronous_mode(False)
settings = self._carla_world.get_settings()
settings.synchronous_mode = False
settings.fixed_delta_seconds = 0.0
self._carla_world.apply_settings(settings)
self._carla_world.wait_for_tick()
logger.info("Cleanup done.")
def __enter__(self) -> Episode:
logger.debug("Entering managed episode context.")
logger.trace("Building episode.")
# TODO start recording
return self._set_up()
def __exit__(self, *args):
logger.debug("Exiting managed episode context.")
self._clean_up()
# Most likely unnecessary deletes, but there have been issues with cleanup
del self._carla_world
del self._traffic_manager
del self._client
def _spawn_vehicles(
self,
carla_world: carla.World,
carla_map: carla.Map,
n_vehicles: int,
disallowed_spawn_points: List[carla.Location] = [],
) -> None:
"""
Spawns `n_vehicles` vehicles.
"""
blueprints = []
for blueprint in carla_world.get_blueprint_library().filter("vehicle.*"):
wheels = blueprint.get_attribute("number_of_wheels")
if wheels is not None and wheels.as_int() == 4:
blueprints.append(blueprint)
spawn_points = carla_map.get_spawn_points()
spawn_points = [
spawn_point
for spawn_point in spawn_points
if spawn_point not in disallowed_spawn_points
]
process.rng.shuffle(spawn_points)
if n_vehicles > len(spawn_points):
logger.warning(
"Requested spawning {} vehicles, but only {} spawn points are available.",
n_vehicles,
len(spawn_points),
)
n_vehicles = len(spawn_points)
spawn_points = spawn_points[:n_vehicles]
batch = []
for spawn_point in spawn_points[:n_vehicles]:
blueprint = process.rng.choice(blueprints)
blueprint.set_attribute("role_name", "autopilot")
if blueprint.has_attribute("color"):
color_attribute = blueprint.get_attribute("color")
assert color_attribute is not None
color = process.rng.choice(color_attribute.recommended_values)
blueprint.set_attribute("color", color)
if blueprint.has_attribute("driver_id"):
driver_id_attribute = blueprint.get_attribute("driver_id")
assert driver_id_attribute is not None
driver_id = process.rng.choice(driver_id_attribute.recommended_values)
blueprint.set_attribute("driver_id", driver_id)
batch.append(carla.command.SpawnActor(blueprint, spawn_point))
spawned = []
for result in self._client.apply_batch_sync(batch, True):
if result.error:
logger.warning(result.error)
else:
spawned.append(result.actor_id)
vehicles = carla_world.get_actors(spawned)
for vehicle in vehicles:
assert isinstance(vehicle, carla.Vehicle)
assert self._traffic_manager_port is not None
vehicle.set_autopilot(True, self._traffic_manager_port)
self._actor_dict["vehicle"] = list(vehicles)
logger.debug("Spawned {} vehicles.", len(spawned))
def _spawn_pedestrians(self, carla_world: carla.World, n_pedestrians: int) -> None:
"""
Spawns `n_pedestrians` pedestrians. Sometimes spawning a pedestrian fails due to
a collision at the spawn location. This method keeps retrying in a loop until
exactly `n_pedestrians` have been spawned.
"""
walker_blueprints = carla_world.get_blueprint_library().filter(
"walker.pedestrian.*"
)
controller_blueprint = carla_world.get_blueprint_library().find(
"controller.ai.walker"
)
walkers: List[int] = []
controllers: List[int] = []
spawn_collisions = 0
while len(walkers) < n_pedestrians:
walker_batch = []
for _ in range(n_pedestrians - len(walkers)):
location = carla_world.get_random_location_from_navigation()
spawn_point = carla.Transform(location=location)
walker_bp = process.rng.choice(walker_blueprints)
if walker_bp.has_attribute("is_invincible"):
walker_bp.set_attribute("is_invincible", "false")
walker_batch.append(carla.command.SpawnActor(walker_bp, spawn_point))
for result in self._client.apply_batch_sync(walker_batch, True):
if result.error:
if "collision at spawn position" in result.error:
spawn_collisions += 1
else:
logger.error(result.error)
else:
walkers.append(result.actor_id)
controller_batch = [
carla.command.SpawnActor(controller_blueprint, carla.Transform(), walker)
for walker in walkers
]
for result in self._client.apply_batch_sync(controller_batch, True):
if result.error:
logger.error(result.error)
else:
controllers.append(result.actor_id)
logger.debug(
f"Spawned {len(controllers)} pedestrians, after accounting for "
f"{spawn_collisions} spawn collisions."
)
self._actor_dict["pedestrian"] = list(carla_world.get_actors(walkers))
self._actor_dict["pedestrian_controller"] = list(
carla_world.get_actors(controllers)
)
self._pedestrian_controllers = cast(
List[carla.WalkerAIController], self._actor_dict["pedestrian_controller"]
)
def _spawn_ego_vehicle(
self,
carla_world: carla.World,
traffic_manager_port: int,
start_pose: carla.Transform,
) -> Tuple[EgoVehicle, carla.Sensor, Optional[carla.Sensor]]:
"""
Returns a 3-tuple of the created vehicle, an attached RGB camera, and
(optionally) an attached high-resolution RGB camera.
"""
blueprints = carla_world.get_blueprint_library()
blueprint = process.rng.choice(blueprints.filter(self.vehicle_name))
blueprint.set_attribute("role_name", "hero")
if blueprint.has_attribute("color"):
color = process.rng.choice(
blueprint.get_attribute("color").recommended_values
)
blueprint.set_attribute("color", color)
if blueprint.has_attribute("driver_id"):
driver_id = process.rng.choice(
blueprint.get_attribute("driver_id").recommended_values
)
blueprint.set_attribute("driver_id", driver_id)
if blueprint.has_attribute("is_invincible"):
blueprint.set_attribute("is_invincible", "true")
player = carla_world.spawn_actor(blueprint, start_pose)
assert isinstance(player, carla.Vehicle)
player.set_autopilot(False, traffic_manager_port)
self._actor_dict["player"].append(player)
ego_vehicle = EgoVehicle(player)
rgb_camera = ego_vehicle.add_rgb_camera(carla_world)
self._actor_dict["sensor"].append(rgb_camera)
self._sensors.append(rgb_camera)
high_resolution_rgb_camera: Optional[carla.Sensor] = None
if self.attach_high_resolution_rgb_camera:
high_resolution_rgb_camera = ego_vehicle.add_rgb_camera(
carla_world, image_size_x=1920, image_size_y=800, effects=False
)
self._actor_dict["sensor"].append(high_resolution_rgb_camera)
self._sensors.append(high_resolution_rgb_camera)
lane_invasion_detector = ego_vehicle.add_lane_invasion_detector(carla_world)
self._actor_dict["sensor"].append(lane_invasion_detector)
self._sensors.append(lane_invasion_detector)
collision_detector = ego_vehicle.add_collision_detector(carla_world)
self._actor_dict["sensor"].append(collision_detector)
self._sensors.append(collision_detector)
return ego_vehicle, rgb_camera, high_resolution_rgb_camera
def connect(
carla_host: str = "localhost", carla_world_port: int = 2000
) -> ManagedEpisode:
logger.trace(f"Connecting to Carla simulator at {carla_host}:{carla_world_port}.")
client = carla.Client(carla_host, carla_world_port)
client.set_timeout(30.0)
client_version = client.get_client_version()
server_version = client.get_server_version()
logger.info(f"Carla client version: {client_version}.")
logger.info(f"Carla server version: {server_version}.")
return ManagedEpisode(client)
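# Illustrative usage sketch (not part of the original module; assumes a CARLA
# server is reachable on localhost:2000). Drives a short episode with a constant
# throttle, stopping early if the route completes or the vehicle gets stuck.
if __name__ == "__main__":
    managed_episode = connect()
    managed_episode.town = "Town02"
    with managed_episode as episode:
        for _ in range(100):
            state = episode.tick()
            if state.route_completed or state.probably_stuck:
                break
            episode.apply_control(carla.VehicleControl(throttle=0.3, steer=0.0))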
|
# Generated by Django 4.0 on 2021-12-29 20:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_alter_address_options_alter_customuser_options_and_more'),
]
operations = [
migrations.CreateModel(
name='CustomerProfile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.ImageField(blank=True, null=True, upload_to='images/profile_pictures', verbose_name='عکس پروفایل')),
('slug', models.SlugField(allow_unicode=True, blank=True, null=True, verbose_name='اسلاگ')),
('address', models.ManyToManyField(blank=True, related_name='user', to='accounts.Address', verbose_name='آدرس')),
],
options={
'verbose_name': 'پروفایل',
'verbose_name_plural': 'پروفایل\u200c',
},
),
migrations.CreateModel(
name='SellerDashboard',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Customer',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('accounts.customuser',),
),
migrations.CreateModel(
name='Seller',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('accounts.customuser',),
),
migrations.DeleteModel(
name='Profile',
),
migrations.AddField(
model_name='customerprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to='accounts.customer', verbose_name='کاربر'),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import argparse
def format_label(label):
return label.lower().replace(' ', '_')
def format_value(value):
return int(value.replace(',', ''))
def parse_hotel(soup):
chart = soup.find('div', id='ratingFilter')
results = {}
for li in chart.ul.find_all('li'):
label = format_label(li.find('div').text)
rating = format_value(li.find_all('span')[-2].text)
results[label] = rating
return results
def parse_attraction(soup):
chart = soup.find('div', class_='visitorRating')
results = {}
for li in chart.ul.find_all('li'):
label = format_label(li.find('div', class_='label').text)
rating = format_value(li.find('div', class_='valueCount').text)
results[label] = rating
return results
PARSE_FUNCTIONS = {
'hotel': parse_hotel,
'attraction': parse_attraction,
}
def fetch_reviews(url, kind):
r = requests.get(url)
r.raise_for_status()
html_text = r.text
soup = BeautifulSoup(html_text, 'html.parser')
return PARSE_FUNCTIONS[kind](soup)
def format_results(results):
order = ['excellent', 'very_good', 'average', 'poor', 'terrible']
keys = ['{name}:{value}'.format(name=name, value=results[name])
for name in order]
return ' '.join(keys)
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-k', '--kind', required=True,
choices=['hotel', 'attraction'])
args = parser.parse_args()
results = fetch_reviews(args.url, kind=args.kind)
print(format_results(results))
|
"""
pee.py
====================================
Parameter error estimator functionality.
"""
import numpy as np
from scipy.optimize import minimize
from numdifftools import Jacobian
def parameter_error_estimator(
fun,
x_data,
y_data,
x_err,
y_err,
w_0,
iter_num=None,
rtol=None,
atol=None,
method=None,
jac=None,
hess=None,
hessp=None,
bounds=None,
constraints=(),
tol=None,
callback=None,
options=None,
):
"""This function takes input data and input error and than samples in each iteration data from given normal
distribution. Input function fun will be used in the definition of cost function for least squares. Cost functions
is optimized using scipy.optimize.minimize function. Argument iter_num or rtol, atol has to be set in order to
run least squares.
Args:
fun ([type]): [description]
x_data ([type]): [description]
y_data ([type]): [description]
x_err ([type]): [description]
y_err ([type]): [description]
w_0 ([type]): [description]
        iter_num (int, optional): [description]. Defaults to None.
        rtol (float, optional): [description]. Defaults to None.
        atol (float, optional): [description]. Defaults to None.
method ([type], optional): [description]. Defaults to None.
jac ([type], optional): [description]. Defaults to None.
hess ([type], optional): [description]. Defaults to None.
hessp ([type], optional): [description]. Defaults to None.
bounds ([type], optional): [description]. Defaults to None.
constraints (tuple, optional): [description]. Defaults to ().
tol ([type], optional): [description]. Defaults to None.
callback ([type], optional): [description]. Defaults to None.
options ([type], optional): [description]. Defaults to None.
"""
if iter_num is None and rtol is None and atol is None:
raise TypeError("Argument iter_num or arguments rtol, atol have to be set.")
if iter_num is None and (rtol is None or atol is None):
raise TypeError("Both arguments rtol, atol have to be set.")
def cost_fun(params, x, y):
y_gen = np.array([fun(x_i, params) for x_i in x])
return np.linalg.norm(y_gen - y)
if method == "Newton-CG":
def cost_fun_jac(params, x, y):
return Jacobian(lambda p: cost_fun(p, x, y))(params).ravel()
else:
cost_fun_jac = None
# print(cost_fun(w_0, x_data, y_data))
result = minimize(
cost_fun,
w_0,
args=(x_data, y_data),
method=method,
jac=cost_fun_jac,
hess=hess,
hessp=hessp,
bounds=bounds,
constraints=constraints,
tol=tol,
callback=callback,
options=options,
)
if iter_num is not None:
params_agg = np.zeros((iter_num, result.x.shape[0]), dtype=result.x.dtype)
n, p_mean, M2 = 0, np.zeros_like(w_0), np.zeros_like(w_0)
while n < iter_num:
x_data_loc = x_data + np.random.normal(loc=0.0, scale=x_err)
y_data_loc = y_data + np.random.normal(loc=0.0, scale=y_err)
result = minimize(
cost_fun,
w_0,
args=(x_data_loc, y_data_loc),
method=method,
jac=cost_fun_jac,
hess=hess,
hessp=hessp,
bounds=bounds,
constraints=constraints,
tol=tol,
callback=callback,
options=options,
)
if result.success:
# params_agg[n] = result.x
n += 1
delta = result.x - p_mean
p_mean = p_mean + delta / n
M2 = M2 + np.multiply(delta, result.x - p_mean)
else:
params_agg = []
p_mean, p_std = parameter_error_estimator(
fun,
x_data,
y_data,
x_err,
y_err,
w_0,
iter_num=3,
rtol=None,
atol=None,
method=method,
jac=cost_fun_jac,
hess=hess,
hessp=hessp,
bounds=bounds,
constraints=constraints,
tol=tol,
callback=callback,
options=options,
)
p_mean_prev, p_std_prev = 2 * p_mean, 2 * p_std
variance = np.zeros_like(p_std)
n, M2 = 3, np.zeros_like(w_0)
while not (
np.allclose(p_mean, p_mean_prev, rtol=rtol, atol=atol)
and np.allclose(p_std, p_std_prev, rtol=rtol, atol=atol)
):
x_data_loc = x_data + np.random.normal(loc=0.0, scale=x_err)
y_data_loc = y_data + np.random.normal(loc=0.0, scale=y_err)
result = minimize(
cost_fun,
w_0,
args=(x_data_loc, y_data_loc),
method=method,
jac=cost_fun_jac,
hess=hess,
hessp=hessp,
bounds=bounds,
constraints=constraints,
tol=tol,
callback=callback,
options=options,
)
if result.success:
# params_agg.append(result.x)
n += 1
p_mean_prev, p_std_prev = p_mean, p_std
delta = result.x - p_mean
p_mean = p_mean + delta / n
M2 = M2 + np.multiply(delta, result.x - p_mean)
variance = M2 / (n - 1)
p_std = np.sqrt(variance)
variance = M2 / (n - 1)
return p_mean, np.sqrt(variance)
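# Illustrative usage sketch (not part of the original module): estimates the slope
# and intercept of a noisy line y = w[0] * x + w[1]. The data, errors and the
# iteration count below are made-up example values.
if __name__ == "__main__":
    def line(x, w):
        return w[0] * x + w[1]

    x = np.linspace(0.0, 10.0, 20)
    y = 2.0 * x + 1.0
    x_err = np.full_like(x, 0.1)
    y_err = np.full_like(y, 0.2)
    p_mean, p_std = parameter_error_estimator(
        line, x, y, x_err, y_err, w_0=np.array([1.0, 0.0]), iter_num=50
    )
    print("parameters:", p_mean, "+/-", p_std)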
|
__author__ = 'yetone'
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.abspath(__file__)))) # noqa
import pytest
import unittest
from script_manager import Manager
from script_manager.compat import PY2
def run(command_line, manager_run):
sys.argv = command_line.split()
exit_code = None
try:
manager_run()
except SystemExit as e:
exit_code = e.code
return exit_code
class Test(unittest.TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, capsys):
self.capsys = capsys
def test_simple_command_decorator(self):
manager = Manager()
@manager.command
def hello_world():
print('hello world')
self.assertIn('hello-world', manager._command_map)
run('manage.py hello_world', manager.run)
out, err = self.capsys.readouterr()
self.assertIn('hello world', out)
run('manage.py hello-world', manager.run)
out, err = self.capsys.readouterr()
self.assertIn('hello world', out)
def test_nested_command(self):
test_manager = Manager()
test0_manager = Manager()
main_manager = Manager()
@test_manager.command
def hello():
print('test.hello')
@test_manager.command
def hi(a, b, c=False, name='yetone'):
print('test.hi: <a: {}, b: {}, c: {}, name: {}>'.format(a, b, c, name))
@test0_manager.command
def say():
print('test0.say')
@test0_manager.command
def sing(a, b, c=False, name='yetone'):
print('test0.sing: <a: {}, b: {}, c: {}, name: {}>'.format(a, b, c, name))
main_manager.add_command('test', test_manager)
main_manager.add_command('test0', test0_manager)
self.assertIn('hello', test_manager._command_map)
self.assertIn('hi', test_manager._command_map)
self.assertIn('say', test0_manager._command_map)
self.assertIn('sing', test0_manager._command_map)
run('manage.py test hello', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test.hello', out)
run('manage.py test hellos', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test -h', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test hi', main_manager.run)
out, err = self.capsys.readouterr()
if PY2:
self.assertIn('too few arguments', err)
else:
self.assertIn('the following arguments are required', err)
run('manage.py test hi -h', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test hi -n foo 1 2 -c', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test.hi: <a: 1, b: 2, c: True, name: foo>', out)
run('manage.py test hi -n foo 1 2', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test.hi: <a: 1, b: 2, c: False, name: foo>', out)
run('manage.py test0 say', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test0.say', out)
run('manage.py test0 hello', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test0 -h', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test0 sing -h', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('usage', out)
run('manage.py test0 sing -n foo 1 2 -c', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test0.sing: <a: 1, b: 2, c: True, name: foo>', out)
run('manage.py test0 sing -n foo 1 2', main_manager.run)
out, err = self.capsys.readouterr()
self.assertIn('test0.sing: <a: 1, b: 2, c: False, name: foo>', out)
def test_description(self):
description = 'Test my description'
manager = Manager(description=description)
@manager.command
def hello():
print('hello')
run('manage.py', manager.run)
out, err = self.capsys.readouterr()
self.assertIn(description, out)
run('manage.py -h', manager.run)
out, err = self.capsys.readouterr()
self.assertIn(description, out)
@manager.command
def wow(a, b, c=1):
'''I'm wow
:param a: I'm a
:param b: I'm b
:param c: I'm c
'''
print('wow')
run('manage.py -h', manager.run)
out, err = self.capsys.readouterr()
self.assertIn("I'm wow", out)
self.assertNotIn("I'm a", out)
run('manage.py wow -h', manager.run)
out, err = self.capsys.readouterr()
self.assertIn("I'm wow", out)
self.assertIn("I'm a", out)
self.assertIn("I'm b", out)
self.assertIn("I'm c", out)
def test_conflict_flag(self):
manager = Manager()
@manager.command
def hello(host='127.0.0.1', boy=1, bird=2):
print('hello')
run('manage.py hello -h', manager.run)
out, err = self.capsys.readouterr()
self.assertIn('-H HOST, --host HOST', out)
self.assertIn('-b BOY, --boy BOY', out)
self.assertIn('--bird BIRD', out)
if not PY2:
from py3_tests import * # noqa
|
"""A decorator to add a method to an existing class."""
def monkeypatch(cls: type, name: str=None):
"""Decorator. Applied to a function, sets it as a method in a class.
This can be used above a property, too. Example::
@monkeypatch(MyClass)
def some_method(self):
pass
"""
def _monkeypatch(fn):
nam = name or (
fn.fget.__name__ if isinstance(fn, property) else fn.__name__)
setattr(cls, nam, fn)
return _monkeypatch
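# Illustrative usage sketch (not part of the original module): attaches a method
# and a property to a throwaway class defined here for the example. Note that the
# decorator returns None, so the decorated local names are not reused afterwards.
if __name__ == "__main__":
    class Greeter:
        def __init__(self, name):
            self.name = name

    @monkeypatch(Greeter)
    def greet(self):
        return "hello, " + self.name

    @monkeypatch(Greeter)
    @property
    def shout(self):
        return self.name.upper()

    g = Greeter("world")
    print(g.greet())  # hello, world
    print(g.shout)    # WORLD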
|
#usage: python rgbhist.py ./pics/auto_seg/ ./hists/ True
import sys
from skimage import io
import matplotlib.pyplot as plt
import os
import argparse
def hist(in_path, out_path, batch=False):
if batch is False:
        image = io.imread(in_path)
fig = plt.figure(figsize = [10, 5])
plt.subplot(1, 2, 1)
plt.imshow(image)
        plt.title(in_path.rpartition('/')[-1][:-4])
plt.subplot(1, 2, 2)
plt.hist(image.ravel(), bins = 256, color = 'orange', )
plt.hist(image[:, :, 0].ravel(), bins = 256, color = 'red', alpha = 0.5)
plt.hist(image[:, :, 1].ravel(), bins = 256, color = 'Green', alpha = 0.5)
plt.hist(image[:, :, 2].ravel(), bins = 256, color = 'Blue', alpha = 0.5)
plt.xlabel('Intensity Value')
plt.ylabel('Count')
plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])
return fig
else:
with os.scandir(in_path) as entries:
for entry in entries:
if entry.is_file():
image = io.imread(f'{in_path}{entry.name}')
plt.figure(figsize = [10, 5])
plt.subplot(1, 2, 1)
plt.imshow(image)
plt.title(entry.name)
plt.subplot(1, 2, 2)
plt.hist(image.ravel(), bins = 256, color = 'orange', )
plt.hist(image[:, :, 0].ravel(), bins = 256, color = 'red', alpha = 0.5)
plt.hist(image[:, :, 1].ravel(), bins = 256, color = 'Green', alpha = 0.5)
plt.hist(image[:, :, 2].ravel(), bins = 256, color = 'Blue', alpha = 0.5)
plt.xlabel('Intensity Value')
plt.ylabel('Count')
plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])
plt.savefig(f'{out_path}{entry.name}')
plt.close()
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process Images')
parser.add_argument('in_path', default='./pics/auto_seg/', help='Path to input directory')
parser.add_argument('out_path', default='./hists/', help='Path to output directory')
    # argparse's type=bool would treat any non-empty string (including 'False') as True,
    # so parse the flag from its text instead.
    parser.add_argument('batch', type=lambda s: str(s).lower() in ('true', '1', 'yes'), default=True, help='Whether to process in batch or not')
args = parser.parse_args()
hist(args.in_path, args.out_path, args.batch)
|
import time
while True:
print("I am working!")
time.sleep(2)
|
#import dash
import dash_core_components as dcc
import dash_html_components as html
#from dash.dependencies import Input, Output, State
#import dash_table
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.colors import n_colors
#import warnings
#import sys
#import re
import numpy as np
#from scipy import stats
#import statsmodels.api as sm
#import urllib
#from urllib.request import urlopen
#import json
#import base64
#import io
#import json
#import ast
#import time
#import numpy.polynomial.polynomial as poly
#from statsmodels.regression.linear_model import OLS
#from statsmodels.tools import add_constant
def description_card1():
"""
:return: A Div containing dashboard title & descriptions.
"""
return html.Div(
id="description-card1",
children=[
html.H3("Predicting crimes in Chicago", style={'textAlign': 'left', 'color': '#3385ff'}),
html.P("This tool uses over 7.2 million crimes committed from 2001 to present. " +
" It allows the user to build profiles for specific crimes. It then displays " +
" the chance of that crime profile happening across Chicago. " +
" The app tests its own accuracy by predicting where crimes" +
" fitting the profile have occurred in 2020 and 2021.",
style={
'textAlign': 'left',
}),
html.P("This tool crunches a lot of data to find its predictions. Updating takes a few seconds.",
style={
'textAlign': 'left',
}),
],
)
def description_card12():
"""
:return: A Div containing dashboard title & descriptions.
"""
return html.Div(
id="description-card12",
children=[
html.H3("Examining crimes in Chicago throughout 2020 and 2021",
style={'textAlign': 'left', 'color': '#3385ff'}),
html.P("This tool allows users to examine crimes committed in Chicago from " +
"Jan 1st 2020 to present.",
style={
'textAlign': 'left',
}),
html.P("This tool crunches a lot of data. Updating takes a few seconds.",
style={
'textAlign': 'left',
}),
],
)
def description_card2():
"""
:return: A Div containing dashboard title & descriptions.
"""
return html.Div(
id="description-card2",
children=[
html.H5("Comparisons between Loshiko, City of Chicago, and Neighborhood Scout", style={
'textAlign': 'left',
}),
html.H6('Our measure of app quality', style={'textAlign': 'left', 'color': '#3385ff'}),
dcc.Markdown('''
We award a point for whether an app satisfies each of 10 criteria.
These criteria do not include features that all apps have.
For example, if each app provides a map, then providing a map
is not a useful criterion for comparison. An app's score can range
between 0 and 10.
'''),
#html.Br(),
],
)
def comparison_table():
cr = ['The app uses data from the current year',
'The app actually maps crimes',
'The app allows users to filter crime data',
'The app provides useful statistics',
'The app makes predictions',
'The map includes useful hover-over information',
'The app provides trends across time',
'The app allows users to download data',
'The app is easy to use, not complicated or cumbersome',
'The app provides text summaries of data insights']
#colors = n_colors('rgb(255, 200, 200)', 'rgb(200, 0, 0)', 2, colortype='rgb')
lo = ['<b>Yes</b>', '<b>Yes</b>', '<b>Yes</b>', '<b>Yes</b>', '<b>Yes</b>',
'<b>Yes</b>', '<b>No</b>', '<b>No</b>', '<b>Yes</b>', '<b>Yes</b>']
lo_clrs = []
for l in lo:
if l == '<b>Yes</b>':
lo_clrs.append('#9ae59a')
else:
lo_clrs.append('#ffb3b3')
ch = ['<b>Yes</b>', '<b>Yes</b>', '<b>Yes</b>', '<b>No</b>', '<b>No</b>',
'<b>No</b>', '<b>No</b>', '<b>Yes</b>', '<b>No</b>', '<b>No</b>']
ch_clrs = []
for l in ch:
if l == '<b>Yes</b>':
ch_clrs.append('#9ae59a')
else:
ch_clrs.append('#ffb3b3')
ns = ['<b>No</b>', '<b>No</b>', '<b>No</b>', '<b>Yes</b>', '<b>Yes</b>',
'<b>No</b>', '<b>No</b>', '<b>No</b>', '<b>No</b>', '<b>Yes</b>']
ns_clrs = []
for l in ns:
if l == '<b>Yes</b>':
ns_clrs.append('#9ae59a')
else:
ns_clrs.append('#ffb3b3')
figure = go.Figure(data=[go.Table(
columnwidth = [80,40,40,40],
header=dict(
values=['<b>Criterion</b>',
'<b>Loshiko (Score = 8)</b>',
'<b>City of Chicago (Score = 4)</b>',
'<b>Neighborhood Scout (Score = 3)</b>'],
#line_color='black',
#fill_color='white',
align='center',
font = dict(color='black', size=14)
),
cells=dict(
values=[cr, lo, ch, ns],
#line_color='black',
fill_color=[['#f2f2f2']*10,
lo_clrs, ch_clrs, ns_clrs],
align=['left', 'center', 'center', 'center'],
font=dict(color=['black', 'white', 'white', 'white'], size=14),
height=45,
))
])
figure.update_layout(
#coloraxis_showscale=False,
#autosize=True,
#showlegend=False,
#hovermode='closest',
#mapbox_style="light",
height=502,
margin={"r":0,"t":0,"l":0,"b":0},
)
return figure
def replace_fields(crimes_df):
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['AIRCRAFT',
'AIRPORT BUILDING NON-TERMINAL - NON-SECURE AREA',
'AIRPORT BUILDING NON-TERMINAL - SECURE AREA',
'AIRPORT EXTERIOR - NON-SECURE AREA',
'AIRPORT EXTERIOR - SECURE AREA',
'AIRPORT PARKING LOT',
'AIRPORT TERMINAL LOWER LEVEL - NON-SECURE AREA',
'AIRPORT TERMINAL LOWER LEVEL - SECURE AREA',
'AIRPORT TERMINAL MEZZANINE - NON-SECURE AREA',
'AIRPORT TERMINAL UPPER LEVEL - NON-SECURE AREA',
'AIRPORT TERMINAL UPPER LEVEL - SECURE AREA',
'AIRPORT TRANSPORTATION SYSTEM (ATS)',
'AIRPORT VENDING ESTABLISHMENT',
], 'AIRPORT/AIRCRAFT')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['CHA APARTMENT',
'CHA BREEZEWAY',
'CHA ELEVATOR',
'CHA GROUNDS',
'CHA HALLWAY',
'CHA HALLWAY / STAIRWELL / ELEVATOR',
'CHA HALLWAY/STAIRWELL/ELEVATOR',
'CHA LOBBY',
'CHA PARKING LOT',
'CHA PARKING LOT / GROUNDS',
'CHA PARKING LOT/GROUNDS',
'CHA PLAY LOT',
'CHA STAIRWELL',
], 'CHA Property')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['CHURCH',
'CHURCH / SYNAGOGUE / PLACE OF WORSHIP',
'CHURCH PROPERTY',
'CHURCH/SYNAGOGUE/PLACE OF WORSHIP',
], 'PLACE OF WORSHIP')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['SCHOOL - PRIVATE BUILDING',
'SCHOOL - PRIVATE GROUNDS',
'SCHOOL - PUBLIC BUILDING',
'SCHOOL - PUBLIC GROUNDS',
'SCHOOL YARD',
'SCHOOL, PRIVATE, BUILDING',
'SCHOOL, PRIVATE, GROUNDS',
'SCHOOL, PUBLIC, BUILDING',
'SCHOOL, PUBLIC, GROUNDS',
'PUBLIC GRAMMAR SCHOOL',
'PUBLIC HIGH SCHOOL',
], 'SCHOOL')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['VACANT LOT',
'VACANT LOT / LAND',
'VACANT LOT/LAND'], 'VACANT LOT')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['RESIDENCE',
'APARTMENT',
'RESIDENCE - GARAGE',
'RESIDENCE - PORCH / HALLWAY',
'RESIDENCE - YARD (FRONT / BACK)',
'RESIDENCE PORCH/HALLWAY',
'RESIDENCE-GARAGE',
'RESIDENTIAL YARD (FRONT/BACK)',
'YARD', 'PORCH', 'APARTMENT',
'ROOMING HOUSE',
'DRIVEWAY',
'GARAGE',
'HOUSE',
'BASEMENT',
'DRIVEWAY - RESIDENTIAL'], 'RESIDENTIAL')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['HOTEL',
'HOTEL / MOTEL',
'MOTEL',
], 'HOTEL/MOTEL')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['LAKEFRONT / WATERFRONT / RIVERBANK',
], 'LAKEFRONT/WATERFRONT/RIVERBANK')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['HOSPITAL',
'HOSPITAL BUILDING/GROUNDS',
'HOSPITAL BUILDING / GROUNDS',
], 'HOSPITAL')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['MEDICAL / DENTAL OFFICE',
], 'MEDICAL/DENTAL OFFICE')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['GAS STATION DRIVE/PROP.',
], 'GAS STATION')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['FACTORY',
'FACTORY / MANUFACTURING BUILDING',
'FEDERAL BUILDING',
], 'FACTORY/MANUFACTURING BUILDING')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['GOVERNMENT BUILDING',
'GOVERNMENT BUILDING / PROPERTY',
], 'GOVERNMENT BUILDING/PROPERTY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['CTA "L" PLATFORM',
'CTA "L" TRAIN',
'CTA BUS',
'CTA BUS STOP',
'CTA GARAGE / OTHER PROPERTY',
'CTA PARKING LOT / GARAGE / OTHER PROPERTY',
'CTA PLATFORM',
'CTA PROPERTY',
'CTA STATION',
'CTA SUBWAY STATION',
'CTA TRACKS - RIGHT OF WAY',
'CTA TRAIN',
], 'CTA PROPERTY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['COLLEGE / UNIVERSITY - GROUNDS',
'COLLEGE / UNIVERSITY - RESIDENCE HALL',
'COLLEGE/UNIVERSITY GROUNDS',
'COLLEGE/UNIVERSITY RESIDENCE HALL',
], 'COLLEGE/UNIVERSITY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['APPLIANCE STORE',
'ATHLETIC CLUB',
'AUTO',
'BAR OR TAVERN',
'BARBERSHOP',
'BARBER SHOP/BEAUTY SALON',
'BOWLING ALLEY',
'CAR WASH',
'CLEANING STORE',
'CLUB',
'COMMERCIAL / BUSINESS OFFICE',
'DEPARTMENT STORE',
'DRUG STORE',
'GROCERY FOOD STORE',
'MOVIE HOUSE/THEATER',
'MOVIE HOUSE / THEATER',
'NEWSSTAND',
'POOL ROOM',
'POOLROOM',
'RESTAURANT',
'RETAIL STORE',
'SMALL RETAIL STORE',
'TAVERN',
'TAVERN/LIQUOR STORE',
'YMCA',
'TAVERN / LIQUOR STORE',
'PAWN SHOP',
'AUTO / BOAT / RV DEALERSHIP',
'CONVENIENCE STORE',
'BANQUET HALL',
'FUNERAL PARLOR',
'LAUNDRY ROOM',
'LIQUOR STORE',
'LIVERY AUTO',
'LIVERY STAND OFFICE',
'LOADING DOCK',
'OFFICE',
'WAREHOUSE',
], 'BUSINESS')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['HIGHWAY / EXPRESSWAY',
'EXPRESSWAY EMBANKMENT',
], 'HIGHWAY/EXPRESSWAY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['DELIVERY TRUCK', 'VEHICLE NON-COMMERCIAL',
'VEHICLE-COMMERCIAL', 'VEHICLE - COMMERCIAL',
'VEHICLE - COMMERCIAL: ENTERTAINMENT / PARTY BUS',
'VEHICLE - COMMERCIAL: TROLLEY BUS',
'VEHICLE - DELIVERY TRUCK',
'VEHICLE - OTHER RIDE SERVICE',
'VEHICLE - OTHER RIDE SHARE SERVICE (E.G., UBER, LYFT)',
'VEHICLE - OTHER RIDE SHARE SERVICE (LYFT, UBER, ETC.)',
'VEHICLE NON-COMMERCIAL',
'VEHICLE-COMMERCIAL',
'VEHICLE-COMMERCIAL - ENTERTAINMENT/PARTY BUS',
'VEHICLE-COMMERCIAL - TROLLEY BUS',
'TAXI CAB',
'TAXICAB',
'TRUCK',
'AUTO',
], 'VEHICLE')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['BOAT / WATERCRAFT',
], 'BOAT/WATERCRAFT')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['BANK',
'CREDIT UNION',
'CURRENCY EXCHANGE',
'SAVINGS AND LOAN',
], 'FINANCIAL INST.')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['SPORTS ARENA / STADIUM',
], 'SPORTS ARENA/STADIUM')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['POLICE FACILITY / VEHICLE PARKING LOT',
'POLICE FACILITY/VEH PARKING LOT',
], 'POLICE FACILITY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['PARKING LOT',
'PARKING LOT / GARAGE (NON RESIDENTIAL)',
'PARKING LOT/GARAGE(NON.RESID.)',
], 'PARKING LOT')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['OTHER RAILROAD PROP / TRAIN DEPOT',
'OTHER RAILROAD PROPERTY / TRAIN DEPOT',
'RAILROAD PROPERTY',
], 'RAILROAD PROPERTY')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['OTHER (SPECIFY)',
'VESTIBULE',
'OTHER COMMERCIAL TRANSPORTATION',
], 'OTHER')
crimes_df['Location Description'] = crimes_df['Location Description'].replace(
['NURSING / RETIREMENT HOME',
'NURSING HOME',
'NURSING HOME/RETIREMENT HOME',
], 'NURSING HOME')
return crimes_df
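# An illustrative sketch (not part of the original cleaning code): the long chain of
# `.replace()` calls above can be collapsed into one lookup built from a
# canonical-label -> raw-labels mapping. Only a couple of groups are shown here as
# examples; the group names are the canonical labels already used above.
LOCATION_GROUPS = {
    'HOSPITAL': ['HOSPITAL BUILDING/GROUNDS', 'HOSPITAL BUILDING / GROUNDS'],
    'MEDICAL/DENTAL OFFICE': ['MEDICAL / DENTAL OFFICE'],
    'GAS STATION': ['GAS STATION DRIVE/PROP.'],
}
REPLACEMENT_MAP = {raw: canonical
                   for canonical, raw_labels in LOCATION_GROUPS.items()
                   for raw in raw_labels}
# A single call would then replace the whole chain:
# crimes_df['Location Description'] = crimes_df['Location Description'].replace(REPLACEMENT_MAP)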
|
__all__ = ('AsyncLifoQueue', 'AsyncQueue',)
from collections import deque
from ...utils import copy_docs, copy_func, ignore_frame, to_coroutine
from ..exceptions import CancelledError
from .future import Future
ignore_frame(__spec__.origin, 'result_no_wait', 'raise exception',)
ignore_frame(__spec__.origin, '__aexit__', 'raise exception',)
class AsyncQueue:
"""
An asynchronous FIFO queue.
    ``AsyncQueue`` is an async iterable; to stop iterating over it inside of an `async for` loop without
    raising any specific exception, call `.set_exception(CancelledError())` on it.
Attributes
----------
_exception : `None`, `BaseException`
The exception set as the queue's result to raise, when the queue gets empty.
_loop : ``EventThread``
The loop to what the queue is bound to.
_results : `deque`
        The results of the queue, which can be retrieved by ``.result_no_wait`` or by awaiting the queue.
_set_result_waiters : `None`, `list` of ``Future``
Result setter waiters for the queue to become empty.
_waiter : `None`, ``Future``
        If the queue is empty and its result is already awaited, then this future is set. Its result is set by
        the first ``.set_result`` or ``.set_exception`` call.
"""
__slots__ = ('_exception', '_loop', '_results', '_set_result_waiters', '_waiter',)
def __new__(cls, loop, iterable=None, max_length=None, exception=None):
"""
Creates a new ``AsyncQueue`` with the given parameter.
Parameters
----------
loop : ``EventThread``
The loop to what the created queue will be bound to.
iterable : `None`, `iterable` = `None`, Optional
A preset iterable to extend the queue with.
max_length : `None`, `int` = `None`, Optional
The maximal length of the queue.
exception : `None`, `BaseException` = `None`, Optional
Exception to raise when the queue is empty.
Raises
------
TypeError
If `StopIteration` is given as `exception`.
"""
if (exception is not None):
if isinstance(exception, type):
exception = exception()
if isinstance(exception, StopIteration):
raise TypeError(
f'{exception} cannot be raised to a(n) `{cls.__name__}`.'
)
if iterable is None:
results = deque(maxlen=max_length)
else:
results = deque(iterable, maxlen=max_length)
self = object.__new__(cls)
self._loop = loop
self._results = results
self._waiter = None
self._exception = exception
self._set_result_waiters = None
return self
def set_result(self, element):
"""
        Puts the given `element` on the queue. If the queue is empty and its result is already awaited, feeds it
        to ``._waiter`` instead.
Parameters
----------
element : `Any`
The object to put on the queue.
Returns
-------
        set_result : `int` (`0`, `1`)
            Returns `0` if the result was set instantly and `1` if it was not set (the queue is full).
"""
# should we raise InvalidStateError?
waiter = self._waiter
if waiter is None:
results = self._results
max_length = results.maxlen
if (max_length is None) or (len(results) < max_length):
results.append(element)
set_result = 0
else:
set_result = 1
else:
self._waiter = None
waiter.set_result_if_pending(element)
set_result = 0
return set_result
async def set_result_wait(self, element):
"""
        Puts the given `element` on the queue. If the queue is full, blocks till its elements are exhausted.
This method is an awaitable.
Parameters
----------
element : `Any`
The object to put on the queue.
        Returns
        -------
        set_result : `int` (`0`, `1`, `2`)
            Returns `0` if the result was set instantly, `1` if it could not be set, and `2` if setting the
            result required waiting.
"""
waiter = self._waiter
if waiter is None:
results = self._results
max_length = results.maxlen
if (max_length is None) or (len(results) < max_length):
results.append(element)
set_result = 0
else:
set_result_waiters = self._set_result_waiters
if (set_result_waiters is None):
set_result_waiters = []
self._set_result_waiters = set_result_waiters
waiter = Future(self._loop)
set_result_waiters.append(waiter)
try:
can_set_result = await waiter
except:
try:
set_result_waiters.remove(waiter)
except ValueError:
pass
else:
if not set_result_waiters:
self._set_result_waiters = None
raise
else:
if can_set_result:
results.append(element)
set_result = 2
else:
set_result = 1
else:
self._waiter = None
waiter.set_result_if_pending(element)
set_result = 0
return set_result
def set_exception(self, exception):
"""
        Sets the given `exception` to raise when the queue gets empty. If the queue is empty and its result is
        already awaited, feeds it to ``._waiter`` instead.
Parameters
----------
exception : `None`, `BaseException`
Exception to raise when the queue is empty.
Raises
------
TypeError
If `StopIteration` is given as `exception`.
"""
# should we raise InvalidStateError?
if isinstance(exception, type):
exception = exception()
if isinstance(exception, StopIteration):
raise TypeError(
f'{exception} cannot be raised to a(n) `{self.__class__.__name__}`; {self!r}.'
)
self._exception = exception
waiter = self._waiter
if (waiter is not None):
self._waiter = None
waiter.set_exception_if_pending(exception)
# cancel all waiters
set_result_waiters = self._set_result_waiters
if (set_result_waiters is not None):
self._set_result_waiters = None
while set_result_waiters:
set_result_waiters.pop(0).set_result_if_pending(False)
def __await__(self):
"""
Waits till the next element of the queue is set. If the queue has elements set, yields the next of them, or if
the queue has exception set, raises it.
This method is a generator. Should be used with `await` expression.
Returns
-------
result : `Any`
The next element on the queue.
Raises
------
BaseException
Exception set to the queue, to raise when it is empty.
"""
results = self._results
if results:
return results.popleft()
exception = self._exception
if exception is not None:
raise exception
waiter = self._waiter
if waiter is None:
waiter = Future(self._loop)
self._waiter = waiter
try:
return (yield from waiter)
finally:
self._poll_from_set_result_waiters()
result = to_coroutine(copy_func(__await__))
def _poll_from_set_result_waiters(self):
"""
Polls one future from set result waiters.
"""
set_result_waiters = self._set_result_waiters
if (set_result_waiters is not None):
while True:
set_result = set_result_waiters.pop(0).set_result_if_pending(True)
if (not set_result_waiters):
self._set_result_waiters = None
break
if set_result == 1:
break
def result_no_wait(self):
"""
        Returns the queue's next element without waiting. If the queue has elements set, returns the next of
        them; if the queue has an exception set, raises it; otherwise raises `IndexError`.
Returns
-------
result : `Any`
The next element on the queue.
Raises
------
IndexError
The queue is empty.
BaseException
Exception set to the queue, to raise when it is empty.
"""
results = self._results
if results:
result = results.popleft()
self._poll_from_set_result_waiters()
return result
exception = self._exception
if exception is None:
raise IndexError('The queue is empty')
raise exception
def __repr__(self):
"""Returns the async queue's representation."""
repr_parts = [
self.__class__.__name__,
'([',
]
results = self._results
limit = len(results)
if limit:
index = 0
while True:
element = results[index]
repr_parts.append(repr(element))
index += 1
if index == limit:
break
repr_parts.append(', ')
repr_parts.append(']')
max_length = results.maxlen
if (max_length is not None):
repr_parts.append(', max_length=')
repr_parts.append(repr(max_length))
exception = self._exception
if (exception is not None):
repr_parts.append(', exception=')
repr_parts.append(str(exception))
repr_parts.append(')')
return ''.join(repr_parts)
__str__ = __repr__
def __aiter__(self):
"""
        Async iterating over an ``AsyncQueue`` returns the queue itself.
Returns
-------
self : ``AsyncQueue``
"""
return self
async def __anext__(self):
"""
Waits till the next element of the queue is set. If the queue has elements set, yields the next of them, or if
the queue has exception set, raises it.
If the queue has ``CancelledError`` set as ``._exception``, then raises ``StopAsyncIteration`` to stop the queue
instead.
This method is a coroutine.
Returns
-------
result : `Any`
The next element on the queue.
Raises
------
StopAsyncIteration
If the queue was cancelled with ``CancelledError``.
BaseException
Exception set to the queue, to raise when it is empty.
"""
results = self._results
if results:
result = results.popleft()
self._poll_from_set_result_waiters()
return result
exception = self._exception
if exception is not None:
if type(exception) is CancelledError:
raise StopAsyncIteration from CancelledError
raise exception
waiter = self._waiter
if waiter is None:
waiter = Future(self._loop)
self._waiter = waiter
try:
return (await waiter)
except CancelledError as err:
raise StopAsyncIteration from err
# deque operations
@property
def max_length(self):
"""
Returns the queue's max length.
Returns
-------
max_length: `int`
"""
return self._results.maxlen
def clear(self):
"""
Clears the queue's results.
"""
self._results.clear()
def copy(self):
"""
Copies the queue.
Returns
-------
new : ``AsyncQueue``
"""
new = object.__new__(type(self))
new._loop = self._loop
new._results = self._results.copy()
new._waiter = None
new._exception = self._exception
new._set_result_waiters = None
return new
def reverse(self):
"""
Reverses the queue's actual results.
"""
self._results.reverse()
def __len__(self):
"""
Returns the queue's actual length.
"""
return len(self._results)
if __debug__:
def __del__(self):
"""
If the queue has ``_waiter`` set, silences it.
Notes
-----
This function is only present, when `__debug__` is set as `True`.
"""
waiter = self._waiter
if waiter is not None:
waiter.__silence__()
class AsyncLifoQueue(AsyncQueue):
"""
An asynchronous LIFO queue.
    ``AsyncLifoQueue`` is an async iterable; to stop iterating over it inside of an `async for` loop without
    raising any specific exception, call `.set_exception(CancelledError())` on it.
Attributes
----------
_exception : `None`, `BaseException`
The exception set as the queue's result to raise, when the queue gets empty.
_loop : ``EventThread``
The loop to what the queue is bound to.
_results : `deque`
        The results of the queue, which can be retrieved by ``.result_no_wait`` or by awaiting the queue.
_waiter : `None`, ``Future``
        If the queue is empty and its result is already awaited, then this future is set. Its result is set by
        the first ``.set_result`` or ``.set_exception`` call.
"""
__slots__ = ()
@copy_docs(AsyncQueue.__await__)
def __await__(self):
results = self._results
if results:
return results.pop()
exception = self._exception
if exception is not None:
raise exception
waiter = self._waiter
if waiter is None:
waiter = Future(self._loop)
self._waiter = waiter
return (yield from waiter)
@copy_docs(AsyncQueue.result_no_wait)
def result_no_wait(self):
results = self._results
if results:
return results.pop()
exception = self._exception
if exception is None:
raise IndexError('The queue is empty')
raise exception
@copy_docs(AsyncQueue.__anext__)
async def __anext__(self):
results = self._results
if results:
return results.pop()
exception = self._exception
if exception is not None:
if type(exception) is CancelledError:
raise StopAsyncIteration from CancelledError
raise exception
waiter = self._waiter
if waiter is None:
waiter = Future(self._loop)
self._waiter = waiter
try:
return (await waiter)
except CancelledError as err:
raise StopAsyncIteration from err
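# A minimal usage sketch (assumption, not part of the library): producing and consuming
# with ``AsyncQueue`` as described in its docstring. ``loop`` is assumed to be an
# ``EventThread`` obtained from the surrounding framework; `handle` is a hypothetical
# consumer function.
#
# async def consume(queue):
#     async for element in queue:
#         handle(element)
#
# queue = AsyncQueue(loop)
# queue.set_result('first')
# queue.set_result('second')
# queue.set_exception(CancelledError())  # ends the `async for` without raising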
|
# -*- coding: utf-8 -*-
######################################################
# PROJECT : Sentence Similarity Calculator
# AUTHOR : Tarento Technologies
# DATE : May 05, 2020
######################################################
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline, Model, PipelineModel
from pyspark.ml.feature import RegexTokenizer, NGram, HashingTF, MinHashLSH
from pyspark.sql.types import *
from pyspark.sql.functions import col
'''
This is the scalable ML based code for identifying the sentence similarity
between two datasets. Algorithm used is MinHashLSH.
'''
'''
---------------------------------------
SPARK SESSION CREATION
---------------------------------------
'''
spark = SparkSession \
.builder \
.appName("Sentence_Similarity_Tool") \
.getOrCreate()
spark.conf.set("spark.dynamicAllocation.enabled", "true")
spark.conf.set("spark.executor.cores", 6)
spark.conf.set("spark.dynamicAllocation.minExecutors","3")
spark.conf.set("spark.dynamicAllocation.maxExecutors","6")
LOG4JLOGGER = spark.sparkContext._jvm.org.apache.log4j
LOGGER = LOG4JLOGGER.LogManager.getLogger(__name__)
LOGGER.info("-------------------------------------------------------")
LOGGER.info("Starting the Sentence Similarity Identifier...")
LOGGER.info("-------------------------------------------------------")
# Define the Inputs
#IN_INPUT_FILE = "/Users/TIMAC044/Documents/Anuvaad/sentences_comparison/indian_kanoon_scrapped_sentences.csv"
IN_INPUT_FILE="/user/root/sim-score/input/scrapped_sentences_full/ik_data_final.csv"
#PDF_INPUT_FILE = "/Users/TIMAC044/Documents/Anuvaad/sentences_comparison/sample_indian_kanoon_twisted_sentence_sample.csv"
PDF_INPUT_FILE = "/user/root/sim-score/input/sc_tokenized_sentences/sent.out"
# Schema of the files
SCHEMA_INPUT_FILE = StructType([StructField("sentences", StringType(), True)])
SCRAPPED_INPUT_FILE = spark.read.format("com.databricks.spark.csv") \
    .option("quote", "\"") \
    .option("escape", "\"") \
    .option("header", "false") \
    .option("delimiter", "\n") \
    .schema(SCHEMA_INPUT_FILE) \
    .load(IN_INPUT_FILE)
DF_INPUT_FILE_CLEAN = SCRAPPED_INPUT_FILE.filter("sentences rlike '[A-Z,a-z]'").repartition(100)
DOWNLOAD_PDF_INPUT_FILE = spark.read.format("com.databricks.spark.csv") \
    .option("quote", "\"") \
    .option("escape", "\"") \
    .option("header", "false") \
    .option("delimiter", "\n") \
    .schema(SCHEMA_INPUT_FILE) \
    .load(PDF_INPUT_FILE)
DOWNLOAD_PDF_INPUT_FILE_CLEAN = DOWNLOAD_PDF_INPUT_FILE.filter("sentences rlike '[A-Z,a-z]'").repartition(100)
LOGGER.info("-------------------------------------------------------")
LOGGER.info("Loaded the dataframes...")
LOGGER.info("-------------------------------------------------------")
pipeline = Pipeline(stages=[
RegexTokenizer(
pattern="", inputCol="sentences", outputCol="tokens", minTokenLength=1
),
NGram(n=2, inputCol="tokens", outputCol="ngrams"),
HashingTF(inputCol="ngrams", outputCol="vectors"),
MinHashLSH(inputCol="vectors", outputCol="lsh")
])
model = pipeline.fit(DF_INPUT_FILE_CLEAN)
stored_hashed = model.transform(DF_INPUT_FILE_CLEAN)
landed_hashed = model.transform(DOWNLOAD_PDF_INPUT_FILE_CLEAN)
matched_df = model.stages[-1] \
    .approxSimilarityJoin(stored_hashed, landed_hashed, 0.5, "confidence") \
    .select(col("datasetA.sentences").alias("ik_sentence"),
            col("datasetB.sentences").alias("pdf_sentence"),
            col("confidence"))
LOGGER.info("-------------------------------------------------------")
LOGGER.info("Completed the ML pipeline...")
LOGGER.info("-------------------------------------------------------")
# matched_df.filter("confidence < 0.3").show(20, False)
matched_df.coalesce(1).write \
.format("com.databricks.spark.csv") \
.option("header", "true") \
.mode("overwrite") \
.save("/user/root/sim-score/output/")
LOGGER.info("-------------------------------------------------------")
LOGGER.info("Sentence Similarity File generated succesfully!")
LOGGER.info("-------------------------------------------------------")
|
# Copied to Cyclopath from
# https://priithon.googlecode.com/hg/Priithon/usefulGeo.py
"""
a bunch of 2d geometry functions
points are always defined as y,x tuples or numpy arrays
segments are sequences of pairs-of-nodes
boxes are a sequence of two diagonal edge-points
"""
from __future__ import absolute_import
def geoPointsEq(yx0, yx1, eps=1e-2):
"""
are two points equal
(within eps distance, x,y separately)
"""
d0 = yx0[0] - yx1[0]
d1 = yx0[1] - yx1[1]
return abs(d0)<eps and abs(d1)<eps
def geoSegEqOpposite(s0,s1, eps=1e-2):
"""
    are two segments equal and oppositely directed
"""
return geoPointsEq(s0[0],s1[1], eps) and geoPointsEq(s0[1],s1[0], eps)
def geoPointSeqSide(p, seg):
"""
determine on which side of a segment a given point lies
http://astronomy.swin.edu.au/~pbourke/geometry/insidepoly/
Given a line segment between P0 (x0,y0) and P1 (x1,y1), another point P (x,y) has the following relationship to the line segment.
Compute
(y - y0) (x1 - x0) - (x - x0) (y1 - y0)
if it is less than 0 then P is to the right of the line segment, if greater than 0 it is to the left, if equal to 0 then it lies on the line segment.
"""
y,x = p
(y0,x0),(y1,x1) = seg
return (y - y0)*(x1 - x0) - (x - x0)*(y1 - y0)
#todo: U.mm([e.sidePointSeg(yx,seg) for seg in segs])
# but need to take "inward corners" into account to tell if inside polygon IF not CONVEX
def geoBoxToSegs(edges):
"""
    return list of the 4 segments of the box defined by its two diagonal edge points
"""
(y0,x0),(y1,x1) = edges
if y0>y1:
y0,y1 = y1,y0
if x0>x1:
x0,x1 = x1,x0
return [((y0,x0),(y0,x1)),
((y0,x1),(y1,x1)),
((y1,x1),(y1,x0)),
((y1,x0),(y0,x0))]
def geoPointLineDist(p, seg, testSegmentEnds=False):
"""
Minimum Distance between a Point and a Line
Written by Paul Bourke, October 1988
http://astronomy.swin.edu.au/~pbourke/geometry/pointline/
"""
from numpy import sqrt
y3,x3 = p
(y1,x1),(y2,x2) = seg
dx21 = (x2-x1)
dy21 = (y2-y1)
lensq21 = dx21*dx21 + dy21*dy21
if lensq21 == 0:
#20080821 raise ValueError, "zero length line segment"
dy = y3-y1
dx = x3-x1
return sqrt( dx*dx + dy*dy ) # return point to point distance
u = (x3-x1)*dx21 + (y3-y1)*dy21
u = u / float(lensq21)
x = x1+ u * dx21
y = y1+ u * dy21
if testSegmentEnds:
if u < 0:
x,y = x1,y1
elif u >1:
x,y = x2,y2
dx30 = x3-x
dy30 = y3-y
return sqrt( dx30*dx30 + dy30*dy30 )
def geoPointSegsDist(p, segs):
"""
smallest distance of a point to a sequence of line segments
"""
return min([geoPointLineDist(p,seg, True) for seg in segs])
def geoPointInsideBox(p, edges):
"""
returns True only if p lies inside or on the sides of box
"""
y,x = p
(y0,x0),(y1,x1) = edges
if y0>y1:
y0,y1 = y1,y0
if x0>x1:
x0,x1 = x1,x0
return x0<=x<=x1 and y0<=y<=y1
def geoSeqsBoundingBox(segs):
"""
return corners (LB+TR) of smallest box containing all segments in segs
"""
yMin = 1e100
yMax =-1e100
xMin = 1e100
xMax =-1e100
for s in segs:
(y0,x0),(y1,x1) = s
yMax = max(yMax, y0)
yMin = min(yMin, y0)
yMax = max(yMax, y1)
yMin = min(yMin, y1)
xMax = max(xMax, x0)
xMin = min(xMin, x0)
xMax = max(xMax, x1)
xMin = min(xMin, x1)
from numpy import array
return array((yMin,xMin)),array((yMax,xMax))
def geoPointsBoundingBox(points, intCoords=False):
"""
return corners (LB+TR) of smallest box containing all points
if intCoords:
use int(...) for lower and int(...)+1 for upper bounds
"""
yMin = 1e100
yMax =-1e100
xMin = 1e100
xMax =-1e100
for p in points:
(y0,x0) = p
yMax = max(yMax, y0)
yMin = min(yMin, y0)
xMax = max(xMax, x0)
xMin = min(xMin, x0)
if intCoords:
yMax = int(yMax)+1
xMax = int(xMax)+1
yMin = int(yMin)
xMin = int(xMin)
from numpy import array
return array((yMin,xMin)),array((yMax,xMax))
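# A minimal self-check sketch of the helpers above (illustrative values only, not part of
# the original module).
if __name__ == '__main__':
    seg = ((0.0, 0.0), (0.0, 10.0))  # horizontal segment from x=0 to x=10 at y=0
    assert geoPointLineDist((5.0, 5.0), seg) == 5.0  # point 5 units above the line
    assert geoPointLineDist((5.0, 20.0), seg, testSegmentEnds=True) > 5.0  # clamped to segment end
    assert geoPointInsideBox((1.0, 1.0), ((0.0, 0.0), (2.0, 2.0)))
    assert geoPointsEq((1.0, 2.0), (1.0, 2.0))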
|
from business_logic import exceptions
from six import with_metaclass
class _LogicErrorsMetaclass(type):
"""
Metaclass automatically creating errors registry and setting error code to attribute name.
You should subclass this and set all possible business logic exceptions.
"""
def __init__(cls, name, bases, dict):
super(_LogicErrorsMetaclass, cls).__init__(name, bases, dict)
# dictionary containing all errors by error code
cls._errors = {}
for attr_name, attr in dict.items():
if isinstance(attr, exceptions.LogicException):
# attribute name becomes exception `error_code`
attr.error_code = attr_name
attr.errors = cls._errors
cls._errors[attr_name] = attr
class LogicErrors(with_metaclass(_LogicErrorsMetaclass)):
pass
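# An illustrative usage sketch (assumption): the registry is meant to be used by
# subclassing `LogicErrors`. The concrete error names and the `LogicException`
# constructor arguments below are hypothetical, not taken from the original project.
#
# class OrderErrors(LogicErrors):
#     ORDER_NOT_FOUND = exceptions.LogicException('Order not found')
#     ORDER_ALREADY_PAID = exceptions.LogicException('Order has already been paid')
#
# # After class creation each attribute's `error_code` equals the attribute name,
# # e.g. OrderErrors.ORDER_NOT_FOUND.error_code == 'ORDER_NOT_FOUND', and
# # OrderErrors._errors maps every code to its exception instance.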
|
from LogansRun import *
game = LogansRun();
game.run();
|
from deepl.hacks import calculate_valid_timestamp, generate_id
def test_calculate_valid_timestamp():
assert 10 == calculate_valid_timestamp(timestamp=10, i_count=0)
assert 11 == calculate_valid_timestamp(timestamp=10, i_count=1)
assert 12 == calculate_valid_timestamp(timestamp=10, i_count=2)
assert 12 == calculate_valid_timestamp(timestamp=10, i_count=3)
assert 12 == calculate_valid_timestamp(timestamp=10, i_count=4)
assert 15 == calculate_valid_timestamp(timestamp=10, i_count=5)
assert 12 == calculate_valid_timestamp(timestamp=10, i_count=6)
def test_generate_id():
assert 100_000_000 > generate_id() > 1_000_000
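# An illustrative reference (assumption): one implementation consistent with the
# assertions above, shown only to document the expected behaviour; the real
# `calculate_valid_timestamp` in `deepl.hacks` may differ.
def _reference_calculate_valid_timestamp(timestamp: int, i_count: int) -> int:
    # Round the timestamp up to the next strictly greater multiple of `i_count`;
    # an `i_count` of zero leaves the timestamp unchanged.
    if i_count == 0:
        return timestamp
    return timestamp - timestamp % i_count + i_count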
|
# Write a program that reads an integer and says whether or not it is a prime number.
n = int(input('Enter a number: '))
div = 0
for c in range(1, n + 1):
    if n % c == 0:
        print('\033[33m', end=' ')
        div += 1
    else:
        print('\033[m', end=' ')
    print('{}'.format(c), end=' ')
print('\nThe number {} was divisible {} times.'.format(n, div))
if div == 2:
    print('\033[mThat is why it is \033[36mPRIME\033[m!')
else:
    print('\033[mIT IS NOT PRIME!!')
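# An equivalent, quieter primality check (illustrative sketch): instead of counting every
# divisor of n, it is enough to test candidate divisors up to the square root of n.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True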
|
COLORS = {
"NORMAL": "#FFFFFF", # white
"WALL": "#000000", # black
"START": "#FF00FF", # magenta
"GOAL": "#00FF00", # green
"OPEN": "#FF0000", # red
"CLOSE": "#0000FF", # blue
"BT": "#FFFF00" # yellow
}
class Panel:
def __init__(self, canvas, w_x, h_z, pixel, type="NORMAL"):
self.abs_x = w_x
self.abs_z = h_z
self.pos_x = w_x * pixel + pixel // 2
self.pos_z = h_z * pixel + pixel // 2
self.pixel = pixel
self.canvas = canvas
self.type = type
self.root = None
self.parents = []
self.n = self.m = self.s = 0 # extend for a*
def bind(self):
pixel = self.pixel
self.canvas.create_rectangle(self.abs_x * pixel,
self.abs_z * pixel,
self.abs_x * pixel + pixel,
self.abs_z * pixel + pixel,
fill=COLORS[self.type])
def search_parents(self, tree, size_x, size_z):
if self.abs_x < size_x - 1: # search mid
self.parents.append(tree[self.abs_x + 1][self.abs_z])
if self.abs_z > 0: # left
self.parents.append(tree[self.abs_x + 1][self.abs_z - 1])
if self.abs_z < size_z - 1:
self.parents.append(tree[self.abs_x + 1][self.abs_z + 1])
if self.abs_x > 0:
self.parents.append(tree[self.abs_x - 1][self.abs_z])
if self.abs_z > 0: # left
self.parents.append(tree[self.abs_x - 1][self.abs_z - 1])
if self.abs_z < size_z - 1:
self.parents.append(tree[self.abs_x - 1][self.abs_z + 1])
if self.abs_z < size_z - 1:
self.parents.append(tree[self.abs_x][self.abs_z + 1])
        if self.abs_z > 0:  # neighbour at abs_z - 1 in the same column
self.parents.append(tree[self.abs_x][self.abs_z - 1])
return self.parents
def on_search(self):
if not self.root:
return
self.canvas.create_line(self.root.pos_x, self.root.pos_z, # start
self.pos_x, self.pos_z, # end
width=5,
fill="green"
)
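# An illustrative sketch (assumption, not part of the original class): the eight-neighbour
# enumeration in `search_parents` can also be written with offset pairs, which keeps the
# bounds checks uniform. `tree`, `size_x` and `size_z` have the same meaning as above.
#
# def search_parents_by_offsets(self, tree, size_x, size_z):
#     for dx in (-1, 0, 1):
#         for dz in (-1, 0, 1):
#             if dx == 0 and dz == 0:
#                 continue
#             x, z = self.abs_x + dx, self.abs_z + dz
#             if 0 <= x < size_x and 0 <= z < size_z:
#                 self.parents.append(tree[x][z])
#     return self.parents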
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
# Copyright (C) 2018 HuangYk.
# Licensed under The MIT Lincese.
#
# Filename : temporal_anchor.py
# Author : HuangYK
# Last Modified: 2019-03-25 20:26
# Description :
# ===============================================================
from __future__ import print_function, division, absolute_import
import numpy as np
from numpy.random import randint
class TemporalCenterAnchor(object):
"""
"""
def __init__(self, anchor_num, duration):
self.anchor_num = anchor_num
self.duration = duration
def __call__(self, frame_indices):
vid_length = len(frame_indices)
anchor_interval = vid_length // self.anchor_num
# print('video base: {}, {}'.format(anchor_interval, vid_length))
if self.duration > len(frame_indices):
anchor_out = frame_indices
for index in anchor_out:
if len(anchor_out) >= self.duration:
break
anchor_out.append(index)
out = []
[out.extend(anchor_out) for i in range(self.anchor_num)]
# print('out: {}'.format(out))
return out
else:
anchor_range = [[max(1, i*anchor_interval//2 - self.duration//2),
min(vid_length,
i*anchor_interval//2 + self.duration//2)]
for i in range(1, self.anchor_num+1)]
# print('anchor_range: {}'.format(anchor_range))
for i in range(len(anchor_range)):
if anchor_range[i][0] == 1:
anchor_range[i][1] = 1 + self.duration
out = []
if self.duration == 1:
[out.extend([anchor_start])
for anchor_start, anchor_end in anchor_range]
else:
[out.extend(range(anchor_start, anchor_end))
for anchor_start, anchor_end in anchor_range]
# print('out: {}'.format(out))
return out
class TemporalBeginAnchor(object):
"""
"""
def __init__(self, anchor_num, duration):
self.anchor_num = anchor_num
self.duration = duration
def __call__(self, frame_indices):
vid_length = len(frame_indices)
anchor_interval = vid_length // self.anchor_num
if self.duration > len(frame_indices):
anchor_out = frame_indices
for index in anchor_out:
if len(anchor_out) >= self.duration:
break
anchor_out.append(index)
out = []
[out.extend(anchor_out) for i in range(self.anchor_num)]
return out
else:
anchor_range = [[1+i*anchor_interval,
min(vid_length+1,
1+i*anchor_interval+self.duration)]
for i in range(self.anchor_num)]
out = []
for anchor_start, anchor_end in anchor_range:
anchor_slices = []
if anchor_end == vid_length+1:
                    anchor_slices = list(range(anchor_start, anchor_end))
for index in anchor_slices:
if len(anchor_slices) >= self.duration:
break
anchor_slices.append(index)
else:
anchor_slices = range(anchor_start, anchor_end)
out.extend(anchor_slices)
return out
class TemporalRandomAnchor(object):
"""
"""
def __init__(self, anchor_num, duration, slice_distribution='unit'):
self.anchor_num = anchor_num
self.duration = duration
self.slice_distribution = slice_distribution
def __call__(self, frame_indices):
vid_length = len(frame_indices)
anchor_interval = vid_length // self.anchor_num
if self.duration > len(frame_indices):
anchor_out = frame_indices
for index in anchor_out:
if len(anchor_out) >= self.duration:
break
anchor_out.append(index)
out = []
[out.extend(anchor_out) for i in range(self.anchor_num)]
return out
else:
# assert (anchor_interval-1)>0, 'anchor_interval:{},vid_length:{}'.format(
# anchor_interval, vid_length)
random_offset = anchor_interval-1 if (anchor_interval-1)>0 else 1
anchor_range = [[1+i*anchor_interval,
choose_slice(random_offset, self.slice_distribution)
]
for i in range(self.anchor_num)]
out = []
for interval_start, anchor_offset in anchor_range:
anchor_slices = []
# generate random anchor
anchor_start = interval_start+anchor_offset
anchor_start = 1 if anchor_start == 0 else anchor_start
anchor_end = anchor_start+self.duration
if anchor_end >= vid_length+1:
anchor_slices = list(range(anchor_start, vid_length+1))
for index in anchor_slices:
if len(anchor_slices) >= self.duration:
break
anchor_slices.append(index)
else:
anchor_slices = range(anchor_start, anchor_end)
out.extend(anchor_slices)
return out
class TemporalConseqAnchor(object):
"""
"""
def __init__(self, anchor_num, duration, slice_distribution='unit'):
self.anchor_num = anchor_num
self.duration = duration
self.slice_distribution = slice_distribution
def __call__(self, frame_indices):
vid_length = len(frame_indices)
anchor_interval = vid_length
if self.duration > len(frame_indices):
anchor_out = frame_indices
for index in anchor_out:
if len(anchor_out) >= self.duration:
break
anchor_out.append(index)
out = []
[out.extend(anchor_out) for i in range(self.anchor_num)]
return out
else:
if (anchor_interval-self.duration*self.anchor_num)>0:
random_offset = anchor_interval-self.duration*self.anchor_num
else:
random_offset = 1
anchor_start = choose_slice(random_offset, self.slice_distribution)
anchor_start = anchor_start if anchor_start>0 else 1
anchor_end = anchor_start+self.duration*self.anchor_num
if anchor_end >= vid_length+1:
anchor_slices = list(range(anchor_start, vid_length+1))
for index in anchor_slices:
if len(anchor_slices) >= self.duration*self.anchor_num:
break
anchor_slices.append(index)
else:
anchor_slices = range(anchor_start, anchor_end)
return anchor_slices
def choose_slice(slice_range, distribution='unit'):
assert distribution in ['unit', 'norm']
if distribution == 'unit':
return randint(slice_range)
elif distribution == 'norm':
return int((np.random.randn(1)*0.6+slice_range/2)[0])
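# A minimal usage sketch (illustrative values, not part of the original module):
if __name__ == '__main__':
    frames = list(range(1, 101))  # a 100-frame video, 1-based frame indices
    begin = TemporalBeginAnchor(anchor_num=3, duration=8)
    center = TemporalCenterAnchor(anchor_num=3, duration=8)
    # Each sampler returns anchor_num * duration = 24 frame indices here.
    print(len(begin(frames)), len(center(frames)))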
|
from . import build_features
from . import Preprocessor
from . import CorpusLoader
from . import PickledCorpusReader
|
import time
import keyboard
def hey_siri(query) -> None:
"""
Simulate key presses to interact with Siri
"""
keyboard.press('command+space')
time.sleep(0.3)
keyboard.release('command+space')
time.sleep(0.2)
keyboard.write(query)
keyboard.send('enter')
print(query)
|
import os
from typing import Tuple
from tensorflow.python.keras.optimizers import SGD
from tqdm import tqdm
from dlex.configs import Configs, Params, ModuleConfigs
from dlex.datasets.torch import Dataset
from dlex.tf.models import BaseModel
from dlex.utils.logging import logger
from dlex.utils.model_utils import add_result
from dlex.tf.utils.model_utils import get_model, get_dataset
def evaluate(
model: BaseModel,
dataset: Dataset,
params: Params,
save_result=False,
output=False,
summary_writer=None) -> Tuple[dict, dict, list]:
total = {key: 0 for key in params.test.metrics}
acc = {key: 0. for key in params.test.metrics}
outputs = []
for batch in tqdm(dataset.all(), desc="Eval"):
y_pred, others = model.infer(batch)
for key in params.test.metrics:
_acc, _total = dataset.evaluate_batch(y_pred, batch, metric=key)
acc[key] += _acc
total[key] += _total
if output:
for i, predicted in enumerate(y_pred):
str_input, str_ground_truth, str_predicted = dataset.format_output(
predicted, batch.item(i))
outputs.append('\n'.join([str_input, str_ground_truth, str_predicted]))
if summary_writer is not None:
model.write_summary(summary_writer, batch, (y_pred, others))
result = {
"epoch": "%.1f" % model.epoch,
"result": {key: acc[key] / total[key] for key in acc}
}
best_result = add_result(params, result) if save_result else None
return result, best_result, outputs
def main(argv=None):
"""Main program."""
configs = Configs(mode="eval", argv=argv)
params, args = configs.params, configs.args
dataset = get_dataset(params)
dataset_test = dataset.get_keras_wrapper("validation")
# Init model
model_cls = get_model(params)
assert model_cls
model = model_cls(params, dataset_test).model
model.compile(
optimizer=SGD(0.1, momentum=0.9),
loss="categorical_crossentropy",
metrics=["acc"])
# checkpoint
checkpoint_path = os.path.join(configs.checkpoint_dir, "latest.h5")
logger.info("Load checkpoint from %s" % checkpoint_path)
model.load_weights(checkpoint_path)
res = model.evaluate_generator(
dataset_test.generator,
steps=len(dataset_test) // params.train.batch_size,
verbose=1
)
print(res)
if __name__ == "__main__":
main()
|
"""
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
"""
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods for working with entity types in the ontology."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import typing
from typing import Optional, Tuple
from yamlformat.validator import base_lib
from yamlformat.validator import config_folder_lib
from yamlformat.validator import field_lib
from yamlformat.validator import findings_lib
ENTITY_TYPE_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*(?:_[a-zA-Z0-9]+)*$')
FIELD_INCREMENT_STRIPPER_REGEX = re.compile(
r'(^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*)((?:_[0-9]+)+)$')
FieldParts = typing.NamedTuple('FieldParts',
[('namespace', str), ('field', str),
('increment', str)])
OptWrapper = typing.NamedTuple('OptWrapper', [('field', FieldParts),
('optional', bool)])
TypeParts = typing.NamedTuple('TypeParts', [('namespace', str),
('typename', str)])
EntityIdByEntry = typing.NamedTuple('EntityIdByEntry', [('namespace', str),
('typename', str)])
def SeparateFieldNamespace(qualified_field_name: str) -> Tuple[str, str]:
"""Returns the namespace and its field name as separate values or an Error.
Args:
qualified_field_name: a qualified field string like `HVAC/run_status`
Throws:
    TypeError: if the field is not qualified
    ValueError: if the field contains more than one namespace separator
"""
fqf_parsed = qualified_field_name.split('/')
if len(fqf_parsed) == 1:
raise TypeError('Type improperly formatted, a namespace is missing: ',
fqf_parsed)
if len(fqf_parsed) > 2:
raise ValueError('Type improperly formatted, too many separators: ',
fqf_parsed)
return fqf_parsed[0], fqf_parsed[1]
def SeparateFieldIncrement(field_name) -> Tuple[str, str]:
"""Takes as an input a field_name (string) and returns a tuple of strings.
  The first element is the standard field name and the second is its increment when available.
For example: zone_occupancy_status_1 -> [zone_occupancy_status, 1]
Args:
field_name: the field name to parse.
Returns:
A tuple of string, the standard field name and its increment if available.
"""
field_name_part = field_name
increment_part = ''
match = FIELD_INCREMENT_STRIPPER_REGEX.match(field_name)
if match:
field_name_part = match.group(1)
increment_part = match.group(2)
return field_name_part, increment_part
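# Illustrative examples of the two helpers above (assumed typical inputs):
#   SeparateFieldNamespace('HVAC/run_status') -> ('HVAC', 'run_status')
#   SeparateFieldIncrement('zone_occupancy_status_1') -> ('zone_occupancy_status', '_1')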
class EntityTypeUniverse(findings_lib.Findings):
"""Helper class to represent the defined universe of EntityTypes.
Only contains valid EntityTypes.
  Attributes:
namespace_folder_map: a map of namespace names to EntityTypeFolders.
type_namespaces_map: a map of type names to TypeNamespaces.
type_ids_map: maps type IDs to entity types. Contains all valid types w/IDs.
"""
def __init__(self, entity_type_folders):
"""Init.
Args:
entity_type_folders: list of EntityTypeFolder objects parsed from files.
"""
super(EntityTypeUniverse, self).__init__()
self.namespace_folder_map = {}
self.type_namespaces_map = {}
self.type_ids_map = {}
self._BuildNamespaceFolderMap(entity_type_folders)
self._BuildTypeMaps(
[folder.local_namespace for folder in entity_type_folders])
def GetEntityType(self, namespace_name, typename):
"""Finds entity_type by namespace and typename and returns it or None."""
if namespace_name not in self.type_namespaces_map:
return None
return self.type_namespaces_map[namespace_name].GetType(typename)
def GetNamespace(self, namespace_name):
"""Finds namespace in the universe by name and returns it or None."""
return self.type_namespaces_map.get(namespace_name, None)
def GetNamespaces(self):
"""Get the entity type namespace objects in this universe.
Returns:
A list of EntityTypeNamespace objects
"""
return list(self.type_namespaces_map.values())
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for folder in self.namespace_folder_map.values():
findings += folder.GetFindings(filter_old_warnings)
return findings
def _BuildTypeMaps(self, type_namespaces):
"""Creates a dict mapping namespace strings to TypeNamespace objects.
Sets the self.type_namespaces_map attribute of the class.
Args:
type_namespaces: a list of TypeNamespace objects.
Raises:
RuntimeError: if assumptions about internal data structures are violated.
"""
for type_namespace in type_namespaces:
self.type_namespaces_map[type_namespace.namespace] = type_namespace
for entity_type in type_namespace.valid_types_map.values():
if entity_type.uid:
if entity_type.uid in self.type_ids_map:
dup_id_entry = self.type_ids_map[entity_type.uid]
dup_id_type = self.GetEntityType(dup_id_entry.namespace,
dup_id_entry.typename)
if dup_id_type is None:
raise RuntimeError('Duplicate type with uid ' + entity_type.uid +
' should always be mapped')
entity_type.AddFinding(
findings_lib.DuplicateIdsError(type_namespace.namespace,
entity_type, dup_id_type))
dup_id_type.AddFinding(
findings_lib.DuplicateIdsError(dup_id_entry.namespace,
dup_id_type, entity_type))
self.type_ids_map[entity_type.uid] = EntityIdByEntry(
namespace=type_namespace.namespace, typename=entity_type.typename)
def _BuildNamespaceFolderMap(self, type_folders):
"""Creates a dict mapping namespace strings to EntityTypeFolder objects.
Sets the self.namespace_folder_map attribute of the class.
Args:
type_folders: a list of EntityTypeFolder objects.
"""
for folder in type_folders:
self.namespace_folder_map[folder.local_namespace.namespace] = folder
class EntityTypeFolder(config_folder_lib.ConfigFolder):
"""Class representing a namespace folder of entity types.
Class fully validates all entity types defined within the namespace folder,
collects issues found, and stores all valid entity types.
Attributes:
local_namespace: TypeNamespace object representing this namespace.
"""
def __init__(self, folderpath, field_universe=None):
"""Init.
Args:
folderpath: required string with full path to the folder containing entity
type files. Path should be relative to google3/ and have no leading or
trailing /.
field_universe: optional FieldsUniverse object.
"""
super(EntityTypeFolder, self).__init__(folderpath,
base_lib.ComponentType.ENTITY_TYPE)
self.local_namespace = TypeNamespace(self._namespace_name, field_universe)
def Finalize(self):
"""Call to complete entity creation after all types are added."""
self.local_namespace.QualifyParentNames()
def _AddFromConfigHelper(self, document, context):
for type_name in document:
new_type = self._ConstructType(type_name, document[type_name],
context.filepath)
self._AddType(new_type)
def _ConstructField(self, local_field_names, optional, output_array):
for qualified_field_name in local_field_names:
field_ns, raw_field_name = field_lib.SplitFieldName(qualified_field_name)
std_field_name, increment = SeparateFieldIncrement(raw_field_name)
# Field will look local if undefined, but we'll catch the error later
      # because we do explicit existence checks and it will fail
# TODO(berkoben) refactor so validation happens in an order that
# prevents this logic lint
field_ns = self.local_namespace.GetQualifiedNamespace(
field_ns, std_field_name)
output_array.append(
OptWrapper(
field=FieldParts(
namespace=field_ns, field=std_field_name,
increment=increment),
optional=optional))
def _ConstructType(self, type_name, type_contents, filepath):
"""Reads a entity type config block and generates an EntityType object."""
description = ''
parents = None
local_field_names = None
opt_local_field_names = None
is_abstract = False
is_canonical = False
uid = None
expected_keys = set([
'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'id',
'is_canonical'
])
if 'description' in type_contents:
description = type_contents['description']
if 'implements' in type_contents:
parents = type_contents['implements']
if 'uses' in type_contents:
local_field_names = type_contents['uses']
if 'opt_uses' in type_contents:
opt_local_field_names = type_contents['opt_uses']
if 'is_abstract' in type_contents:
is_abstract = type_contents['is_abstract']
if 'is_canonical' in type_contents:
is_canonical = type_contents['is_canonical']
if 'id' in type_contents:
uid = type_contents['id']
# Generate tuples to represent each field
fq_lfn = []
if local_field_names:
self._ConstructField(local_field_names, False, fq_lfn)
if opt_local_field_names:
self._ConstructField(opt_local_field_names, True, fq_lfn)
entity_type = EntityType(
filepath=filepath,
typename=type_name,
description=description,
parents=parents,
local_field_tuples=fq_lfn,
is_abstract=is_abstract,
inherited_fields_expanded=False,
is_canonical=is_canonical,
uid=uid,
namespace=self.local_namespace)
# Add errors to type if there's anything extra in the block. We add to the
# entity type because an extra key here is likely a typo in a real key name
# that would result in information being lost from the type.
for key in type_contents:
if key not in expected_keys:
entity_type.AddFinding(
findings_lib.UnrecognizedKeyError(key, entity_type.file_context))
return entity_type
def _AddType(self, entity_type):
"""Adds entity_type if it is fully valid.
If formatting is correct, continues on to field validation.
Records all findings in object.
Args:
entity_type: EntityType object.
Returns:
True if the entity type was successfully validated and added. False
otherwise.
"""
if not entity_type.IsValid():
self.AddFindings(entity_type.GetFindings())
return False
return self.local_namespace.InsertType(entity_type)
class TypeNamespace(findings_lib.Findings):
"""Class representing a namespace of entity types.
Attributes:
namespace: string
valid_types_map: Dict mapping typename strings to EntityType objects.
"""
def __init__(self, namespace, field_universe=None):
super(TypeNamespace, self).__init__()
self.namespace = namespace
self._field_universe = field_universe
self.valid_types_map = {}
self._parents_qualified = False
def _GetDynamicFindings(self, filter_old_warnings):
findings = []
for entity_type in self.valid_types_map.values():
findings += entity_type.GetFindings(filter_old_warnings)
return findings
def GetType(self, typename):
return self.valid_types_map.get(typename, None)
def InsertType(self, entity_type):
"""Validate that declared fields are defined.
Adds type if valid and unique.
Findings for non-validated fields are applied to this TypeNamespace.
Args:
entity_type: entity to attempt to add.
Returns:
True if entity was added successfully.
Raises:
RuntimeError: if this is called after qualifying parent names
"""
if self._parents_qualified:
raise RuntimeError('Cannot add types after Qualifying parents')
if self._ValidateFields(entity_type):
typename = entity_type.typename
mapped_entity_type = self.valid_types_map.get(typename)
if mapped_entity_type is None:
self.valid_types_map[typename] = entity_type
return True
# entity_type is a duplicate type
self.AddFinding(
findings_lib.DuplicateEntityTypeDefinitionError(
self, entity_type, mapped_entity_type.file_context))
return False
return False
def GetQualifiedNamespace(self, field_ns, field_name):
"""Returns the namespace name for this field.
Args:
field_ns: namespace of field as parsed from the config
field_name: unqualified field name string
Returns:
The fully qualified field string.
"""
if not field_ns and self.IsLocalField(field_name):
return self.namespace
return field_ns
def _BuildQualifiedParentTuple(self, parent_name):
"""Creates the two-part parent tuple with a fully-qualified namespace.
Args:
parent_name: string as specified in the config file.
Returns:
A TypeParts tuple representing this parent.
"""
namespace_name = self.namespace
split = parent_name.split('/')
if len(split) != 2:
if not self.GetType(parent_name):
# parent is in the global namespace
namespace_name = ''
else:
namespace_name = split[0]
parent_name = split[1]
return TypeParts(namespace=namespace_name, typename=parent_name)
def QualifyParentNames(self):
"""Sets parents attribute of this namespace with fully qualified names."""
if self._parents_qualified:
return
for entity_type in self.valid_types_map.values():
fq_tuplemap = {}
for parent in entity_type.unqualified_parent_names:
fq_tuple = self._BuildQualifiedParentTuple(parent)
fq_name = '{0}/{1}'.format(fq_tuple.namespace, fq_tuple.typename)
fq_tuplemap[fq_name] = fq_tuple
entity_type.parent_names = fq_tuplemap
self._parents_qualified = True
def IsLocalField(self, field_name):
"""Returns true if this unqualified field is defined in the namespace.
Args:
field_name: an unqualified field name with no leading '/'
"""
if not self._field_universe:
return False
return self._field_universe.IsFieldDefined(field_name, self.namespace)
def _ValidateFields(self, entity):
"""Validates that all fields declared by entity are defined."""
# if field_universe is not defined just return true
if not self._field_universe:
return True
valid = True
for field_tuple in entity.local_field_names.values():
if not self._ValidateField(field_tuple.field, entity):
valid = False
return valid
def _ValidateField(self, field_tuple, entity):
"""Validates that field declared by entity is defined.
Field formatting has already been validated.
Findings are saved on the TypeNamespace.
Args:
field_tuple: tuple representing a fully qualified field
entity: EntityType
Returns:
True if field is defined.
"""
if not self._field_universe.IsFieldDefined(field_tuple.field,
field_tuple.namespace):
self.AddFinding(
findings_lib.UndefinedFieldError(entity, field_tuple.field))
return False
return True
def BuildQualifiedField(opt_tuple):
field_tuple = opt_tuple.field
return '{0}/{1}{2}'.format(field_tuple.namespace, field_tuple.field,
field_tuple.increment)
class EntityType(findings_lib.Findings):
"""Creates an EntityType object from a set of values describing the type.
Attributes:
file_context: FileContext object containing file info.
typename: string.
description: string.
parent_names: a list of parent typename strings.
local_field_names: the local set of standard field names
inherited_field_names: the set of inherited field names. Is always assigned
to an empty set at init, to be expanded later.
inherited_fields_expanded: boolean.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
Returns:
An instance of the EntityType class.
"""
def __init__(self,
begin_line_number=0,
filepath='',
typename='',
description='',
parents=None,
local_field_tuples=None,
is_abstract=False,
inherited_fields_expanded=False,
is_canonical=False,
uid=None,
namespace=None):
"""Init.
Args:
begin_line_number: int. Starting line number for the entity type
definition.
filepath: string. google3 path to the file defining the type.
typename: required string.
description: required string.
parents: list of parent typename strings.
local_field_tuples: list of OptWrapper tuples
is_abstract: boolean indicating if this is an abstract type.
inherited_fields_expanded: boolean. Should be false at init.
is_canonical: boolean indicating if this is a curated canonical type.
uid: the database ID string of this type if uploaded
namespace: a reference to the namespace object the entity belongs to
"""
super(EntityType, self).__init__()
self.file_context = findings_lib.FileContext(
begin_line_number=begin_line_number, filepath=filepath)
self.typename = typename
self.description = description
self.namespace = namespace
self.local_field_names = {}
local_field_names = []
if local_field_tuples:
local_field_names = [
BuildQualifiedField(opt_parts) for opt_parts in local_field_tuples
]
for i, lfn in enumerate(local_field_names):
self.local_field_names[lfn] = local_field_tuples[i]
self.inherited_field_names = {}
self.inherited_fields_expanded = inherited_fields_expanded
if parents is None:
parents = []
self.parent_names = None
self.parent_name_tuples = None
self.unqualified_parent_names = parents
self._all_fields = None
self._has_optional_fields = None
self.is_abstract = is_abstract
self.is_canonical = is_canonical
self.uid = uid
# TODO(berkoben) update this method to use tuples if possible
self._ValidateType(local_field_names)
def HasOptionalFields(self, run_unsafe=False):
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type has not been expanded')
if self._has_optional_fields is not None:
return self._has_optional_fields
fields = self.GetAllFields()
for field in fields.values():
if field.optional:
self._has_optional_fields = True
return self._has_optional_fields
self._has_optional_fields = False
return self._has_optional_fields
def GetAllFields(self, run_unsafe=False):
"""Returns the expanded set of fields for this type.
Args:
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
A dictionary of fully qualified strings representing fields in the type to
OptWrapper tuples representing the contents of the field.
Raises:
RuntimeError: if fields have not yet been expanded.
"""
if not (self.inherited_fields_expanded or run_unsafe):
raise RuntimeError('Type {0} has not been expanded'.format(self.typename))
if self._all_fields is None:
tmp = self.local_field_names.copy()
tmp.update(self.inherited_field_names)
if run_unsafe:
return tmp
self._all_fields = tmp
return self._all_fields
def HasFieldAsWritten(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> bool:
"""Returns true if a valid config file value maps to a field in the type.
Accepts a field name as written in a configuration file
referencing this type. The method applies context-aware namespace
omission (i.e. referencing a field without its namespace) to identify the
field regardless of the namespace and syntax variation.
    Note: to minimize redundancy, this method simply wraps
    `GetFieldFromConfigText()`. If your application also needs the `Field`, use
    that method instead to eliminate redundant processing.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
True if the Field is defined on the type. False otherwise.
"""
return self.GetFieldFromConfigText(fieldname_as_written,
run_unsafe) is not None
def GetFieldFromConfigText(self,
fieldname_as_written: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` provided string validates against the entity.
Accepts a field name as written in a configuration file
referencing this type. The method applies all shorthanding rules to identify
the field regardless of the namespace and syntax variation.
Args:
fieldname_as_written: string verbatim from a building or ontology config
run_unsafe: set true to allow calls before parent type fields are expanded
Returns:
`OptWrapper` if field is present, None otherwise
"""
try:
# Check the field as if it's fully qualified.
return self.GetField(fieldname_as_written, run_unsafe)
except TypeError:
pass
# Field is unqualified so it is either global or type-namespace-local
# Check for a locally defined field first using type's namespace
field = self._GetField(
self.namespace.namespace + '/' + fieldname_as_written, run_unsafe)
if not field:
# Check field as if it's in the global namespace
field = self._GetField('/' + fieldname_as_written, run_unsafe)
return field
def HasField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> bool:
"""Returns True if field string validates against the entity's fields.
Args:
fully_qualified_fieldname: a fully qualified names for example:
"HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Throws:
TypeError: if the field is not fully qualified
"""
return self.GetField(fully_qualified_fieldname, run_unsafe) is not None
def GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
"""Returns `OptWrapper` if field string validates against the entity.
Args:
fully_qualified_fieldname: a fully qualified names for example:
"HVAC/run_status_1".
run_unsafe: set true to run against a type before fields are fully
expanded. Running in this mode does not memoize the result.
Returns:
`OptWrapper` if field is present, None otherwise
Throws:
TypeError: if the field is not fully qualified
"""
# Throws an error in the case that this isn't a fully qualified field
_, _ = SeparateFieldNamespace(fully_qualified_fieldname)
return self._GetField(fully_qualified_fieldname, run_unsafe)
def _GetField(self,
fully_qualified_fieldname: str,
run_unsafe: bool = False) -> Optional[OptWrapper]:
return self.GetAllFields(run_unsafe).get(fully_qualified_fieldname)
def _ValidateType(self, local_field_names):
"""Validates that the entity type is formatted correctly.
Checks for formatting and duplicate fields and parents.
Records any errors found.
Args:
local_field_names: list of local field names for the type.
"""
# Make sure the typename is non-empty.
if not self.typename:
self.AddFinding(findings_lib.MissingTypenameError(self))
elif not isinstance(self.typename, str):
self.AddFinding(
findings_lib.IllegalKeyTypeError(self.typename, self.file_context))
elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):
self.AddFinding(
findings_lib.InvalidTypenameError(self.typename, self.file_context))
# Make sure the type description is non-empty.
if not self.description:
self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))
# Check for duplicate local fields.
# this check is case insensitive to catch dupes earlier in the event that
# we stop explicitly rejecting upper case characters
check_fields = set()
for field in local_field_names:
field_lower = field.lower()
if field_lower in check_fields:
self.AddFinding(findings_lib.DuplicateFieldError(self, field))
continue
check_fields.add(field_lower)
# TODO(berkoben): Add more checks to validate fields in isolation
# (in case we don't have a field set to check against)
# (i.e. check for chirality, formatting. Could use actual Field objects)
# Check formatting of field name
if len(field.split('/')) > 2:
self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))
# Check for duplicate parent names.
parent_names_check = set()
for parent_name in self.unqualified_parent_names:
if parent_name in parent_names_check:
self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))
continue
parent_names_check.add(parent_name)
# Check formatting of parent name
if len(parent_name.split('/')) > 2:
self.AddFinding(
findings_lib.UnrecognizedParentFormatError(self, parent_name))
# Enforce that the inherited_fields_expanded field is not set
if self.inherited_fields_expanded:
self.AddFinding(findings_lib.InheritedFieldsSetError(self))
|
from dataclasses import dataclass, field
from typing import List
from collections import defaultdict
import numpy as np
from logzero import logger
from interval import interval
from sklearn.neighbors import KernelDensity
from BITS.plot.plotly import make_line, make_rect, make_hist, make_scatter, make_layout, show_plot
def read_to_ulens(read, min_ulen, max_ulen):
"""Extract units of [`min_ulen`..`max_ulen`] bp length from `read.units`."""
return [unit.length for unit in read.units
if min_ulen <= unit.length <= max_ulen]
def read_to_ulens_in_intvls(read, intvls):
"""Extract units within `intvls` from `read.units`."""
return [unit.length for unit in read.units
if unit.length in intvls]
def stratify_by_covered_length(reads, min_ulen, max_ulen):
"""Split `reads` into `List[reads]` according to the total unit length for every 1000 bp."""
stratified_reads = defaultdict(list) # {total_unit_length_in_kb: reads}
for read in reads:
stratified_reads[sum(read_to_ulens(read, min_ulen, max_ulen)) // 1000].append(read)
return stratified_reads
@dataclass(eq=False)
class TRReadFilter:
"""Class for extracting putative centromeric reads from `List[TRRead]` by using an assumption
that centromeric TR units must be abundant.
    Before executing `run()`, it is recommended to find the best parameters by looking at the unit length
    distribution with `hist_unit_lengths()` via Jupyter Notebook.
Workflow example:
> f = TRReadFilter(your_params)
> f.hist_unit_lengths(tr_reads, x_min, x_max) # `tr_reads` is output of datruf
> # Modify `[min|max]_ulen` and `min_covered_length` here as you like
> f.find_peak_ulens(tr_reads)
> # Modify `band_width`, `min_density`, `deviation` here as you like
      > centromere_reads = f.run(tr_reads)
optional arguments:
      @ min_ulen <int> [300]
          : Peaks of the unit length are detected from the range [`min_ulen`..`max_ulen`].
      @ max_ulen <int> [400]
      @ min_covered_length <int> [2000]
          : Reads not covered by more than `min_covered_length` bp of `min_ulen`-`max_ulen` bp units
          : are not extracted.
      @ band_width <float> [0.5]
          : Parameter for peak detection. A smaller value makes peak detection more sensitive.
      @ min_density <float> [0.01]
          : Minimum density (= relative frequency) required for peak unit lengths.
      @ deviation <float> [0.015]
          : Units inside the range [`peak_ulen * (1 - deviation)`..`peak_ulen * (1 + deviation)`]
            are extracted.
"""
min_ulen : int = 300
max_ulen : int = 400
min_covered_length : int = 2000
band_width : float = 0.5
min_density : float = 0.01
deviation : float = 0.015
peak_ulens : List[int] = field(init=False, default=None)
peak_intvls : interval = field(init=False, default=None)
def __post_init__(self):
if self.min_ulen < 30:
logger.warn(f"The value `min_ulen={self.min_ulen}` is very small and "
f"the calculation might be stacked.")
def run(self, tr_reads):
# Find peak unit length(s) and interval(s)
self.find_peak_ulens(tr_reads, show_density=False)
# Extract reads using `peak_intvls`
centromere_reads = \
list(filter(lambda read: \
sum(read_to_ulens_in_intvls(read, self.peak_intvls)) >= self.min_covered_length,
tr_reads))
logger.info(f"{len(tr_reads)} TR reads -> {len(centromere_reads)} centromere reads")
return centromere_reads
def hist_unit_lengths(self, tr_reads, x_min, x_max, bin_size=1,
width=None, height=None, x_range=None, y_range=None,
log_scale=False, out_fname=None):
"""Show histogram of the unit length. Counts are stratified by the total length of
`min_ulen`-`max_ulen` bp units in a read. Only [`x_min`..`x_max`] bp units are plotted.
"""
# Stacked histogram of unit length for each list of stratified reads
stratified_reads = stratify_by_covered_length(tr_reads, self.min_ulen, self.max_ulen)
traces = [make_hist([ulen for read in sreads for ulen in read_to_ulens(read, x_min, x_max)],
bin_size=bin_size,
name=f"{covered_kb}-{covered_kb + 1}kb-covered reads")
for covered_kb, sreads in sorted(stratified_reads.items())
if covered_kb >= self.min_covered_length // 1000]
# Show peak intervals if given
shapes = []
if self.peak_intvls is not None:
shapes += [make_rect(start, 0, end, 1,
fill_col="gray", opacity=0.3, yref="paper", layer="above")
for peak_intvl in self.peak_intvls.components
for start, end in peak_intvl]
layout = make_layout(width=width, height=height,
title=(f"Stratified by reads according to the total length of "
f"units of {self.min_ulen}-{self.max_ulen} bp"),
x_title="Unit length [bp]", y_title="Unit count",
x_range=x_range, y_range=y_range,
shapes=shapes)
layout["barmode"] = "stack"
if log_scale:
layout["yaxis_type"] = "log"
show_plot(traces, layout, out_fname=out_fname)
def find_peak_ulens(self, tr_reads, show_density=True):
# Aggregate all unit lengths of [`min_ulen`..`max_ulen`] bp from reads covered
# more than `min_covered_length` bp by such units
all_ulens = []
for read in tr_reads:
ulens = read_to_ulens(read, self.min_ulen, self.max_ulen)
if sum(ulens) >= self.min_covered_length:
all_ulens += ulens
        # Smooth the unit length distribution by kernel density estimation
ulen_dens = self.smooth_distribution(all_ulens)
if show_density:
self.plot_density(ulen_dens)
# Find peak unit length(s)
self.peak_ulens = self.find_peaks(ulen_dens)
logger.info("Peak unit lengths: " + ', '.join([f"{peak_ulen} bp"
for peak_ulen in self.peak_ulens]))
# Compute peak unit length interval(s) from peak unit length(s)
self.peak_intvls = interval(*[[-(- peak_ulen * (1. - self.deviation) // 1),
int(peak_ulen * (1. + self.deviation))]
for peak_ulen in self.peak_ulens])
logger.info("Peak intervals: " + ', '.join([f"{start}-{end} bp"
for peak_intvl in self.peak_intvls.components
for start, end in peak_intvl]))
def smooth_distribution(self, ulens):
"""Smooth the unit length distribution. Filtering of `unit_lens` must be finished in advance."""
return np.exp(KernelDensity(kernel="gaussian", bandwidth=self.band_width)
.fit(np.array(ulens).reshape(-1, 1))
.score_samples(np.arange(self.min_ulen, self.max_ulen + 1).reshape(-1, 1)))
def plot_density(self, dens):
show_plot([make_scatter(np.arange(self.min_ulen, self.max_ulen + 1), dens,
mode="lines", show_legend=False)],
make_layout(x_title="Unit length [bp]", y_title="Density by KDE",
shapes=[make_line(self.min_ulen, self.min_density,
self.max_ulen + 1, self.min_density,
width=2, layer="above")]))
def find_peaks(self, dens):
"""Detect peaks from the density data `dens`, a list of densities."""
return [self.min_ulen + i for i in range(1, self.max_ulen - self.min_ulen)
if dens[i] > dens[i - 1] and dens[i] > dens[i + 1] and dens[i] >= self.min_density]
|
# -*- coding: utf-8 -*-
import math
def func():
"""
定义函数
:return:
"""
print 'x'
def nop():
"""
空函数
:return:
"""
pass
# A function can return multiple values
def move(x, y, step, angle=0):
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
x, y = move(100, 100, 60, math.pi / 6)
print(x, y)
# When returning multiple values, the function actually returns a tuple; the parentheses can be omitted.
# Multiple variables can also receive the tuple, with values assigned to each by position.
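# For example, the returned tuple can also be kept whole and unpacked later:
r = move(100, 100, 60, math.pi / 6)  # r is the tuple (nx, ny)
nx, ny = r                           # unpack by position
print(nx, ny)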
|
from tests.integration.test_base import QuickbooksTestCase
from datetime import datetime
from intuitquickbooks.objects.detailline import SalesItemLineDetail, \
DiscountLineDetail, SalesItemLine, DiscountLine
from intuitquickbooks.objects.tax import TxnTaxDetail
from intuitquickbooks.objects.customer import Customer
from intuitquickbooks.objects.base import Address, EmailAddress, CustomerMemo, Ref
from intuitquickbooks.objects.estimate import Estimate
class EstimateTest(QuickbooksTestCase):
def test_create(self):
self.customer = Customer.all(max_results=1, qb=self.qb_client)[0]
estimate = Estimate()
estimate.TotalAmt = 31.5
estimate.ApplyTaxAfterDiscount = False
estimate.PrintStatus = "NeedToPrint"
estimate.EmailStatus = "NotSet"
estimate.BillAddr = Address()
estimate.BillAddr.Line1 = "65 Ocean Dr."
estimate.BillAddr.City = "Half Moon Bay"
estimate.BillAddr.CountrySubDivisionCode = "CA"
estimate.BillAddr.PostalCode = "94213"
estimate.BillAddr.Lat = "37.4300318"
estimate.BillAddr.Long = "-122.4336537"
estimate.ShipAddr = Address()
estimate.ShipAddr.Id = "2" + datetime.now().strftime('%d%H%M')
estimate.ShipAddr.Line1 = "65 Ocean Dr."
estimate.ShipAddr.City = "Half Moon Bay"
estimate.ShipAddr.CountrySubDivisionCode = "CA"
estimate.ShipAddr.PostalCode = "94213"
estimate.ShipAddr.Lat = "37.4300318"
estimate.ShipAddr.Long = "-122.4336537"
estimate.BillEmail = EmailAddress()
estimate.BillEmail.Address = "Cool_Cars@intuit.com"
estimate.CustomerMemo = CustomerMemo()
estimate.CustomerMemo.value = "Thank you for your business and have a great day!"
estimate.CustomerRef = Ref()
estimate.CustomerRef.value = self.customer.Id
estimate.CustomerRef.name = self.customer.DisplayName
estimate.TxnTaxDetail = TxnTaxDetail()
estimate.TxnTaxDetail.TotalTax = 0
line = SalesItemLine()
line.LineNum = 1
line.Description = "Pest Control Services"
line.Amount = 35.0
line.SalesItemLineDetail = SalesItemLineDetail()
line.SalesItemLineDetail.UnitPrice = 35
line.SalesItemLineDetail.Qty = 1
item_ref = Ref()
item_ref.value = "10"
item_ref.name = "Pest Control"
line.SalesItemLineDetail.ItemRef = item_ref
tax_code_ref = Ref()
tax_code_ref.value = "NON"
line.SalesItemLineDetail.TaxCodeRef = tax_code_ref
estimate.Line.append(line)
line2 = DiscountLine()
line2.Amount = 3.5
line2.DiscountLineDetail = DiscountLineDetail()
line2.DiscountLineDetail.PercentBased = True
line2.DiscountLineDetail.DiscountPercent = 10
line2.DiscountLineDetail.DiscountAccountRef = Ref()
line2.DiscountLineDetail.DiscountAccountRef.value = "86"
line2.DiscountLineDetail.DiscountAccountRef.name = "Discounts given"
line2.DetailType = "DiscountLineDetail"
estimate.Line.append(line2)
estimate.save(qb=self.qb_client)
query_estimate = Estimate.get(estimate.Id, qb=self.qb_client)
self.assertEqual(query_estimate.Id, estimate.Id)
self.assertEqual(query_estimate.TotalAmt, estimate.TotalAmt)
self.assertEqual(query_estimate.ApplyTaxAfterDiscount, estimate.ApplyTaxAfterDiscount)
self.assertEqual(query_estimate.PrintStatus, estimate.PrintStatus)
self.assertEqual(query_estimate.EmailStatus, estimate.EmailStatus)
self.assertEqual(query_estimate.BillAddr.Line1, estimate.BillAddr.Line1)
self.assertEqual(query_estimate.BillAddr.City, estimate.BillAddr.City)
self.assertEqual(query_estimate.BillAddr.CountrySubDivisionCode,
estimate.BillAddr.CountrySubDivisionCode)
self.assertEqual(query_estimate.BillAddr.PostalCode, estimate.BillAddr.PostalCode)
self.assertEqual(query_estimate.ShipAddr.Line1, estimate.ShipAddr.Line1)
self.assertEqual(query_estimate.ShipAddr.City, estimate.ShipAddr.City)
self.assertEqual(query_estimate.ShipAddr.CountrySubDivisionCode,
estimate.ShipAddr.CountrySubDivisionCode)
self.assertEqual(query_estimate.ShipAddr.PostalCode, estimate.ShipAddr.PostalCode)
self.assertEqual(query_estimate.BillEmail.Address, estimate.BillEmail.Address)
self.assertEqual(query_estimate.CustomerMemo.value, estimate.CustomerMemo.value)
self.assertEqual(query_estimate.CustomerRef.value, estimate.CustomerRef.value)
self.assertEqual(query_estimate.CustomerRef.name, estimate.CustomerRef.name)
self.assertEqual(query_estimate.TxnTaxDetail.TotalTax, estimate.TxnTaxDetail.TotalTax)
self.assertEqual(query_estimate.Line[0].LineNum, estimate.Line[0].LineNum)
self.assertEqual(query_estimate.Line[0].Description, estimate.Line[0].Description)
self.assertEqual(query_estimate.Line[0].Amount, estimate.Line[0].Amount)
self.assertEqual(query_estimate.Line[0].SalesItemLineDetail.UnitPrice,
estimate.Line[0].SalesItemLineDetail.UnitPrice)
self.assertEqual(query_estimate.Line[0].SalesItemLineDetail.Qty,
estimate.Line[0].SalesItemLineDetail.Qty)
self.assertEqual(query_estimate.Line[2].Amount, estimate.Line[1].Amount)
self.assertEqual(query_estimate.Line[2].DiscountLineDetail.PercentBased,
estimate.Line[1].DiscountLineDetail.PercentBased)
self.assertEqual(query_estimate.Line[2].DiscountLineDetail.DiscountPercent,
estimate.Line[1].DiscountLineDetail.DiscountPercent)
self.assertEqual(query_estimate.Line[2].DiscountLineDetail.DiscountAccountRef.value,
estimate.Line[1].DiscountLineDetail.DiscountAccountRef.value)
self.assertEqual(query_estimate.Line[2].DiscountLineDetail.DiscountAccountRef.name,
estimate.Line[1].DiscountLineDetail.DiscountAccountRef.name)
|
# This code is from a pytype bug report
# https://github.com/google/pytype/issues/450
# (I've moved one global binding to the end to make
# it more difficult)
# - pytype has problems with the "i":
# ERROR:pytype.analyze:No visible options for i
# line 12, in test: Name 'x' is not defined [name-error]
# line 12, in test: Name 'i' is not defined [name-error]
#- { @i defines/binding I }
i = 0
def test():
#- { @x ref X }
#- { @#1i ref I }
print(x, i)
#- { @#1i ref I }
while i < 3:
#- { @i ref I }
i += 1
#- { @x defines/binding X }
x = 1
|
from .GumbraiseInstagram import *
|
# Write a program that reads a person's full name,
# then shows the first and the last name separately.
# E.g.: Ana Maria de Souza
# first = Ana
# last = Souza
nome = str(input('Digite o seu nome: ')).strip()
nomeI = (nome.split())
print('Seu primeiro nome é {}'.format(nomeI[0]))
#print("Seu ultimo nome é {}".format(nomeI[-1])
#eu fiz a linha de cima e o Gustavo Guanabara a linha abaixo
print("O seu sobrenome é {}".format(nomeI[len(nomeI)-1]))
|
# server.py
# Import the Flask class. An instance of this class will be our WSGI application.
from flask import Flask, render_template
# Next we create an instance of this class. The first argument is the name of the application’s module or package.
# If you are using a single module (as in this example), you should use __name__ because depending on whether
# it's started as an application or imported as a module the name will be different ('__main__' versus the actual import name).
# This is needed so that Flask knows where to look for templates, static files, and so on.
app = Flask(__name__, static_folder="../static/dist", template_folder="../static")
# We then use the route() decorator to tell Flask what URL should trigger our function.
# The function is given a name which is also used to generate URLs for that particular function,
# and returns the message we want to display in the user’s browser.
@app.route("/")
def index():
return render_template("index.html")
@app.route("/hello")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
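# To try this locally (assuming Flask is installed and the static/template folders
# referenced above exist), run `python server.py` and open http://127.0.0.1:5000/hello
# in a browser; app.run() serves on Flask's default host and port.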
|
from __future__ import print_function
from docopt import docopt
import random
from collections import defaultdict
if __name__ == "__main__":
args = docopt("""
Usage:
most_frequent_words.py <n> <file_name>
""")
n = int(args["<n>"])
file_name = args["<file_name>"]
words = defaultdict(int)
with open(file_name) as f:
for line in f:
for word in line.split():
words[word] += 1
top_words = sorted(words.keys(), key=lambda k: words[k], reverse=True)[:n]
for word in top_words:
print(word)
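    # Example invocation (hypothetical file name; requires docopt to be installed):
    #   python most_frequent_words.py 5 corpus.txt
    # An equivalent ranking could also be built with collections.Counter:
    #   from collections import Counter
    #   top_words = [w for w, _ in Counter(words).most_common(n)]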
|
from . import cli
from .params import db_options
@cli.command(hidden=True)
@db_options
def repl(engine_uri):
# dynamic includes
import IPython
from traitlets.config import Config
# First create a config object from the traitlets library
c = Config()
c.InteractiveShellApp.extensions = ["autoreload"]
c.InteractiveShellApp.exec_lines = [
'print("\\nBooting import Meltano REPL\\n")',
"from meltano.core.project import Project",
"from meltano.core.plugin import PluginRef, PluginInstall, Plugin",
"from meltano.core.setting import Setting",
"from meltano.core.plugin.settings_service import PluginSettingsService",
"from meltano.core.config_service import ConfigService",
"from meltano.core.db import project_engine",
"from meltano.core.job import Job, State",
"project = Project.find()",
f"_, Session = project_engine(project, engine_uri='{engine_uri}', default=True)",
"session = Session()",
"%autoreload 2",
]
# c.InteractiveShell.colors = 'LightBG'
c.InteractiveShell.confirm_exit = False
c.TerminalIPythonApp.display_banner = True
# Now we start ipython with our configuration
IPython.start_ipython(argv=[], config=c)
|
from functools import partial
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import (
accuracy_score,
log_loss,
make_scorer,
mean_squared_error,
)
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import normalize
class BaseSSLScorer:
def __init__(
self,
test_size=0.2,
random_state=42,
apply_normalize=True,
is_multiclass=False,
**kwargs,
):
self.test_size = test_size
self.random_state = random_state
self.model_kwargs = kwargs
self.apply_normalize = apply_normalize
self.is_multiclass = is_multiclass
def _get_model(self, **kwargs):
raise NotImplementedError
def _get_score_function(self):
raise NotImplementedError
def __call__(self, embeddings, target):
if self.apply_normalize:
embeddings = normalize(embeddings)
if self.is_multiclass:
target = np.argmax(target, axis=1)
X_train, X_test, y_train, y_test = train_test_split(
embeddings,
target,
test_size=self.test_size,
random_state=self.random_state,
)
model = self._get_model(**self.model_kwargs)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
metric_fn = self._get_score_function()
score = metric_fn(y_test, y_pred)
return score
class BaseSSLCVScorer:
scorer_kwargs = {}
def __init__(
self,
n_splits=5,
random_state=42,
apply_normalize=True,
is_multiclass=False,
n_jobs=1,
**kwargs,
):
self.n_splits = n_splits
self.random_state = random_state
self.model_kwargs = kwargs
self.apply_normalize = apply_normalize
self.is_multiclass = is_multiclass
self.n_jobs = n_jobs
def _get_model(self, **kwargs):
raise NotImplementedError
def _get_score_function(self):
raise NotImplementedError
def __call__(self, embeddings, target):
if self.apply_normalize:
embeddings = normalize(embeddings)
if self.is_multiclass:
target = np.argmax(target, axis=1)
model = self._get_model(**self.model_kwargs)
metric_fn = self._get_score_function()
scores = cross_val_score(
model,
embeddings,
target,
scoring=make_scorer(metric_fn, **self.scorer_kwargs),
cv=self.n_splits,
n_jobs=self.n_jobs,
)
score = np.mean(scores)
return score
class AccuracyWithLogisticRegressionCV(BaseSSLCVScorer):
def _get_model(self, **kwargs):
return LogisticRegression(**kwargs)
def _get_score_function(self):
return accuracy_score
class AccuracyWithLogisticRegression(BaseSSLScorer):
def _get_model(self, **kwargs):
return LogisticRegression(**kwargs)
def _get_score_function(self):
return accuracy_score
class LogLossWithLogisticRegressionCV(BaseSSLCVScorer):
def _get_model(self, **kwargs):
return LogisticRegression(**kwargs)
def _get_score_function(self):
return log_loss
class LogLossWithLogisticRegression(BaseSSLScorer):
def _get_model(self, **kwargs):
return LogisticRegression(**kwargs)
def _get_score_function(self):
return log_loss
class MSEWithLinearRegressionCV(BaseSSLCVScorer):
def _get_model(self, **kwargs):
return LinearRegression(**kwargs)
def _get_score_function(self):
return mean_squared_error
class MSEWithLinearRegression(BaseSSLScorer):
def _get_model(self, **kwargs):
return LinearRegression(**kwargs)
def _get_score_function(self):
return mean_squared_error
class RMSEWithLinearRegressionCV(BaseSSLCVScorer):
scorer_kwargs = {"squared": False}
def _get_model(self, **kwargs):
return LinearRegression(**kwargs)
def _get_score_function(self):
return mean_squared_error
class RMSEWithLinearRegression(BaseSSLScorer):
def _get_model(self, **kwargs):
return LinearRegression(**kwargs)
def _get_score_function(self):
return partial(mean_squared_error, squared=False)
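# Minimal usage sketch (illustrative only; the embeddings and labels below are
# random placeholders standing in for real SSL features and targets):
#   embeddings = np.random.rand(200, 32)
#   labels = np.random.randint(0, 2, size=200)
#   scorer = AccuracyWithLogisticRegressionCV(n_splits=5, max_iter=1000)
#   print(scorer(embeddings, labels))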
|
import subprocess
def get_diff(path):
if not path.startswith("/"):
path = "/" + path
result = subprocess.run(
["git", "-C", path, "diff", "--staged"], capture_output=True, encoding="utf8"
)
return result.stdout
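# Minimal usage sketch (hypothetical path; assumes a git repository with staged changes):
#   staged_diff = get_diff("/home/user/my-repo")
#   print(staged_diff)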
|
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_mysql_analytics_cluster_actions
short_description: Perform actions on an AnalyticsCluster resource in Oracle Cloud Infrastructure
description:
- Perform actions on an AnalyticsCluster resource in Oracle Cloud Infrastructure
- "For I(action=add), dEPRECATED -- please use HeatWave API instead.
Adds an Analytics Cluster to the DB System."
- "For I(action=restart), dEPRECATED -- please use HeatWave API instead.
Restarts the Analytics Cluster."
- "For I(action=start), dEPRECATED -- please use HeatWave API instead.
Starts the Analytics Cluster."
- "For I(action=stop), dEPRECATED -- please use HeatWave API instead.
Stops the Analytics Cluster."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
db_system_id:
description:
- The DB System L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
aliases: ["id"]
required: true
shape_name:
description:
- "The shape determines resources to allocate to the Analytics
Cluster nodes - CPU cores, memory."
- Required for I(action=add).
type: str
cluster_size:
description:
- The number of analytics-processing nodes provisioned for the
Analytics Cluster.
- Required for I(action=add).
type: int
action:
description:
- The action to perform on the AnalyticsCluster.
type: str
required: true
choices:
- "add"
- "restart"
- "start"
- "stop"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action add on analytics_cluster
oci_mysql_analytics_cluster_actions:
# required
db_system_id: "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx"
shape_name: shape_name_example
cluster_size: 56
action: add
- name: Perform action restart on analytics_cluster
oci_mysql_analytics_cluster_actions:
# required
db_system_id: "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx"
action: restart
- name: Perform action start on analytics_cluster
oci_mysql_analytics_cluster_actions:
# required
db_system_id: "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx"
action: start
- name: Perform action stop on analytics_cluster
oci_mysql_analytics_cluster_actions:
# required
db_system_id: "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx"
action: stop
"""
RETURN = """
analytics_cluster:
description:
- Details of the AnalyticsCluster resource acted upon by the current operation
returned: on success
type: complex
contains:
db_system_id:
description:
- The OCID of the parent DB System this Analytics Cluster is attached to.
returned: on success
type: str
sample: "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx"
shape_name:
description:
- "The shape determines resources to allocate to the Analytics
Cluster nodes - CPU cores, memory."
returned: on success
type: str
sample: shape_name_example
cluster_size:
description:
- The number of analytics-processing compute instances, of the
specified shape, in the Analytics Cluster.
returned: on success
type: int
sample: 56
cluster_nodes:
description:
- An Analytics Cluster Node is a compute host that is part of an Analytics Cluster.
returned: on success
type: complex
contains:
node_id:
description:
- The ID of the node within MySQL Analytics Cluster.
returned: on success
type: str
sample: "ocid1.node.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the MySQL Analytics Cluster node.
returned: on success
type: str
sample: CREATING
time_created:
description:
- The date and time the MySQL Analytics Cluster node was created, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the MySQL Analytics Cluster node was updated, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- The current state of the Analytics Cluster.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- Additional information about the current lifecycleState.
returned: on success
type: str
sample: lifecycle_details_example
time_created:
description:
- The date and time the Analytics Cluster was created, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the Analytics Cluster was last updated, as described by L(RFC 3339,https://tools.ietf.org/rfc/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: {
"db_system_id": "ocid1.dbsystem.oc1..xxxxxxEXAMPLExxxxxx",
"shape_name": "shape_name_example",
"cluster_size": 56,
"cluster_nodes": [{
"node_id": "ocid1.node.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00"
}],
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
oci_config_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.mysql import WorkRequestsClient
from oci.mysql import DbSystemClient
from oci.mysql.models import AddAnalyticsClusterDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MysqlAnalyticsClusterActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
add
restart
start
stop
"""
def get_waiter_client(self):
return oci_config_utils.create_service_client(self.module, WorkRequestsClient)
@staticmethod
def get_module_resource_id_param():
return "db_system_id"
def get_module_resource_id(self):
return self.module.params.get("db_system_id")
def get_get_fn(self):
return self.client.get_analytics_cluster
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_analytics_cluster,
db_system_id=self.module.params.get("db_system_id"),
)
def add(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, AddAnalyticsClusterDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.add_analytics_cluster,
call_fn_args=(),
call_fn_kwargs=dict(
db_system_id=self.module.params.get("db_system_id"),
add_analytics_cluster_details=action_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def restart(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.restart_analytics_cluster,
call_fn_args=(),
call_fn_kwargs=dict(db_system_id=self.module.params.get("db_system_id"),),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def start(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.start_analytics_cluster,
call_fn_args=(),
call_fn_kwargs=dict(db_system_id=self.module.params.get("db_system_id"),),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def stop(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.stop_analytics_cluster,
call_fn_args=(),
call_fn_kwargs=dict(db_system_id=self.module.params.get("db_system_id"),),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
MysqlAnalyticsClusterActionsHelperCustom = get_custom_class(
"MysqlAnalyticsClusterActionsHelperCustom"
)
class ResourceHelper(
MysqlAnalyticsClusterActionsHelperCustom, MysqlAnalyticsClusterActionsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
db_system_id=dict(aliases=["id"], type="str", required=True),
shape_name=dict(type="str"),
cluster_size=dict(type="int"),
action=dict(
type="str", required=True, choices=["add", "restart", "start", "stop"]
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="analytics_cluster",
service_client_class=DbSystemClient,
namespace="mysql",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
# Number of divisors and addition
def DivisorCount(n):
    # Count the divisors of n by checking i up to sqrt(n); i and n // i form a pair.
    i, cnt = 1, 0
    divisor = []
    while i * i <= n:
        if n % i == 0:
            divisor.append(i)
            if i * i != n:
                divisor.append(n // i)
                cnt += 1
            cnt += 1
        i += 1
    # Add n when the divisor count is even, subtract it when odd.
    return n if cnt % 2 == 0 else -n
def solution(left, right):
answer = 0
for i in range(left, right + 1):
answer += DivisorCount(i)
return answer
'''
Test 1 > Passed (2.82ms, 10.3MB)
Test 2 > Passed (0.65ms, 10.2MB)
Test 3 > Passed (1.20ms, 10.1MB)
Test 4 > Passed (0.26ms, 10.2MB)
Test 5 > Passed (2.71ms, 10.2MB)
Test 6 > Passed (0.28ms, 10.2MB)
Test 7 > Passed (0.17ms, 10.2MB)
'''
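# Quick sanity check (13..17 have 2, 4, 4, 5 and 2 divisors respectively,
# so the expected result is 13 + 14 + 15 - 16 + 17 = 43):
#   print(solution(13, 17))  # 43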
|
# Copyright 2019 Southwest Research Institute
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from asyncio import Event
from logging import Logger
import asyncssh
import traceback
from typing import Optional, cast
from launch import SomeActionsType, EventHandler
from launch.actions import OpaqueFunction
from launch.event_handlers import OnShutdown
from launch.events.process import ProcessStarted, ProcessExited, ShutdownProcess, \
SignalProcess
from launch.launch_context import LaunchContext
from launch.machine import Machine
from launch.some_substitutions_type import SomeSubstitutionsType
from launch.substitution import Substitution
from launch.utilities import is_a_subclass
import launch
class SshClientSession(asyncssh.SSHClientSession):
"""
Factory for generating SSH client sessions
"""
def __init__(self, logger: Logger, context: LaunchContext, process_event_args=None):
self.__logger = logger
self.__context = context
self.__process_event_args = process_event_args
def connection_made(self, chan):
self.__logger.debug("connection_made")
def data_received(self, data, datatype):
# Probably should emit this data via an event for the launch system
self.__logger.info("data_received: %s" % str(data))
def connection_lost(self, exc):
self.__logger.debug("connection_lost: %s" % exc)
class SshMachine(Machine):
"""Describes a machine for remotely launching ROS nodes."""
def __init__(self, *,
hostname: SomeSubstitutionsType,
env: Optional[SomeSubstitutionsType] = None,
**kwargs) -> None:
"""Initialize a machine description."""
super().__init__(**kwargs)
self.__hostname = hostname
self.__env = env
self.__conn = None
self.__chan = None
self.__logger = None
self.__first_run = True
self.__connection_ready = asyncio.Event()
@property
def hostname(self) -> Substitution:
return self.__hostname
@property
def env(self):
return self.__env
def __on_signal_process_event(self, event: Event, context: LaunchContext):
if self.__chan:
typed_event = cast(SignalProcess, context.locals.event)
self.__logger.info("signals don't work on OpenSSH < 7.9")
self.__chan.signal(typed_event.signal_name)
def __on_shutdown(self, event: Event, context: LaunchContext) -> Optional[SomeActionsType]:
try:
if self.__chan:
self.__logger.debug("Killing all jobs")
self.__chan.write('kill $(jobs -p)')
self.__chan.write_eof()
self.__chan.close()
self.__logger.debug("Closing SSH connection")
self.__conn.close()
except Exception:
self.__logger.error("Exception when shutting down channel: %s" % traceback.format_exc())
    async def execute_process(self,
                              process_event_args: Optional[dict],
                              log_cmd: bool,
                              emulate_tty: bool,
                              shell: bool,
                              cleanup_fn,
                              context: LaunchContext) -> None:
if process_event_args is None:
raise RuntimeError('process_event_args unexpectedly None')
cmd = process_event_args['cmd']
cwd = process_event_args['cwd']
env = process_event_args['env']
if not self.__logger:
# The first time this method is called, set up a logger and
# event handlers for it.
self.__logger = launch.logging.get_logger(process_event_args['name'])
event_handlers = [
EventHandler(
matcher=lambda event: is_a_subclass(event, ShutdownProcess),
entities=OpaqueFunction(function=self.__on_shutdown),
),
EventHandler(
matcher=lambda event: is_a_subclass(event, SignalProcess),
entities=OpaqueFunction(function=self.__on_signal_process_event),
),
OnShutdown(on_shutdown=self.__on_shutdown)
]
self.__logger.debug("Registering event handlers")
for handler in event_handlers:
context.register_event_handler(handler)
if log_cmd:
self.__logger.info("process details: cmd=[{}], cwd='{}', custom_env?={}".format(
', '.join(cmd), cwd, 'True' if env is not None else 'False'
))
self.__logger.debug("Executing process")
process_event_args['pid'] = 0
await context.emit_event(ProcessStarted(**process_event_args))
try:
if self.__first_run:
# The first time this method runs, create an SSH connection
# and initialize the environment.
self.__first_run = False
def create_session():
return SshClientSession(self.__logger, context, process_event_args)
self.__conn = await asyncssh.connect(self.__hostname)
self.__chan, session = await self.__conn.create_session(
create_session,
encoding='utf8')
if self.__env:
self.__chan.write(self.__env)
self.__chan.write('\n')
self.__connection_ready.set()
# Every other time this method is called, we need to wait until
# the environment is ready.
await self.__connection_ready.wait()
if self.__chan:
# Run the command and put it in the background, then wait until
# the SSH channel closes
self.__chan.write(' '.join(cmd) + ' &\n')
self.__logger.debug("Waiting for SSH channel to close")
await self.__chan.wait_closed()
await context.emit_event(ProcessExited(
returncode=self.__chan.get_exit_status(),
**process_event_args))
self.__logger.debug("SSH connection exiting")
else:
self.__logger.error("SSH channel wasn't ready")
except Exception:
self.__logger.error('exception occurred while executing process:\n{}'.format(
traceback.format_exc()
))
finally:
cleanup_fn()
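# Minimal construction sketch (illustrative only; the hostname and env values are
# assumptions, and how the machine is attached to a launch description depends on
# the launch fork this Machine class targets):
#   machine = SshMachine(hostname='remote-host.local', env='source /opt/ros/setup.bash')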
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nlpcda.tools.Basetool import Basetool
from nlpcda.config import similarword_path
class Similarword(Basetool):
'''
    Synonym replacement: word-level data augmentation that roughly preserves the original text.
'''
def __init__(self, base_file=similarword_path, create_num=5, change_rate=0.05, seed=1):
super(Similarword, self).__init__(base_file, create_num, change_rate, seed)
def load_paser_base_file(self):
combine_dict = {}
for line in open(self.base_file, "r", encoding='utf-8'):
seperate_word = line.strip().split(" ")
num = len(seperate_word)
for i in range(1, num):
wi = seperate_word[i]
# add to user dict
if len(wi) > 1: self.add_word(wi)
combine_dict[wi] = seperate_word[1:]
print('load :%s done' % (self.base_file))
return combine_dict
def replace(self, replace_str:str):
replace_str = replace_str.replace('\n', '').strip()
seg_list = self.jieba.cut(replace_str, cut_all=False)
words = list(seg_list)
sentences = [replace_str]
t = 0
while len(sentences) < self.create_num:
t += 1
a_sentence = ''
for word in words:
a_sentence += self.s1(word)
if a_sentence not in sentences:
sentences.append(a_sentence)
if t > self.create_num * self.loop_t / self.change_rate:
break
return sentences
def s1(self, word:str):
        # Replace every word that appears in combine_dict
if len(word) == 1: return word
if word in self.base_file_mapobj and self.random.random() < self.change_rate:
wi = self.random.randint(0, len(self.base_file_mapobj[word]) - 1)
place = self.base_file_mapobj[word][wi]
return place
else:
return word
def test(test_str, create_num=10, change_rate=0.3):
smw = Similarword(create_num=create_num, change_rate=change_rate)
try:
return smw.replace(test_str)
except:
print('error in Similarword.replace')
return [test_str]
if __name__ == '__main__':
ts = '''这个天大药业很不错啊,测试测试。这是一场疫情防控的人民战争、总体战、阻击战,习近平总书记亲自指挥、亲自部署。运筹帷幄 指挥若定始终把人民群众生命安全和身体健康放在第一位'''
rs = test(ts)
for s in rs:
print(s)
|
import discord
from ..Bots import Global
from ..Lib import Querier
from discord.ext.tasks import loop
from discord.ext.commands import Cog
class Monitoring(Cog):
def __init__(self, bot: discord.ext.commands.Bot):
self.bot = bot
self.querier = Querier()
@loop(minutes=Global._time)
async def monitorings(self):
try:
res = await self.querier.execute_post_query(
f"https://api.server-discord.com/v2/bots/{self.bot.user.id}/stats",
headers={"Authorization": f"{Global.SDC_token}"},
data={"shards": self.bot.shard_count or 1, "servers": len(self.bot.guilds)}
)
print(f"SDC Status updated: {await res.json()}")
except Exception as error:
print(error)
def setup(bot):
bot.add_cog(Monitoring(bot))
|
from .const import *
screen_start = f"""
WELCOME TO THE WASTELAND
(1) - START
(2) - LOAD
(3) - HELP
(4) - CHANGE LOG
    (5) - QUIT
ENTER A NUMBER TO CONTINUE
"""
screen_class = """
CHOOSE YOUR CLASS
(1) - MAGE
(2) - WARRIOR
(3) - ARCHER
(4) - ASSASSIN
ENTER A NUMBER TO CONTINUE
"""
screen_mage = """
MAGE - MASTER OF THE MAGICAL ARTS
MAGES ARE WEAK IN PHYSICAL COMBAT,
BUT THEY DRAW ON THEIR POOL OF MANA
TO CAST POWERFUL SPELLS.
ARE YOU SURE YOU WANT TO BE A MAGE?
ENTER "Y" OR "N"
"""
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.grid
from models.Product import *
from models.Unit import *
from models.ProductType import *
###########################################################################
## Class MyPanel4
###########################################################################
class ProductPanel ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 992,462 ), style = wx.TAB_TRAVERSAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
gbSizer5 = wx.GridBagSizer( 0, 0 )
gbSizer5.SetFlexibleDirection( wx.BOTH )
gbSizer5.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText12 = wx.StaticText( self, wx.ID_ANY, u"Product type:*", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText12.Wrap( -1 )
gbSizer5.Add( self.m_staticText12, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_choiceproduct_type_idChoices = self.GetData(2)
self.m_choiceproduct_type_id = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choiceproduct_type_idChoices, 0 )
self.m_choiceproduct_type_id.SetSelection( 0 )
gbSizer5.Add( self.m_choiceproduct_type_id, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText10 = wx.StaticText( self, wx.ID_ANY, u"Unit:*", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText10.Wrap( -1 )
gbSizer5.Add( self.m_staticText10, wx.GBPosition( 0, 4 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_choiceunit_idChoices = self.GetData(1)
self.m_choiceunit_id = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, m_choiceunit_idChoices, 0, )
self.m_choiceunit_id.SetSelection( 0 )
gbSizer5.Add( self.m_choiceunit_id, wx.GBPosition( 0, 5 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText13 = wx.StaticText( self, wx.ID_ANY, u"Product code:*", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText13.Wrap( -1 )
gbSizer5.Add( self.m_staticText13, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_textMA = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 250,-1 ), 0 )
gbSizer5.Add( self.m_textMA, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, u"Product name:*", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText11.Wrap( -1 )
gbSizer5.Add( self.m_staticText11, wx.GBPosition( 1, 4 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textTEN = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_textTEN, wx.GBPosition( 1, 5 ), wx.GBSpan( 1, 3 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText111 = wx.StaticText( self, wx.ID_ANY, u"Tax:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText111.Wrap( -1 )
gbSizer5.Add( self.m_staticText111, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_texttax = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_texttax, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText20 = wx.StaticText( self, wx.ID_ANY, u"Amount: ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText20.Wrap( -1 )
gbSizer5.Add( self.m_staticText20, wx.GBPosition( 2, 2 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textamount = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_textamount, wx.GBPosition( 2, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText122 = wx.StaticText( self, wx.ID_ANY, u"Cost: ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText122.Wrap( -1 )
gbSizer5.Add( self.m_staticText122, wx.GBPosition( 2, 4 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textcost = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_textcost, wx.GBPosition( 2, 5 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText131 = wx.StaticText( self, wx.ID_ANY, u"Price: ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText131.Wrap( -1 )
gbSizer5.Add( self.m_staticText131, wx.GBPosition( 2, 6 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textprice = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_textprice, wx.GBPosition( 2, 7 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.m_staticText14 = wx.StaticText( self, wx.ID_ANY, u"Made by: ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText14.Wrap( -1 )
gbSizer5.Add( self.m_staticText14, wx.GBPosition( 3, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textmade_in = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
gbSizer5.Add( self.m_textmade_in, wx.GBPosition( 3, 1 ), wx.GBSpan( 1, 7 ), wx.ALL|wx.EXPAND, 5 )
self.m_staticText121 = wx.StaticText( self, wx.ID_ANY, u"Description:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText121.Wrap( -1 )
gbSizer5.Add( self.m_staticText121, wx.GBPosition( 4, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textdescription = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 450,-1 ), 0 )
gbSizer5.Add( self.m_textdescription, wx.GBPosition( 4, 1 ), wx.GBSpan( 1, 7 ), wx.ALL|wx.EXPAND, 5 )
bSizer6.Add( gbSizer5, 1, wx.EXPAND, 5 )
bSizer9 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticTRONG = wx.StaticText( self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticTRONG.Wrap( -1 )
bSizer9.Add( self.m_staticTRONG, 0, wx.ALL, 5 )
self.m_btnThem = wx.Button( self, wx.ID_ANY, u"Add record", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer9.Add( self.m_btnThem, 0, wx.ALL, 5 )
self.m_btnXoa = wx.Button( self, wx.ID_ANY, u"Delete", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer9.Add( self.m_btnXoa, 0, wx.ALL, 5 )
self.m_btnSua = wx.Button( self, wx.ID_ANY, u"Update", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer9.Add( self.m_btnSua, 0, wx.ALL, 5 )
self.m_btnDong = wx.Button( self, wx.ID_ANY, u"Close", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer9.Add( self.m_btnDong, 0, wx.ALL, 5 )
bSizer6.Add( bSizer9, 1, wx.EXPAND, 5 )
bSizer10 = wx.BoxSizer( wx.VERTICAL )
self.m_gridSource = wx.grid.Grid( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,250 ), 0 )
# Grid
self.m_gridSource.CreateGrid(0, 11 )
self.m_gridSource.EnableEditing( True )
self.m_gridSource.EnableGridLines( True )
self.m_gridSource.EnableDragGridSize( False )
self.m_gridSource.SetMargins( 0, 0 )
# Columns
self.m_gridSource.SetColSize( 0, 67 )
self.m_gridSource.SetColSize( 1, 155 )
self.m_gridSource.SetColSize( 2, 80 )
self.m_gridSource.SetColSize( 3, 69 )
self.m_gridSource.SetColSize( 4, 82 )
self.m_gridSource.SetColSize( 5, 80 )
self.m_gridSource.SetColSize( 6, 80 )
self.m_gridSource.SetColSize( 7, 80 )
self.m_gridSource.SetColSize( 8, 80 )
self.m_gridSource.SetColSize( 9, 150 )
self.m_gridSource.SetColSize( 10, 150 )
self.m_gridSource.EnableDragColMove( False )
self.m_gridSource.EnableDragColSize( True )
self.m_gridSource.SetColLabelSize( 35 )
self.m_gridSource.SetColLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )
# Rows
self.m_gridSource.EnableDragRowSize( True )
self.m_gridSource.SetRowLabelSize( 40 )
self.m_gridSource.SetRowLabelAlignment( wx.ALIGN_CENTRE, wx.ALIGN_CENTRE )
# Label Appearance
# Cell Defaults
self.m_gridSource.SetDefaultCellAlignment( wx.ALIGN_LEFT, wx.ALIGN_TOP )
bSizer10.Add( self.m_gridSource, 0, wx.ALL|wx.EXPAND, 5 )
bSizer6.Add( bSizer10, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer6 )
self.Layout()
# Connect Events
self.m_btnThem.Bind( wx.EVT_BUTTON, self.m_btnThemOnButtonClick )
self.m_btnXoa.Bind( wx.EVT_BUTTON, self.m_btnXoaOnButtonClick )
self.m_btnSua.Bind( wx.EVT_BUTTON, self.m_btnSuaOnButtonClick )
self.m_btnDong.Bind( wx.EVT_BUTTON, self.m_btnDongOnButtonClick )
def __del__( self ):
pass
    # Virtual event handlers, override them in your derived class
def m_btnThemOnButtonClick( self, event):
kq = 0
if self.CheckValid()==False:
kq = self.InsertFromGrid()
if(kq == 0):
wx.MessageBox("Please fill all required fields","Messages",wx.OK|wx.ICON_WARNING)
self.m_textMA.SetFocus()
return
else :
Ma = self.m_textMA.GetValue()
Ten = self.m_textTEN.GetValue()
idDVT = self.m_choiceunit_id.GetString(self.m_choiceunit_id.GetSelection())
idloaihh = self.m_choiceproduct_type_id.GetString(self.m_choiceproduct_type_id.GetSelection())
amount = self.m_textamount.GetValue()
unit_pricevon = self.m_textcost.GetValue()
unit_priceban = self.m_textprice.GetValue()
tax = self.m_texttax.GetValue()
made_in = self.m_textmade_in.GetValue()
description = self.m_textdescription.GetValue()
self.m_gridSource.AppendRows(1)
xldb = ProductModel()
kq = xldb.Insert(Ma,Ten, idDVT,idloaihh,amount,unit_pricevon,unit_priceban,tax,made_in, description)
if kq>0:
self.OnLoadData()
wx.MessageBox("Record added successfully","Messages",wx.OK|wx.ICON_INFORMATION)
self.m_textMA.SetValue("")
self.m_textTEN.SetValue("")
self.m_texttax.SetValue("")
self.m_textdescription.SetValue("")
self.m_textcost.SetValue("")
self.m_textamount.SetValue("")
self.m_textprice.SetValue("")
self.m_textmade_in.SetValue("")
self.m_choiceunit_id.SetLabelText("")
self.m_choiceproduct_type_id.SetLabelText("")
else:
wx.MessageBox("Error when trying add record","Messages",wx.OK|wx.ICON_WARNING)
def m_btnXoaOnButtonClick( self, event ):
try:
cell_value = []
row_index = self.m_gridSource.GetSelectedRows()[0]
for i in range(0,3):
cell_value.append(self.m_gridSource.GetCellValue(row_index,i))
xldb = ProductModel()
Id = str(cell_value[0])
kq = xldb.Delete(Id)
if kq>0:
self.OnLoadData()
else:
wx.MessageBox("Error when trying delete this record","Messages",wx.OK|wx.ICON_WARNING)
except :
wx.MessageBox("Please select record to delete")
def m_btnSuaOnButtonClick( self, event ):
try:
cell_value = []
row_index = self.m_gridSource.GetSelectedRows()[0]
for i in range(0,11):
cell_value.append(self.m_gridSource.GetCellValue(row_index,i))
Id = str(cell_value[0])
Ma = str(cell_value[1])
Ten = str(cell_value[2])
idDVT = str(cell_value[3])
idloaihh = str(cell_value[4])
amount = str(cell_value[5])
unit_pricevon = str(cell_value[6])
unit_priceban = str(cell_value[7])
tax = str(cell_value[8])
made_in = str(cell_value[9])
description = str(cell_value[10])
xldb = ProductModel()
kq = xldb.Update(Id,Ma,Ten,idDVT,idloaihh,amount,unit_pricevon,unit_priceban,tax,made_in,description)
if kq>0:
self.OnLoadData()
wx.MessageBox("Record added successfully","Messages",wx.OK|wx.ICON_INFORMATION)
else:
wx.MessageBox("Error when trying add record","Messages",wx.OK|wx.ICON_WARNING)
except :
wx.MessageBox("Please select your record to update")
def m_btnDongOnButtonClick( self, event ):
frame = self.GetParent()
frame.Close()
def InsertFromGrid(self):
try:
cell_value = []
row_index = self.m_gridSource.GetSelectedRows()[0]
for i in range(0,11):
cell_value.append(self.m_gridSource.GetCellValue(row_index,i))
xldb = ProductModel()
Id = str(cell_value[0])
Ma = str(cell_value[1])
Ten = str(cell_value[2])
idDVT = str(cell_value[3])
idloaihh = str(cell_value[4])
amount = str(cell_value[5])
unit_pricevon = str(cell_value[6])
unit_priceban = str(cell_value[7])
tax = str(cell_value[8])
made_in = str(cell_value[9])
description = str(cell_value[10])
if Ma == "":
return 0
self.m_gridSource.AppendRows(1)
kq = xldb.Insert(Ma,Ten,idDVT,idloaihh,amount,unit_pricevon,unit_priceban,tax,made_in,description)
if kq>0:
return 1
else:
return 0
except :
return 0
def GetCountRow(self):
count = 1
xldb = ProductModel()
kq = xldb.DanhSach()
if kq!=None:
count = len(kq)
return count
def CheckValid(self):
Ma = self.m_textMA.GetValue().strip()
Ten = self.m_textTEN.GetValue().strip()
valid = True
if len(Ma)==0:
valid = False
return valid
def GetData(self,loai):
if loai == 1:
xldb = Unit()
dsAll = xldb.DanhSach()
else:
xldb = ProductTypeModel()
dsAll = xldb.DanhSach()
celldata = []
if dsAll!=None:
for i in range (0,len(dsAll)):
cell = dsAll[i]
if loai == 1 and str(cell['unit_name'])!="":
celldata.append(str(cell['unit_name']))
elif loai == 2 and str(cell['product_type_name'])!="":
celldata.append(str(cell['product_type_name']))
return celldata
def InitData(self):
self.m_gridSource.SetColLabelValue(0, "Id")
self.m_gridSource.SetColLabelValue(1, "Product code")
self.m_gridSource.SetColLabelValue(2, "Product name")
self.m_gridSource.SetColLabelValue(3, "Unit")
self.m_gridSource.SetColLabelValue(4, "Product type")
self.m_gridSource.SetColLabelValue(5, "Amount")
self.m_gridSource.SetColLabelValue(6, "Cost")
self.m_gridSource.SetColLabelValue(7, "Price")
self.m_gridSource.SetColLabelValue(8, "Tax")
self.m_gridSource.SetColLabelValue(9, "Made by")
self.m_gridSource.SetColLabelValue(10, "Description")
row = self.GetCountRow()+1
self.m_gridSource.AppendRows(row)
self.OnLoadData()
def OnLoadData(self):
xldb = ProductModel()
dsAll = xldb.DanhSach()
if dsAll!=None:
self.m_gridSource.ClearGrid()
for i in range (0,len(dsAll)):
cell = dsAll[i]
self.m_gridSource.SetCellValue(i,0,str(cell['ID']))
self.m_gridSource.SetCellValue(i,1,str(cell['product_code']))
self.m_gridSource.SetCellValue(i,2,str(cell['product_name']))
self.m_gridSource.SetCellValue(i,3,str(cell['unit_id']))
self.m_gridSource.SetCellValue(i,4,str(cell['product_type_id']))
self.m_gridSource.SetCellValue(i,5,str(cell['amount']))
self.m_gridSource.SetCellValue(i,6,str(cell['cost']))
self.m_gridSource.SetCellValue(i,7,str(cell['price']))
self.m_gridSource.SetCellValue(i,8,str(cell['tax']))
self.m_gridSource.SetCellValue(i,9,str(cell['made_in']))
self.m_gridSource.SetCellValue(i,10,str(cell['description']))
return
|
import torch
import matplotlib.pyplot as plt
from examples.test_cases import test_case_factory
from thermodynamicestimators.estimators.tram import TRAM
from thermodynamicestimators.data_sets.infinite_dataloader import InfiniteDataLoader
"""
main.py
Uses TRAM to estimate free energies of the 1D double well.
"""
def main():
""" Generate a test problem and estimate the free energies using TRAM.
The input data consists of:
N_k : torch.Tensor of shape (S)
the total number of samples taken at each thermodynamic state,
discretized_trajectories : torch.Tensor
Tensor of shape (S, N_k) containing the discretized sample positions for
each trajectory (S trajectories with lenght N_k for traj. i)
bias_energies : torch.Tensor
Tensor of shape (S,N_k,S) for each state i the bias energies of each sample
in the trajectory of length N_k, evaluated at every thermodynamic state.
"""
n_markov_states, discretized_trajectories, bias_energies, transitions = test_case_factory.make_tram_test_case(
'double_well_1D')
dataloader = InfiniteDataLoader(transitions, batch_size=256, shuffle=True)
estimator = TRAM(n_markov_states, discretized_trajectories, bias_energies)
optimizer = torch.optim.SGD(estimator.parameters(), lr=1)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.95, patience=10 * len(dataloader),
verbose=True)
# estimate the free energies
estimator.estimate(dataloader, optimizer, [scheduler], tolerance=0)
# plot the estimate
plt.title('Estimated free energies')
plt.plot(estimator.free_energies_per_therm_state, label=r'Stochastic TRAM')
plt.ylabel(r'$f$')
plt.legend()
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
# -*- encoding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from models import ItemAgenda
from forms import FormItemAgenda
@login_required
def lista(request):
    # Fetch the current user's records from the ItemAgenda table
lista_itens = ItemAgenda.objects.filter(usuario=request.user)
    # Template name, plus a dict with the elements that will be passed to the template
return render(request, 'lista.html', {
'lista_itens': lista_itens,
})
@login_required
def adiciona(request):
if request.method == 'POST':
form = FormItemAgenda(request.POST, request.FILES)
if form.is_valid():
item = form.save(commit=False)
item.usuario = request.user
item.save()
return render(request, 'salvo.html', {'form': form})
else:
form = FormItemAgenda()
return render(request, 'adiciona.html', {'form': form})
@login_required
def remove(request, id_item):
# pk = primary key
item = get_object_or_404(ItemAgenda, pk=id_item, usuario=request.user)
if request.method == 'POST':
item.delete()
return render(request, 'removido.html', {})
return render(request, 'remove.html', {'item': item})
@login_required
def item(request, id_item):
# pk = primary key
item = get_object_or_404(ItemAgenda, pk=id_item, usuario=request.user)
if request.method == 'POST':
form = FormItemAgenda(request.POST, request.FILES, instance=item)
if form.is_valid():
item = form.save(commit=False)
item.usuario = request.user
item.save()
return render(request, 'salvo.html', {})
else:
form = FormItemAgenda(instance=item)
return render(request, 'item.html', {'form': form})
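# A possible URL configuration for these views (illustrative only; the route
# names and patterns below are assumptions, not taken from the project):
#   from django.conf.urls import url
#   from . import views
#   urlpatterns = [
#       url(r'^$', views.lista, name='lista'),
#       url(r'^adiciona/$', views.adiciona, name='adiciona'),
#       url(r'^item/(?P<id_item>\d+)/$', views.item, name='item'),
#       url(r'^remove/(?P<id_item>\d+)/$', views.remove, name='remove'),
#   ]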
|
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
class Meta:
verbose_name = "User"
verbose_name_plural = "Users"
|
import math
import struct
# Python 2 compat
try:
int_types = (int, long,)
byte_iter = bytearray
except NameError:
int_types = (int,)
byte_iter = lambda x: x
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from fitparse.utils import FitParseError
DEV_TYPES = {}
class RecordBase(object):
    # namedtuple-like base class. Subclasses must define __slots__
__slots__ = ()
# TODO: switch back to namedtuple, and don't use default arguments as None
# and see if that gives us any performance improvements
def __init__(self, *args, **kwargs):
for slot_name, value in zip_longest(self.__slots__, args, fillvalue=None):
setattr(self, slot_name, value)
for slot_name, value in kwargs.items():
setattr(self, slot_name, value)
class MessageHeader(RecordBase):
__slots__ = ('is_definition', 'is_developer_data', 'local_mesg_num', 'time_offset')
def __repr__(self):
return '<MessageHeader: %s%s -- local mesg: #%d%s>' % (
'definition' if self.is_definition else 'data',
'(developer)' if self.is_developer_data else '',
self.local_mesg_num,
', time offset: %d' % self.time_offset if self.time_offset else '',
)
class DefinitionMessage(RecordBase):
__slots__ = ('header', 'endian', 'mesg_type', 'mesg_num', 'field_defs', 'dev_field_defs')
type = 'definition'
@property
def name(self):
return self.mesg_type.name if self.mesg_type else 'unknown_%d' % self.mesg_num
def __repr__(self):
return '<DefinitionMessage: %s (#%d) -- local mesg: #%d, field defs: [%s], dev field defs: [%s]>' % (
self.name,
self.mesg_num,
self.header.local_mesg_num,
', '.join([fd.name for fd in self.field_defs]),
', '.join([fd.name for fd in self.dev_field_defs]),
)
class FieldDefinition(RecordBase):
__slots__ = ('field', 'def_num', 'base_type', 'size')
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
@property
def type(self):
return self.field.type if self.field else self.base_type
def __repr__(self):
return '<FieldDefinition: %s (#%d) -- type: %s (%s), size: %d byte%s>' % (
self.name,
self.def_num,
self.type.name, self.base_type.name,
self.size, 's' if self.size != 1 else '',
)
class DevFieldDefinition(RecordBase):
__slots__ = ('field', 'dev_data_index', 'base_type', 'def_num', 'size')
def __init__(self, **kwargs):
super(DevFieldDefinition, self).__init__(**kwargs)
# For dev fields, the base_type and type are always the same.
self.base_type = self.type
@property
def name(self):
return self.field.name if self.field else 'unknown_dev_%d_%d' % (self.dev_data_index, self.def_num)
@property
def type(self):
return self.field.type
def __repr__(self):
return '<DevFieldDefinition: %s:%s (#%d) -- type: %s, size: %d byte%s>' % (
self.name,
self.dev_data_index,
self.def_num,
self.type.name,
self.size, 's' if self.size != 1 else '',
)
class DataMessage(RecordBase):
__slots__ = ('header', 'def_mesg', 'fields')
type = 'data'
def get(self, field_name, as_dict=False):
# SIMPLIFY: get rid of as_dict
for field_data in self.fields:
if field_data.is_named(field_name):
return field_data.as_dict() if as_dict else field_data
def get_value(self, field_name):
# SIMPLIFY: get rid of this completely
field_data = self.get(field_name)
if field_data:
return field_data.value
def get_values(self):
# SIMPLIFY: get rid of this completely
return dict((f.name if f.name else f.def_num, f.value) for f in self.fields)
@property
def name(self):
return self.def_mesg.name
@property
def mesg_num(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_num
@property
def mesg_type(self):
# SIMPLIFY: get rid of this
return self.def_mesg.mesg_type
def as_dict(self):
# TODO: rethink this format
return {
'name': self.name,
'fields': [f.as_dict() for f in self.fields],
}
def __iter__(self):
# Sort by whether this is a known field, then its name
return iter(sorted(self.fields, key=lambda fd: (int(fd.field is None), fd.name)))
def __repr__(self):
return '<DataMessage: %s (#%d) -- local mesg: #%d, fields: [%s]>' % (
self.name, self.mesg_num, self.header.local_mesg_num,
', '.join(["%s: %s" % (fd.name, fd.value) for fd in self.fields]),
)
def __str__(self):
# SIMPLIFY: get rid of this
return '%s (#%d)' % (self.name, self.mesg_num)
class FieldData(RecordBase):
__slots__ = ('field_def', 'field', 'parent_field', 'value', 'raw_value', 'units')
def __init__(self, *args, **kwargs):
        super(FieldData, self).__init__(*args, **kwargs)
if not self.units and self.field:
# Default to units on field, otherwise None.
# NOTE:Not a property since you may want to override this in a data processor
self.units = self.field.units
@property
def name(self):
return self.field.name if self.field else 'unknown_%d' % self.def_num
# TODO: Some notion of flags
def is_named(self, name):
if self.field:
if name in (self.field.name, self.field.def_num):
return True
if self.parent_field:
if name in (self.parent_field.name, self.parent_field.def_num):
return True
if self.field_def:
if name == self.field_def.def_num:
return True
return False
@property
def def_num(self):
# Prefer to return the def_num on the field
# since field_def may be None if this field is dynamic
return self.field.def_num if self.field else self.field_def.def_num
@property
def base_type(self):
        # Try field_def's base type; if it doesn't exist, this is a
        # dynamically added field, so field won't be None
return self.field_def.base_type if self.field_def else self.field.base_type
@property
def is_base_type(self):
return self.field.is_base_type if self.field else True
@property
def type(self):
return self.field.type if self.field else self.base_type
@property
def field_type(self):
return self.field.field_type if self.field else 'field'
def as_dict(self):
return {
'name': self.name, 'def_num': self.def_num, 'base_type': self.base_type.name,
'type': self.type.name, 'units': self.units, 'value': self.value,
'raw_value': self.raw_value,
}
def __repr__(self):
return '<FieldData: %s: %s%s, def num: %d, type: %s (%s), raw value: %s>' % (
self.name, self.value, ' [%s]' % self.units if self.units else '',
self.def_num, self.type.name, self.base_type.name, self.raw_value,
)
def __str__(self):
return '%s: %s%s' % (
self.name, self.value, ' [%s]' % self.units if self.units else '',
)
class BaseType(RecordBase):
__slots__ = ('name', 'identifier', 'fmt', 'parse')
values = None # In case we're treated as a FieldType
@property
def size(self):
return struct.calcsize(self.fmt)
@property
def type_num(self):
return self.identifier & 0x1F
def __repr__(self):
return '<BaseType: %s (#%d [0x%X])>' % (
self.name, self.type_num, self.identifier,
)
class FieldType(RecordBase):
__slots__ = ('name', 'base_type', 'values')
def __repr__(self):
return '<FieldType: %s (%s)>' % (self.name, self.base_type)
class MessageType(RecordBase):
__slots__ = ('name', 'mesg_num', 'fields')
def __repr__(self):
return '<MessageType: %s (#%d)>' % (self.name, self.mesg_num)
class FieldAndSubFieldBase(RecordBase):
__slots__ = ()
@property
def base_type(self):
return self.type if self.is_base_type else self.type.base_type
@property
def is_base_type(self):
return isinstance(self.type, BaseType)
def render(self, raw_value):
if self.type.values and (raw_value in self.type.values):
return self.type.values[raw_value]
return raw_value
class Field(FieldAndSubFieldBase):
__slots__ = ('name', 'type', 'def_num', 'scale', 'offset', 'units', 'components', 'subfields')
field_type = 'field'
class SubField(FieldAndSubFieldBase):
__slots__ = ('name', 'def_num', 'type', 'scale', 'offset', 'units', 'components', 'ref_fields')
field_type = 'subfield'
class DevField(FieldAndSubFieldBase):
__slots__ = ('dev_data_index', 'def_num', 'type', 'name', 'units', 'native_field_num',
# The rest of these are just to be compatible with Field objects. They're always None
'scale', 'offset', 'components', 'subfields')
field_type = 'devfield'
class ReferenceField(RecordBase):
__slots__ = ('name', 'def_num', 'value', 'raw_value')
class ComponentField(RecordBase):
__slots__ = ('name', 'def_num', 'scale', 'offset', 'units', 'accumulate', 'bits', 'bit_offset')
field_type = 'component'
def render(self, raw_value):
if raw_value is None:
return None
# If it's a tuple, then it's a byte array and unpack it as such
# (only type that uses this is compressed speed/distance)
if isinstance(raw_value, tuple):
unpacked_num = 0
# Unpack byte array as little endian
for value in reversed(raw_value):
unpacked_num = (unpacked_num << 8) + value
raw_value = unpacked_num
# Mask and shift like a normal number
if isinstance(raw_value, int_types):
raw_value = (raw_value >> self.bit_offset) & ((1 << self.bits) - 1)
return raw_value
class Crc(object):
"""FIT file CRC computation."""
CRC_TABLE = (
0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401,
0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400,
)
FMT = 'H'
def __init__(self, value=0, byte_arr=None):
self.value = value
if byte_arr:
self.update(byte_arr)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.value or "-")
def __str__(self):
return self.format(self.value)
def update(self, byte_arr):
"""Read bytes and update the CRC computed."""
if byte_arr:
self.value = self.calculate(byte_arr, self.value)
@staticmethod
def format(value):
"""Format CRC value to string."""
return '0x%04X' % value
@classmethod
def calculate(cls, byte_arr, crc=0):
"""Compute CRC for input bytes."""
for byte in byte_iter(byte_arr):
# Taken verbatim from FIT SDK docs
tmp = cls.CRC_TABLE[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ cls.CRC_TABLE[byte & 0xF]
tmp = cls.CRC_TABLE[crc & 0xF]
crc = (crc >> 4) & 0x0FFF
crc = crc ^ tmp ^ cls.CRC_TABLE[(byte >> 4) & 0xF]
return crc
def parse_string(string):
try:
end = string.index(0x00)
except TypeError: # Python 2 compat
end = string.index('\x00')
return string[:end].decode('utf-8', errors='replace') or None
# The default base type
BASE_TYPE_BYTE = BaseType(name='byte', identifier=0x0D, fmt='B', parse=lambda x: None if all(b == 0xFF for b in x) else x)
BASE_TYPES = {
0x00: BaseType(name='enum', identifier=0x00, fmt='B', parse=lambda x: None if x == 0xFF else x),
0x01: BaseType(name='sint8', identifier=0x01, fmt='b', parse=lambda x: None if x == 0x7F else x),
0x02: BaseType(name='uint8', identifier=0x02, fmt='B', parse=lambda x: None if x == 0xFF else x),
0x83: BaseType(name='sint16', identifier=0x83, fmt='h', parse=lambda x: None if x == 0x7FFF else x),
0x84: BaseType(name='uint16', identifier=0x84, fmt='H', parse=lambda x: None if x == 0xFFFF else x),
0x85: BaseType(name='sint32', identifier=0x85, fmt='i', parse=lambda x: None if x == 0x7FFFFFFF else x),
0x86: BaseType(name='uint32', identifier=0x86, fmt='I', parse=lambda x: None if x == 0xFFFFFFFF else x),
0x07: BaseType(name='string', identifier=0x07, fmt='s', parse=parse_string),
0x88: BaseType(name='float32', identifier=0x88, fmt='f', parse=lambda x: None if math.isnan(x) else x),
0x89: BaseType(name='float64', identifier=0x89, fmt='d', parse=lambda x: None if math.isnan(x) else x),
0x0A: BaseType(name='uint8z', identifier=0x0A, fmt='B', parse=lambda x: None if x == 0x0 else x),
0x8B: BaseType(name='uint16z', identifier=0x8B, fmt='H', parse=lambda x: None if x == 0x0 else x),
0x8C: BaseType(name='uint32z', identifier=0x8C, fmt='I', parse=lambda x: None if x == 0x0 else x),
0x0D: BASE_TYPE_BYTE,
}
def add_dev_data_id(message):
global DEV_TYPES
dev_data_index = message.get('developer_data_index').raw_value
if message.get('application_id'):
application_id = message.get('application_id').raw_value
else:
application_id = None
# Note that nothing in the spec says overwriting an existing type is invalid
DEV_TYPES[dev_data_index] = {'dev_data_index': dev_data_index, 'application_id': application_id, 'fields': {}}
def add_dev_field_description(message):
global DEV_TYPES
dev_data_index = message.get('developer_data_index').raw_value
field_def_num = message.get('field_definition_number').raw_value
base_type_id = message.get('fit_base_type_id').raw_value
field_name = message.get('field_name').raw_value
units = message.get('units').raw_value
native_field_num = message.get('native_field_num')
if native_field_num is not None:
native_field_num = native_field_num.raw_value
if dev_data_index not in DEV_TYPES:
raise FitParseError("No such dev_data_index=%s found" % (dev_data_index))
fields = DEV_TYPES[int(dev_data_index)]['fields']
# Note that nothing in the spec says overwriting an existing field is invalid
fields[field_def_num] = DevField(dev_data_index=dev_data_index,
def_num=field_def_num,
type=BASE_TYPES[base_type_id],
name=field_name,
units=units,
native_field_num=native_field_num)
def get_dev_type(dev_data_index, field_def_num):
if dev_data_index not in DEV_TYPES:
raise FitParseError("No such dev_data_index=%s found when looking up field %s" % (dev_data_index, field_def_num))
elif field_def_num not in DEV_TYPES[dev_data_index]['fields']:
raise FitParseError("No such field %s for dev_data_index %s" % (field_def_num, dev_data_index))
return DEV_TYPES[dev_data_index]['fields'][field_def_num]
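# Minimal usage sketch (illustrative values only, not taken from a real FIT file;
# assumes the fitparse package is importable). Guarded so importing this module
# is unaffected.
if __name__ == '__main__':
    # CRC of an arbitrary byte string, formatted the way the FIT SDK does.
    crc = Crc(byte_arr=b'\x0e\x10\x43\x08')
    print(crc)
    # Base-type parsing: 0xFF is the "invalid" marker for uint8, so it maps to None.
    print(BASE_TYPES[0x02].parse(0xFF), BASE_TYPES[0x02].parse(42))
    # Null-terminated string decoding.
    print(parse_string(b'edge_530\x00\xff\xff'))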
|
#!/usr/bin/env python3
from to_load import *
from to_visualize import *
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from jagerml.helper import check_other_gpu
check_other_gpu()
data_dir, (img_height, img_width) = get_data_dir(flowers_dataset_url, flowers_dataset_name)
(train_ds, test_ds), (train_lb, test_lb) = create_dataset(data_dir, img_height, img_width)
#plot_ds(train_ds, train_lb)
#plot_ds(test_ds, test_lb)
for image_batch, labels_batch in train_ds.take(3):
print(image_batch.shape)
print(labels_batch.shape, labels_batch)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal",
input_shape=(img_height,
img_width,
3)),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
num_classes = 5
# model = Sequential([
# layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
# layers.Conv2D(16, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Conv2D(32, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Conv2D(64, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Flatten(),
# layers.Dense(128, activation='relu'),
# layers.Dense(num_classes)
# ])
model = Sequential([
data_augmentation,
layers.experimental.preprocessing.Rescaling(1./255),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
epochs=15
history = model.fit(
train_ds,
validation_data=test_ds,
epochs=epochs
)
plot_metrics(history, epochs)
|
#!/usr/bin/env python
"""
create_counts_lookup.py
"""
print('3)-->files')
import sys,os
import argparse
import pymysql as MySQLdb
import json
import configparser as ConfigParser
"""
SELECT sum(seq_count), dataset_id, domain_id,domain
FROM sequence_pdr_info
JOIN sequence_uniq_info USING(sequence_id)
JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)
JOIN silva_taxonomy USING(silva_taxonomy_id)
JOIN domain USING(domain_id)
JOIN phylum USING(phylum_id)
where dataset_id = '426'
GROUP BY dataset_id, domain_id
SELECT sum(seq_count), dataset_id, domain_id,domain,phylum_id,phylum
FROM sequence_pdr_info
JOIN sequence_uniq_info USING(sequence_id)
JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)
JOIN silva_taxonomy USING(silva_taxonomy_id)
JOIN domain USING(domain_id)
JOIN phylum USING(phylum_id)
where dataset_id = '426'
GROUP BY dataset_id, domain_id, phylum_id
"""
query_coreA = " FROM sequence_pdr_info"
# query_coreA += " JOIN sequence_uniq_info USING(sequence_id)"
query_core_join_silva119 = " JOIN silva_taxonomy_info_per_seq USING(sequence_id)"
query_core_join_silva119 += " JOIN silva_taxonomy USING(silva_taxonomy_id)"
query_core_join_rdp = " JOIN rdp_taxonomy_info_per_seq USING(sequence_id)"
query_core_join_rdp += " JOIN rdp_taxonomy USING(rdp_taxonomy_id)"
#SELECT sum(seq_count), dataset_id, domain_id
query_coreA_matrix = " FROM generic_taxonomy_info"
query_core_join_matrix = " JOIN generic_taxonomy USING(generic_taxonomy_id)"
#JOIN generic_taxonomy USING(generic_taxonomy_id) WHERE dataset_id in ('4413','4414','4415','4416','4417') GROUP BY dataset_id, domain_id ORDER BY NULL
where_part = " WHERE dataset_id in ('%s')"
# query_core = " FROM sequence_pdr_info"
# query_core += " JOIN sequence_uniq_info USING(sequence_id)"
# query_core += " JOIN silva_taxonomy_info_per_seq USING(silva_taxonomy_info_per_seq_id)"
# query_core += " JOIN silva_taxonomy USING(silva_taxonomy_id)"
domain_queryA = "SELECT sum(seq_count), dataset_id, domain_id"
#domain_query += query_core
domain_queryB = where_part
domain_queryB += " GROUP BY dataset_id, domain_id"
phylum_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id"
#phylum_query += query_core
phylum_queryB = where_part
phylum_queryB += " GROUP BY dataset_id, domain_id, phylum_id"
class_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id"
#class_query += query_core
class_queryB = where_part
class_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id"
order_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id"
#order_query += query_core
order_queryB = where_part
order_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id"
family_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id"
#family_query += query_core
family_queryB = where_part
family_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id"
genus_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id"
#genus_query += query_core
genus_queryB = where_part
genus_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id"
species_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id"
#species_query += query_core
species_queryB = where_part
species_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id"
strain_queryA = "SELECT sum(seq_count), dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id, strain_id"
#strain_query += query_core
strain_queryB = where_part
strain_queryB += " GROUP BY dataset_id, domain_id, phylum_id, klass_id, order_id, family_id, genus_id, species_id, strain_id"
end_group_query = " ORDER BY NULL"
required_metadata_fields = [ "collection_date","env_biome_id", "env_feature_id", "env_material_id", "env_package_id","geo_loc_name_id","latitude", "longitude", "dna_region_id",'adapter_sequence_id','sequencing_platform_id','target_gene_id','domain_id','illumina_index_id','primer_suite_id', 'run_id'];
req_query = "SELECT dataset_id, "+','.join(required_metadata_fields)+" from required_metadata_info WHERE dataset_id in ('%s')"
cust_pquery = "SELECT project_id,field_name from custom_metadata_fields WHERE project_id = '%s'"
#queries = [domain_query,phylum_query,class_query,order_query,family_query,genus_query,species_query,strain_query]
queries = [{"rank": "domain", "queryA": domain_queryA, "queryB": domain_queryB},
{"rank": "phylum", "queryA": phylum_queryA, "queryB": phylum_queryB},
{"rank": "klass", "queryA": class_queryA, "queryB": class_queryB},
{"rank": "order", "queryA": order_queryA, "queryB": order_queryB},
{"rank": "family", "queryA": family_queryA, "queryB": family_queryB},
{"rank": "genus", "queryA": genus_queryA, "queryB": genus_queryB},
{"rank": "species", "queryA": species_queryA, "queryB": species_queryB},
{"rank": "strain", "queryA": strain_queryA, "queryB": strain_queryB}
]
# Globals
CONFIG_ITEMS = {}
DATASET_ID_BY_NAME = {}
#
#
#
class Dict2Obj(object):
"""
Turns a dictionary into a class
"""
#----------------------------------------------------------------------
def __init__(self, dictionary):
"""Constructor"""
for key in dictionary:
setattr(self, key, dictionary[key])
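# Example: Dict2Obj({"host": "localhost", "verbose": True}).host -> "localhost"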
def go_add(args):
print ("Starting "+os.path.basename(__file__))
print('Changing dict to obj')
try:
args = Dict2Obj(args)
except:
pass
print(type(args))
global mysql_conn, cur
if args.host == 'vamps' or args.host == 'vampsdb' or args.host == 'bpcweb8':
hostname = 'vampsdb'
elif args.host == 'vampsdev' or args.host == 'bpcweb7':
hostname = 'bpcweb7'
else:
hostname = 'localhost'
args.NODE_DATABASE = 'vamps_development'
mysql_conn = MySQLdb.connect(db = args.NODE_DATABASE, host=hostname, read_default_file=os.path.expanduser("~/.my.cnf_node") )
cur = mysql_conn.cursor()
get_config_data(args)
pid = CONFIG_ITEMS['project_id']
counts_lookup = {}
if args.units == 'rdp':
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_rdp2.6')
elif args.units == 'generic':
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_generic')
elif args.units == 'matrix': # add matrix files to generic
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_generic')
else: # default 'silva119'
file_prefix = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--datasets_silva119')
if not os.path.exists(file_prefix):
os.makedirs(file_prefix)
if args.verbose:
print (file_prefix)
#DATASET_ID_BY_NAME[ds] = did
dids = [str(x) for x in DATASET_ID_BY_NAME.values()]
print ('dids',dids)
#dids = get_dataset_ids(pid)
# delete old did files if any
for did in dids:
pth = os.path.join(file_prefix,str(did)+'.json')
try:
os.remove(pth)
except:
pass
did_sql = "','".join(dids)
#print counts_lookup
for q in queries:
if args.units == 'rdp':
query = q["queryA"] + query_coreA + query_core_join_rdp + q["queryB"] % did_sql + end_group_query
elif args.units == 'generic' or args.units == 'matrix':
query = q["queryA"] + query_coreA_matrix + query_core_join_matrix + q["queryB"] % did_sql + end_group_query
else: # default 'silva119'
query = q["queryA"] + query_coreA + query_core_join_silva119 + q["queryB"] % did_sql + end_group_query
if args.verbose:
print (query)
dirs = []
cur.execute(query)
for row in cur.fetchall():
#print row
count = int(row[0])
did = str(row[1])
tax_id_str = ''
for k in range(2,len(row)):
tax_id_str += '_' + str(row[k])
#print 'tax_id_str',tax_id_str
if did in counts_lookup:
#sys.exit('We should not be here - Exiting')
if tax_id_str in counts_lookup[did]:
sys.exit('We should not be here - Exiting')
else:
counts_lookup[did][tax_id_str] = count
else:
counts_lookup[did] = {}
counts_lookup[did][tax_id_str] = count
if args.verbose:
print('counts_lookup')
print(counts_lookup)
metadata_lookup = {}
print ('getting required metadata from db')
metadata_lookup = go_required_metadata(did_sql)
print ('getting custom metadata from db')
metadata_lookup = go_custom_metadata(dids, pid, metadata_lookup)
print ('writing individual json files')
write_json_files(args, file_prefix, metadata_lookup, counts_lookup)
print ('writing all metadata file')
write_all_metadata_file(args, metadata_lookup)
print ('writing all taxcount file')
print ("Finished "+os.path.basename(__file__))
def write_all_metadata_file(args, metadata_lookup):
original_metadata_lookup = read_original_metadata(args)
md_file = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+"--metadata.json")
#print md_file
for did in metadata_lookup:
original_metadata_lookup[did] = metadata_lookup[did]
json_str = json.dumps(original_metadata_lookup)
#print(json_str)
f = open(md_file,'w')
f.write(json_str+"\n")
f.close()
def write_json_files(args, file_prefix, metadata_lookup, counts_lookup):
print ("In write_json_files")
for did in counts_lookup:
file_path = os.path.join(file_prefix,str(did)+'.json')
if args.verbose:
print('file_path: '+file_path)
f = open(file_path,'w')
my_counts_str = json.dumps(counts_lookup[did])
if did in metadata_lookup:
my_metadata_str = json.dumps(metadata_lookup[did])
else:
print ('WARNING -- no metadata for dataset: '+str(did))
my_metadata_str = json.dumps({})
#f.write('{"'+str(did)+'":'+mystr+"}\n")
f.write('{"taxcounts":'+my_counts_str+',"metadata":'+my_metadata_str+'}'+"\n")
f.close()
def go_required_metadata(did_sql):
"""
metadata_lookup_per_dsid[dsid][metadataName] = value
"""
global mysql_conn, cur
req_metadata_lookup = {}
query = req_query % (did_sql)
cur.execute(query)
for row in cur.fetchall():
did = str(row[0])
for i,f in enumerate(required_metadata_fields):
#print i,did,name,row[i+1]
value = row[i+1]
if did in req_metadata_lookup:
req_metadata_lookup[did][f] = str(value)
else:
req_metadata_lookup[did] = {}
req_metadata_lookup[did][f] = str(value)
return req_metadata_lookup
def go_custom_metadata(did_list,pid,metadata_lookup):
global mysql_conn, cur
field_collection = ['dataset_id']
query = cust_pquery % (pid)
cur.execute(query)
cust_metadata_lookup = {}
table = 'custom_metadata_'+ str(pid)
for row in cur.fetchall():
pid = str(row[0])
field = row[1]
if field != 'dataset_id':
field_collection.append(field)
print ('did_list',did_list)
print ('field_collection',field_collection)
cust_dquery = "SELECT `" + '`,`'.join(field_collection) + "` from " + table
print (cust_dquery)
try:
cur.execute(cust_dquery)
#print 'metadata_lookup1',metadata_lookup
for row in cur.fetchall():
#print row
did = str(row[0])
if did in did_list:
for i,f in enumerate(field_collection):
#cnt = i
if f != 'dataset_id':
value = str(row[i])
#print 'XXX',did,i,f,value
if did in metadata_lookup:
metadata_lookup[did][f] = value
else:
metadata_lookup[did] = {}
metadata_lookup[did][f] = value
except:
print ('could not find or read',table,'Skipping')
print()
#print 'metadata_lookup2',metadata_lookup
#sys.exit()
return metadata_lookup
def read_original_taxcounts(args):
file_path = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--taxcounts.json')
if os.path.exists(file_path):
with open(file_path) as data_file:
data = json.load(data_file)
else:
data = {}
return data
def read_original_metadata(args):
file_path = os.path.join(args.jsonfile_dir,args.NODE_DATABASE+'--metadata.json')
if os.path.exists(file_path):
with open(file_path) as data_file:
data = json.load(data_file)
else:
data = {}
return data
def get_config_data(args):
global mysql_conn, cur
config_path = os.path.join(args.project_dir, args.config_file)
print (config_path)
config = ConfigParser.ConfigParser()
config.optionxform=str
config.read(config_path)
for name, value in config.items('MAIN'):
#print ' %s = %s' % (name, value)
CONFIG_ITEMS[name] = value
CONFIG_ITEMS['datasets'] = []
for dsname, count in config.items('MAIN.dataset'):
CONFIG_ITEMS['datasets'].append(dsname)
#print ('project',CONFIG_ITEMS['project'])
q = "SELECT project_id FROM project"
q += " WHERE project = '"+CONFIG_ITEMS['project_name']+"'"
cur.execute(q)
row = cur.fetchone()
CONFIG_ITEMS['project_id'] = row[0]
q = "SELECT dataset,dataset_id from dataset"
q += " WHERE dataset in('"+"','".join(CONFIG_ITEMS['datasets'])+"')"
cur.execute(q)
for row in cur.fetchall():
DATASET_ID_BY_NAME[row[0]] = row[1]
mysql_conn.commit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
usage = """usage: vamps_script_create_json_dataset_files.py [options]
-pid/--project_id ID Must be combined with --add or --delete
This script only add/deletes to taxcounts files NOT MySQL
-add/--add Add project (will delete and overwrite if already present)
OR
-del/--delete Delete all dids (whole project) from dir (requires pid) ()
-l/ --list List: list all projects in taxcounts files [default]
count_lookup_per_dsid[dsid][rank][taxid] = count
This script will add a project to ../json/<NODE-DATABASE>/<DATASET-NAME>.json JSON object
But ONLY if it is already in the MySQL database.
To add a new project to the MySQL database:
If already GASTed:
use ./upload_project_to_database.py in this directory
If not GASTed
use py_mbl_sequencing_pipeline custom scripts
"""
parser.add_argument("-host", "--host",
required=False, action="store", dest = "host", default='local',
help = '')
parser.add_argument("-db","--database",
required=False, action="store", dest = "NODE_DATABASE", default='vamps2',
help="""NODE_DATABASE [default:vamps2]""")
parser.add_argument("-project_dir", "--project_dir",
required=True, action="store", dest = "project_dir",
help = 'ProjectDirectory')
parser.add_argument("-p", "--project",
required=True, action="store", dest = "project",
help="""ProjectName""")
parser.add_argument("-o", "--jsonfile_dir",
required=True, action="store", dest = "jsonfile_dir",
help="""JSON Files Directory""")
parser.add_argument("-units", "--tax_units",
required = False, action = 'store', choices = ['silva119', 'rdp', 'generic', 'matrix'], dest = "units",
default = 'silva119',
help = "Default: 'silva119'; only other choice available is 'rdp', 'generic', 'matrix'")
parser.add_argument("-config", "--config",
required = False, action = 'store', dest = "config_file",
default = 'INFO.config',
help = "")
parser.add_argument("-v", "--verbose",
required=False, action="store_true", dest = "verbose", default=False,
help = 'chatty')
args = parser.parse_args()
go_add(args)
print ("DONE")
fp = open(os.path.join(args.project_dir,'ASSIGNMENT_COMPLETE.txt'),'w')
try:
fp.write(str(CONFIG_ITEMS['project_id']))
except:
fp.write('ERROR')
fp.close()
#
# THIS MUST BE THE LAST PRINT!!!!
print ("PID="+str(CONFIG_ITEMS['project_id']))
##
|
import bpy
from bpy.props import *
from ...nodes.BASE.node_tree import RenderStackNode
def update_node(self, context):
self.update_parms()
class RSNodeEeveeRenderSettingsNode(RenderStackNode):
"""A simple input node"""
bl_idname = 'RSNodeEeveeRenderSettingsNode'
bl_label = 'Eevee Settings'
samples: IntProperty(default=64, min=1, name="Eevee Samples", update=update_node)
# data_path: StringProperty(name='Data Path', default='')
#
# float_value: FloatProperty(name='Value', update=update_node)
# string_value: StringProperty(name='Value', update=update_node)
# bool_value: BoolProperty(name='On', update=update_node)
# int_value: FloatProperty(name='Value', update=update_node)
def init(self, context):
self.outputs.new('RSNodeSocketRenderSettings', "Render Settings")
self.width = 175
def draw_buttons(self, context, layout):
layout.use_property_split = 1
layout.use_property_decorate = 0
layout.prop(self, "samples", text='Samples')
def get_data(self):
task_data = {}
task_data['engine'] = "BLENDER_EEVEE"
task_data['samples'] = self.samples
return task_data
def register():
bpy.utils.register_class(RSNodeEeveeRenderSettingsNode)
def unregister():
bpy.utils.unregister_class(RSNodeEeveeRenderSettingsNode)
|
from multiprocessing import Pool, cpu_count
import os
import random
import numpy as np
from easydict import EasyDict
from dataset.make_all import generate
from utils.geometry import random_spherical_point, get_prospective_location
from utils.io import mkdir, write_serialized, catch_abort
from utils.constants import CONFIG_FOLDER, SIM_OUTPUT_FOLDER, RENDER_OUTPUT_FOLDER, VIDEO_OUTPUT_FOLDER, \
OCCLUDER_HALF_WIDTH
from utils.misc import rand, random_distinct_colors, repeat_scale, get_host_id, BlenderArgumentParser
from utils.shape_net import SHAPE_DIMENSIONS, random_shape_net
train_prefix = "train"
TRAIN_CONFIG_FOLDER = mkdir(os.path.join(CONFIG_FOLDER, train_prefix))
TRAIN_SIM_OUTPUT_FOLDER = mkdir(os.path.join(SIM_OUTPUT_FOLDER, train_prefix))
TRAIN_RENDER_OUTPUT_FOLDER = mkdir(os.path.join(RENDER_OUTPUT_FOLDER, train_prefix))
TRAIN_VIDEO_OUTPUT_FOLDER = mkdir(os.path.join(VIDEO_OUTPUT_FOLDER, train_prefix))
def parse_args():
parser = BlenderArgumentParser(description='')
parser.add_argument('--start', help='image index to start', type=int, default=0)
parser.add_argument('--start_index', help='image index to start', type=int)
parser.add_argument('--end', help='image index to end', type=int, required=True)
parser.add_argument("--stride", help="image index stride", type=int, default=1)
parser.add_argument("--requires_valid", type=int, default=1)
parser.add_argument("--preview", type=int, default=0)
parser.add_argument("--is_single_image", type=int, default=0)
return parser.parse_args()
def get_occluders(colors, materials):
occluders = []
occluder_rand = rand(0, 1)
init_pos = (rand(-.5, .5), rand(-1., 1.), 0)
half_width = rand(.5, 1.5)
half_height = rand(.5, 1.)
scale = (OCCLUDER_HALF_WIDTH, half_width, half_height)
init_orn = (0, 0, rand(-20, 20))
if occluder_rand < .85:
# rotating occluder
joint_rand = rand(0, 1)
joint_t = np.random.randint(10, 25)
if joint_rand < 1 / 6:
joint_pattern = [(90, 90, joint_t), (90, 0, 250 - joint_t),
(0, 90, 250 - joint_t), (90, 90, joint_t)]
elif joint_rand < 1 / 3:
joint_pattern = [(90, 90, joint_t), (90, 0, 250 - joint_t),
(0, -90, 250 - joint_t), (-90, -90, joint_t)]
elif joint_rand < .5:
joint_pattern = [(-90, -90, joint_t), (-90, 0, 250 - joint_t),
(0, 90, 250 - joint_t), (90, 90, joint_t)]
elif joint_rand < 2 / 3:
joint_pattern = [(-90, -90, joint_t), (-90, 0, 250 - joint_t),
(0, -90, 250 - joint_t), (-90, -90, joint_t)]
elif joint_rand < 5 / 6:
joint_pattern = [(0, 0, joint_t), (0, 90, 250 - joint_t),
(90, 0, 250 - joint_t), (0, 0, joint_t)]
else:
joint_pattern = [(0, 0, joint_t), (0, -90, 250 - joint_t),
(-90, 0, 250 - joint_t), (0, 0, joint_t)]
occluder = dict(shape="cube", color=colors.pop(), joint="revolute",
material=materials.pop(),
init_pos=init_pos, init_orn=init_orn,
scale=scale, joint_pattern=joint_pattern)
occluders.append(occluder)
elif occluder_rand < .9:
# sliding occluder
joint_rand = rand(0, 1)
if joint_rand < .25:
joint_pattern = [(rand(.6, 1.2), 0, 250), (0, rand(.6, 1.2), 250)]
elif joint_rand < .5:
joint_pattern = [(rand(.6, 1.2), 0, 250), (0, rand(-1.2, -.6), 250)]
elif joint_rand < .75:
joint_pattern = [(rand(-1.2, -.6), 0, 250), (0, rand(.6, 1.2), 250)]
else:
joint_pattern = [(rand(-1.2, -.6), 0, 250), (0, rand(-1.2, -.6), 250)]
occluder = dict(shape="cube", color=colors.pop(), joint="prismatic",
material=materials.pop(),
init_pos=init_pos, init_orn=init_orn,
scale=scale, joint_pattern=joint_pattern)
occluders.append(occluder)
return occluders
def get_objects(colors, materials):
objects = []
n_objects = np.random.randint(2, 3)
for obj_id in range(n_objects):
side_rand = rand(0, 1)
size = rand(.2, .4)
while True:
cat_id = np.random.randint(55)
if cat_id % 5 != 0:
break
shape = random_shape_net(cat_id, True)
pos_z = SHAPE_DIMENSIONS[shape][2] * size
scale = repeat_scale(size)
orn_z = rand(-180, 180)
if side_rand < .4:
init_pos = (rand(-2.5, .5), rand(-4, -2), pos_z)
init_v = (rand(-.6, .6), rand(.5, 1.5), 0)
elif side_rand < .8:
init_pos = (rand(-2.5, .5), rand(2, 4), pos_z)
init_v = (rand(-.6, .6), rand(-1.5, -.5), 0)
else:
init_pos = (rand(-1.5, 0), rand(-.8, .8), pos_z)
init_v = (rand(-.6, .6), rand(-1.5, 1.5), 0)
color = colors.pop()
backward_rand = rand(0, 1)
if backward_rand < .4:
backward_time = np.random.randint(200, 300)
material = materials.pop()
mid_pos = get_prospective_location(init_pos, init_v, backward_time / 100)
object_orginal = dict(shape=shape, color=color,
material=material, init_pos=init_pos, init_orn=(0, 0, orn_z),
scale=scale, init_v=init_v, disappear_time=backward_time)
object_stop = dict(shape=shape, color=color,
material=material, init_pos=mid_pos, init_orn=(0, 0, orn_z),
scale=scale, init_v=[0, 0, 0], appear_time=backward_time,
disappear_time=backward_time + 50)
object_backward = dict(shape=shape, color=color,
material=material, init_pos=mid_pos, init_orn=(0, 0, orn_z),
scale=scale, init_v=[-x for x in init_v], appear_time=backward_time + 50)
for o in [object_orginal, object_stop, object_backward]:
objects.append(o)
continue
object = dict(shape=shape, color=color,
material=materials.pop(), init_pos=init_pos, init_orn=(0, 0, orn_z),
scale=scale, init_v=init_v)
objects.append(object)
return objects
def generate_config(case_name, args):
np.random.seed()
random.seed()
colors = random_distinct_colors(7)
materials = ["rubber"] * 7
objects = get_objects(colors, materials)
occluders = get_occluders(colors, materials)
if args.is_single_image:
sim = dict(output_dir=os.path.join(TRAIN_SIM_OUTPUT_FOLDER, case_name), sim_time=0.01)
else:
sim = dict(output_dir=os.path.join(TRAIN_SIM_OUTPUT_FOLDER, case_name), sim_time=5.)
rendering = dict(motion_file=os.path.join(TRAIN_SIM_OUTPUT_FOLDER, case_name, "motion.json"),
output_dir=os.path.join(TRAIN_RENDER_OUTPUT_FOLDER, case_name))
video = dict(frame_dir=os.path.join(TRAIN_RENDER_OUTPUT_FOLDER, case_name),
output_dir=os.path.join(TRAIN_VIDEO_OUTPUT_FOLDER, case_name))
scene = dict(case_name=case_name, objects=objects, occluders=occluders,
sim=sim, rendering=rendering, video=video)
write_serialized(scene, os.path.join(TRAIN_CONFIG_FOLDER, case_name + ".yaml"))
return scene
def main(case_id, args):
while True:
config = generate_config("train_{:05d}".format(case_id), args)
valid = generate(EasyDict(config), args)
if valid:
break
if __name__ == '__main__':
args = parse_args()
catch_abort()
worker_args = []
if args.start_index is None:
args.start_index = args.start + get_host_id() % args.stride
for i in range(args.start_index, args.end, args.stride):
worker_args.append((i, args))
with Pool(2) as p:
p.starmap(main, worker_args)
|
from .resource import ResourceAPI
from .connection import REQUEST
class EnvironmentAPI(ResourceAPI):
def __init__(self, connection):
super().__init__(connection, resource_type="environments")
def list(self, params={}, ignore_error=False):
return self.connection._call(
self.LIST,
verify=False,
request_json=params,
method=REQUEST.METHOD.POST,
ignore_error=ignore_error,
timeout=(5, 300),
)
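# Typical usage sketch (the configured `connection` object and the filter payload
# are assumed): EnvironmentAPI(connection).list(params={"length": 20})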
|
import ast
import argparse
from pathlib import Path
from .__version__ import __version__
from .core import compile, exec
def main():
argparser = argparse.ArgumentParser(description=f"MíngShé {__version__}")
argparser.add_argument("filepath", type=Path, help="The .she file")
argparser.add_argument(
"--compile", dest="compile", action="store_true", help="Only compile"
)
argparser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Print timing stats; repeat for more debug output",
)
args = argparser.parse_args()
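    # Repeated -v flags map to the debug switches computed below: -vv enables parser
    # debugging, -vvv tokenizer debugging only, and -vvvv or more enables both.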
verbose = args.verbose
verbose_tokenizer = verbose >= 3
verbose_parser = verbose == 2 or verbose >= 4
if args.compile:
ast_obj = compile(
args.filepath.read_text(encoding="utf8"),
filename=args.filepath,
verbose_tokenizer=verbose_tokenizer,
verbose_parser=verbose_parser,
)
py_text = ast.unparse(ast_obj)
py_path = args.filepath.with_suffix(".py").absolute()
py_path.write_text(py_text, encoding="utf8")
else:
exec(args.filepath.read_text(encoding="utf8"), filename=args.filepath)
|
from ffmpy import FFmpeg
import os, glob
class Normalize_Audio():
def __init__(self):
''' Constructor for this class. '''
pass
    @staticmethod
    def normalize(infile, bitrate):
filename=os.path.splitext(os.path.split(infile)[1])[0]
filepath=os.path.dirname(infile)+"\\normalized"
try:
os.mkdir(filepath)
except OSError:
pass
ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_normalized.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)})
ff.run()
return print("File Normalized")
    @staticmethod
    def compress(infile, bitrate):
filename=os.path.splitext(os.path.split(infile)[1])[0]
filepath=os.path.dirname(infile)+"\\compressed"
try:
os.mkdir(filepath)
except OSError:
pass
ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_compressed.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)})
ff.run()
return print("File Compressed")
|
# Generated by Django 3.1.5 on 2021-01-27 16:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workshop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='country',
field=models.CharField(default=1, max_length=150, verbose_name='Country'),
preserve_default=False,
),
migrations.AddField(
model_name='subscription',
name='is_active',
field=models.BooleanField(default=False, verbose_name='Account Verification'),
),
]
|
"""MIT License
Copyright (c) 2020 utilitybot.co
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import discord
from discord.ext import commands
from discord.utils import get
class Botinfo(commands.Cog):
def __init__(self, bot):
self.bot = bot
    @commands.command(aliases=['infobot', 'info'])
async def botinfo(self, ctx):
embed = discord.Embed(
title = "All about Utility Bot!",
description = "Hello! I am Utility Bot I'm here to help you manage and moderate your server with a touch of fun! I was written in Python by Flop#7234 using discord.py and python3.7! Type u!help to get a link to our website for a list of commands!",
color = discord.Color.orange()
)
embed.set_image(url="https://discord.com/channels/@me/700807345630085201/767113082446807070")
embed.set_thumbnail(url="https://discord.com/channels/@me/700807345630085201/767113082446807070")
        embed.set_footer(text=f"Guilds: {len(self.bot.guilds)}. Utility Bot The Server Management Bot.")
        # Send the assembled embed back to the invoking channel.
        await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Botinfo(bot))
bot.logging.info(f'$GREENLoaded $BLUE"botinfo" $GREENcommand!')
|
#Reorder a MISO gff file. Miso takes the inclusion isoform as the first listed mRNA. This reorders the file by start coord. All other fields (ID, etc.) are unchanged. Designed to work on AFE and ALE gff3s to sort by "most distal" isoform. This means most upstream isoform for AFEs and most downstream isoform for ALEs.
# Usage: python ReorderGff3.py infile outfile
import sys
from operator import itemgetter
infh = open(sys.argv[1], 'r')
outfh = open(sys.argv[2], 'w')
events = []
currentgene = []
currentmrnas = []
currentexons = []
# Build a list of lists of lists (called events). The innermost lists are the individual GFF lines. The next level groups all mRNAs or all exons belonging to the same event. The level above that gathers every line belonging to the same event (gene). The outer list (events) collects all events; see the structure sketch below.
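# e.g. events = [
#          [gene_line, [mrna_line, mrna_line, ...], [exon_line, exon_line, ...]],
#          ...
#      ]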
for line in infh:
line = line.strip()
line = line.split('\t')
if len(line) == 9: #sometimes the gff has a header line...skip it
if line[2] == 'gene':
if len(currentgene) == 0:
currentgene.append(line)
elif len(currentgene) > 0:
currentgene.append(currentmrnas)
currentgene.append(currentexons)
events.append(currentgene)
currentgene = []
currentmrnas = []
currentexons = []
currentgene.append(line)
elif line[2] == 'mRNA':
currentmrnas.append(line)
elif line[2] == 'exon':
currentexons.append(line)
currentgene.append(currentmrnas)
currentgene.append(currentexons)
events.append(currentgene)
#for event in events:
#event[0] = gene, event[1] = the mrnas, event[2] = the exons
for event in events:
strand = event[0][6]
outfh.write(('\t').join(event[0]) + '\n')
mrnas = event[1]
if strand == '+': #+ for AFEs - for ALEs
        mrnas.sort(key=lambda mrna: int(mrna[3]))  # numeric sort by start coord
for mrna in mrnas:
outfh.write(('\t').join(mrna) + '\n')
elif strand == '-': #- for AFEs + for ALEs
        mrnas.sort(key=lambda mrna: int(mrna[4]), reverse=True)  # reverse numeric sort by stop coord
for mrna in mrnas:
outfh.write(('\t').join(mrna) + '\n')
exons = event[2]
if strand == '+': #+ for AFE - for ALEs
        exons.sort(key=lambda exon: int(exon[3]))  # numeric sort by start coord
for exon in exons:
outfh.write(('\t').join(exon) + '\n')
elif strand == '-': #- for AFEs + for ALEs
        exons.sort(key=lambda exon: int(exon[4]), reverse=True)  # reverse numeric sort by stop coord
for exon in exons:
outfh.write(('\t').join(exon) + '\n')
infh.close()
outfh.close()
|
from typing import Any, Callable, Type
import ruamel.yaml
UserType = Any
YamlType = Any
_safe = ruamel.yaml.YAML(typ="safe", pure=True)
_rt = ruamel.yaml.YAML(typ="rt", pure=True)
def register_class(cls: Type) -> None:
_safe.register_class(cls)
_rt.register_class(cls)
def add_representer(
data_type: Type, representer: Callable[[ruamel.yaml.BaseRepresenter, UserType], YamlType]
) -> None:
_safe.representer.add_representer(data_type, representer)
_rt.representer.add_representer(data_type, representer)
def add_multi_representer(
base_data_type: Type,
multi_representer: Callable[[ruamel.yaml.BaseRepresenter, UserType], YamlType],
) -> None:
_safe.representer.add_multi_representer(base_data_type, multi_representer)
_rt.representer.add_multi_representer(base_data_type, multi_representer)
def add_constructor(
tag: str, constructor: Callable[[ruamel.yaml.BaseConstructor, YamlType], UserType]
) -> None:
_safe.constructor.add_constructor(tag, constructor)
_rt.constructor.add_constructor(tag, constructor)
def add_multi_constructor(
tag_prefix: str,
multi_constructor: Callable[[ruamel.yaml.BaseConstructor, str, YamlType], UserType],
) -> None:
_safe.constructor.add_multi_constructor(tag_prefix, multi_constructor)
_rt.constructor.add_multi_constructor(tag_prefix, multi_constructor)
def _prepare_for_output(d: dict) -> None:
ruamel.yaml.scalarstring.walk_tree(d)
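# Illustrative sketch (the Point class is hypothetical): register a plain class
# with both YAML instances above and dump it with the round-trip loader. Guarded
# so importing this module stays side-effect free.
if __name__ == "__main__":
    import io

    class Point:
        yaml_tag = "!Point"

        def __init__(self, x: int, y: int) -> None:
            self.x = x
            self.y = y

    register_class(Point)
    buf = io.StringIO()
    _rt.dump(Point(1, 2), buf)
    print(buf.getvalue())  # expected to contain a !Point mapping with x and y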
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:47:19 2018
@author: Yann Roussel and Tuan Bui
Editted by: Emine Topcu on Oct 2021
"""
from random import gauss
from Beat_and_glide import Beat_and_glide_base
class Beat_and_glide_V0v_KO(Beat_and_glide_base):
__tV0vKOstart = None #calculated at init
__tV0vKOend = None #calculated at init
_tshutoff = None
def __init__ (self, stim0 = 2.89, sigma = 0, sigma_LR = 0.1, E_glu = 0, E_gly = -70, cv = 0.80,
nMN = 15, ndI6 = 15, nV0v = 15, nV2a = 15, nV1 = 15, nMuscle = 15,
R_str = 1.0):
super().__init__(stim0, sigma, sigma_LR, E_glu, E_gly, cv,
nMN, ndI6, nV0v, nV2a, nV1, nMuscle, R_str)
        self.setTimeParameters()  # to initialize with default values
def setTimeParameters(self, tmax_ms = 10000, tshutoff_ms = 50, tskip_ms = 1000, dt = 0.1, tV0vKOstart_ms = 6000, tV0vKOend_ms = 11000):
super().setTimeParameters(tmax_ms, tshutoff_ms, tskip_ms, dt)
self.__tV0vKOstart = (tV0vKOstart_ms + tskip_ms) / dt
self.__tV0vKOend = (tV0vKOend_ms + tskip_ms) / dt
self._tshutoff = super().gettShutOff() #to prevent multiple function calls
def calcV0vPotentialsandResidues(self, t):
for k in range (0, self.nV0v):
if t < self._tshutoff: #Synaptic currents are shut off for the first 50 ms of the sims to let initial conditions subside
IsynL = 0.0
IsynR = 0.0
IGapL = - sum(self.LSGap_V0v_V0v[k,:]) + sum(self.LSGap_V0v_V0v[:,k]) -sum(self.LSGap_V0v_MN[k,:]) + sum(self.LSGap_MN_V0v[:,k])
IGapR = - sum(self.RSGap_V0v_V0v[k,:]) + sum(self.RSGap_V0v_V0v[:,k]) -sum(self.RSGap_V0v_MN[k,:]) + sum(self.RSGap_MN_V0v[:,k])
elif (t > self.__tV0vKOstart and t < self.__tV0vKOend):
IsynL = 0.0
IsynR = 0.0
IGapL = 0.0
IGapR = 0.0
else:
IsynL = sum(self.LSyn_V2a_V0v[self.nV0v*l+k,0]*self.LW_V2a_V0v[l,k] for l in range (0, self.nV2a)) + sum(self.LSyn_V1_V0v[self.nV0v*l+k,0]*self.LW_V1_V0v[l,k]*self.R_str for l in range (0, self.nV1))
IsynR = sum(self.RSyn_V2a_V0v[self.nV0v*l+k,0]*self.RW_V2a_V0v[l,k] for l in range (0, self.nV2a)) + sum(self.RSyn_V1_V0v[self.nV0v*l+k,0]*self.RW_V1_V0v[l,k]*self.R_str for l in range (0, self.nV1))
IGapL = - sum(self.LSGap_V0v_V0v[k,:]) + sum(self.LSGap_V0v_V0v[:,k]) -sum(self.LSGap_V0v_MN[k,:]) + sum(self.LSGap_MN_V0v[:,k])
IGapR = - sum(self.RSGap_V0v_V0v[k,:]) + sum(self.RSGap_V0v_V0v[:,k]) -sum(self.RSGap_V0v_MN[k,:]) + sum(self.RSGap_MN_V0v[:,k])
self.resLV0v[k,:] = self.L_V0v[k].getNextVal(self.resLV0v[k,0], self.resLV0v[k,1], IGapL + IsynL)
self.VLV0v[k,t] = self.resLV0v[k,0]
self.resRV0v[k,:] = self.R_V0v[k].getNextVal(self.resRV0v[k,0], self.resRV0v[k,1], IGapR + IsynR)
self.VRV0v[k,t] = self.resRV0v[k,0]
|
#!/usr/bin/env python3
##########################################################
## Jose F. Sanchez ##
## Copyright (C) 2019-2020 Lauro Sumoy Lab, IGTP, Spain ##
##########################################################
"""
Provides configuration for the pipeline.
.. seealso:: Additional information on BacterialTyper configuration and requirements
- :doc:`Configuration <../../user_guide/installation/installing>`
"""
## useful imports
import os
import io
import sys
import re
import shutil
from io import open
from sys import argv
import subprocess
from termcolor import colored
from distutils.version import LooseVersion
import pkg_resources
## import my modules
from BacterialTyper.config import extern_progs
from BacterialTyper.config import install_dependencies
import HCGB.functions.aesthetics_functions as HCGB_aes
import HCGB.functions.time_functions as HCGB_time
import HCGB.functions.main_functions as HCGB_main
import HCGB.functions.files_functions as HCGB_files
import HCGB.functions.system_call_functions as HCGB_sys
################
## Software
################
##################
def get_exe(prog, Debug=False, Return_Version=False):
"""Return absolute path of the executable program requested.
    Given a program name it returns its executable to be called. It has to fulfil the minimum version specified.
:param prog: Software name
:type prog: string
:returns: Absolute path for the executable requested
:warning: if no executable available in system ``$PATH`` or not fulfilling the expected version.
.. seealso:: This function depends on other ``BacterialTyper`` functions:
- :func:`BacterialTyper.config.set_config.my_which`
- :func:`BacterialTyper.config.extern_progs.return_defatult_soft`
- :func:`BacterialTyper.config.extern_progs.return_min_version_soft`
- :func:`BacterialTyper.config.set_config.get_version`
.. attention:: Be aware of Copyright
The code implemented here was retrieved and modified from ARIBA (https://github.com/sanger-pathogens/ariba)
Give them credit accordingly.
"""
exe = ""
if prog in os.environ:
exe = os.environ[prog] ## python environent variables
else:
exe = extern_progs.return_defatult_soft(prog) ## install in the system
## fix for trimmomatic
if prog == 'trimmomatic':
list_v = ['0.36','0.37','0.38','0.39']
for v in list_v:
## if using conda: there is caller
exe="trimmomatic-" + v + ".jar"
## get paths
exe_path_tmp = my_which(exe)
if (exe_path_tmp):
break
exe = "trimmomatic"
## get paths
exe_path_tmp = my_which(exe)
if (exe_path_tmp):
break
else:
## get paths
exe_path_tmp = my_which(exe)
## debug message
if (Debug):
print(colored("** Debug: exe: %s" %exe,'yellow'))
print(colored("** Debug: exe_path_tmp: %s" %exe_path_tmp,'yellow'))
## get min_version
min_version = extern_progs.return_min_version_soft(prog)
## debug message
if (Debug):
print(colored("** Debug: min_version: %s" %min_version,'yellow'))
## return if not available
## no min version available
if min_version == 'na':
if exe_path_tmp:
if (Return_Version):
return (exe_path_tmp[0], '') ## return first item
else:
return (exe_path_tmp[0]) ## return first item
## not installed in path
if (not exe_path_tmp):
if (Return_Version):
return('ERROR', 'n.a.')
else:
print(colored("\n**ERROR: Software %s could not be found." % prog,'red'))
exit()
## Loop for all possibilities
for p in exe_path_tmp:
prog_ver = get_version(prog, p, Debug=Debug)
if (Debug):
print (colored("** Debug: Software: %s\nPath: %s\nVersion: %s" %(prog, p, prog_ver), 'yellow'))
if (not prog_ver):
continue
if (prog_ver == "."):
continue
if LooseVersion(prog_ver) >= LooseVersion(min_version):
if (Return_Version):
return (p, prog_ver)
else:
return (p)
if (Return_Version):
return('ERROR', 'n.a.')
else:
print(colored("\n**ERROR: Software %s version does not match minimum version expected [%s]." %(prog,min_version),'red'))
exit()
################
def access_check(fn, mode=os.F_OK | os.X_OK):
"""Check executable permission
This function checks whether a given path is a folder or file and if it is
executable and accessible. It also works if a java jar file provided.
:param fn: Absolute path file
:param mode: Value to pass as the mode parameter of access()
:type fn: string
:type mode: string
`mode` defaults to:
- os.F_OK: Value to pass as the mode parameter of access() to test the existence of path.
- os.X_OK: Value to include in the mode parameter of access() to determine if path can be executed.
.. attention:: Be aware of Copyright
The code implemented here was retrieved and modified from shutil (https://github.com/python/cpython/blob/master/Lib/shutil.py).
Give them credit accordingly.
We modified the code to work if java jar files provided.
"""
## the original code belongs to shutil, slightly modified here
# https://github.com/python/cpython/blob/master/Lib/shutil.py
#if os.path.isdir(fn):
# return False
if os.path.exists(fn):
if fn.endswith('.jar'):
return True
if os.access(fn, mode):
return True
#################
def my_which(cmd):
"""Return the absolute path to the executable
Given a command return the absolute path(s), if any.
:param cmd: Software command name
:returns: List of absolute paths(s) of the given command.
.. attention:: Be aware of Copyright
The code implemented here was retrieved and modified from shutil (https://github.com/python/cpython/blob/master/Lib/shutil.py).
Give them credit accordingly.
We modified the code to return multiple paths in a list if available different installed binaries in $PATH.
"""
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if access_check(cmd):
return cmd
return None
use_bytes = isinstance(cmd, bytes)
path=None
if path is None:
path = os.environ.get("PATH", None)
if path is None:
try:
path = os.confstr("CS_PATH")
except (AttributeError, ValueError):
# os.confstr() or CS_PATH is not available
path = os.defpath
# bpo-35755: Don't use os.defpath if the PATH environment variable is
# set to an empty string
# PATH='' doesn't match, whereas PATH=':' looks in the current directory
if not path:
return None
if use_bytes:
path = os.fsencode(path)
path = path.split(os.fsencode(os.pathsep))
else:
path = os.fsdecode(path)
path = path.split(os.pathsep)
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
return_paths = [] ## modification
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
#print ("Normdir: ", normdir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
#print ("Name: ", name)
if access_check(name):
## return (name) ## previously, it would only return the first item
return_paths.append(name) ## modification
if (len(return_paths) >= 1):
return return_paths
else:
return None
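## Illustrative call: my_which("perl") might return ['/usr/bin/perl'] (every match
## found on $PATH, in order) or None when the program is not found at all.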
##################
def get_version(prog, path, Debug=False):
"""Get version of software
Given a program name and expected path, tries to determine its version.
:param prog: Program name
:param path: Absolute path
:param Debug: True/False
:type prog: string
:type path: string
:type Debug: bool
:returns: String containing version. Returns NA message if no found and raises attention error message.
.. attention:: Be aware of Copyright
The code implemented here was retrieved and modified from ARIBA (https://github.com/sanger-pathogens/ariba)
Give them credit accordingly.
"""
## read dependencies information
dependencies_pd = extern_progs.read_dependencies()
## get information for prog
regex = re.compile(dependencies_pd.loc[prog, 'get_version'])
args = dependencies_pd.loc[prog, 'version_cmd']
## debug messages
if (Debug):
print(colored("** Debug: regex: %s" %regex,'yellow'))
print(colored("** Debug: args: %s" %args, 'yellow'))
if prog == 'spades':
cmd = "python3 " + path + " " + args
cmd_output = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()
elif prog == 'dimob':
perl_exe = get_exe("perl")
cmd = perl_exe + ' ' + path
cmd_output = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()
elif prog == 'trimmomatic':
java_bin = get_exe('java')
java_jar = java_bin + ' -jar ' + path + ' ' + args
cmd_output = subprocess.Popen(java_jar, shell=True,
stdout=subprocess.PIPE,stderr=subprocess.PIPE).communicate()
elif prog == 'generate_plot':
return("12")
else:
cmd = path + ' ' + args
cmd_output = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
## decode command
cmd_output = HCGB_main.decode(cmd_output[0]).split('\n')[:-1] + HCGB_main.decode(cmd_output[1]).split('\n')[:-1]
## debug messages
if (Debug):
print(colored("** Debug: cmd_output:\n ",'yellow'))
print (cmd_output)
## retrieve version information
for line in cmd_output:
hits = regex.search(line)
if (Debug):
print (hits)
if hits:
return hits.group(1)
if Debug:
print (colored('Attention: I tried to get the version of ' + prog + ' with: "' + cmd + '" and the output didn\'t match this regular expression: "' + regex.pattern + '"', 'red'))
return("")
def check_dependencies(install_option, install_path, Debug):
"""
Check if available the different software required for ``BacterialTyper`` execution.
Using the function :func:`BacterialTyper.config.extern_progs.read_dependencies` the
information for all the dependencies is retrieved from file :file:`BacterialTyper/config/software/dependencies.csv`.
For each software, the function :func:`BacterialTyper.config.set_config.get_exe` retrieves
whether it is installed in the system or not and its version and it is check using
:func:`BacterialTyper.config.set_config.check_install`. If not installed, the
:func:`BacterialTyper.config.install_dependencies.install` parses the software name and
proceeds for the installation of the software.
:param install_option: True/False for proceeding with installation or only check.
:param install_path: Installation absolute path.
:param Debug: True/False for debugging messages
:type install_option: boolean
:type install_path: string
:type Debug: boolean
:returns: Print messages and information
.. seealso:: This function also depends on additional functions:
- :func:`BacterialTyper.config.extern_progs.read_dependencies`
- :func:`BacterialTyper.config.set_config.get_exe`
- :func:`BacterialTyper.config.set_config.check_install`
- :func:`BacterialTyper.config.install_dependencies.install`
"""
## read dependencies information
dependencies_pd = extern_progs.read_dependencies()
for soft, row in dependencies_pd.iterrows():
(soft_path, installed) = get_exe(soft, Debug=Debug, Return_Version=True)
soft_name = row['soft_name']
min_version = row['min_version']
## debug messages
if (Debug):
print ("Software:", soft)
print ("Soft name:", soft_name)
print ("Min_Version:", min_version)
print ("Soft Path: ", soft_path)
print ("Version installed:", installed)
## check if installed
message = check_install_module(installed, soft_name, min_version, 'Software')
if (message == 'OK'):
continue
else:
if (install_option):
if (Debug):
print ("Install software: ", soft)
installed = install_dependencies.install(soft, min_version, install_path, Debug)
message2 = check_install_module(installed, soft_name, min_version, 'Software')
if (message2 == 'OK'):
continue
else:
print ("+ attempt to install software: ", soft_name, " failed. Install it manually to continue with BacterialTyper\n\n")
else:
print ("\t+ Please install manually software: ", soft_name, " to continue with BacterialTyper\n\n")
################
## Python
################
def get_python_packages(Debug):
"""
Retrieves the version of the python packages installed in the system.
It retrieves the dependencies name conversion, if any, from file :file:`BacterialTyper/config/python/name_conversion_module.csv`,
using functions :func:`BacterialTyper.config.extern_progs.file_list` and :func:`BacterialTyper.scripts.functions.get_data`.
For each module it retrieves the package version installed in the system using
:func:`BacterialTyper.config.set_config.check_package_version`.
:returns: Dictionary containing for each python module (key) the installed version (value).
.. seealso:: This function relies on other ``BacterialTyper`` functions:
- :func:`BacterialTyper.config.set_config.check_package_version`
- :func:`BacterialTyper.config.extern_progs.file_list`
- :func:`BacterialTyper.scripts.functions.file2dictionary`
"""
## get import names for packages:
dict_python_packages = extern_progs.min_python_module_version()
## get version of packages
my_packages_installed = {}
for module_name in dict_python_packages.keys():
installed = check_package_version(module_name, Debug) ## check version installed in system
my_packages_installed[module_name] = installed
return (my_packages_installed)
##################
def check_python_packages(Debug, option_install, install_path):
"""
This function checks whether the python packages installed in the system fulfil the
minimum versions specified in the configuration folder.
It uses function :func:`BacterialTyper.config.set_config.get_python_packages` to
retrieve the versions of the python packages installed in the system. Then it uses
:func:`BacterialTyper.config.extern_progs.min_python_module_version` to retrieve the minimum
versions specified. It compares them using function :func:`BacterialTyper.config.set_config.check_install_module`.
:param Debug: True/False for debugging messages
:param option_install: True/False for installing missing dependencies
:param install_path: Installation absolute path.
:type Debug: boolean
:type option_install: boolean
:type install_path: string
:returns: Print messages if packages are installed.
.. seealso:: This function relies on other ``BacterialTyper`` functions:
- :func:`BacterialTyper.config.set_config.get_python_packages`
- :func:`BacterialTyper.config.set_config.check_install_module`
- :func:`BacterialTyper.config.extern_progs.min_python_module_version`
- :func:`BacterialTyper.config.install_dependencies.python_package_install`
- :func:`BacterialTyper.scripts.functions.file2dictionary`
"""
## get python packages installed
my_packages_installed = get_python_packages(Debug)
## debug messages
if (Debug):
print ("my_packages_installed :: ")
print (my_packages_installed)
## min versions for packages
my_packages_requirements = extern_progs.min_python_module_version()
## debug messages
if (Debug):
print ("my_packages_requirements")
print (my_packages_requirements)
## some modules do not have the same name when installed from pip as when imported
file_module_dependecies = extern_progs.file_list("name_conversion_module")
name_conversion_module = HCGB_main.file2dictionary(file_module_dependecies, ',')
## check each package
for each in my_packages_requirements:
## get min version
min_version = my_packages_requirements[each]
## get version installed in system
installed = my_packages_installed[each]
## module name conversion (if any)
if each in list(name_conversion_module.keys()):
module_name = name_conversion_module[each]
else:
module_name = each
## debug messages
if (Debug):
print ("Module:", each)
print ("Module name:", module_name)
print ("Min_Version:", min_version)
print ("Version installed:", installed)
## check if installed
message = check_install_module(installed, module_name, min_version, 'Module')
if (message == 'OK'):
continue
else:
if (option_install): # try to install
if (Debug):
print ("Install module: ", each)
installed = install_dependencies.python_package_install(module_name, min_version)
message2 = check_install_module(installed, module_name, min_version, 'Module')
if (message2 == 'OK'):
continue
else:
print ("+ attempt to install package: ", module_name, " failed. Install it manually to continue with BacterialTyper\n\n")
else:
print ("+ Please install manually package: ", module_name, " to continue with BacterialTyper\n\n")
################
def check_package_version(package, Debug):
"""
Retrieve python package version installed
This is a modification of the original code from ARIBA (https://github.com/sanger-pathogens/ariba).
It tries ``pkg_resources.get_distribution()`` first, then imports the module and reads its
``__version__`` attribute, and finally falls back to ``pkg_resources.resource_filename()``.
:param package: Python package name
:param Debug: True/False for debugging messages
:type package: string
:type Debug: boolean
:returns: Version retrieved
.. attention:: Be aware of Copyright
The code implemented here was retrieved and modified from ARIBA (https://github.com/sanger-pathogens/ariba)
Give them credit accordingly.
"""
try:
version = pkg_resources.get_distribution(package).version
if (Debug):
print ("Method: pkg_resources.get_distribution(package).version")
except:
try:
exec('import ' + package)
version = eval(package + '.__version__')
if (Debug):
print ("Method: exec('import ' + package); version = eval(package + '.__version__')")
except:
try:
if (Debug):
print ("Method: pkg_resources.resource_filename(package, 'version.py')")
version = pkg_resources.resource_filename(package, 'version.py')
except:
version = 'n.a.'
if (Debug):
print ('Package:', package)
print ('Version:', version)
return(version)
################
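## ----------------------------------------------------------------------------
## Hedged example: a compact sketch of the fallback chain in
## check_package_version() above, shown for a single, assumed package ('numpy').
def _example_python_package_version(package='numpy'):
    import importlib
    import pkg_resources
    try:
        ## preferred: ask pkg_resources for the installed distribution version
        return pkg_resources.get_distribution(package).version
    except Exception:
        try:
            ## fallback: import the module and read its __version__ attribute
            module = importlib.import_module(package)
            return getattr(module, '__version__', 'n.a.')
        except ImportError:
            return 'n.a.'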
################
## Perl
################
def get_perl_packages(Debug, file_name):
"""
Retrieves the version of the perl packages installed in the system.
It retrieves the perl dependencies information from file :file:`BacterialTyper/config/perl/perl_dependencies.csv`
using functions :func:`BacterialTyper.config.extern_progs.file_list` and :func:`BacterialTyper.scripts.functions.get_data`.
For each module it retrieves the package version installed in the system using
:func:`BacterialTyper.config.set_config.check_perl_package_version`.
:returns: Dictionary containing for each perl module (key) the installed version (value).
.. seealso:: This function relies on other ``BacterialTyper`` functions:
- :func:`BacterialTyper.config.set_config.check_perl_package_version`
- :func:`BacterialTyper.config.extern_progs.file_list`
- :func:`BacterialTyper.scripts.functions.get_data`
"""
## get info for perl modules
perl_lib_dependecies_file = extern_progs.file_list(file_name)
perl_lib_dependecies = HCGB_main.get_data(perl_lib_dependecies_file, ',', 'index_col=0')
my_packages_installed = {}
for index_name, row in perl_lib_dependecies.iterrows():
module_name = row['module']
installed = check_perl_package_version(module_name, Debug) ## check version installed in system
if not (installed):
installed = 'n.a.'
my_packages_installed[index_name] = installed
return (my_packages_installed)
##################
def check_perl_packages(file_name, Debug, option_install, install_path):
"""
Check the perl packages required
This function checks whether the perl packages installed in the system fulfil the
minimum versions specified in the configuration file. Details of the perl packages
required are available in :file:`BacterialTyper/config/perl/`.
It uses function :func:`BacterialTyper.config.set_config.get_perl_packages` to
retrieve the version of the perl packages installed in the system. Then it uses
:func:`BacterialTyper.config.extern_progs.min_perl_package_version` to retrieve the minimum
version specified. It compares them using function :func:`BacterialTyper.config.set_config.check_install_module`.
:param file_name: Name of the file to search within :file:`BacterialTyper/config/perl/`.
:param Debug: True/False for debugging messages
:param option_install: True/False for installing missing dependencies
:param install_path: Install path for installing modules.
:type file_name: string
:type Debug: boolean
:type option_install: boolean
:type install_path: string
:returns: Print messages if packages are installed.
.. seealso:: This function relies on other ``BacterialTyper`` functions:
- :func:`BacterialTyper.config.set_config.get_perl_packages`
- :func:`BacterialTyper.config.set_config.check_install_module`
- :func:`BacterialTyper.config.extern_progs.min_perl_package_version`
- :func:`BacterialTyper.config.install_dependencies.perl_package_install`
"""
## get perl packages installed
my_packages_installed = get_perl_packages(Debug, file_name)
## debug messages
if (Debug):
print ("my_packages_installed :: ")
print (my_packages_installed)
## min versions for packages
my_packages_requirements = extern_progs.min_perl_package_version(file_name)
## debug messages
if (Debug):
print ("my_packages_requirements")
print (my_packages_requirements)
## get info for perl modules
perl_lib_dependecies_file = extern_progs.file_list(file_name)
perl_lib_dependecies = HCGB_main.get_data(perl_lib_dependecies_file, ',', 'index_col=0')
## check each package
for each in my_packages_requirements:
## get min version
min_version = my_packages_requirements[each]
## get version installed in system
installed = my_packages_installed[each]
## module name conversion
module_name = perl_lib_dependecies.loc[each, 'module']
## debug messages
if (Debug):
print ("Module:", each)
print ("Module name:", module_name)
print ("Min_Version:", min_version)
print ("Version installed:", installed)
## check if installed
message = check_install_module(installed, module_name, min_version, 'Package')
if (message == 'OK'):
continue
else:
print (colored("** ATTENTION: Installation of perl modules is not supported",'red'))
print ("+ Please install manually package: ", module_name, " to continue with BacterialTyper\n\n")
################
def check_perl_package_version(package, Debug):
"""
Retrieve perl package version installed
It uses a one-line perl command to load the package and print its ``$VERSION`` variable.
:param package: package name
:param Debug: True/False for debugging messages
:type package: string
:type Debug: boolean
:returns: Version retrieved
"""
perl_exe = get_exe('perl')
perl_one_line_command = perl_exe + ' -M' + package + ' -e \'print $' + package + '::VERSION\';'
if (Debug):
print ("** DEBUG: perl command:\n")
print (perl_one_line_command)
## execute one line perl command
output_one_line = HCGB_sys.system_call(perl_one_line_command, returned=True, message=False)
return(HCGB_main.decode(output_one_line))
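## ----------------------------------------------------------------------------
## Hedged example: the one-line perl call built by check_perl_package_version(),
## written out with the standard library only. 'List::Util' is an assumed core
## perl module used purely for illustration.
def _example_perl_module_version(module='List::Util'):
    import subprocess
    one_liner = "perl -M" + module + " -e 'print $" + module + "::VERSION'"
    out, _ = subprocess.Popen(one_liner, shell=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE).communicate()
    return out.decode().strip()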
################
## IslandPath
################
def check_IslandPath(Debug, option_install, install_path):
## get perl packages installed
check_perl_packages("IslandPath_dependencies", Debug, option_install, install_path)
## check additional software required
print ("+ Check additional software for IslandPath optional analysis...")
################
################
## R
################
def R_package_path_installed():
"""Provides absolute path to file ``R_package.info.txt`` containing path to missing R packages installed"""
## check if exists or try to install
RDir_package = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'R', 'R_package.info.txt')
if HCGB_files.is_non_zero_file(RDir_package):
list=HCGB_main.readList_fromFile(RDir_package)
return (list[0])
else:
path2install = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'R', 'install_packages')
HCGB_files.create_folder(path2install)
return (path2install)
################
def get_R_packages():
dep_file = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'R', 'R_dependencies.csv'))
dep_file_data = HCGB_main.get_data(dep_file, ',', 'index_col=0')
return (dep_file_data)
################
def get_check_R_files():
check_install_system = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'R', 'check_install_system.R'))
check_install_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'R', 'check_install_path.R'))
return (check_install_system, check_install_path)
################
def check_R_packages(install, install_path, Debug):
packages = get_R_packages()
(check_install_system, check_install_path) = get_check_R_files()
R_script_exe = get_exe('Rscript')
## if no install path given, check for previously stored information in R_package.info.txt
if not install_path:
install_path = R_package_path_installed()
for index,row in packages.iterrows():
## debugging messages
if Debug:
print ('\n+ Check package: ', index)
print('+ Source: ', row['source'])
## first try to check if package available in system
cmd_check = R_script_exe + ' ' + check_install_system + ' -l ' + index
code = HCGB_sys.system_call(cmd_check, message=False, returned=False)
if (code=='OK'):
check_install_module('1', index, '0', 'package')
else:
check_install_module('0', index, '1', 'System package')
## check if installed in path
cmd_check_path = R_script_exe + ' ' + check_install_path + ' -l ' + index + ' -p ' + install_path
code2 = HCGB_sys.system_call(cmd_check_path, message=False, returned=False)
if (code2=='OK'):
check_install_module('1', index, '0', 'Install path package')
else:
check_install_module('0', index, '1', 'Install path package')
if (install):
install_dependencies.install_R_packages(index, row['source'], install_path, row['extra'])
else:
print ("Please install module %s manually to continue with BacterialTyper" %index)
################
## Miscellaneous
################
def print_module_comparison(module_name, message, color, tag):
"""
Creates and prints a formatted status message for a given module, message and tag.
:param module_name: Name of the module
:param message: Message to include in the print message: OK | FAILED | NOT FOUND
:param color: Print message color: green | orange | red
:param tag: Tag to include: Module, package, software
:type module_name: string
:type message: string
:type color: string
:type tag: string
:returns: Print message
"""
print (colored("{:.<30}{:.>35}".format("%s: %s" %(tag, module_name), "[ %s ]" %message), color))
#########
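## ----------------------------------------------------------------------------
## Hedged example: the aligned status line produced by print_module_comparison(),
## without the colour wrapping. Values are illustrative.
def _example_status_line(tag='Module', module_name='numpy', message='OK'):
    ## left column dot-padded to 30 characters, right column right-aligned to 35
    return "{:.<30}{:.>35}".format("%s: %s" % (tag, module_name), "[ %s ]" % message)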
##################
def check_install_module(installed, module_name, min_version, tag):
"""
Checks module installation
Checks whether a module is installed and fulfils the minimum version required.
It prints messages using :func:`BacterialTyper.config.set_config.print_module_comparison`.
:param installed: Version string of the module installed.
:param module_name: Module name
:param min_version: Version string for the minimum version required
:param tag: Tag to include in the message: Module | Package | Software
:type installed: string
:type module_name: string
:type min_version: string
:type tag: string
:returns: Message string: OK | FAILED | NOT FOUND
"""
## Not installed
if (installed == 'n.a.' or not installed):
message = 'NOT FOUND'
color = 'red'
print_module_comparison(module_name, message, color, tag)
# check version
elif LooseVersion(installed) >= LooseVersion(min_version):
message = 'OK'
color = 'green'
print_module_comparison(module_name, message, color, tag)
else:
message = 'FAILED'
color = 'yellow'
print_module_comparison(module_name, message, color, tag)
## return message
return (message)
#########
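## ----------------------------------------------------------------------------
## Hedged example: the version comparison behind check_install_module(), using
## distutils' LooseVersion as above. The version strings are illustrative.
def _example_version_check(installed='2.1.0', required='1.18'):
    from distutils.version import LooseVersion
    ## LooseVersion compares component-wise, so '2.1.0' >= '1.18' is True
    return 'OK' if LooseVersion(installed) >= LooseVersion(required) else 'FAILED'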
|
# -*- coding: utf-8 -*-
"""Console script for exo."""
import errno
import math
import sys
import click
import numpy as np
# Adapted Java treeview image compression algorithm
def rebin(a, new_shape):
M, N = a.shape
m, n = new_shape
if m >= M:
# repeat rows in data matrix
a = np.repeat(a, math.ceil(float(m) / M), axis=0)
M, N = a.shape
m, n = new_shape
row_delete_num = M % m
col_delete_num = N % n
np.random.seed(seed=0)
if row_delete_num > 0:
# select deleted rows with equal intervals
row_delete = np.linspace(0, M - 1, num=row_delete_num, dtype=int)
# sort the random selected deleted row ids
row_delete = np.sort(row_delete)
row_delete_plus1 = row_delete[1:-1] + \
1 # get deleted rows plus position
# get deleted rows plus position (top +1; end -1)
row_delete_plus1 = np.append(
np.append(row_delete[0] + 1, row_delete_plus1), row_delete[-1] - 1)
# put the info of deleted rows into the next rows by mean
a[row_delete_plus1, :] = (
a[row_delete, :] + a[row_delete_plus1, :]) / 2
a = np.delete(a, row_delete, axis=0) # random remove rows
if col_delete_num > 0:
# select deleted cols with equal intervals
col_delete = np.linspace(0, N - 1, num=col_delete_num, dtype=int)
# sort the random selected deleted col ids
col_delete = np.sort(col_delete)
col_delete_plus1 = col_delete[1:-1] + \
1 # get deleted cols plus position
# get deleted cols plus position (top +1; end -1)
col_delete_plus1 = np.append(
np.append(col_delete[0] + 1, col_delete_plus1), col_delete[-1] - 1)
# put the info of deleted cols into the next cols by mean
a[:, col_delete_plus1] = (
a[:, col_delete] + a[:, col_delete_plus1]) / 2
a = np.delete(a, col_delete, axis=1) # random remove columns
M, N = a.shape
# compare the heatmap matrix
a_compress = a.reshape((m, int(M / m), n, int(N / n))).mean(3).mean(1)
return np.array(a_compress)
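# ----------------------------------------------------------------------------
# Hedged example: rebin() on a tiny matrix. A 4x4 array compressed to 2x2 simply
# averages each non-overlapping 2x2 block (no rows or columns are deleted because
# 4 is divisible by 2). The expected result is [[2.5, 4.5], [10.5, 12.5]].
def _example_rebin():
    a = np.arange(16, dtype=float).reshape(4, 4)
    return rebin(a, (2, 2))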
def calculate_threshold(input_file, threshold_type, threshold_value, header, start_col, row_num, col_num, min_upper_lim):
data0 = []
with open(input_file, 'r') as data:
if header:
data.readline()
for rec in data:
tmp = [(x.strip()) for x in rec.split('\t')]
data0.append(tmp[start_col:])
data0 = np.array(data0, dtype=float)
if row_num == -999:
row_num = data0.shape[0]
if col_num == -999:
col_num = data0.shape[1]
# rebin data0
if row_num < data0.shape[0] and col_num < data0.shape[1]:
data0 = rebin(data0, (row_num, col_num))
elif row_num < data0.shape[0]:
data0 = rebin(data0, (row_num, data0.shape[1]))
elif col_num < data0.shape[1]:
data0 = rebin(data0, (data0.shape[0], col_num))
if threshold_type == 'quantile':
# Calculate contrast limits here
rows, cols = np.nonzero(data0)
upper_lim = np.percentile(data0[rows, cols], threshold_value)
lower_lim = 0
# Setting the upper threshold to a minimum value,
# in case the computed percentile contrast is <= the user-defined min_upper_lim
if threshold_value > 0.0:
click.echo(
"\nCalculated constrast UPPER LIMIT using quantile: {}".format(upper_lim))
click.echo("Assigned LOWER LIMIT: {}".format(lower_lim))
if upper_lim <= min_upper_lim:
click.echo(
"UPPER LIMIT <= min-upper-limit ; Setting Max contrast to min_upper_lim")
upper_lim = min_upper_lim
else:
# Set absolute contrast here.
upper_lim = threshold_value
lower_lim = 0
# Generate the output file.
outfile = open('calcThreshold.txt', 'w')
outfile.write("upper_threshold:{}\nlower_threshold:{}\nrow_num:{}\ncol_num:{}\nheader:{}\nstart_col:{}".format(
upper_lim, lower_lim, row_num, col_num, header, start_col))
click.echo('\ncontrast_upper_threshold:' + str(upper_lim))
click.echo('contrast_lower_threshold:' + str(lower_lim))
outfile.flush()
outfile.close()
click.echo('.' * 50 + '\n')
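# ----------------------------------------------------------------------------
# Hedged example: the core of the quantile branch in calculate_threshold() on a
# toy matrix. Only non-zero cells enter the percentile, and the upper limit is
# floored at min_upper_lim when the computed percentile is too low.
def _example_quantile_threshold(min_upper_lim=5.0, threshold_value=90.0):
    data0 = np.array([[0.0, 1.0, 2.0],
                      [3.0, 0.0, 4.0]])
    rows, cols = np.nonzero(data0)
    upper_lim = np.percentile(data0[rows, cols], threshold_value)
    if upper_lim <= min_upper_lim:
        upper_lim = min_upper_lim
    return upper_lim, 0  # (upper, lower) contrast limits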
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(options_metavar='<options>', context_settings=CONTEXT_SETTINGS)
@click.argument('tagpileup-cdt', type=click.Path(exists=True, resolve_path=True, file_okay=True, dir_okay=False,))
@click.option('-hh', '--has-header', metavar="<string>", type=click.BOOL, default='T', prompt=True, show_default='True', help='has headers ?')
@click.option('-ct', '--threshold-type', type=click.Choice(['quantile', 'absolute'], case_sensitive=False), prompt=True, default='quantile')
@click.option('-cv', '--threshold-value', metavar="<float>", default=90.0, prompt=True, show_default="quantile:90.0", help="Takes values >=0")
@click.option('-m', '--min-upper-limit', metavar="<float>", default=5.0, prompt=True, show_default='5.0', help='Minimum upper limit')
@click.option('-s', '--start_col', metavar="<int>", default=2, prompt=True, show_default='2', help='Start column')
@click.option('-r', '--row_num', metavar="<int>", default=700, prompt=True, show_default='700', help='Height of the plot')
@click.option('-c', '--col_num', metavar="<int>", default=300, prompt=True, show_default='300', help='Width of the plot')
def cli(min_upper_limit, tagpileup_cdt, has_header, start_col, row_num, col_num, threshold_type, threshold_value):
"""
Calculates a contrast threshold from the CDT file generated by tagpileup. The calculated values are reported in a text file, which can then be used to set a uniform contrast for multiple heatmaps generated downstream.
\b
For example, if you're trying to create multiple heatmaps with dimensions (Width x Height) = (300 x 700) px,
use width (col_num) = 300 and height (row_num) = 700.
"""
click.echo('\n' + '.' * 50)
click.echo('Contrast threshold Type: %s' % threshold_type)
click.echo('Contrast threshold Value: %s' % threshold_value)
click.echo('Has Header: %s' % has_header)
click.echo('Start column: %s' % start_col)
click.echo('Height (pixels): %s' % row_num)
click.echo('Width (pixels): %s' % col_num)
click.echo('Min Upper Limit (used only with quantile): %s' %
min_upper_limit)
if threshold_value <= 0:
click.echo('\n Invalid threshold value')
sys.exit(errno.EINVAL)
else:
calculate_threshold(tagpileup_cdt, threshold_type, threshold_value,
has_header, start_col, row_num, col_num, min_upper_limit)
return 0
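# ----------------------------------------------------------------------------
# Hedged usage example (the script name and values below are illustrative, not
# taken from the source):
#
#   python calc_threshold.py pileup.cdt -hh T -ct quantile -cv 90.0 -m 5.0 -s 2 -r 700 -c 300
#
# This writes calcThreshold.txt with the upper/lower contrast limits and the
# row/column/header/start-column settings used, so downstream heatmaps can share
# a uniform contrast.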
|