hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b613718c7e2d35ae6a1c1ae7f73a8f4eabb66ad5 | 4,153 | py | Python | mindhome_alpha/erpnext/selling/page/sales_funnel/sales_funnel.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/selling/page/sales_funnel/sales_funnel.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/selling/page/sales_funnel/sales_funnel.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.accounts.report.utils import convert
import pandas as pd
def validate_filters(from_date, to_date, company):
	"""Raise a translated validation error for a bad date range or a missing company."""
	dates_given = bool(from_date) and bool(to_date)
	if dates_given and not (from_date < to_date):
		frappe.throw(_("To Date must be greater than From Date"))
	if not company:
		frappe.throw(_("Please Select a Company"))
@frappe.whitelist()
def get_funnel_data(from_date, to_date, company):
	"""Count the funnel stages (leads, opportunities, quotations, conversions) for a period and company."""
	validate_filters(from_date, to_date, company)
	# Leads created inside the period for this company.
	active_leads = frappe.db.sql("""select count(*) from `tabLead`
		where (date(`creation`) between %s and %s)
		and company=%s""", (from_date, to_date, company))[0][0]
	# Opportunities created from a Lead in the same window.
	opportunities = frappe.db.sql("""select count(*) from `tabOpportunity`
		where (date(`creation`) between %s and %s)
		and opportunity_from='Lead' and company=%s""", (from_date, to_date, company))[0][0]
	# Submitted quotations (docstatus = 1) that trace back to an opportunity or to a lead.
	quotations = frappe.db.sql("""select count(*) from `tabQuotation`
		where docstatus = 1 and (date(`creation`) between %s and %s)
		and (opportunity!="" or quotation_to="Lead") and company=%s""", (from_date, to_date, company))[0][0]
	# Customers whose record links back to a Lead belonging to this company.
	converted = frappe.db.sql("""select count(*) from `tabCustomer`
		JOIN `tabLead` ON `tabLead`.name = `tabCustomer`.lead_name
		WHERE (date(`tabCustomer`.creation) between %s and %s)
		and `tabLead`.company=%s""", (from_date, to_date, company))[0][0]
	# One row per funnel stage; "color" is the chart segment colour used by the page.
	return [
		{ "title": _("Active Leads"), "value": active_leads, "color": "#B03B46" },
		{ "title": _("Opportunities"), "value": opportunities, "color": "#F09C00" },
		{ "title": _("Quotations"), "value": quotations, "color": "#006685" },
		{ "title": _("Converted"), "value": converted, "color": "#00AD65" }
	]
@frappe.whitelist()
def get_opp_by_lead_source(from_date, to_date, company):
	"""Build bar-chart data of expected opportunity value, grouped by source and sales stage.

	Returns a dict with 'labels' (sources) and 'datasets' (one per sales stage),
	or the string 'empty' when no open opportunities exist in the period.
	"""
	validate_filters(from_date, to_date, company)
	opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability', 'source'])
	if opportunities:
		# NOTE(review): passing the string 'None' as the docname of the Single
		# doctype 'Global Defaults' looks odd — confirm it resolves correctly.
		default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
		# Expected value = amount converted to the default currency, weighted by probability.
		cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
		df = pd.DataFrame(cp_opportunities).groupby(['source', 'sales_stage'], as_index=False).agg({'compound_amount': 'sum'})
		result = {}
		# NOTE(review): set() makes the label order non-deterministic between calls.
		result['labels'] = list(set(df.source.values))
		result['datasets'] = []
		# One zero-filled series per sales stage, then fill in the aggregated sums.
		for s in set(df.sales_stage.values):
			result['datasets'].append({'name': s, 'values': [0]*len(result['labels']), 'chartType': 'bar'})
		for row in df.itertuples():
			source_index = result['labels'].index(row.source)
			for dataset in result['datasets']:
				if dataset['name'] == row.sales_stage:
					dataset['values'][source_index] = row.compound_amount
		return result
	else:
		return 'empty'
@frappe.whitelist()
def get_pipeline_data(from_date, to_date, company):
	"""Build bar-chart data of total expected opportunity value per sales stage."""
	validate_filters(from_date, to_date, company)
	opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability'])
	if opportunities:
		# NOTE(review): same 'None' docname oddity as in get_opp_by_lead_source — confirm.
		default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
		# Expected value = amount converted to the default currency, weighted by probability.
		cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
		df = pd.DataFrame(cp_opportunities).groupby(['sales_stage'], as_index=True).agg({'compound_amount': 'sum'}).to_dict()
		result = {}
		# NOTE(review): these are dict views, not lists — presumably serialized fine
		# by the response layer; confirm the chart consumer accepts them.
		result['labels'] = df['compound_amount'].keys()
		result['datasets'] = []
		result['datasets'].append({'name': _("Total Amount"), 'values': df['compound_amount'].values(), 'chartType': 'bar'})
		return result
else:
return 'empty' | 41.53 | 273 | 0.696846 | 0 | 0 | 0 | 0 | 3,645 | 0.877679 | 0 | 0 | 1,743 | 0.419697 |
b61396aef7f1b8aef702d703e3287512dc0d3e4d | 4,535 | py | Python | ns/nxml.py | kentnf/noschema | f6506d7a5fb5c4bd86cb64daa72602dc9af4a69c | [
"MIT"
] | null | null | null | ns/nxml.py | kentnf/noschema | f6506d7a5fb5c4bd86cb64daa72602dc9af4a69c | [
"MIT"
] | null | null | null | ns/nxml.py | kentnf/noschema | f6506d7a5fb5c4bd86cb64daa72602dc9af4a69c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''
parse large XML files which stores funtional annotations of features
infact, we do not need xml module such as SAX at here
'''
import sys
# import datetime
'''
re_match_tag -- using regexp to match tag of xml
string: line of xml file
tag: <tag> or </tag>
status: 0 or 1, 0 match start, 1 match end
The string is much faster than regexp method, but can not ignore the upcase and lowcase
'''
def re_match_tag(string, tag, status):
	"""Report whether *string* contains the XML tag.

	status truthy -> look for the closing form '</tag>';
	status falsy  -> look for the opening form '<tag>' or '<tag ...'.
	Plain case-sensitive substring search; returns 1 on a hit, else 0.
	"""
	if status:
		return 1 if ('</' + tag + '>') in string else 0
	opening_forms = ('<' + tag + '>', '<' + tag + ' ')
	return 1 if any(form in string for form in opening_forms) else 0
'''
xml_get_attr -- get attr value base on key
'''
def xml_get_attr(string, key):
	"""Return the value of attribute *key* from an XML tag line, or 0 if absent.

	Parsing is naive whitespace tokenising (no real quoting rules): double
	quotes and a trailing '/>' are stripped from each token before splitting
	on '='.
	"""
	for token in string.strip('\n').split(' '):
		token = token.replace('"', '').replace('/>', '')
		if '=' in token:
			# Fix: maxsplit=1 (was 2) so attribute values that themselves
			# contain '=' (URLs, base64, ...) don't break the 2-tuple unpack.
			k, v = token.split('=', 1)
			if k == key:
				return v
	return 0
'''
xml_get_text -- get text in tag
'''
def xml_get_text(string):
	"""Return the text between the first '>' and the last '<' of *string*."""
	start = string.find('>') + 1
	end = string.rfind('<')
	return string[start:end]
'''
xml_get_value -- get value from xml tag, the value could be a attr of a key, or text in xml tag
if key exist, get attr value; otherwise, get text
'''
def xml_get_value(string, key):
	"""Return attribute *key*'s value when a key is given, else the tag's text."""
	return xml_get_attr(string, key) if key else xml_get_text(string)
'''
keep_hits -- keep hit_num of hits in xml_str
'''
def keep_hits(xml_str, hit_tag, hit_num):
	"""Keep only the first *hit_num* hits of *xml_str*.

	Lines are copied until *hit_num* closing </hit_tag> lines have been seen;
	the text after the LAST closing hit tag (the XML footer) is kept as well.
	"""
	closed = 0
	copying = True
	kept = []
	tail = []
	for line in xml_str.split('\n'):
		(kept if copying else tail).append(line + '\n')
		if re_match_tag(line, hit_tag, 1):
			closed = closed + 1
			if closed >= hit_num:
				copying = False
				# Restart the tail so only text after the final hit survives.
				tail = []
	return ''.join(kept) + ''.join(tail)
'''
remove_tag -- remove tag from xml
'''
def remove_tag(xml_str, remove_tag):
	"""Drop every line between <remove_tag ...> and </remove_tag>, inclusive.

	NOTE: the second parameter deliberately keeps its historical name even
	though it shadows the function itself inside the body.
	"""
	kept = []
	inside = 0
	for line in xml_str.split('\n'):
		if re_match_tag(line, remove_tag, 0):
			inside = 1
			continue
		if re_match_tag(line, remove_tag, 1):
			inside = 0
			continue
		if not inside:
			kept.append(line + '\n')
	return ''.join(kept)
'''
xml_to_attr -- save xml to dict: hits
input:
xml_file -- file name
branch_tag -- tag for branch wich includes multiple hits of features, sometimes includes feature info
feature_tag -- tag for feature
feature_key -- key for retrieve feature; blank key '' indicates feature store in text of tag
hit_tag -- tag name of each hit
hit_num -- number of hits (top 5 for blast, all for interproscan)
return:
key: feature_name
value: xml content
'''
def xml_to_attr(xml_file, branch_tag, feature_tag, feature_key, hit_tag, hit_num):
	"""Parse *xml_file* and map each feature name to the XML text of its branch.

	xml_file     -- path to the XML file
	branch_tag   -- tag wrapping one record of hits (e.g. one BLAST iteration)
	feature_tag  -- tag carrying the feature identifier
	feature_key  -- attribute holding the feature name; '' means use the tag text
	hit_tag      -- tag of a single hit inside a branch
	hit_num      -- keep only this many hits per branch (<= 0 keeps all)
	Returns {feature_name: branch_xml_string}.
	"""
	hits = {}
	status = 0
	value = ''
	# Fix: this dict was previously first assigned only when the first branch
	# open tag was seen, so a feature line appearing before that raised
	# NameError.
	feature_name_dict = {}

	def flush(branch_xml, names):
		# Store the accumulated branch text for every collected feature name.
		if hit_num > 0:
			branch_xml = keep_hits(branch_xml, hit_tag, hit_num)
		branch_xml = remove_tag(branch_xml, 'Iteration_stat')
		for fname in names:
			hits[fname] = branch_xml

	# Fix: open read-only; 'r+' needlessly required write permission.
	with open(xml_file, 'r') as fh:
		for line in fh:
			if re_match_tag(line, feature_tag, 0):
				feature_name = xml_get_value(line, feature_key)
				feature_name_dict[feature_name] = 1
			if re_match_tag(line, branch_tag, 0):
				# A new branch starts: flush the previous one first.
				if value and len(feature_name_dict) > 0:
					flush(value, feature_name_dict)
				status = 1
				value = ''
				feature_name_dict = {}
			if status == 1:
				value = value + line
			if re_match_tag(line, branch_tag, 1):
				status = 0
	# Flush the last record (no following branch open tag triggers it).
	if value and len(feature_name_dict) > 0:
		flush(value, feature_name_dict)
	print("Processing and store %d of branch xml to dict." % len(hits))
	return hits
#if __name__ == '__main__':
#xml_to_attr("dataset/CM4.0_protein.fasta.xml", 'protein', 'xref', 'name', 'matches', -1)
#xml_to_attr("dataset/CM4.0_protein.dia.tr.xml", 'Iteration', 'Iteration_query-def', '','Hit', 5)
| 24.646739 | 103 | 0.644983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,034 | 0.448512 |
b6143b3550f49956ddea8c4bfd01709e885c15e6 | 703 | py | Python | CodesComplete/Alura/DesignPatterns/calculador_de_impostos.py | vinimmelo/python | ef1f4e0550773592d3b0a88a3213de2f522870a3 | [
"MIT"
] | null | null | null | CodesComplete/Alura/DesignPatterns/calculador_de_impostos.py | vinimmelo/python | ef1f4e0550773592d3b0a88a3213de2f522870a3 | [
"MIT"
] | null | null | null | CodesComplete/Alura/DesignPatterns/calculador_de_impostos.py | vinimmelo/python | ef1f4e0550773592d3b0a88a3213de2f522870a3 | [
"MIT"
] | 1 | 2020-03-03T22:34:13.000Z | 2020-03-03T22:34:13.000Z | class Calculador_de_impostos:
def realiza_calculo(self, orcamento, imposto):
imposto_calculado = imposto.calcula(orcamento)
print(imposto_calculado)
if __name__ == '__main__':
	# Demo: build a three-item budget and print two composed taxes
	# (each outer tax wraps another one, e.g. ICMS(ISS())).
	from orcamento import Orcamento, Item
	from impostos import ISS, ICMS, ICPP, IKCV
	orcamento = Orcamento()
	orcamento.adiciona_item(Item('ITEM 1', 50))
	orcamento.adiciona_item(Item('ITEM 2', 200))
	orcamento.adiciona_item(Item('ITEM 3', 250))
	calculador_de_impostos = Calculador_de_impostos()
	print('ISS e ICMS')
	calculador_de_impostos.realiza_calculo(orcamento, ICMS(ISS()))
	print('ICPP e IKCV')
	calculador_de_impostos.realiza_calculo(orcamento, IKCV(ICPP()))
| 30.565217 | 67 | 0.721195 | 168 | 0.238976 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.083926 |
37337c62dbb2bddf2dd085e45f52e14215a16cb3 | 866 | py | Python | pyy1/.pycharm_helpers/python_stubs/-1550516950/_dbus_bindings/SignalMessage.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/_dbus_bindings/SignalMessage.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/_dbus_bindings/SignalMessage.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
# module _dbus_bindings
# from /usr/lib/python3/dist-packages/_dbus_bindings.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
Low-level Python bindings for libdbus. Don't use this module directly -
the public API is provided by the `dbus`, `dbus.service`, `dbus.mainloop`
and `dbus.mainloop.glib` modules, with a lower-level API provided by the
`dbus.lowlevel` module.
"""
# imports
import dbus.lowlevel as __dbus_lowlevel
class SignalMessage(__dbus_lowlevel.Message):
    """
    A signal message.

    Constructor::

        dbus.lowlevel.SignalMessage(path: str, interface: str, method: str)
    """
    # Auto-generated IDE stub: the real implementation lives in the C
    # extension, so the methods below only record the documented signatures.
    def __init__(self, path, interface, method): # real signature unknown; restored from __doc__
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
| 27.0625 | 96 | 0.69515 | 411 | 0.474596 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.734411 |
3733d3e30660680d9a0362cfceee9dfad9c58b74 | 2,871 | py | Python | examples3/sac/visualize_deepmind.py | lgvaz/torchrl | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 5 | 2018-06-21T14:33:40.000Z | 2018-08-18T02:26:03.000Z | examples3/sac/visualize_deepmind.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | null | null | null | examples3/sac/visualize_deepmind.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 2 | 2018-05-08T03:34:49.000Z | 2018-06-22T15:04:17.000Z | import torch, torch.nn as nn, numpy as np
import reward as rw, reward.utils as U
from dm_control import suite, viewer
# Torch device chosen once at import time by the reward utils helper.
DEVICE = U.device.get()
class PolicyNN(nn.Module):
    """Gaussian policy head: maps a state vector to (mean, log_std) of the action distribution."""

    def __init__(self, n_in, n_out, hidden=256, activation=nn.ReLU, logstd_range=(-20, 2)):
        super().__init__()
        self.logstd_range = logstd_range
        # Shared two-layer MLP trunk.
        self.layers = nn.Sequential(
            nn.Linear(n_in, hidden),
            activation(),
            nn.Linear(hidden, hidden),
            activation(),
        )
        # Output heads get a small uniform init so initial outputs stay near zero.
        # (Creation/init order kept as-is so seeded runs stay reproducible.)
        self.mean = nn.Linear(hidden, n_out)
        self.mean.weight.data.uniform_(-3e-3, 3e-3)
        self.mean.bias.data.uniform_(-3e-3, 3e-3)
        self.log_std = nn.Linear(hidden, n_out)
        self.log_std.weight.data.uniform_(-3e-3, 3e-3)
        self.log_std.bias.data.uniform_(-3e-3, 3e-3)

    def forward(self, x):
        """Return (mean, log_std); log_std is clamped into self.logstd_range."""
        h = self.layers(x)
        lo, hi = self.logstd_range
        return self.mean(h), self.log_std(h).clamp(lo, hi)
class Policy:
    """Thin wrapper turning PolicyNN outputs into a TanhNormal action distribution."""

    def __init__(self, nn):
        self.nn = nn

    def get_dist(self, s):
        """Build the squashed-Gaussian distribution for state batch *s*."""
        mean, log_std = self.nn(s)
        return rw.dist.TanhNormal(loc=mean, scale=log_std.exp())

    def get_act(self, s=None, dist=None):
        """Reparameterised action sample; pass exactly one of *s* or *dist*."""
        assert (s is not None and dist is None) or (s is None and dist is not None)
        if dist is None:
            dist = self.get_dist(s=s)
        return dist.rsample()

    def get_act_pre(self, s=None, dist=None):
        """Sample an action together with its pre-squash value; pass exactly one of *s* or *dist*."""
        assert (s is not None and dist is None) or (s is None and dist is not None)
        if dist is None:
            dist = self.get_dist(s=s)
        return dist.rsample_with_pre()

    def logprob(self, dist, acs):
        """Log-probability of *acs*, summed over the action dimension (kept as a column)."""
        return dist.log_prob(acs).sum(-1, keepdim=True)

    def logprob_pre(self, dist, acs):
        """Log-probability from pre-squash actions, summed over the action dimension."""
        return dist.log_prob_pre(acs).sum(-1, keepdim=True)

    def mean(self, dist):
        return dist.loc

    def std(self, dist):
        return dist.scale
def concat_state_shape(s_spec):
    """Total element count across all observation specs, as a 1-tuple shape."""
    total = sum(int(np.prod(spec.shape)) for spec in s_spec.values())
    return (int(total),)
def concat_state(s):
    """Flatten every observation array and join them into a single 1-D vector."""
    flat_parts = [obs.flatten() for obs in s.values()]
    return np.concatenate(flat_parts)
def get_act_fn(policy, a_map):
    """Build a dm_control viewer policy callback: TimeStep -> mapped numpy action."""
    def act(tstep):
        # Flatten the observation dict, add a batch axis, convert to a tensor.
        state = S(concat_state(tstep.observation)[None]).to_tensor()
        action = policy.get_act(state)[0]
        return a_map(U.to_np(action))
    return act
# env = suite.load(domain_name="cartpole", task_name="three_poles")
env = suite.load(domain_name="walker", task_name="run")
# Define spaces
S = rw.space.Continuous(shape=concat_state_shape(env.observation_spec()), low=-np.inf, high=np.inf)
A = rw.space.Continuous(low=env.action_spec().minimum, high=env.action_spec().maximum, shape=env.action_spec().shape)
# Map the policy's [-1, 1] output range onto the environment's action bounds.
a_map = U.map_range(-1, 1, A.low[0], A.high[0])
pnn = PolicyNN(n_in=S.shape[0], n_out=A.shape[0]).to(DEVICE)
policy = Policy(nn=pnn)
U.load_model(pnn, path='logs/dm/walker/run-max999-v9-2/models/pnn_checkpoint')
get_act = get_act_fn(policy=policy, a_map=a_map)
viewer.launch(env, policy=get_act)
| 38.797297 | 117 | 0.66388 | 1,703 | 0.593173 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.051898 |
37359cb8f0f551b30886095bb2ffa0068e416f4d | 3,599 | py | Python | evaluator/evaluate_model.py | k-chuang/code-generation-from-images | 903a9a88262b57307836b0253ef6afcfd010dc06 | [
"MIT"
] | 5 | 2018-10-17T02:48:49.000Z | 2021-12-12T14:51:09.000Z | evaluator/evaluate_model.py | k-chuang/code-generation-from-images | 903a9a88262b57307836b0253ef6afcfd010dc06 | [
"MIT"
] | null | null | null | evaluator/evaluate_model.py | k-chuang/code-generation-from-images | 903a9a88262b57307836b0253ef6afcfd010dc06 | [
"MIT"
] | 2 | 2019-03-31T23:35:11.000Z | 2021-11-29T07:56:23.000Z | import sys
sys.path.extend(['..'])
import tensorflow as tf
# TF1 session configured to grow GPU memory on demand instead of
# reserving it all up front.
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
from generator.generate_code import *
from nltk.translate.bleu_score import corpus_bleu
from config.config import *
from base.BaseModel import *
from utils.tokenizer import *
def evaluate_model(input_path, model_path, tokenizer, max_length=48, display=False):
	'''
	Evaluate model by comparing actual vs predictions via the BLEU scoring criteria
	:param input_path: input path containing images + gui code pairs to evaluate model on
	:param model_path: path to model files
	:param tokenizer: a Keras Tokenizer object fit on vocab
	:param max_length: context length
	:param display: bool on whether to print out DSL code predictions and actual labels to standard output
	:return: 4-ngram BLEU score, list of actual DSL code, list of predicted DSL code
	'''
	# Architecture (JSON) and weights (HDF5) are stored separately; take the
	# first match of each in the model directory.
	# NOTE(review): `glob`, `os` and `model_from_json` are presumably provided
	# by the wildcard imports above — confirm they resolve.
	model_json_path = glob.glob(os.path.join(model_path, '*.json'))[0]
	model_weights_path = glob.glob(os.path.join(model_path, '*.hdf5'))[0]
	with open(model_json_path, 'r') as fh:
		model_json = fh.read()
	model = model_from_json(model_json)
	model.load_weights(model_weights_path)
	print('Successfully loaded model and model weights...')
	images, texts = load_data(input_path)
	actual, predictions = list(), list()
	for i in range(len(texts)):
		predicted_code = generate_code(model, images[i], tokenizer, max_length, display)
		# store actual and predicted
		if display:
			print('\n\nActual---->\n\n' + texts[i])
		# corpus_bleu expects a list of reference token lists per sample.
		actual.append([texts[i].split()])
		predictions.append(predicted_code.split())
	bleu = corpus_bleu(actual, predictions)
	return bleu, actual, predictions
if __name__ == '__main__':
	# Expect exactly one CLI argument: the directory holding the trained model.
	argv = sys.argv[1:]
	if len(argv) != 1:
		print('Need to supply an argument specifying model path')
		exit(0)
	model_path = argv[0]
	test_dir = '../data/test/'
	# model_path = '../results/'
	vocab_path = '../data/code.vocab'
	# NOTE(review): the imported `tokenizer` helper is shadowed by its own
	# result here — confirm nothing later needs the original callable.
	tokenizer = tokenizer(vocab_path)
	bleu, actual, predictions = evaluate_model(test_dir, model_path, tokenizer, CONTEXT_LENGTH, display=False)
# Calculate BLEU score (standard is 4-gram, but just get all individual N-Gram BLEU scores from 1 gram to 4 gram)
# By default, the sentence_bleu() and corpus_bleu() scores calculate the cumulative 4-gram BLEU score, also called BLEU-4.
# It is common to report the cumulative BLEU-1 to BLEU-4 scores when describing the skill of a text generation system.
# 4-gram is the most strict and corresponds the best to human translations
print('BLEU-1: %f' % corpus_bleu(actual, predictions, weights=(1.0, 0, 0, 0)))
print('BLEU-2: %f' % corpus_bleu(actual, predictions, weights=(0.5, 0.5, 0, 0)))
print('BLEU-3: %f' % corpus_bleu(actual, predictions, weights=(0.3, 0.3, 0.3, 0)))
print('BLEU-4: %f' % corpus_bleu(actual, predictions, weights=(0.25, 0.25, 0.25, 0.25)))
bleu_score_path = os.path.join(model_path, 'bleu_score.txt')
with open(bleu_score_path, 'w') as fh:
fh.write('Test set dir: %s\n' % test_dir)
fh.write('BLEU-1: %f \n' % corpus_bleu(actual, predictions, weights=(1.0, 0, 0, 0)))
fh.write('BLEU-2: %f \n' % corpus_bleu(actual, predictions, weights=(0.5, 0.5, 0, 0)))
fh.write('BLEU-3: %f \n' % corpus_bleu(actual, predictions, weights=(0.3, 0.3, 0.3, 0)))
fh.write('BLEU-4: %f \n' % corpus_bleu(actual, predictions, weights=(0.25, 0.25, 0.25, 0.25)))
| 46.74026 | 126 | 0.684912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,335 | 0.370936 |
373624a9b643c4143bff91e625ea72076589eebb | 3,580 | py | Python | populate/populate.py | vascoalramos/roomie | 031aef815af910b259da0fef7cca5bec02459006 | [
"MIT"
] | null | null | null | populate/populate.py | vascoalramos/roomie | 031aef815af910b259da0fef7cca5bec02459006 | [
"MIT"
] | null | null | null | populate/populate.py | vascoalramos/roomie | 031aef815af910b259da0fef7cca5bec02459006 | [
"MIT"
] | 2 | 2021-06-16T07:12:35.000Z | 2021-06-16T22:47:46.000Z | from faker import Faker
import os, random, requests, json
fake = Faker()
# Base URL of the locally running Roomie API.
BASE_URL = "http://localhost:8083/api"
# Cities used to randomise generated house addresses.
cities = [
    "Braga",
    "Viseu",
    "Porto",
    "Lisboa",
    "Guimarães",
    "Leiria",
    "Coimbra",
    "Santarém",
    "Guarda",
    "Aveiro",
    "Faro",
    "Portimão",
    "Beja",
    "Évora",
]
def register_landlord():
    """Register a randomly generated landlord account via the API.

    The payload is posted as multipart form data together with a random
    avatar image; the id assigned by the API is added to the returned dict.
    """
    profile = fake.simple_profile()
    payload = {
        "email": "l_" + str(fake.random_int(0, 100)) + profile["mail"],
        "password": fake.password(),
        "username": profile["username"],
        "name": profile["name"],
        "birthDate": "2021-04-14",
        "sex": "male",
        "nif": "111111111",
        "address": "Av Test B1 2E, Viseu",
        "phone": "9111111111",
    }
    avatar_name = random.choice(os.listdir("./avatars"))
    avatar = open("./avatars/" + avatar_name, "rb")
    response = requests.post(f"{BASE_URL}/landlords", data=payload, files={"file": avatar})
    payload["id"] = response.json()["id"]
    return payload
def register_tenant():
    """Register a randomly generated tenant account via the API.

    Mirrors register_landlord() but posts to the tenants endpoint with
    tenant-specific fields (nationality, occupation).
    """
    profile = fake.simple_profile()
    payload = {
        "email": "t_" + str(fake.random_int(0, 100)) + profile["mail"],
        "password": fake.password(),
        "username": profile["username"],
        "name": profile["name"],
        "birthDate": "2021-04-14",
        "sex": "male",
        "nif": "111111111",
        "nationality": "PT",
        "occupation": "Test occupation",
        "phone": "9111111111",
    }
    avatar_name = random.choice(os.listdir("./avatars"))
    avatar = open("./avatars/" + avatar_name, "rb")
    response = requests.post(f"{BASE_URL}/tenants", data=payload, files={"file": avatar})
    payload["id"] = response.json()["id"]
    return payload
def login(email, password):
    """Authenticate against the API and return the session token string."""
    body = {"email": email, "password": password}
    response = requests.post(
        f"{BASE_URL}/auth/login",
        json=body,
        headers={"content-type": "application/json"},
    )
    return response.json()["token"]
def post_house(token):
    """Create one house with randomised fields for the landlord owning *token*.

    Attaches between 1 and 7 random photos from ./houses and returns the
    API's JSON response for the created house.
    """
    payload = {
        "title": fake.text(),
        "rooms": fake.random_int(0, 6),
        "availableRooms": fake.random_int(0, 5),
        "bathRooms": fake.random_int(0, 3),
        "minPrice": 250,
        "maxPrice": 300,
        "description": fake.text(max_nb_chars=500).replace("\n", " "),
        "features": "feat1,feat2,feat3,feat4",
        "address": "Av Test B1 2E, " + random.choice(cities),
    }
    photos = [
        ("files", open("./houses/" + random.choice(os.listdir("./houses")), "rb"))
        for _ in range(random.randint(1, 7))
    ]
    response = requests.post(
        f"{BASE_URL}/houses",
        data=payload,
        files=photos,
        headers={"Authorization": "Bearer " + token},
    )
    return response.json()
def main():
    """Seed the API with 50 landlords, 50 tenants and 500 houses, then dump users.json."""
    landlords = []
    for _ in range(50):
        account = register_landlord()
        account["token"] = login(account["email"], account["password"])
        landlords.append(account)

    tenants = []
    for _ in range(50):
        account = register_tenant()
        account["token"] = login(account["email"], account["password"])
        tenants.append(account)

    # Each house is owned by a randomly chosen landlord.
    houses = []
    for _ in range(500):
        owner = random.choice(landlords)
        houses.append(post_house(owner["token"]))

    users = {"landlords": landlords, "tenants": tenants}
    with open("./users.json", "w") as out:
        json.dump(users, out, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()
| 25.755396 | 86 | 0.548603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,002 | 0.279576 |
3736aa39ffdd286fb231d2f6df918307638bcf17 | 1,797 | py | Python | python/deploy-cpu-alarms.py | earthlab/aws-ops | e4310c994a93511783a947eebdf13a52c7760edb | [
"BSD-2-Clause"
] | 1 | 2019-03-22T15:16:50.000Z | 2019-03-22T15:16:50.000Z | python/deploy-cpu-alarms.py | earthlab/aws-ops | e4310c994a93511783a947eebdf13a52c7760edb | [
"BSD-2-Clause"
] | 3 | 2018-12-04T00:59:57.000Z | 2021-02-15T19:47:38.000Z | python/deploy-cpu-alarms.py | earthlab/aws-ops | e4310c994a93511783a947eebdf13a52c7760edb | [
"BSD-2-Clause"
] | 1 | 2019-11-14T13:49:41.000Z | 2019-11-14T13:49:41.000Z | # Deploy idle CPU alarms to stop EC2 instances
import boto3

# AWS clients and account context used to create the alarms.
account_id = boto3.client("sts").get_caller_identity().get("Account")
region = boto3.session.Session().region_name

client = boto3.client("cloudwatch")
ec = boto3.client("ec2")

reservations = ec.describe_instances()

# Instances whose Name tag contains any of these substrings get no alarm.
exceptions = ["prf-"]

for r in reservations["Reservations"]:
    for i in r["Instances"]:
        instance_id = i["InstanceId"]
        # Fix: reset the name for every instance. Previously `iname` kept the
        # value from a prior iteration (or was unbound) when an instance had
        # no Name tag, and untagged instances raised KeyError on i["Tags"].
        iname = ""
        for t in i.get("Tags", []):
            if t["Key"] == "Name":
                iname = t["Value"]
        name_excepted = any(e in iname for e in exceptions)
        if name_excepted:
            continue
        alarm_name = "CPU Alarm " + iname + instance_id
        # Stop the instance after 2 consecutive hourly periods of <= 1% CPU.
        alarm = client.put_metric_alarm(
            AlarmName=alarm_name,
            MetricName="CPUUtilization",
            Namespace="AWS/EC2",
            Statistic="Maximum",
            ComparisonOperator="LessThanOrEqualToThreshold",
            Threshold=1.0,
            Period=60 * 60,  # in seconds
            EvaluationPeriods=2,
            Dimensions=[{"Name": "InstanceId", "Value": instance_id}],
            Unit="Percent",
            ActionsEnabled=True,
            AlarmActions=[
                ":".join(
                    [
                        "arn:aws:swf",
                        region,
                        account_id,
                        "action/actions/AWS_EC2.InstanceId.Stop/1.0",
                    ]
                )
            ],
        )
| 39.065217 | 82 | 0.42571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.180301 |
3736d63ff73d0b10a801f51f602f6aaddc8db142 | 746 | py | Python | Exam/oppgave_2.py | Chillu1/PythonUiS | 7169f0d625d6419a3e002b1e3285ca08fc99c020 | [
"MIT"
] | null | null | null | Exam/oppgave_2.py | Chillu1/PythonUiS | 7169f0d625d6419a3e002b1e3285ca08fc99c020 | [
"MIT"
] | null | null | null | Exam/oppgave_2.py | Chillu1/PythonUiS | 7169f0d625d6419a3e002b1e3285ca08fc99c020 | [
"MIT"
] | 1 | 2021-04-26T14:32:52.000Z | 2021-04-26T14:32:52.000Z | import turtle
def stjerne(størrelse):
	# Draw a 24-ray star of the given size: stroke each ray out and back,
	# turning 15 degrees between rays (24 * 15 = 360, one full revolution).
	for i in range(24):
		turtle.forward(størrelse)
		turtle.backward(størrelse)
		turtle.right(15)
def tegn_stjerner(stjerner):
	# Draw a square frame of small stars, then one large star in the middle.
	turtle.speed(0)
	for i in range(4):
		for i in range(stjerner-1):# frame: (stjerner - 1) small stars per side
			stjerne(10)
			turtle.penup()
			turtle.forward(30)
			turtle.pendown()
		turtle.right(90)
	# Move from the frame's corner towards the centre before the big star.
	turtle.penup()
	turtle.right(45)
	turtle.forward(((stjerner / 2) - 1) * 30*2.1)
	#turtle.forward(stjerner**2 * (3.75**(stjerner/10)))
	turtle.pendown()
	stjerne(((stjerner / 2) - 1) * 30)
if __name__ == "__main__":
	# Fast draw: disable animation, render everything, then show the window.
	turtle.tracer(0, 0)
	tegn_stjerner(7)
	turtle.update()
	turtle.mainloop()
input() | 21.314286 | 56 | 0.577748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.090788 |
3737f665e231d9f0a760461d0daa2bfa21140b4e | 2,103 | py | Python | YatzyPy/tests.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | YatzyPy/tests.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | YatzyPy/tests.py | markomanninen/YatzyPy | a6904b22473ae909f588e3b82a67b8b4f2dce0f2 | [
"MIT"
] | null | null | null | # tests.py
from . main import Yatzy
def runTests():
	"""Exercise Yatzy.getScoreTable() against hand-computed scores for fixed rolls."""
	# Roll 5,5,6,5,6 -> sum 27; full house (three 5s + two 6s); two pairs 5+5+6+6 = 22.
	c = Yatzy([5, 5, 6, 5, 6])
	s = c.getScoreTable()
	assert s['change'] == 27 and s['fullhouse'] == 27
	assert s['double'] == 12 and s['six'] == 12
	assert s['five'] == 15 and s['triple'] == 15
	assert s['pair'] == 22
	# Roll 5,5,5,5,5 -> yatzy; every combination scores from fives only.
	c = Yatzy([5, 5, 5, 5, 5])
	s = c.getScoreTable()
	assert s['change'] == 25 and s['fullhouse'] == 25 and s['five'] == 25
	assert s['double'] == 10
	assert s['triple'] == 15
	assert s['pair'] == 20 and s['quadruple'] == 20
	assert s['yatzy'] == 50
	# Roll 4,4,4,4,1 -> sum 17; four of a kind 4*4 = 16.
	c = Yatzy([4,4,4,4,1])
	s = c.getScoreTable()
	assert s['change'] == 17
	assert s['double'] == 8
	assert s['triple'] == 12
	assert s['one'] == 1
	assert s['pair'] == 16 and s['quadruple'] == 16
	# Roll 3,3,3,2,1 -> sum 12; three of a kind 3*3 = 9.
	c = Yatzy([3,3,3,2,1])
	s = c.getScoreTable()
	assert s['change'] == 12
	assert s['double'] == 6
	assert s['triple'] == 9 and s['three'] == 9
	assert s['one'] == 1
	assert s['two'] == 2
	# Roll 3,3,4,2,1 -> sum 13; pair of threes only.
	c = Yatzy([3,3,4,2,1])
	s = c.getScoreTable()
	assert s['change'] == 13
	assert s['one'] == 1
	assert s['two'] == 2
	assert s['four'] == 4
	assert s['three'] == 6 and s['double'] == 6
	# Roll 3,5,4,2,1 -> small straight 1-5, sum 15.
	c = Yatzy([3,5,4,2,1])
	s = c.getScoreTable()
	assert s['change'] == 15 and s['smallstraight'] == 15
	assert s['one'] == 1
	assert s['two'] == 2
	assert s['three'] == 3
	assert s['four'] == 4
	assert s['five'] == 5
	# Roll 3,5,4,2,6 -> large straight 2-6, sum 20.
	c = Yatzy([3,5,4,2,6])
	s = c.getScoreTable()
	assert s['change'] == 20 and s['largestraight'] == 20
	assert s['six'] == 6
	assert s['two'] == 2
	assert s['three'] == 3
	assert s['four'] == 4
	assert s['five'] == 5
	# Roll 3,5,4,1,6 -> no straight (missing 2), sum 19.
	c = Yatzy([3,5,4,1,6])
	s = c.getScoreTable()
	assert s['change'] == 19
	assert s['six'] == 6
	assert s['one'] == 1
	assert s['three'] == 3
	assert s['four'] == 4
	assert s['five'] == 5
	# Roll 3,3,4,4,5 -> two pairs 3+3+4+4 = 14, sum 19.
	c = Yatzy([3,3,4,4,5])
	s = c.getScoreTable()
	assert s['change'] == 19
	assert s['three'] == 6
	assert s['four'] == 8 and s['double'] == 8
	assert s['five'] == 5
	assert s['pair'] == 14
| 26.961538 | 73 | 0.495483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.206847 |
3738e841ef076c03ff9662d1673998caf409d2bf | 19 | py | Python | main/libx11/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 46 | 2021-06-10T02:27:32.000Z | 2022-03-27T11:33:24.000Z | main/libx11/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 58 | 2021-07-03T13:58:20.000Z | 2022-03-13T16:45:35.000Z | main/libx11/update.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | 6 | 2021-07-04T10:46:40.000Z | 2022-01-09T00:03:59.000Z | pkgname = "libX11"
| 9.5 | 18 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.421053 |
373946b2e35452274f3d0d871859ed8aa4319280 | 2,756 | py | Python | raiden/tests/integration/test_version.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | raiden/tests/integration/test_version.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | raiden/tests/integration/test_version.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | import pytest
import tempfile
import re
import os
import shutil
from raiden.utils import get_contract_path
from raiden.utils.solc import compile_files_cwd
from raiden.exceptions import ContractVersionMismatch
from raiden.blockchain.abi import CONTRACT_VERSION_RE, CONTRACT_MANAGER, CONTRACT_CHANNEL_MANAGER
def replace_contract_version(file_path, new_version):
    """Rewrite the contract version constant inside the file at *file_path*.

    Every line matching CONTRACT_VERSION_RE has its version substring replaced
    with *new_version*; the rewritten content then overwrites the original file.
    """
    version_re = re.compile(CONTRACT_VERSION_RE)
    with open(file_path, 'r') as original:
        # Fix: manage the temporary file with a context manager so the file
        # descriptor is always closed (and the temp file removed) instead of
        # leaking until garbage collection.
        with tempfile.NamedTemporaryFile() as replaced:
            for line in original.readlines():
                if version_re.match(line):
                    # NOTE(review): the pattern consumes only one character of
                    # the patch segment — confirm stored versions never have a
                    # multi-character patch number.
                    line = re.sub(r'[0-9]+\.[0-9]+\.[0-9\_]', new_version, line)
                replaced.write(line.encode())
            replaced.flush()
            shutil.copy2(replaced.name, file_path)
class TempSolidityDir:
    """A copy of a Solidity source directory placed under pytest's tmpdir.

    shutil.copytree() insists on creating the destination itself, so the
    directory created by tmpdir.mkdir() is removed first and then recreated
    by the copy. The resulting path is exposed as ``self.name``.
    """

    def __init__(self, original_directory, tmpdir):
        basename = os.path.basename(original_directory)
        self.name = tmpdir.mkdir(basename).strpath
        os.rmdir(self.name)  # directory must not exist when using shutil.copytree()
        shutil.copytree(original_directory, self.name)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_deploy_contract(raiden_network, deploy_client, tmpdir):
    """Test deploying contract with different version than the one we have set in Registry.sol.

    This test makes sense only for geth backend, tester uses mocked Registry class.
    """
    contract_path = get_contract_path('Registry.sol')
    # Create temporary directory to put all files required to compile the changed contract to.
    # Why? Solidity uses first 40 characters of the file path as a library symbol.
    # It would be nice to just do a copy of 'Registry.sol', replace version and include statements
    # and then by path substitution argument of solc set the path to something like
    # raiden-contracts=/path/to/your/raiden/source/contracts. But then if the path is too long,
    # Python solidity compiler will fail because of duplicate library symbol.
    temp_dir = TempSolidityDir(os.path.dirname(contract_path), tmpdir)
    replaced_registry_path = os.path.join(temp_dir.name, 'Registry.sol')
    # NOTE(review): return value unused — presumably called for its side effect; confirm.
    CONTRACT_MANAGER.get_contract_abi(CONTRACT_CHANNEL_MANAGER)
    # Stamp a version the client cannot possibly expect.
    replace_contract_version(replaced_registry_path, '0.0.31415')
    contracts = compile_files_cwd([replaced_registry_path])
    contract_proxy = deploy_client.deploy_solidity_contract(
        'Registry',
        contracts,
        dict(),
        None,
        contract_path=replaced_registry_path,
    )
    contract_address = contract_proxy.contract_address
    app0 = raiden_network[0]
    # The deployed 0.0.31415 version differs from the client's expectation,
    # so constructing the registry proxy must raise.
    with pytest.raises(ContractVersionMismatch):
        app0.raiden.chain.registry(contract_address)
| 41.134328 | 99 | 0.736938 | 319 | 0.115747 | 0 | 0 | 1,653 | 0.599782 | 0 | 0 | 863 | 0.313135 |
373f83cec206a62336b452fd4c464d8bd69932f0 | 2,518 | py | Python | quex/engine/state_machine/algebra/TESTS/additional_laws/TEST/complement-relative.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/engine/state_machine/algebra/TESTS/additional_laws/TEST/complement-relative.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | quex/engine/state_machine/algebra/TESTS/additional_laws/TEST/complement-relative.py | smmckay/quex-mirror | 7d75ed560e9f3a591935e59243188676eecb112a | [
"MIT"
] | null | null | null | import os
import sys
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.engine.state_machine.core import DFA
from quex.engine.state_machine.algebra.TESTS.helper import test2, test1, test3, union, \
intersection, \
identity, \
complement, \
difference, \
add_more_DFAs, sample_DFAs
# When invoked by the HWUT test driver with '--hwut-info', print only the
# test's meta information (title, choices, pass pattern) and exit.
if "--hwut-info" in sys.argv:
    print "Complement: Relativity in difference operations;"
    print "CHOICES: 1, 2, 3;"
    print "HAPPY: [0-9]+;"
    sys.exit()

# Number of executed law checks; incremented by the callbacks below.
count = 0
def one(A):
    """Check set-difference laws that involve a single DFA ``A`` together
    with the empty and the universal language.
    """
    global count
    assert identity(difference(A, A), DFA.Empty())
    assert identity(difference(DFA.Empty(), A), DFA.Empty())
    assert identity(difference(A, DFA.Empty()), A)
    # Difference from the universal language is the complement.
    assert identity(difference(DFA.Universal(), A), complement(A))
    assert identity(difference(A, DFA.Universal()), DFA.Empty())
    count += 1
def two(A, B):
    """Check two-operand difference laws: B \ A == ~A & B, and its
    De Morgan dual ~(B \ A) == A | ~B.
    """
    global count
    assert identity(difference(B, A), intersection([complement(A), B]))
    assert identity(complement(difference(B, A)), union([A, complement(B)]))
    count += 1
def three(A, B, C):
    """Check three-operand distribution laws of difference over union and
    intersection. DFAs are cloned before each operation because the algebra
    operations may modify their operands.
    """
    global count
    diff_C_B = difference(C.clone(), B.clone())
    diff_C_A = difference(C.clone(), A.clone())
    diff_B_A = difference(B.clone(), A.clone())
    # C \ (A & B) == (C \ A) | (C \ B)
    assert identity(difference(C.clone(), intersection([A.clone(), B.clone()])),
                    union([diff_C_A.clone(), diff_C_B.clone()]))
    # C \ (A | B) == (C \ A) & (C \ B)
    assert identity(difference(C.clone(), union([A.clone(), B.clone()])),
                    intersection([diff_C_A.clone(), diff_C_B.clone()]))
    # C \ (B \ A) == (A & C) | (C \ B)
    assert identity(difference(C.clone(), diff_B_A.clone()),
                    union([intersection([A.clone(), C.clone()]), diff_C_B.clone()]))
    # (B \ A) & C equals both (B & C) \ A and B & (C \ A).
    tmp = intersection([diff_B_A.clone(), C.clone()])
    assert identity(tmp, difference(intersection([B.clone(), C.clone()]), A.clone()))
    assert identity(tmp, intersection([B.clone(), diff_C_A.clone()]))
    # (B \ A) | C == (B | C) \ (A \ C)
    assert identity(union([diff_B_A.clone(), C.clone()]),
                    difference(union([B.clone(), C.clone()]), difference(A.clone(), C.clone())))
    count += 1
# Dispatch on the HWUT choice given on the command line; each branch feeds
# one of the law checkers above into the matching driver from the helper
# module (test1/test2/test3 iterate over sampled DFA tuples).
if "1" in sys.argv:
    add_more_DFAs()
    test1(one)
elif "2" in sys.argv:
    test2(two)
elif "3" in sys.argv:
    sample_DFAs(3)
    test3(three)

# Report how many law-check invocations ran (matched by the HAPPY pattern).
print "<terminated: %i>" % count
| 36.492754 | 96 | 0.554011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.054011 |
37495bbd87853858c8dd154c007737ded2ed7429 | 9,920 | py | Python | Pyrado/scripts/sandbox/sb_mg.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 52 | 2020-05-02T13:55:09.000Z | 2022-03-09T14:49:36.000Z | Pyrado/scripts/sandbox/sb_mg.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 40 | 2020-09-01T15:19:22.000Z | 2021-11-02T14:51:41.000Z | Pyrado/scripts/sandbox/sb_mg.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | [
"DOC",
"Zlib",
"BSD-3-Clause"
] | 13 | 2020-07-03T11:39:21.000Z | 2022-02-20T01:12:42.000Z | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Script to test the simplified box flipping task using a hard-coded time-based policy
"""
import math
import rcsenv
import torch as to
import pyrado
from pyrado.domain_randomization.domain_parameter import UniformDomainParam
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperLive
from pyrado.environments.rcspysim.mini_golf import MiniGolfIKSim, MiniGolfJointCtrlSim
from pyrado.policies.features import FeatureStack, const_feat
from pyrado.policies.feed_back.linear import LinearPolicy
from pyrado.policies.feed_forward.dummy import IdlePolicy
from pyrado.policies.feed_forward.poly_time import PolySplineTimePolicy
from pyrado.policies.special.environment_specific import create_mg_joint_pos_policy
from pyrado.sampling.rollout import after_rollout_query, rollout
from pyrado.utils.data_types import RenderMode
from pyrado.utils.input_output import print_cbt
rcsenv.setLogLevel(2)
def create_idle_setup(physicsEngine: str, dt: float, max_steps: int, checkJointLimits: bool):
    """Create the mini-golf IK simulation paired with a policy that never acts.

    :param physicsEngine: physics engine name, e.g. 'Bullet' or 'Vortex'
    :param dt: simulation step size [s]
    :param max_steps: maximum number of simulation steps
    :param checkJointLimits: flag to check the joint limits during simulation
    :return: tuple of environment and (idle) policy
    """
    env_kwargs = dict(
        usePhysicsNode=True,
        physicsEngine=physicsEngine,
        dt=dt,
        max_steps=max_steps,
        checkJointLimits=checkJointLimits,
        fixedInitState=True,
        observeForceTorque=True,
    )
    env = MiniGolfIKSim(**env_kwargs)

    # The idle policy commands zero action, i.e. the robot does not move at all.
    policy = IdlePolicy(env.spec)
    return env, policy
def create_pst_setup(physicsEngine: str, dt: float, max_steps: int, checkJointLimits: bool):
    """Create the mini-golf IK simulation driven by a polynomial-spline
    time-based policy that executes the strike motion.

    :param physicsEngine: physics engine name, e.g. 'Bullet' or 'Vortex'
    :param dt: simulation step size [s]
    :param max_steps: maximum number of simulation steps
    :param checkJointLimits: flag to check the joint limits during simulation
    :return: tuple of environment and PolySplineTimePolicy
    """
    # Set up environment. relativeZdTask toggles between the two task
    # parametrizations handled by the if/else below.
    relativeZdTask = True
    print_cbt(f"relativeZdTask = {relativeZdTask}", "c", bright=True)
    env = MiniGolfIKSim(
        relativeZdTask=relativeZdTask,
        usePhysicsNode=True,
        physicsEngine=physicsEngine,
        dt=dt,
        max_steps=max_steps,
        checkJointLimits=checkJointLimits,
        fixedInitState=True,
        observeForceTorque=False,
        collisionAvoidanceIK=True,
    )

    # Set up policy. cond_init/cond_final are the spline's boundary
    # conditions (positions in the first row, velocities in the second);
    # cond_lvl="vel" means conditions are given up to the velocity level.
    if relativeZdTask:
        policy_hparam = dict(
            t_end=0.6,
            cond_lvl="vel",
            # Zd (rel), Y (rel), Zdist (abs), PHI (abs), THETA (abs)
            cond_final=[
                [0.0, 0.0, 0.01, math.pi / 2, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            cond_init=[
                [-100.0, 0.0, 0.01, math.pi / 2, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            overtime_behavior="hold",
        )
    else:
        policy_hparam = dict(
            t_end=3.0,
            cond_lvl="vel",
            # X (abs), Y (rel), Z (abs), A (abs), C (abs)
            # cond_final=[[0.5, 0.0, 0.04, -0.876], [0.5, 0.0, 0.0, 0.0]],
            # cond_init=[[0.1, 0.0, 0.04, -0.876], [0.0, 0.0, 0.0, 0.0]],
            # X (abs), Y (rel), Zdist (abs), PHI (abs), THETA (abs)
            cond_final=[
                [0.9, 0.0, 0.005, math.pi / 2, 0.0],  # math.pi / 2 - 0.4
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            cond_init=[
                [0.3, 0.0, 0.01, math.pi / 2, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            overtime_behavior="hold",
        )
    policy = PolySplineTimePolicy(env.spec, dt, **policy_hparam)

    return env, policy
def create_lin_setup(physicsEngine: str, dt: float, max_steps: int, checkJointLimits: bool):
    """Create the mini-golf IK simulation driven by a constant-feature linear
    policy, i.e. a policy that always outputs the same fixed action.

    :param physicsEngine: physics engine name, e.g. 'Bullet' or 'Vortex'
    :param dt: simulation step size [s]
    :param max_steps: maximum number of simulation steps
    :param checkJointLimits: flag to check the joint limits during simulation
    :return: tuple of environment and LinearPolicy
    """
    # Set up environment
    env = MiniGolfIKSim(
        usePhysicsNode=True,
        physicsEngine=physicsEngine,
        dt=dt,
        max_steps=max_steps,
        checkJointLimits=checkJointLimits,
        fixedInitState=True,
    )

    # Set up policy. With only a constant feature, param_values is the
    # constant action itself. NOTE(review): the original comment listed four
    # labels "X (abs), Y (rel), Z (abs), C (abs)" for three values — the
    # first three labels presumably apply; confirm against MiniGolfIKSim's
    # action space.
    policy = LinearPolicy(env.spec, FeatureStack([const_feat]))
    policy.param_values = to.tensor([0.6, 0.0, 0.03])

    return env, policy
def create_time_setup(physicsEngine: str, dt: float, max_steps: int, checkJointLimits: bool):
    """Create the joint-space-controlled mini-golf simulation together with the
    hand-crafted time-based joint-position strike policy.

    :param physicsEngine: physics engine name, e.g. 'Bullet' or 'Vortex'
    :param dt: simulation step size [s]
    :param max_steps: maximum number of simulation steps
    :param checkJointLimits: flag to check the joint limits during simulation
    :return: tuple of environment and policy
    """
    sim_kwargs = dict(
        usePhysicsNode=True,
        physicsEngine=physicsEngine,
        dt=dt,
        max_steps=max_steps,
        checkJointLimits=checkJointLimits,
        fixedInitState=True,
        collisionAvoidanceIK=False,
        graphFileName="gMiniGolf_gt.xml",
        physicsConfigFile="pMiniGolf_gt.xml",
    )
    env = MiniGolfJointCtrlSim(**sim_kwargs)

    # Pre-defined joint-position policy; the strike motion ends at t = 0.5 s.
    return env, create_mg_joint_pos_policy(env, t_strike_end=0.5)
if __name__ == "__main__":
    # Choose setup
    setup_type = "pst"  # idle, pst, lin, or time
    physicsEngine = "Bullet"  # Bullet or Vortex
    dt = 1 / 100.0
    max_steps = int(8 / dt)  # simulate 8 seconds per rollout
    checkJointLimits = True
    randomize = False  # set True to wrap the env with domain randomization

    # Build the environment/policy pair for the selected setup.
    if setup_type == "idle":
        env, policy = create_idle_setup(physicsEngine, dt, max_steps, checkJointLimits)
    elif setup_type == "pst":
        env, policy = create_pst_setup(physicsEngine, dt, max_steps, checkJointLimits)
    elif setup_type == "lin":
        env, policy = create_lin_setup(physicsEngine, dt, max_steps, checkJointLimits)
    elif setup_type == "time":
        env, policy = create_time_setup(physicsEngine, dt, max_steps, checkJointLimits)
    else:
        raise pyrado.ValueErr(given=setup_type, eq_constraint="idle, pst, lin, or time")

    if randomize:
        # Randomize physical parameters uniformly around their nominal values.
        dp_nom = env.get_nominal_domain_param()
        randomizer = DomainRandomizer(
            UniformDomainParam(
                name="ball_restitution",
                mean=dp_nom["ball_restitution"],
                halfspan=dp_nom["ball_restitution"],
            ),
            UniformDomainParam(
                name="ball_radius", mean=dp_nom["ball_radius"], halfspan=dp_nom["ball_radius"] / 5, clip_lo=5e-3
            ),
            UniformDomainParam(name="ball_mass", mean=dp_nom["ball_mass"], halfspan=dp_nom["ball_mass"] / 2, clip_lo=0),
            UniformDomainParam(name="club_mass", mean=dp_nom["club_mass"], halfspan=dp_nom["club_mass"] / 5),
            UniformDomainParam(
                name="ball_friction_coefficient",
                mean=dp_nom["ball_friction_coefficient"],
                halfspan=dp_nom["ball_friction_coefficient"] / 4,
                clip_lo=0,
            ),
            UniformDomainParam(
                name="ball_rolling_friction_coefficient",
                mean=dp_nom["ball_rolling_friction_coefficient"],
                halfspan=dp_nom["ball_rolling_friction_coefficient"] / 3,
                clip_lo=0,
            ),
            UniformDomainParam(
                name="ground_friction_coefficient",
                mean=dp_nom["ground_friction_coefficient"],
                halfspan=dp_nom["ground_friction_coefficient"] / 4,
                clip_lo=0,
            ),
            UniformDomainParam(name="ball_slip", mean=dp_nom["ball_slip"], halfspan=dp_nom["ball_slip"] / 2, clip_lo=0),
            UniformDomainParam(
                name="ground_slip", mean=dp_nom["ground_slip"], halfspan=dp_nom["ground_slip"] / 2, clip_lo=0
            ),
            # Obstacle pose offsets: +-3 cm in position, +-5 deg in rotation.
            UniformDomainParam(name="obstacleleft_pos_offset_x", mean=0, halfspan=0.03),
            UniformDomainParam(name="obstacleleft_pos_offset_y", mean=0, halfspan=0.03),
            UniformDomainParam(name="obstacleleft_rot_offset_c", mean=0 / 180 * math.pi, halfspan=5 / 180 * math.pi),
            UniformDomainParam(name="obstacleright_pos_offset_x", mean=0, halfspan=0.03),
            UniformDomainParam(name="obstacleright_pos_offset_y", mean=0, halfspan=0.03),
            UniformDomainParam(name="obstacleright_rot_offset_c", mean=0 / 180 * math.pi, halfspan=5 / 180 * math.pi),
        )
        env = DomainRandWrapperLive(env, randomizer)

    # Simulate and plot; after each rollout ask the user whether to repeat
    # (possibly with a new init state / domain parameters) or quit.
    print(env.obs_space)
    done, param, state = False, None, None
    while not done:
        ro = rollout(
            env,
            policy,
            render_mode=RenderMode(text=False, video=True),
            eval=True,
            max_steps=max_steps,
            reset_kwargs=dict(domain_param=param, init_state=state),
            stop_on_done=False,
        )
        print_cbt(f"Return: {ro.undiscounted_return()}", "g", bright=True)
        done, state, param = after_rollout_query(env, policy, ro)
| 40.489796 | 120 | 0.65121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,266 | 0.329234 |
374a11c37b91918c3d504846ee4f0ddf1051d985 | 1,161 | py | Python | src/leetcode_43_multiply_strings.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_43_multiply_strings.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_43_multiply_strings.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | # @l2g 43 python3
# [43] Multiply Strings
# Difficulty: Medium
# https://leetcode.com/problems/multiply-strings
#
# Given two non-negative integers num1 and num2 represented as strings,
# return the product of num1 and num2,also represented as a string.
# Note: You must not use any built-in BigInteger library or convert the inputs to integer directly.
#
# Example 1:
# Input: num1 = "2", num2 = "3"
# Output: "6"
# Example 2:
# Input: num1 = "123", num2 = "456"
# Output: "56088"
#
#
# Constraints:
#
# 1 <= num1.length, num2.length <= 200
# num1 and num2 consist of digits only.
# Both num1 and num2 do not contain any leading zero, except the number 0 itself.
#
#
class Solution:
    """LeetCode 43: multiply two non-negative decimal integers given as strings."""

    def multiply(self, num1: str, num2: str) -> str:
        """Return the product of ``num1`` and ``num2`` as a decimal string.

        Uses grade-school long multiplication on a per-digit grid, so the
        input strings are never converted to integers as a whole.
        """
        n, m = len(num1), len(num2)
        # cell[k] accumulates every partial product contributing to 10**k.
        cell = [0] * (n + m)
        for i in range(n - 1, -1, -1):
            d1 = ord(num1[i]) - ord("0")
            for j in range(m - 1, -1, -1):
                d2 = ord(num2[j]) - ord("0")
                cell[(n - 1 - i) + (m - 1 - j)] += d1 * d2
        # Propagate carries from the least significant position upwards.
        carry = 0
        for k in range(n + m):
            total = cell[k] + carry
            cell[k] = total % 10
            carry = total // 10
        # Strip leading zeros, but keep a single '0' for a zero product.
        text = "".join(str(d) for d in reversed(cell)).lstrip("0")
        return text or "0"
if __name__ == "__main__":
    # Run this problem's unit tests when the file is executed directly.
    import os
    import pytest

    pytest.main([os.path.join("tests", "test_43.py")])
374bb3040645d3989e57f95ec9ffc9f744b8af59 | 179 | py | Python | examples/helpers/post/unlike.py | javad94/instauto | 8d4d068863176b0a1df13e5be3d5e32388036921 | [
"MIT"
] | 79 | 2020-08-24T23:32:57.000Z | 2022-02-20T19:03:17.000Z | examples/helpers/post/unlike.py | klaytonpaiva/instauto | 7f8c26b22f84d3d966625c7fa656e91cc623bb2e | [
"MIT"
] | 146 | 2020-07-25T16:27:48.000Z | 2021-10-02T09:03:50.000Z | examples/helpers/post/unlike.py | klaytonpaiva/instauto | 7f8c26b22f84d3d966625c7fa656e91cc623bb2e | [
"MIT"
] | 41 | 2020-09-07T14:19:04.000Z | 2022-02-07T23:08:10.000Z | from instauto.api.client import ApiClient
from instauto.helpers.post import unlike_post
# Restore a previously saved Instagram session, then remove the like from
# the post identified by its media id ("media_id" is a placeholder).
client = ApiClient.initiate_from_file('.instauto.save')
unlike_post(client, "media_id")
| 25.571429 | 55 | 0.815642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.145251 |
374c7d9cbe16ddf4267d0363ddef0fd64684f962 | 5,451 | py | Python | Fund/main.py | livi2000/FundSpider | c79407241fe189b61afc54dd2e5b73c906aae0b5 | [
"MIT"
] | null | null | null | Fund/main.py | livi2000/FundSpider | c79407241fe189b61afc54dd2e5b73c906aae0b5 | [
"MIT"
] | null | null | null | Fund/main.py | livi2000/FundSpider | c79407241fe189b61afc54dd2e5b73c906aae0b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from url_manager import *
from downloader import *
from parser import *
from collector import *
from url_manager import FundURLIndex
class FundMain(object):
    """Orchestrates the fund-data crawl: URL bookkeeping, page download,
    parsing, and result collection."""

    def __init__(self):
        self.url_manager = FundURLManager()
        self.html_downloader = FundDownloader()
        self.html_paser = FundParser()
        self.collector = FundCollector()

    # Interface first, implementation later. The home page is a special case:
    # funds only publish a quarterly report every three months, so unless the
    # data structure changed there is usually no need for a full update.
    def crawl(self, homeurl, incremental=True):
        """Crawl the fund overview page, queue every fund that needs updating,
        then download/parse each fund's detail pages (with two retry rounds).

        :param homeurl: URL of the all-funds overview page
        :param incremental: if True, only funds not yet in the collector are
            fetched; if False, every fund is re-fetched
        """
        # Handle the home (overview) page first.
        home_content = self.html_downloader.download(homeurl)
        if home_content is None:
            return
        funds_info = self.html_paser.parse_home(home_content)
        if funds_info is None:
            return
        count = 0
        # List so the nested closure below can mutate it (Python 2 has no
        # `nonlocal`).
        finished_count = [0]
        for fund_info_code in funds_info:
            # Download only on a full update, or when the fund is new.
            if not incremental or not self.collector.fundexist(fund_info_code):
                self.url_manager.add_url(fund_info_code)
                count += 1
        print '共需爬取基金详情 ' + str(count) + " 个"

        def inner_crawl(isretry=False):
            # One pass over the queue; on a retry pass, move previously
            # failed URLs back into the work queue first.
            if isretry:
                self.url_manager.transfer_url()
            while (not self.url_manager.is_empyt() and not self.url_manager.is_overflow()):
                urls = self.url_manager.pop_url()
                fundcode = urls[FundURLIndex.CODE.value]
                try:
                    # Simplification: a fund only counts as done when ALL of
                    # its related pages downloaded successfully.
                    print 'start parse ' + urls[FundURLIndex.MAIN.value]
                    basecontent = self.html_downloader.download(urls[FundURLIndex.BASE.value])
                    ratiocontent = self.html_downloader.download(urls[FundURLIndex.RATIO.value])
                    statisticcontent = self.html_downloader.download(urls[FundURLIndex.STATISTIC.value])
                    stockscontent = self.html_downloader.download(urls[FundURLIndex.STOCKS.value])
                    annualcontent = self.html_downloader.download(urls[FundURLIndex.ANNUAL.value])
                    # If any single page failed, retry them all. (Some pages
                    # genuinely do not exist, but that case is not handled.)
                    if basecontent is None or len(basecontent) == 0 or ratiocontent is None or len(ratiocontent) == 0\
                            or statisticcontent is None or len(statisticcontent) == 0 or stockscontent is None or len(stockscontent) == 0 \
                            or annualcontent is None or len(annualcontent) == 0:
                        print 'download fund ' + fundcode + ' failed'
                        self.url_manager.fail_url(fundcode)
                        continue
                    self.url_manager.finish_url(fundcode)
                    result = self.html_paser.parse_fund(basecontent, ratiocontent, statisticcontent, stockscontent, annualcontent, urls[FundURLIndex.MAIN.value])
                    self.collector.addFund(result)
                    finished_count[0] += 1
                    print 'finish parse fund ' + fundcode + " " + str(finished_count[0]) + '/' + str(count)
                except Exception as e:
                    print 'parse fund ' + fundcode + ' fail, cause ' + str(e)
                    self.url_manager.fail_url(fundcode)
        # Retry twice: retrying immediately after a failure tends to fail
        # again, so run two extra passes after the initial one.
        inner_crawl()
        inner_crawl(True)
        inner_crawl(True)
        print 'success finish parse url sum ' + str(finished_count[0])
        print 'failed urls is'
        self.url_manager.output_faileds()
if __name__ == "__main__":
    # Full (non-incremental) crawl of the eastmoney all-funds overview page.
    icMain = FundMain()
    icMain.crawl('http://fund.eastmoney.com/allfund.html', False)
# url_manager = SBURLManager()
# # http://m.zhcw.com/clienth5.do?lottery=FC_SSQ&kjissue=2005001&transactionType=300302&src=0000100001%7C6000003060
# for year in range(2005, 2018):
# for index in range(1, 160):
# url_manager.add_url("http://m.zhcw.com/clienth5.do?lottery=FC_SSQ&kjissue=" + str(year) + '{0:03}'.format(index) + "&transactionType=300302&src=0000100001%7C6000003060")
#
# import json
# downloader = SBDownloader()
# parse_count = 0
# areaDic = dict()
# while (not url_manager.is_empyt()):
# url = url_manager.pop_url()
# content = downloader.download(url)
# # 懒得重试了哦
# if content is not None and len(content) > 0:
# d = json.loads(content)
# l = d.get("dataList", None)
# if l is not None:
# parse_count += 1
# for info in l:
# area = info['dqname']
# ones = int(info["onez"])
# money = int(info['tzmoney'])
# sum = areaDic.get(area, None)
# if sum is None:
# areaDic[area] = (ones, money)
# else:
# areaDic[area] = (sum[0] + ones, sum[1] + money)
#
# # 最后输出结果
# print "统计双色球地域特性共" + str(parse_count) + "期"
#
# areaResult = dict()
# for area in areaDic:
# count = areaDic[area][0]
# money = areaDic[area][1]
# if count > 0:
# average = money / count
# else :
# average = 10000000000
# # print area + '购买彩票共' + str(money) + '元, 共', str(count) + "人中头奖, 平均每花" + average + "出一个头奖嘻嘻"
# areaResult[area] = average
#
# print '按照平均花费中头奖金额排序:'
# for key, value in sorted(areaResult.iteritems(), key=lambda (k,v): (v,k)):
# print "%s每花%d万可出一个头奖" % (key, value/10000)
| 42.255814 | 183 | 0.571455 | 3,579 | 0.606302 | 0 | 0 | 0 | 0 | 0 | 0 | 2,526 | 0.427918 |
374d67e614b5fff98eb17bf5586cf8d97b2d4b5e | 2,087 | py | Python | scenario/run.py | Abdelmouise/covid-19-pandemic-simulation | 154c05bbaff6fc4305e00a489abb0338c9a8530d | [
"MIT"
] | 3 | 2020-04-13T20:40:16.000Z | 2020-10-30T20:01:56.000Z | scenario/run.py | Abdelmouise/covid-19-pandemic-simulation | 154c05bbaff6fc4305e00a489abb0338c9a8530d | [
"MIT"
] | null | null | null | scenario/run.py | Abdelmouise/covid-19-pandemic-simulation | 154c05bbaff6fc4305e00a489abb0338c9a8530d | [
"MIT"
] | null | null | null | import random
import sys
import time
from scenario.example import sc1_simple_lockdown_removal, sc2_yoyo_lockdown_removal, sc0_base_lockdown, \
scx_base_just_a_flu, sc3_loose_lockdown, sc4_rogue_citizen, sc5_rogue_neighborhood, sc6_travelers
from simulator.constants.keys import scenario_id_key, random_seed_key, draw_graph_key
from simulator.helper.parser import get_parser
from simulator.helper.plot import chose_draw_plot
from simulator.helper.simulation import get_default_params
from simulator.helper.environment import get_environment_simulation
if __name__ == '__main__':
params = get_default_params()
args = get_parser().parse_args()
for arg in vars(args):
v = getattr(args, arg)
if arg in params and v is not None:
params[arg] = v
random.seed(params[random_seed_key])
t_start = time.time()
env_dic = get_environment_simulation(params)
if params[scenario_id_key] == -1:
stats_result = scx_base_just_a_flu.launch_run(params, env_dic)
elif params[scenario_id_key] == 0: # Total lockdown
stats_result = sc0_base_lockdown.launch_run(params, env_dic)
elif params[scenario_id_key] == 1: # Lockdown removal after N days
stats_result = sc1_simple_lockdown_removal.launch_run(params, env_dic)
elif params[scenario_id_key] == 2: # Yoyo lockdown removal
stats_result = sc2_yoyo_lockdown_removal.launch_run(params, env_dic)
elif params[scenario_id_key] == 3: # Yoyo lockdown removal
stats_result = sc3_loose_lockdown.launch_run(params, env_dic)
elif params[scenario_id_key] == 4: # Rogue citizen
stats_result = sc4_rogue_citizen.launch_run(params, env_dic)
elif params[scenario_id_key] == 5: # Rogue block
stats_result = sc5_rogue_neighborhood.launch_run(params, env_dic)
elif params[scenario_id_key] == 6: # Rogue block
stats_result = sc6_travelers.launch_run(params, env_dic)
else:
sys.exit(0)
print("It took : %.2f seconds" % (time.time() - t_start))
chose_draw_plot(params[draw_graph_key], stats_result) | 45.369565 | 105 | 0.744609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.080498 |
374f08a2bd0965255d3871d9a77cdb705b2dfb08 | 224 | py | Python | apps/wiki/admin.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | 2 | 2018-01-23T22:38:57.000Z | 2019-07-14T08:59:19.000Z | apps/wiki/admin.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | 237 | 2018-08-15T23:13:52.000Z | 2022-01-13T13:08:50.000Z | apps/wiki/admin.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | null | null | null | from django.contrib import admin
from apps.wiki.models import Article, Tag, ExternalLink, ArticleFile
admin.site.register(Article)
admin.site.register(Tag)
admin.site.register(ExternalLink)
admin.site.register(ArticleFile)
| 28 | 68 | 0.830357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
375198759bc2c16a5d5f2fdb635b642d7532765c | 2,758 | py | Python | code/arrange-fastq.py | jdblischak/dox | c17b4496674876b26c3e7137e4a2e2657898ea4c | [
"Apache-2.0"
] | 4 | 2018-05-09T02:06:20.000Z | 2021-07-17T15:02:54.000Z | code/arrange-fastq.py | jdblischak/dox | c17b4496674876b26c3e7137e4a2e2657898ea4c | [
"Apache-2.0"
] | null | null | null | code/arrange-fastq.py | jdblischak/dox | c17b4496674876b26c3e7137e4a2e2657898ea4c | [
"Apache-2.0"
] | 3 | 2017-10-06T21:50:58.000Z | 2018-07-05T00:50:18.000Z | #!/usr/bin/env python3
# Move and rename fastq files downloaded from FGF's FTP site.
#
# Usage:
#
# python3 arrange-fastq.py indir outdir
#
# indir - Highly nested directory structure from downloading data
# outdir - New output directory (created if does not exist)
#
# Ex:
#
# python3 arrange-fastq.py fgfftp.uchicago.edu/Genomics_Data/NGS/160520_K00242_0070_BHCMNYBBXX-YG-Dox-781112/FastQ fastq
#
# Explanation:
#
# The core provides the samples with the following naming scheme:
#
# 160520_K00242_0070_BHCMNYBBXX-YG-Dox-781112/FastQ/YG-Dox-p11-s105-c29-1-5000_S25_L003_R1_001.fastq.gz
#
# To be extracted are the following variables:
#
# sample number: s105
# cell line num: c29-1
# treatment concentration: 5000
# flow cell id: HCMNYBBXX (the leading A or B is discarded)
# lane: L003
#
# These are converted into the following file naming scheme:
#
# s105-c29.1-5.000-HCMNYBBXX-l3.fastq.gz
#
# sample number: s105 (always has three digits)
# cell line num: c29.1
# treatment concentration: 5.000
# flow cell id: HCMNYBBXX
# lane: l3
#
import glob
import os
import shutil
import sys

# Input arguments
args = sys.argv
assert len(args) == 3, "Incorrect number of arguments.\nUsage: python3 arrange-fastq.py indir outdir"
indir = args[1]
outdir = args[2]
assert os.path.exists(indir), "Input directory does not exist: %s"%(indir)
if not os.path.exists(outdir):
    os.mkdir(outdir)

# Add final forward slash if necessary
if indir[-1] != "/":
    indir = indir + "/"
if outdir[-1] != "/":
    outdir = outdir + "/"

# Obtain file names
files = glob.glob(indir + "/*fastq.gz")[:]

# Rename and move files
suffix = '.fastq.gz'
undetermined_count = 0
for f in files:
    # BUGFIX: the original used f.rstrip('fastq.gz'), but str.rstrip strips a
    # *character set*, not a suffix — any trailing run of the characters
    # f, a, s, t, q, g, z, or '.' was removed, so a name whose stem ends in
    # one of those characters got over-stripped. Remove exactly the suffix.
    stem = f[:-len(suffix)] if f.endswith(suffix) else f
    path = stem.split('/')
    # Flow cell id, e.g. HCMNYBBXX from ...BHCMNYBBXX-YG-Dox-781112
    # (the leading A/B is discarded by the [1:]).
    flow_cell = path[-3].split("_")[-1].split("-")[0][1:]
    # Drop the trailing chunk counter ("001") left after removing the suffix.
    file_parts = path[-1].split('_')[:-1]
    lane = "l" + file_parts[2][-1]
    if file_parts[0] == "Undetermined":
        sample_name = file_parts[0].lower()
        undetermined_count += 1
    else:
        name_parts = file_parts[0].split("-")
        sample_num = name_parts[3]
        sample_num = "s%03d"%(int(sample_num[1:]))  # always three digits
        # Cell line: either "c29" (6 parts) or "c29-1" -> "c29.1" (7 parts).
        if len(name_parts) == 6:
            cell_num = name_parts[4]
        elif len(name_parts) == 7:
            cell_num = name_parts[4] + "." + name_parts[5]
        else:
            sys.exit("Input file naming scheme has changed. Code must be updated.")
        # Treatment concentration: "5000" -> "5.000".
        treatment = name_parts[-1]
        treatment = treatment[0] + "." + treatment[1:]
        sample_name = "-".join([sample_num, cell_num, treatment])
    new_name = outdir + sample_name + '-' + flow_cell + "-" + lane + '.fastq.gz'
    sys.stderr.write("Moving:\n%s\n%s\n\n"%(new_name, f))
    shutil.move(f, new_name)
sys.stderr.write("Moved %i files (%i Undetermined)\n" % (len(files), undetermined_count) )
| 30.307692 | 120 | 0.664975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,430 | 0.518492 |
3751d3596a32979b95ddd2523fef9f29e3bf7492 | 173 | py | Python | sosia/establishing/__init__.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 14 | 2019-03-12T22:07:47.000Z | 2022-03-08T14:05:05.000Z | sosia/establishing/__init__.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 31 | 2018-10-15T16:02:44.000Z | 2021-04-09T08:13:44.000Z | sosia/establishing/__init__.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 2 | 2020-01-09T06:47:09.000Z | 2020-12-05T13:21:03.000Z | from sosia.establishing.config import *
from sosia.establishing.constants import *
from sosia.establishing.database import *
from sosia.establishing.fields_sources import *
| 34.6 | 47 | 0.83815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3752032895f97132483d0cfa4339b37fb1eaf5c4 | 151 | py | Python | 3.py | guillesiesta/python_comprehensions | 4d2765b29b8165a5fa2488e6a50a49235238c82f | [
"Apache-2.0"
] | null | null | null | 3.py | guillesiesta/python_comprehensions | 4d2765b29b8165a5fa2488e6a50a49235238c82f | [
"Apache-2.0"
] | null | null | null | 3.py | guillesiesta/python_comprehensions | 4d2765b29b8165a5fa2488e6a50a49235238c82f | [
"Apache-2.0"
] | null | null | null | hola = True
adiosguillermomurielsanchezlafuente = True
if (adiosguillermomurielsanchezlafuente
and hola):
print("ok con nombre muy largo")
| 21.571429 | 42 | 0.761589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.165563 |
375235f443ccc25c0883b3f00cf92f6d6e16776c | 157 | py | Python | problemas/1100/1144.py | filimor/uri-online-judge | 08b3bae3e02cc35ba8f6fba869d643ba3d028e58 | [
"MIT"
] | 10 | 2020-07-05T04:56:09.000Z | 2022-03-23T00:25:02.000Z | problemas/1100/1144.py | filimor/uri-online-judge | 08b3bae3e02cc35ba8f6fba869d643ba3d028e58 | [
"MIT"
] | 1 | 2021-12-30T05:18:59.000Z | 2021-12-30T05:18:59.000Z | problemas/1100/1144.py | filimor/uri-online-judge | 08b3bae3e02cc35ba8f6fba869d643ba3d028e58 | [
"MIT"
] | 5 | 2020-03-23T09:43:40.000Z | 2022-02-04T13:07:29.000Z | for i in range(1, int(input()) + 1):
quadrado = i ** 2
cubo = i ** 3
print(f'{i} {quadrado} {cubo}')
print(f'{i} {quadrado + 1} {cubo + 1}')
| 26.166667 | 43 | 0.490446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.356688 |
375258e86a135ff7731a5d12af097e4a448f7742 | 19,933 | py | Python | source/origo_scrape/views.py | yinm8315/Origo_Scrape | 73f5782e9bd922777de03de7fc3da74965490fa1 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T03:00:26.000Z | 2021-06-02T03:00:26.000Z | source/origo_scrape/views.py | yinm8315/Origo_Scrape | 73f5782e9bd922777de03de7fc3da74965490fa1 | [
"BSD-3-Clause"
] | null | null | null | source/origo_scrape/views.py | yinm8315/Origo_Scrape | 73f5782e9bd922777de03de7fc3da74965490fa1 | [
"BSD-3-Clause"
] | null | null | null | # Create your views here.
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.template import loader
from django.forms.utils import ErrorList
from django.http import HttpResponse
from .origo import Origo_Thread
from .origo_category import Origo_Category_Thread
from .supply_it import Supply_it_Thread
from .furlongflooring import FF_Thread
from .reydonsports import RDS_Thread
from .reydonsports_category import RDS_Category_Thread
from .totalimports import TotalImports_Thread
from .totalimports_category import TotalImports_Category_Thread
from os.path import join, dirname
# from .origo import scrape_status as origo_scrape_status
import glob, os, zipfile, openpyxl, xlsxwriter
from os import path
from django.contrib.auth.decorators import login_required
from bs4 import BeautifulSoup
import requests, time, math
from datetime import datetime
# from filewrap import Filewrapper
# dotenv_path = join(dirname(__file__), '.env')
# load_dotenv(dotenv_path)
# Path of this app's package directory and its parent (the project root).
cur_path = dirname(__file__)
root_path = cur_path[:cur_path.rfind(os.path.sep)]
# root_path = root_path[:root_path.rfind(os.path.sep)]
# Currently selected target site (set by start_scrape).
cur_site = ""
# Per-site scraper thread handles; lists hold one thread per worker,
# "_cat" variables hold the category-scraper thread.
# t_origo = None
t_origo = []
t_origo_cat = None
t_supply_it = None
t_ff = None
t_rds = []
t_rds_cat = None
t_totalimports = []
t_totalimports_cat = None
t_totalimports_delay = []
# Scrape targets shown on the dashboard; the commented variants are the
# previously enabled site configurations.
# sites = [{"url": "https://origo-online.origo.ie", "short": "origo"}, {"url": "https://www.supply-it.ie/", "short": "supply_it"}, {"url": "https://online.furlongflooring.com/", "short": "furlongflooring"}]
# sites = [{"url": "https://www.reydonsports.com/", "short": "reydonsports"}]
# sites = [{"url": "https://www.supply-it.ie/", "short": "supply_it"}]
sites = [{"url": "http://totalimports.ie/", "short": "totalimports"}]
# sites = [{"url": "https://origo-online.origo.ie", "short": "origo"}]
# sites = [{"url": "https://online.furlongflooring.com/", "short": "furlongflooring"}]
# Last reported status text of the running scrape (see get_scraping_status).
scrape_status = None
THREAD_COUNT = 5
ALLOW_DELAY = 120
@login_required
def index(request):
    """Render the scraper dashboard listing all configured target sites."""
    template = loader.get_template('main/index.html')
    # `sites` is the module-level list of scrape targets.
    context = {'sites': sites}
    return HttpResponse(template.render(context, request))
@login_required
def start_scrape(request):
    """Start a scrape for the site given in the query string.

    Expects GET parameters ``site`` (short site name) and ``scrape_type``
    ("stock" for a stock-only scrape). A new scraper thread is started only
    if no thread for that site is already running. Returns the project root
    path as a plain response (used by the frontend as an acknowledgement).
    """
    global t_origo, t_supply_it, t_ff, t_rds, t_totalimports, t_totalimports_cat, t_totalimports_delay, cur_site, stock_scrape
    print("start_scrape")
    cur_site = request.GET["site"]
    scrape_type = request.GET["scrape_type"]
    if cur_site == "origo":
        # Only start when neither worker threads nor a category thread run.
        # NOTE(review): origo_category_scrape is defined elsewhere in this
        # module — presumably it spawns the origo worker threads; confirm.
        if len(t_origo) == 0 and t_origo_cat == None:
            stock_scrape = 0
            if scrape_type == "stock": stock_scrape = 1
            origo_category_scrape(stock_scrape)
            # totalimports_scrape(stock_scrape)
        # if t_origo == None or t_origo.status == "scraping is ended":
        #     t_origo = Origo_Thread(scrape_type)
        #     t_origo.start()
    elif cur_site == "supply_it":
        if t_supply_it == None:
            t_supply_it = Supply_it_Thread(scrape_type)
            t_supply_it.start()
    elif cur_site == "furlongflooring":
        # Restart allowed once a previous run reports it has ended.
        if t_ff == None or t_ff.status == "scraping is ended":
            t_ff = FF_Thread(scrape_type)
            t_ff.start()
    elif cur_site == "reydonsports":
        if len(t_rds) == 0 and t_rds_cat == None:
            stock_scrape = 0
            if scrape_type == "stock": stock_scrape = 1
            reydonsports_scrape(stock_scrape)
    elif cur_site == "totalimports":
        if len(t_totalimports) == 0 and t_totalimports_cat == None:
            stock_scrape = 0
            if scrape_type == "stock": stock_scrape = 1
            totalimports_category_scrape(stock_scrape)
            # totalimports_scrape(stock_scrape)
    return HttpResponse(root_path)
@login_required
def get_scraping_status(request):
global t_origo, t_origo_cat, t_supply_it, t_ff, t_rds, t_rds_cat, t_totalimports, t_totalimports_cat, t_totalimports_delay, stock_scrape, scrape_status
res = ""
cur_site = request.GET["site"]
if cur_site == "origo" :
# res = t_origo.status
if len(t_origo) > 0:
scrape_status = ""
for tt in t_origo:
try:
scrape_status += tt.status + "\n"
except:
scrape_status += "\n"
# scrape_status = "\n".join([tt.status for tt in t_origo if tt != None])
i = 0
for t in t_origo:
i += 1
try:
if t.status != "ended":
break
except:
pass
if i == len(t_origo):
# generate .xlsx file name
timestamp = datetime.now().strftime("%Y-%m%d-%H%M%S")
xlsfile_name = 'products-' + timestamp + '.xlsx'
if stock_scrape == 1: xlsfile_name = 'stock-' + timestamp + '.xlsx'
xlsfile_name = join(root_path, "xls", "origo", xlsfile_name)
workbook = xlsxwriter.Workbook(xlsfile_name)
worksheet = workbook.add_worksheet()
row_num = 0
for j in range(THREAD_COUNT):
tmp_wb_obj = openpyxl.load_workbook(join(root_path, "xls", "origo", str(j) + "-temp.xlsx"))
sheet = tmp_wb_obj.active
for k, row in enumerate(sheet.iter_rows(values_only=True)):
if k == 0:
if j == 0:
# Write Header
for val, col in zip(row, range(len(row))):
worksheet.write(0, col, val)
else:
row_num += 1
for val, col in zip(row, range(len(row))):
worksheet.write(row_num, col, val)
tmp_wb_obj.close()
workbook.close()
scrape_status = "scraping is ended"
break
elif t_origo_cat != None:
scrape_status = t_origo_cat.status
if scrape_status == "ended":
t_origo_cat = None
origo_scrape(stock_scrape)
# totalimports_scrape()
res = scrape_status
if scrape_status == "scraping is ended":
t_origo.clear()
elif cur_site == "supply_it" :
res = t_supply_it.status
elif cur_site == "furlongflooring" :
res = t_ff.status
elif cur_site == "reydonsports" :
if len(t_rds) > 0:
scrape_status = ""
for tt in t_rds:
try:
scrape_status += tt.status + "\n"
except:
scrape_status += "\n"
# scrape_status = "\n".join([tt.status for tt in t_rds if tt != None])
i = 0
for t in t_rds:
i += 1
try:
if t.status != "ended":
break
except:
pass
if i == len(t_rds):
# generate .xlsx file name
timestamp = datetime.now().strftime("%Y-%m%d-%H%M%S")
xlsfile_name = 'products-' + timestamp + '.xlsx'
if stock_scrape == 1: xlsfile_name = 'stock-' + timestamp + '.xlsx'
xlsfile_name = join(root_path, "xls", "reydonsports", xlsfile_name)
workbook = xlsxwriter.Workbook(xlsfile_name)
worksheet = workbook.add_worksheet()
row_num = 0
for j in range(THREAD_COUNT):
tmp_wb_obj = openpyxl.load_workbook(join(root_path, "xls", "reydonsports", str(j) + "-temp.xlsx"))
sheet = tmp_wb_obj.active
for k, row in enumerate(sheet.iter_rows(values_only=True)):
if k == 0:
if j == 0:
# Write Header
for val, col in zip(row, range(len(row))):
worksheet.write(0, col, val)
else:
row_num += 1
for val, col in zip(row, range(len(row))):
worksheet.write(row_num, col, val)
tmp_wb_obj.close()
workbook.close()
scrape_status = "scraping is ended"
break
elif t_rds_cat != None:
scrape_status = t_rds_cat.status
if scrape_status == "ended":
t_rds_cat = None
reydonsports_scrape(stock_scrape)
# totalimports_scrape()
res = scrape_status
if scrape_status == "scraping is ended":
t_rds.clear()
elif cur_site == "totalimports" :
if len(t_totalimports) > 0:
# check if thread works fine
pre_scrape_status = []
if scrape_status != None: pre_scrape_status = scrape_status.split("\n")
scrape_status = ""
for tt, i in zip(t_totalimports, range(len(t_totalimports))):
if tt.status != "ended" and len(pre_scrape_status) > i and pre_scrape_status[i] == tt.status:
t_totalimports_delay[i] += 1
if t_totalimports_delay[i] >= ALLOW_DELAY:
totalimports_thread_start(i, stock_scrape)
else:
t_totalimports_delay[i] = 0
try:
scrape_status += tt.status + "\n"
except:
scrape_status += "\n"
# scrape_status = "\n".join([tt.status for tt in t_totalimports if tt != None])
i = 0
for t in t_totalimports:
i += 1
try:
if t.status != "ended":
break
except:
pass
if i == len(t_totalimports):
# generate .xlsx file name
timestamp = datetime.now().strftime("%Y-%m%d-%H%M%S")
xlsfile_name = 'products-' + timestamp + '.xlsx'
if stock_scrape == 1: xlsfile_name = 'stock-' + timestamp + '.xlsx'
xlsfile_name = join(root_path, "xls", "totalimports", xlsfile_name)
workbook = xlsxwriter.Workbook(xlsfile_name)
worksheet = workbook.add_worksheet()
row_num = 0
for j in range(THREAD_COUNT):
tmp_wb_obj = openpyxl.load_workbook(join(root_path, "xls", "totalimports", str(j) + "-temp.xlsx"))
sheet = tmp_wb_obj.active
for k, row in enumerate(sheet.iter_rows(values_only=True)):
if k == 0:
if j == 0:
# Write Header
for val, col in zip(row, range(len(row))):
worksheet.write(0, col, val)
else:
row_num += 1
for val, col in zip(row, range(len(row))):
worksheet.write(row_num, col, val)
tmp_wb_obj.close()
workbook.close()
scrape_status = "scraping is ended"
break
elif t_totalimports_cat != None:
scrape_status = t_totalimports_cat.status
if scrape_status == "ended":
t_totalimports_cat = None
# reydonsports_scrape()
totalimports_scrape(stock_scrape)
res = scrape_status
if scrape_status == "scraping is ended":
t_totalimports.clear()
return HttpResponse(res)
@login_required
def get_xls_list(request):
global root_path
res = ""
for site in sites:
products_arr = []
stock_arr = []
for file in glob.glob(join(root_path, "xls", site["short"], "products-2*.xlsx")):
products_arr.append(file[file.rfind(os.path.sep) + 10 : -5])
for file in glob.glob(join(root_path, "xls", site["short"], "stock-2*.xlsx")):
stock_arr.append(file[file.rfind(os.path.sep) + 7 : -5])
products_arr.sort(reverse=True)
stock_arr.sort(reverse=True)
if res != "": res += ", "
res += '"' + site["short"] + '": {"full": "' + '_'.join(products_arr) + '", "stock": "' + '_'.join(stock_arr) + '"}'
res = '{' + res + '}'
return HttpResponse(res)
@login_required
def download(request):
# Create file_name & file_path
site = request.GET["site"]
stock = request.GET["stock"]
diff = request.GET["diff"]
recent = request.GET["recent"]
compare = request.GET["compare"]
file_prefix = "products-"
if stock == "1" : file_prefix = "stock-"
file_name = file_prefix
if diff == "1" : file_name += "diff-"
file_name += recent
if diff == "1" : file_name += "_" + compare
zipfile_name = site + "-" + file_name + ".zip"
file_name += ".xlsx"
file_path = []
if diff =="1":
file_path.append(os.path.join(root_path, "xls", site, file_prefix + "add-" + recent + "_" + compare + ".xlsx"))
file_path.append(os.path.join(root_path, "xls", site, file_prefix + "remove-" + recent + "_" + compare + ".xlsx"))
zipfile_name = site + "-" + file_prefix + "compare-" + recent + "_" + compare + ".zip"
else:
file_path.append(os.path.join(root_path, "xls", site, file_name))
response = HttpResponse(content_type='application/zip')
zf = zipfile.ZipFile(response, 'w')
for file in file_path:
# Generate if there is no different .xlsx file
if diff == "1" and not path.exists(file) :
compare_xlsx(site, stock, recent, compare)
with open(file, 'rb') as fh:
zf.writestr(file[file.rfind(os.path.sep) + 1:], fh.read())
# return as zipfile
response['Content-Disposition'] = f'attachment; filename={zipfile_name}'
return response
@login_required
def compare_xlsx(site, stock, recent, compare) :
global root_path
# fields = ['id', 'category', 'title', 'stock', 'list price', 'nett price', 'description', 'URL', 'image']
fields = []
file_prefix = "products-"
if stock == "1":
# fields = ['id', 'stock']
file_prefix = "stock-"
add_file_name = file_prefix + "add-" + recent + "_" + compare + ".xlsx"
remove_file_name = file_prefix + "remove-" + recent + "_" + compare + ".xlsx"
older_products = {}
newer_products = {}
wb_obj = openpyxl.load_workbook(join(root_path, "xls", site, file_prefix + compare + ".xlsx"))
sheet = wb_obj.active
older_products = {}
for i, row in enumerate(sheet.iter_rows(values_only=True)):
if i == 0:
fields = row
else:
try:
if row[0] in older_products: continue
except:
pass
older_products[row[0]] = row
wb_obj = openpyxl.load_workbook(join(root_path, "xls", site, file_prefix + recent + ".xlsx"))
sheet = wb_obj.active
newer_products = {}
for i, row in enumerate(sheet.iter_rows(values_only=True)):
if i > 0:
try:
if row[0] in newer_products: continue
except:
pass
newer_products[row[0]] = row
older_products_2 = older_products.copy()
for row in older_products_2:
try:
if row in newer_products:
del older_products[row]
del newer_products[row]
except:
pass
workbook = xlsxwriter.Workbook(join(root_path, "xls", site, add_file_name))
worksheet = workbook.add_worksheet("Add")
i = -1
for val in fields:
i += 1
worksheet.write(0, i, val)
i = 0
for row in newer_products:
i += 1
j = -1
for val in newer_products[row]:
j += 1
worksheet.write(i, j, val)
workbook.close()
workbook = xlsxwriter.Workbook(join(root_path, "xls", site, remove_file_name))
worksheet = workbook.add_worksheet("Remove")
i = -1
for val in fields:
i += 1
worksheet.write(0, i, val)
i = 0
for row in older_products:
i += 1
j = -1
for val in older_products[row]:
j += 1
worksheet.write(i, j, val)
workbook.close()
def status_publishing(text) :
global scrape_status
scrape_status = text
def reydonsports_category_scrape(stock_scrape=0):
global t_rds_cat, t_rds
t_rds_cat = RDS_Category_Thread(stock_scrape)
t_rds_cat.start()
def reydonsports_scrape(stock_scrape=0):
global t_rds
products_url_txt = open("reydonsports_products_url.txt","r")
lines = len(products_url_txt.readlines())
start_index = 0
for i in range(THREAD_COUNT):
end_index = start_index + math.ceil(lines / THREAD_COUNT)
if end_index > lines + 1: end_index = lines + 1
th = RDS_Thread(i, start_index, end_index, stock_scrape)
th.start()
t_rds.append(th)
start_index = end_index
def totalimports_category_scrape(stock_scrape=0):
global t_totalimports_cat, t_totalimports
t_totalimports_cat = TotalImports_Category_Thread(stock_scrape)
t_totalimports_cat.start()
def totalimports_thread_start(thread_index, stock_scrape=0):
global t_totalimports, t_totalimports_delay
products_url_txt = open("totalimports_products_url.txt","r")
lines = len(products_url_txt.readlines())
start_index = 0
for i in range(THREAD_COUNT):
end_index = start_index + math.ceil(lines / THREAD_COUNT)
if end_index > lines + 1: end_index = lines + 1
if i == thread_index :
th = TotalImports_Thread(i, start_index, end_index, stock_scrape)
th.start()
if thread_index < len(t_totalimports):
t_totalimports[thread_index] = th
t_totalimports_delay[thread_index] = 0
else:
t_totalimports.append(th)
t_totalimports_delay.append(0)
break
start_index = end_index
def totalimports_scrape(stock_scrape=0):
for i in range(THREAD_COUNT):
totalimports_thread_start(i, stock_scrape)
def origo_category_scrape(stock_scrape=0):
global t_origo_cat, t_origo
t_origo_cat = Origo_Category_Thread(stock_scrape)
t_origo_cat.start()
def origo_scrape(stock_scrape=0):
global t_origo
products_url_txt = open("origo_products_url.txt","r")
lines = len(products_url_txt.readlines())
start_index = 0
for i in range(THREAD_COUNT):
end_index = start_index + math.ceil(lines / THREAD_COUNT)
if end_index > lines + 1: end_index = lines + 1
th = RDS_Thread(i, start_index, end_index, stock_scrape)
th.start()
t_origo.append(th)
start_index = end_index | 36.307832 | 206 | 0.541564 | 0 | 0 | 0 | 0 | 15,292 | 0.76717 | 0 | 0 | 2,984 | 0.149702 |
375318ac03b49dec7ec6b45c2f3221456f5baf1b | 8,670 | py | Python | sharpy/postproc/plotflowfield.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 80 | 2018-08-30T13:01:52.000Z | 2022-03-24T15:02:48.000Z | sharpy/postproc/plotflowfield.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 88 | 2018-05-17T16:18:58.000Z | 2022-03-11T21:05:48.000Z | sharpy/postproc/plotflowfield.py | ACea15/sharpy | c89ecb74be3cb9e37b23ac8a282c73b9b55dd792 | [
"BSD-3-Clause"
] | 44 | 2018-01-02T14:27:28.000Z | 2022-03-12T13:49:36.000Z | import os
import numpy as np
from tvtk.api import tvtk, write_data
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.generator_interface as gen_interface
import sharpy.utils.settings as settings
import sharpy.aero.utils.uvlmlib as uvlmlib
import ctypes as ct
from sharpy.utils.constants import vortex_radius_def
@solver
class PlotFlowField(BaseSolver):
"""
Plots the flow field in Paraview and computes the velocity at a set of points in a grid.
"""
solver_id = 'PlotFlowField'
solver_classification = 'post-processor'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_options = dict()
settings_types['postproc_grid_generator'] = 'str'
settings_default['postproc_grid_generator'] = 'GridBox'
settings_description['postproc_grid_generator'] = 'Generator used to create grid and plot flow field'
settings_options['postproc_grid_generator'] = ['GridBox']
settings_types['postproc_grid_input'] = 'dict'
settings_default['postproc_grid_input'] = dict()
settings_description['postproc_grid_input'] = 'Dictionary containing settings for ``postproc_grid_generator``.'
settings_types['velocity_field_generator'] = 'str'
settings_default['velocity_field_generator'] = 'SteadyVelocityField'
settings_description['velocity_field_generator'] = 'Chosen velocity field generator'
settings_types['velocity_field_input'] = 'dict'
settings_default['velocity_field_input'] = dict()
settings_description['velocity_field_input'] = 'Dictionary containing settings for the selected ``velocity_field_generator``.'
settings_types['dt'] = 'float'
settings_default['dt'] = 0.1
settings_description['dt'] = 'Time step.'
settings_types['include_external'] = 'bool'
settings_default['include_external'] = True
settings_description['include_external'] = 'Include external velocities.'
settings_types['include_induced'] = 'bool'
settings_default['include_induced'] = True
settings_description['include_induced'] = 'Include induced velocities.'
settings_types['stride'] = 'int'
settings_default['stride'] = 1
settings_description['stride'] = 'Number of time steps between plots.'
settings_types['num_cores'] = 'int'
settings_default['num_cores'] = 1
settings_description['num_cores'] = 'Number of cores to use.'
settings_types['vortex_radius'] = 'float'
settings_default['vortex_radius'] = vortex_radius_def
settings_description['vortex_radius'] = 'Distance below which inductions are not computed.'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description, settings_options)
def __init__(self):
self.settings = None
self.data = None
self.dir = 'output/'
self.caller = None
def initialise(self, data, custom_settings=None, caller=None):
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings, self.settings_types, self.settings_default,
self.settings_options)
self.dir = self.data.case_route + 'output/' + self.data.case_name + '/' + 'GenerateFlowField/'
if not os.path.isdir(self.dir):
os.makedirs(self.dir)
# init velocity generator
velocity_generator_type = gen_interface.generator_from_string(
self.settings['velocity_field_generator'])
self.velocity_generator = velocity_generator_type()
self.velocity_generator.initialise(self.settings['velocity_field_input'])
# init postproc grid generator
postproc_grid_generator_type = gen_interface.generator_from_string(
self.settings['postproc_grid_generator'])
self.postproc_grid_generator = postproc_grid_generator_type()
self.postproc_grid_generator.initialise(self.settings['postproc_grid_input'])
self.caller = caller
def output_velocity_field(self, ts):
# Notice that SHARPy utilities deal with several two-dimensional surfaces
# To be able to build 3D volumes, I will make use of the surface index as
# the third index in space
# It does not apply to the 'u' array because this way it is easier to
# write it in paraview
# Generate the grid
vtk_info, grid = self.postproc_grid_generator.generate({
'for_pos': self.data.structure.timestep_info[ts].for_pos[0:3]})
# Compute the induced velocities
nx = grid[0].shape[1]
ny = grid[0].shape[2]
nz = len(grid)
array_counter = 0
u_ind = np.zeros((nx, ny, nz, 3), dtype=float)
if self.settings['include_induced']:
target_triads = np.zeros((nx*ny*nz, 3))
ipoint = -1
for iz in range(nz):
for ix in range(nx):
for iy in range(ny):
ipoint += 1
target_triads[ipoint, :] = grid[iz][:, ix, iy].astype(dtype=ct.c_double, order='F', copy=True)
u_ind_points = uvlmlib.uvlm_calculate_total_induced_velocity_at_points(self.data.aero.timestep_info[ts],
target_triads,
self.settings['vortex_radius'],
self.data.structure.timestep_info[ts].for_pos[0:3],
self.settings['num_cores'])
ipoint = -1
for iz in range(nz):
for ix in range(nx):
for iy in range(ny):
ipoint += 1
u_ind[ix, iy, iz, :] = u_ind_points[ipoint, :]
# Write the data
vtk_info.point_data.add_array(u_ind.reshape((-1, u_ind.shape[-1]), order='F')) # Reshape the array except from the last dimension
vtk_info.point_data.get_array(array_counter).name = 'induced_velocity'
vtk_info.point_data.update()
array_counter += 1
# Add the external velocities
u_ext_out = np.zeros((nx, ny, nz, 3), dtype=float)
if self.settings['include_external']:
u_ext = []
for iz in range(nz):
u_ext.append(np.zeros((3, nx, ny), dtype=ct.c_double))
self.velocity_generator.generate({'zeta': grid,
'override': True,
't': ts*self.settings['dt'].value,
'ts': ts,
'dt': self.settings['dt'].value,
'for_pos': 0*self.data.structure.timestep_info[ts].for_pos},
u_ext)
for iz in range(nz):
for ix in range(nx):
for iy in range(ny):
u_ext_out[ix, iy, iz, :] += u_ext[iz][:, ix, iy]
# Write the data
vtk_info.point_data.add_array(u_ext_out.reshape((-1, u_ext_out.shape[-1]), order='F')) # Reshape the array except from the last dimension
vtk_info.point_data.get_array(array_counter).name = 'external_velocity'
vtk_info.point_data.update()
array_counter += 1
# add the data
u = u_ind + u_ext_out
# Write the data
vtk_info.point_data.add_array(u.reshape((-1, u.shape[-1]), order='F')) # Reshape the array except from the last dimension
vtk_info.point_data.get_array(array_counter).name = 'velocity'
vtk_info.point_data.update()
array_counter += 1
filename = self.dir + "VelocityField_" + '%06u' % ts + ".vtk"
write_data(vtk_info, filename)
def run(self, online=False):
if online:
if divmod(self.data.ts, self.settings['stride'].value)[1] == 0:
self.output_velocity_field(len(self.data.structure.timestep_info) - 1)
else:
for ts in range(0, len(self.data.structure.timestep_info)):
if not self.data.structure.timestep_info[ts] is None:
self.output_velocity_field(ts)
return self.data
| 45.15625 | 153 | 0.602999 | 8,316 | 0.95917 | 0 | 0 | 8,324 | 0.960092 | 0 | 0 | 2,124 | 0.244983 |
37562319f7afe5cba9d3144dd3ddb0395179ad06 | 455 | py | Python | what_apps/do/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/do/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | what_apps/do/functions.py | SlashRoot/WHAT | 69e78d01065142446234e77ea7c8c31e3482af29 | [
"MIT"
] | null | null | null | from .models import Task
from django.contrib.auth.models import User
from django.template import loader, Context
def get_tasks_in_prototype_related_to_object(prototype_id, object):
from django.contrib.contenttypes.models import ContentType
user_contenttype = ContentType.objects.get_for_model(object)
return Task.objects.filter(prototype__id=prototype_id, related_objects__content_type=user_contenttype, related_objects__object_id=object.id) | 50.555556 | 144 | 0.850549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3756410dd9d9c95d1fb6468b563488800ac6d65f | 6,840 | py | Python | distriploy/github.py | exmakhina/distriploy | ec8c7a30bdc2fa45a5b9816b33ef46283301aaf0 | [
"MIT"
] | 1 | 2020-07-07T21:19:41.000Z | 2020-07-07T21:19:41.000Z | distriploy/github.py | neuropoly/distriploy | ec8c7a30bdc2fa45a5b9816b33ef46283301aaf0 | [
"MIT"
] | 14 | 2020-07-07T14:03:04.000Z | 2021-03-03T17:47:02.000Z | distriploy/github.py | exmakhina/distriploy | ec8c7a30bdc2fa45a5b9816b33ef46283301aaf0 | [
"MIT"
] | 1 | 2020-10-30T14:43:38.000Z | 2020-10-30T14:43:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 vi:et
import sys, io, os, logging
import re
import json
import tempfile
import subprocess
import urllib.request, urllib.error
logger = logging.getLogger(__name__)
__all__ = (
"get_remote",
"create_release",
"download_default_release_asset",
"upload_release_asset",
"update_release_with_mirror_urls",
"get_repo_releases",
)
def get_remote(target_repo, cfg_root):
"""
Get the organization/repo_name from a repo
"""
remote = cfg_root.get("remote", "origin")
cmd = ["git", "config", f"remote.{remote}.url"]
res = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=target_repo)
url = res.stdout.rstrip().decode()
m = re.match(r"^git@github.com:(?P<repo>\S+)\.git$", url)
if m is not None:
return m.group("repo")
m = re.match(r"^https://github.com/(?P<repo>\S+)\.git$", url)
if m is not None:
return m.group("repo")
raise ValueError(url)
def create_release(github_repo, git_tag, gh_token, cfg_root):
"""
Create a new release within the target repository.
:return: release metadata with id on success.
"""
logger.info("Creating a new release for %s at revision %s", github_repo, git_tag)
url = "https://api.github.com/repos/{}/releases".format(github_repo)
headers = {
"Authorization": "token {}".format(gh_token),
"Content-Type": "application/json",
}
root = {
"tag_name": git_tag,
"name": git_tag,
"draft": False,
"prerelease": False,
}
payload = json.dumps(root).encode("utf-8")
req = urllib.request.Request(url, headers=headers, method="POST", data=payload)
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 201:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
logger.debug("ret: %s", ret)
release_id = ret["id"]
logger.info("Release (id:%s) successfully createad.", release_id)
return release_id
def download_default_release_asset(github_repo, release_id, gh_token, target_dir):
"""
Download the default asset of a given release
:return: relative path to downloaded file.
"""
logger.info("Downloading release default artifact...")
url = f"https://api.github.com/repos/{github_repo}/releases/{release_id}"
headers = {
"Authorization": f"token {gh_token}",
"Content-Type": "application/octet-stream",
}
req = urllib.request.Request(url, headers=headers, method="GET")
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 200:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
logger.debug("ret: %s", ret)
pnpv = "{}-{}".format(github_repo.split("/")[-1], ret["tag_name"])
asset_name = f"{pnpv}.zip"
downloaded_asset_path = os.path.join(target_dir, asset_name)
urllib.request.urlretrieve(
ret["zipball_url"], downloaded_asset_path, #reporthook=...
)
return downloaded_asset_path
def upload_release_asset(github_repo, release_id, asset_path, gh_token):
"""
Uploads a release asset to a target release.
:return: Download link of the uploaded asset.
"""
logger.info("Uploading default release asset to sct-data/%s", github_repo)
asset_name = os.path.basename(asset_path)
url = f"https://uploads.github.com/repos/{github_repo}/releases/{release_id}/assets?name={asset_name}"
headers = {
"Authorization": f"token {gh_token}",
"Content-Type": "application/octet-stream",
}
with io.open(asset_path, "rb") as fi:
payload = fi.read()
req = urllib.request.Request(url, headers=headers, method="POST", data=payload)
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 201:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
logger.debug("ret: %s", ret)
logger.info("Release asset uploaded successfully.")
return ret["browser_download_url"]
def update_release_with_mirror_urls(github_repo, release_id, gh_token, urls):
"""
Include osf download url (in case osf upload was performed) to the Github release
"""
logger.info("Uploading release with OSF download url.")
url = f"https://api.github.com/repos/{github_repo}/releases/{release_id}"
headers = {
"Authorization": f"token {gh_token}",
"Content-Type": "application/json",
}
body = "Asset also available at {}".format(urls)
root = {"body": body}
payload = json.dumps(body).encode("utf-8")
req = urllib.request.Request(url, headers=headers, method="PATCH", data=payload)
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 200:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
return ret
def get_org_repos(org):
url = f"https://api.github.com/orgs/{org}/repos"
headers = {
"Content-Type": "application/json",
}
req = urllib.request.Request(url, headers=headers, method="GET")
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 200:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
logger.debug("ret: %s", ret)
return [ repo["name"] for repo in ret ]
def get_repo_tags(github_repo):
url = f"https://api.github.com/repos/{github_repo}/tags"
headers = {
"Content-Type": "application/json",
}
req = urllib.request.Request(url, headers=headers, method="GET")
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 200:
raise RuntimeError(
"Bad response: {} / {}".format(resp.getcode(), resp.read())
)
ret = json.loads(resp.read().decode("utf-8"))
logger.debug("ret: %s", ret)
return [ tag["name"] for tag in ret ]
def get_repo_releases(github_repo):
url = f"https://api.github.com/repos/{github_repo}/releases"
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as resp:
if resp.getcode() != 200:
msg = "Bad response: {} / {}".format(resp.getcode(), resp.read())
raise RuntimeError(msg)
ret = json.loads(resp.read().decode("utf-8"))
return { rel["tag_name"]: rel["id"] for rel in ret }, { rel["id"]: rel for rel in ret }
| 29.106383 | 106 | 0.616228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,313 | 0.338158 |
3756ed66b18a82931a6853bd82ac2dc78630d72b | 1,597 | py | Python | src/foxdot/sandbox/180823_0948_compo_036.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | 4 | 2018-06-29T18:39:34.000Z | 2021-06-20T16:44:29.000Z | src/foxdot/sandbox/180823_0948_compo_036.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | src/foxdot/sandbox/180823_0948_compo_036.py | Neko250/aisthesis | 1d4a2c3070d10596c28b25ea2170523583e7eff0 | [
"Apache-2.0"
] | null | null | null | Scale.default = Scale.chromatic
Root.default = 0
Clock.bpm = 120
var.ch = var(P[1,5,0,3],8)
~p1 >> play('m', amp=.8, dur=PDur(3,8), rate=[1,(1,2)])
~p2 >> play('-', amp=.5, dur=2, hpf=2000, hpr=linvar([.1,1],16), sample=1).often('stutter', 4, dur=3).every(8, 'sample.offadd', 1)
~p3 >> play('{ ppP[pP][Pp]}', amp=.8, dur=.5, sample=PRand(7), rate=PRand([.5,1,2]))
~p4 >> play('V', amp=.8, dur=1)
~p5 >> play('#', amp=1.2, dur=16, drive=.1, chop=128, formant=1)
~s1 >> glass(var.ch+(0,5,12), amp=1, dur=8, coarse=8)
~s2 >> piano(var.ch+(0,[5,5,3,7],12), amp=1, dur=8, delay=(0,.25,.5))
Group(p1, p2, p3).stop()
p4.lpf = linvar([4000,10],[32,0])
p4.stop()
s2.stop()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:24], dur=PDur(3,8), scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:20], dur=PDur(5,8), scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~s3 >> saw(var.ch+PWalk(), amp=PRand([0,.8])[:64], dur=.25, scale=Scale.minor, oct=PRand([4,5,6])[:32], drive=.05, room=1, mix=.5).spread()
~p4 >> play('V', amp=.5, dur=1, room=1, lpf=1200).every(7, 'stutter', cycle=16)
~p6 >> play('n', amp=.5, dur=1, delay=.5, room=1, hpf=linvar([2000,4000],16), hpr=.1)
s1.oct = 4
s1.formant = 1
~p3 >> play('{ ppP[pP][Pp]}', amp=.5, dur=.5, sample=PRand(7), rate=PRand([.5,1,2]), room=1, mix=.25)
Group(p6, s3).stop()
~s2 >> piano(var.ch+([12,0],[5,5,3,7],[0,12]), amp=1, dur=8, delay=(0,.25,.5), room=1, mix=.5, drive=.05, chop=32, echo=[1,2,1,4])
Group(p3, s1).stop()
Clock.clear()
| 33.270833 | 145 | 0.566061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.051972 |
37581aba7700b786b80fdf0d929cb3132078bc45 | 3,563 | py | Python | test/unit/report/test_table.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | test/unit/report/test_table.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | test/unit/report/test_table.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | from pygsti.report.table import ReportTable
from ..util import BaseCase
class TableInstanceTester(BaseCase):
custom_headings = {
'html': 'test',
'python': 'test',
'latex': 'test'
}
def setUp(self):
self.table = ReportTable(self.custom_headings, ['Normal'] * 4) # Four formats
def test_element_accessors(self):
self.table.add_row(['1.0'], ['Normal'])
self.assertTrue('1.0' in self.table)
self.assertEqual(len(self.table), self.table.num_rows)
row_by_key = self.table.row(key=self.table.row_names[0])
row_by_idx = self.table.row(index=0)
self.assertEqual(row_by_key, row_by_idx)
col_by_key = self.table.col(key=self.table.col_names[0])
col_by_idx = self.table.col(index=0)
self.assertEqual(col_by_key, col_by_idx)
def test_to_string(self):
s = str(self.table)
# TODO assert correctness
def test_render_HTML(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.add_row(['1.0'], ['Normal'])
render = self.table.render('html')
# TODO assert correctness
def test_render_LaTeX(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.add_row(['1.0'], ['Normal'])
render = self.table.render('latex')
# TODO assert correctness
def test_finish(self):
self.table.add_row(['1.0'], ['Normal'])
self.table.finish()
# TODO assert correctness
def test_render_raises_on_unknown_format(self):
with self.assertRaises(NotImplementedError):
self.table.render('foobar')
def test_raise_on_invalid_accessor(self):
# XXX are these neccessary? EGN: maybe not - checks invalid inputs, which maybe shouldn't need testing?
with self.assertRaises(KeyError):
self.table['foobar']
with self.assertRaises(KeyError):
self.table.row(key='foobar') # invalid key
with self.assertRaises(ValueError):
self.table.row(index=100000) # out of bounds
with self.assertRaises(ValueError):
self.table.row() # must specify key or index
with self.assertRaises(ValueError):
self.table.row(key='foobar', index=1) # cannot specify key and index
with self.assertRaises(KeyError):
self.table.col(key='foobar') # invalid key
with self.assertRaises(ValueError):
self.table.col(index=100000) # out of bounds
with self.assertRaises(ValueError):
self.table.col() # must specify key or index
with self.assertRaises(ValueError):
self.table.col(key='foobar', index=1) # cannot specify key and index
class CustomHeadingTableTester(TableInstanceTester):
    """Re-runs the base tester with custom headings passed explicitly."""

    def setUp(self):
        self.table = ReportTable([0.1], ['Normal'], self.custom_headings)

    def test_labels(self):
        """Row and column label listings must track the table's bookkeeping."""
        self.table.add_row(['1.0'], ['Normal'])
        self.assertIn('1.0', self.table)
        row_labels = list(self.table.keys())
        self.assertEqual(row_labels, self.table.row_names)
        self.assertEqual(len(row_labels), self.table.num_rows)
        self.assertIn(row_labels[0], self.table)
        first_row = self.table[row_labels[0]]
        col_labels = list(first_row.keys())
        self.assertEqual(col_labels, self.table.col_names)
        self.assertEqual(len(col_labels), self.table.num_cols)
class CustomHeadingNoFormatTableTester(TableInstanceTester):
    """Variant of the base tester: custom headings, formats omitted (None)."""

    def setUp(self):
        headings = self.custom_headings
        self.table = ReportTable(headings, None)
| 35.989899 | 112 | 0.63963 | 3,482 | 0.977266 | 0 | 0 | 0 | 0 | 0 | 0 | 605 | 0.169801 |
3758975e21f34ce7477e14ad7bcebc66b331327c | 364 | py | Python | pyFunc/pyFunc_12.py | pemtash/pyrevision2022 | c1a9510729b44f61575f406865eb823cb7cabd63 | [
"Apache-2.0"
] | null | null | null | pyFunc/pyFunc_12.py | pemtash/pyrevision2022 | c1a9510729b44f61575f406865eb823cb7cabd63 | [
"Apache-2.0"
] | null | null | null | pyFunc/pyFunc_12.py | pemtash/pyrevision2022 | c1a9510729b44f61575f406865eb823cb7cabd63 | [
"Apache-2.0"
] | null | null | null | def namedArgumentFunction(a, b, c):
print("the values are a: {}, b: {}, c: {}".format(a,b,c))
namedArgumentFunction(100, 200, 300) # positional arguments
namedArgumentFunction(c=3, a=1, b=2) # named arguments
#namedArgumentFunction(181, a=102, b=103) # mix of position + name error
namedArgumentFunction(101, b=102, c=103) # mix of position + no error | 45.5 | 73 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.489011 |
37598780f2fe29fd5a5224fb42181e382bdf6a7e | 1,435 | py | Python | views.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | 2 | 2015-11-05T13:47:44.000Z | 2020-07-20T19:57:45.000Z | views.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | null | null | null | views.py | ezl/hnofficehours | 3729eca064998bd2d0a9ba1b4fe7e56ccc57324b | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from datetime import datetime, timedelta
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.views.generic.simple import direct_to_template
from schedule.models import Event
from schedule.periods import Period
def site_index(request, template_name='index.html'):
    """Render the landing page: users available now plus upcoming office hours.

    NOTE: this view passes ``locals()`` as the template context, so every
    local variable name below is part of the template contract — do not
    rename any of them (including the ``occurences`` typo) without also
    updating the template.
    """
    # most future office hours to show
    MAX_FUTURE_OFFICE_HOURS = 30
    # furthest into the future to display office hours
    MAX_FUTURE_DAYS = 30
    users_available_now = User.objects.filter(profile__is_available=True)
    events = Event.objects.all()
    # One-minute window starting now: any occurrence inside it means the
    # creator is currently holding office hours.
    now = Period(events=events, start=datetime.now(),
                 end=datetime.now() + timedelta(minutes=1))
    occurences = now.get_occurrences()
    users_holding_office_hours_now = map(lambda x: x.event.creator, occurences)
    # NOTE(review): list + map concatenation only works when map() returns a
    # list (Python 2); under Python 3 this raises TypeError — confirm the
    # target interpreter.
    users = set(list(users_available_now) + users_holding_office_hours_now)
    future = Period(events=events, start=datetime.now(),
                    end=datetime.now() + timedelta(days=MAX_FUTURE_DAYS))
    upcoming_office_hours = future.get_occurrences()
    upcoming_office_hours = upcoming_office_hours[:MAX_FUTURE_OFFICE_HOURS]
    return direct_to_template(request, template_name, locals())
def about(request):
    """Render the static about page."""
    template = 'about.html'
    return direct_to_template(request, template)
| 44.84375 | 79 | 0.772822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.075261 |
3759a78356966487f78b8550100b9d77dd7fd966 | 712 | py | Python | declarative/properties/__init__.py | jrollins/python-declarative | ac3ba9bf56611adefb4b2673e50bd8067c024e6b | [
"Apache-2.0"
] | 6 | 2018-02-28T18:32:06.000Z | 2022-03-20T13:04:05.000Z | declarative/properties/__init__.py | jrollins/python-declarative | ac3ba9bf56611adefb4b2673e50bd8067c024e6b | [
"Apache-2.0"
] | 2 | 2021-02-22T17:18:59.000Z | 2021-03-03T16:39:22.000Z | declarative/properties/__init__.py | jrollins/python-declarative | ac3ba9bf56611adefb4b2673e50bd8067c024e6b | [
"Apache-2.0"
] | 1 | 2021-02-09T18:58:53.000Z | 2021-02-09T18:58:53.000Z | # -*- coding: utf-8 -*-
"""
"""
from __future__ import (
division,
print_function,
absolute_import,
)
from .bases import (
PropertyTransforming,
HasDeclaritiveAttributes,
InnerException,
PropertyAttributeError,
)
from .memoized import (
memoized_class_property,
mproperty,
dproperty,
mproperty_plain,
dproperty_plain,
mproperty_fns,
dproperty_fns,
mfunction,
)
from .memoized_adv import (
mproperty_adv,
dproperty_adv,
)
from .memoized_adv_group import (
dproperty_adv_group,
mproperty_adv_group,
group_mproperty,
group_dproperty,
)
#because this is the critical unique object
from ..utilities.unique import (
NOARG,
)
| 16.181818 | 43 | 0.696629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.102528 |
375a12f22fefa628ea296cc515c3de78bb252ecf | 4,454 | py | Python | src/models.py | TahjidEshan/keras-3dgan | c9b46945466189702976b9cd88df7e8418374fee | [
"MIT"
] | 22 | 2017-07-12T21:53:58.000Z | 2021-04-25T22:34:24.000Z | src/models.py | AlanMorningLight/keras-3dgan | 794af8ed8644d5f05403f97a9be9ed706324a89f | [
"MIT"
] | 1 | 2019-03-31T04:22:22.000Z | 2019-04-02T01:56:54.000Z | src/models.py | AlanMorningLight/keras-3dgan | 794af8ed8644d5f05403f97a9be9ed706324a89f | [
"MIT"
] | 7 | 2019-07-15T20:41:49.000Z | 2021-07-27T07:09:49.000Z | from keras.models import Model
from keras.layers import Input
from keras.layers.core import Activation
from keras.layers.convolutional import Conv3D, Deconv3D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
def generator(phase_train=True, params=None):
    """
    Returns a Generator Model with input params and phase_train

    Args:
        phase_train (boolean): training phase or not
        params (dict): Dictionary with model parameters. Defaults to
            {'z_size': 200, 'strides': (2, 2, 2), 'kernel_size': (4, 4, 4)}.

    Returns:
        model (keras.Model): Keras Generator model
    """
    # Avoid a mutable default argument; fall back to the documented defaults.
    if params is None:
        params = {'z_size': 200, 'strides': (2, 2, 2), 'kernel_size': (4, 4, 4)}
    z_size = params['z_size']
    strides = params['strides']
    kernel_size = params['kernel_size']

    inputs = Input(shape=(1, 1, 1, z_size))

    # (filters, strides, padding, activation) for each Deconv/BN/Activation
    # stage, in the same order as the original hand-unrolled g1..g5 layers.
    stages = [
        (512, (1, 1, 1), 'valid', 'relu'),
        (256, strides, 'same', 'relu'),
        (128, strides, 'same', 'relu'),
        (64, strides, 'same', 'relu'),
        (1, strides, 'same', 'sigmoid'),  # final voxel-occupancy layer
    ]
    x = inputs
    for filters, stage_strides, padding, activation in stages:
        x = Deconv3D(filters=filters, kernel_size=kernel_size,
                     strides=stage_strides, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', padding=padding)(x)
        x = BatchNormalization()(x, training=phase_train)
        x = Activation(activation=activation)(x)

    model = Model(inputs=inputs, outputs=x)
    model.summary()
    return model
def discriminator(phase_train=True, params=None):
    """
    Returns a Discriminator Model with input params and phase_train

    Args:
        phase_train (boolean): training phase or not
        params (dict): Dictionary with model parameters. Defaults to
            {'cube_len': 64, 'strides': (2, 2, 2), 'kernel_size': (4, 4, 4),
             'leak_value': 0.2}.

    Returns:
        model (keras.Model): Keras Discriminator model
    """
    # Avoid a mutable default argument; fall back to the documented defaults.
    if params is None:
        params = {'cube_len': 64, 'strides': (2, 2, 2),
                  'kernel_size': (4, 4, 4), 'leak_value': 0.2}
    cube_len = params['cube_len']
    strides = params['strides']
    kernel_size = params['kernel_size']
    leak_value = params['leak_value']

    inputs = Input(shape=(cube_len, cube_len, cube_len, 1))

    # Four strided Conv/BN/LeakyReLU stages, same order as the original
    # hand-unrolled d1..d4 layers.
    x = inputs
    for filters in (64, 128, 256, 512):
        x = Conv3D(filters=filters, kernel_size=kernel_size,
                   strides=strides, kernel_initializer='glorot_normal',
                   bias_initializer='zeros', padding='same')(x)
        x = BatchNormalization()(x, training=phase_train)
        x = LeakyReLU(leak_value)(x)

    # Final single-filter convolution producing the real/fake score.
    x = Conv3D(filters=1, kernel_size=kernel_size,
               strides=(1, 1, 1), kernel_initializer='glorot_normal',
               bias_initializer='zeros', padding='valid')(x)
    x = BatchNormalization()(x, training=phase_train)
    x = Activation(activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x)
    model.summary()
    return model
375c4db97fae968fedba9552a5d467ff9af0a2d3 | 1,216 | py | Python | statmail/Types.py | birm/StatMail | 61e39346a4754f8e9bab1736caf02f4cb1c2c2a5 | [
"MIT"
] | null | null | null | statmail/Types.py | birm/StatMail | 61e39346a4754f8e9bab1736caf02f4cb1c2c2a5 | [
"MIT"
] | 7 | 2016-10-18T18:30:47.000Z | 2017-01-17T21:28:40.000Z | statmail/Types.py | birm/StatMail | 61e39346a4754f8e9bab1736caf02f4cb1c2c2a5 | [
"MIT"
] | null | null | null | from .SMBase import SMBase
""" A collection of builtin server types. """
class Types(SMBase):
    """Registry of built-in server types and their templates/report items."""

    # TODO read from files later for types, but for now...
    # Names of all supported server types. Kept under a distinct name so it
    # is not shadowed by the supported() classmethod below — in the original
    # code the class attribute and the method both used the name `supported`,
    # so the method definition replaced the list and `stype in self.supported`
    # tested membership in a bound method.
    SUPPORTED = ["minimal"]

    @classmethod
    def supported(cls, stype):
        """Determine if the type given is supported."""
        return stype in cls.SUPPORTED

    @classmethod
    def find_template(cls, stype):
        """Find specific templates for types.

        Template format: for one server with {NAME}, and {description},
        {result} for each report item. The returned list is
        [header, name section, result, footer] in html.

        Returns False for unsupported types.
        """
        # TODO use files for templates
        if stype == "minimal":
            return ["<html>", "{NAME}<br/>", "{description}:{result}<br/>", "</html>"]
        else:
            return False

    @classmethod
    def find_reporter(cls, stype):
        """Get report items.

        Each item is a [description, shell command] pair; returns False for
        unsupported types.
        """
        if stype == "minimal":
            return [["CPU Usage", "mpstat | awk '$12 ~ /[0-9.]+/ { \
print 100 - $12\"%\" }'"]]
        else:
            return False
| 32.864865 | 86 | 0.564967 | 1,140 | 0.9375 | 0 | 0 | 954 | 0.784539 | 0 | 0 | 708 | 0.582237 |
375c83f1d331a617d630736291806350c6d98cad | 10,993 | py | Python | k8skiller.py | ech0png/k8skiller | 1f066a0c02acf2b71bb7805c18d08899ba7ac25f | [
"Apache-2.0"
] | null | null | null | k8skiller.py | ech0png/k8skiller | 1f066a0c02acf2b71bb7805c18d08899ba7ac25f | [
"Apache-2.0"
] | null | null | null | k8skiller.py | ech0png/k8skiller | 1f066a0c02acf2b71bb7805c18d08899ba7ac25f | [
"Apache-2.0"
] | null | null | null | import urllib3
from art import *
from terminaltables import AsciiTable
from vulnsVerify import *
from podTable import *
from listarPods import *
from menu import *
from shells import *
from podDeploy import *
# Self-signed certs are common on cluster endpoints; silence TLS warnings.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

tprint("K8SKILLER")

# Top-level mode selection: probe the host for known vulnerabilities, or
# interact directly using a known service-account token.
print("1 - Search for vulnerabilities in the host.")
print()
print("2 - Using Service Account.")
print()
opcao = int(input("Option: "))
print()
if opcao == 2:
    # Service-account mode: every call is authenticated with the given token.
    host = input("Host: ")
    sa = input("Service Account: ")
    ns = input("Service Account Namespace: ")
    print()
    menu_service()
    while True:
        command = input("k8skiller: ")
        print()
        # Option 1 - list pods
        if command == "1":
            listar_pods_service(host, sa, ns)
        # Option 2 - simple shell on the chosen pod
        elif command == "2":
            pod_name = input("Pod Name: ")
            if pod_name == "exit":
                pass
            else:
                shell_service(host, sa, ns, pod_name)
        # Option 3 - deploy a malicious pod
        elif command == "3":
            tabela = [["ID", "POD", "DESCRIPTION"], ["1", "Busybox Mount Node Filesystem", "Monta o filesystem do Node."], ["2", "Busybox RCE Node", "Obtem uma shell no Node."]]
            tabela_ascii = AsciiTable(tabela)
            print(tabela_ascii.table)
            print()
            malicioso = input("Option ID: ")
            if malicioso == "exit":
                pass
            else:
                pod_deploy_service(host, sa, ns, int(malicioso))
        # Option 4 - delete the malicious pod
        elif command == "4":
            pod_name = input("Pod Name: ")
            if pod_name == "exit":
                pass
            else:
                pod_delete_service(host, sa, ns, pod_name)
        # Option "menu" - print the menu options again
        elif command == "menu":
            menu_service()
        # Option "exit" - quit the tool
        elif command == "exit":
            break
elif opcao == 1:
    host = input("Host: ")
    print()
    print("Searching Vulnerabilities...")
    # Check which attacks the host is vulnerable to.
    kubelet, apiserver, hostFull = vuln_verify(host)
    # Cluster not vulnerable to either attack.
    # NOTE(review): `apiserver` is compared with the string "False" here but
    # with the boolean True further below — confirm vuln_verify()'s return
    # type; one of the two comparisons is likely never satisfied.
    if kubelet == False and apiserver == "False":
        print("[-] Host not vulnerable to a Kubelet or API Server attack!")
    # Cluster vulnerable to the Kubelet attack.
    elif kubelet == True:
        print()
        print("[+] Host may be vulnerable to a Kubelet Attack!")
        print()
        menu_kubelet()
        pod, namespace, container = listar_pods(hostFull)
        while True:
            command = input("k8skiller: ")
            # Option 1 - list pods
            if command == "1":
                pod, namespace, container = listar_pods(hostFull)
                podTable = pod_table_kubelet(pod, namespace, container)
                print(podTable)
                print()
                print("--------------------------------------------------------------------------------------------------------")
                print()
            # Option 2 - list secrets
            elif command == "2":
                # Pick the index of the "tiller" pod (last match wins).
                id = 0
                for i in range(len(pod)):
                    if "tiller" in pod[i]:
                        id = i
                listar_secrets_kubelet(host, pod, container, id)
            # Option 3 - simple shell on the chosen pod
            elif command == "3":
                num = input("Pod ID: ")
                if num == "exit":
                    pass
                else:
                    # Displayed IDs are 1-based; convert to a list index.
                    id = int(num) - 1
                    while True:
                        print()
                        comando_exec = input(pod[id]+" # ")
                        shellPod = shell(comando_exec, hostFull, namespace, pod, container, id)
                        if shellPod == "exit":
                            break
                        else:
                            # NOTE(review): the command is executed a second
                            # time here just to print its output — confirm the
                            # double execution is intended.
                            print(shell(comando_exec, hostFull, namespace, pod, container, id))
                    print()
                    print("--------------------------------------------------------------------------------------------------------")
                    print()
            # Option 4 - deploy a malicious pod
            elif command == "4":
                tabela = [["ID", "POD MALICIOSO", "DESCRIÇÃO"], ["1", "Busybox Mount Node Filesystem", "Monta o filesystem do Node."], ["2", "Busybox RCE Node", "Obtem uma shell no Node."]]
                tabela_ascii = AsciiTable(tabela)
                print(tabela_ascii.table)
                print()
                malicioso = int(input("Option ID: "))
                # NOTE(review): `malicioso` is already an int, so the "exit"
                # comparison below can never match (and typing "exit" raises
                # ValueError inside int()).
                if malicioso == "exit":
                    pass
                else:
                    # Correctly obtain the id of the pod that has pod-creation
                    # privileges, which will be used for the RCE.
                    id = 0
                    pod, namespace, container = listar_pods(hostFull)
                    for i in range(len(pod)):
                        if "tiller" in pod[i]:
                            id = i + 1
                    pod_deploy(hostFull, pod, namespace, container, malicioso, id)
                    print()
                    print("--------------------------------------------------------------------------------------------------------")
                    print()
            # Option 5 - delete the malicious pod
            elif command == "5":
                pod, namespace, container = listar_pods(hostFull)
                pod_id = 0
                pod_id_malicioso = ""
                malicioso = 0
                for i in range(len(pod)):
                    # Look for a previously deployed busybox pod.
                    if pod[i] == "busybox-rce" or pod[i] == "busybox-filesystem":
                        pod_id_malicioso = pod[i]
                        # Deletion is performed through the privileged
                        # "tiller" container, so find its index.
                        for y in range(len(container)):
                            if container[y] == "tiller":
                                pod_id = y
                            else:
                                pass
                        print("*** POD BUSYBOX SPOTTED! ***")
                        print()
                        malicioso = int(input("Are you sure? (1 - YES / 0 - NO): "))
                        if malicioso == 1:
                            pod_delete(hostFull, pod, namespace, container, pod_id, pod_id_malicioso)
                            print()
                            print("--------------------------------------------------------------------------------------------------------")
                            print()
            # Option 6 - get access to the host
            elif command == "6":
                hostShell(host, hostFull, namespace, pod, container)
                print()
                print("--------------------------------------------------------------------------------------------------------")
                print()
            # Option "menu" - print the menu options again
            elif command == "menu":
                menu_kubelet()
            # Option "exit" - quit the tool
            elif command == "exit":
                break
    # Cluster vulnerable to the API Server attack (may run after the
    # Kubelet loop above exits — deliberately `if`, not `elif`).
    if apiserver == True:
        print("[+] Host may be vulnerable to an API Server attack!")
        print()
        menu_api()
        while True:
            command = input("k8skiller: ")
            # Option 1 - list secrets
            if command == "1":
                print()
                print("--------------------------------------------------------------------------------------------------------")
                print()
                listar_secrets(hostFull)
                print("--------------------------------------------------------------------------------------------------------")
                print()
            # Option 2 - list pods
            elif command == "2":
                print()
                print("--------------------------------------------------------------------------------------------------------")
                print()
                listar_pods_api(hostFull)
                print("--------------------------------------------------------------------------------------------------------")
                print()
            # Option 3 - shell on a pod
            elif command == "3":
                nome = input("Pod Name: ")
                if nome == "exit":
                    pass
                else:
                    namespace_str = input("Pod Namespace: ")
                    if namespace_str == "exit":
                        pass
                    else:
                        shell_api(hostFull, namespace_str, nome)
                        print()
                        print("--------------------------------------------------------------------------------------------------------")
                        print()
            # Option 4 - deploy a malicious pod
            elif command == "4":
                tabela = [["ID", "POD MALICIOSO", "DESCRIÇÃO"], ["1", "Busybox Mount Node Filesystem", "Monta o filesystem do Node."], ["2", "Busybox RCE Node", "Obtem uma shell no Node."]]
                tabela_ascii = AsciiTable(tabela)
                print(tabela_ascii.table)
                print()
                malicioso = input("Option ID: ")
                # Correctly obtain the id of the pod that has pod-creation
                # privileges, which will be used for the RCE.
                if malicioso == "exit":
                    pass
                else:
                    pod_deploy_api(hostFull, int(malicioso))
                    print()
                    print("--------------------------------------------------------------------------------------------------------")
                    print()
            # Option 5 - delete a pod
            elif command == "5":
                pod = input("Pod Name to delete: ")
                if pod == "exit":
                    pass
                else:
                    print()
                    ns = input("Pod Namespace: ")
                    if ns == "exit":
                        pass
                    else:
                        malicioso = int(input("Are you sure you want to delete the pod " +(pod)+ " in the namespace " + (ns) + " (1 - YES / 0 - NO): "))
                        if malicioso == 1:
                            pod_delete_api(hostFull, ns, pod)
                            print()
                            print("--------------------------------------------------------------------------------------------------------")
                            print()
            # Option "menu" - print the menu options again
            elif command == "menu":
                menu_api()
            # Option "exit" - quit the tool
            elif command == "exit":
                break
375cd4d6dc56e6469cae24821b097bb1ab86ac19 | 1,676 | py | Python | pokershell/intro.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | [
"Apache-2.0"
] | 6 | 2016-05-13T07:39:37.000Z | 2022-03-05T07:23:46.000Z | pokershell/intro.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | [
"Apache-2.0"
] | 1 | 2017-12-18T09:08:28.000Z | 2017-12-31T01:48:32.000Z | pokershell/intro.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | [
"Apache-2.0"
] | 5 | 2016-10-11T23:54:35.000Z | 2022-03-05T07:23:47.000Z | import prettytable
def _create_intro():
    """Build the multi-line intro text shown when the shell starts."""
    intro_head = """
Texas hold'em command line calculator and simulator.
Simulation command example:
JdJc 6 0.2; QdAc8h 4 1.0; Jh 1.5; 2h 3 3.2
"""
    token_col = 'Line Tokens'
    explanation_col = 'Explanation'
    table = prettytable.PrettyTable([token_col, explanation_col])
    table.max_width[token_col] = 15
    table.max_width[explanation_col] = 50
    table.hrules = prettytable.ALL
    # (token, explanation) rows, added in display order.
    rows = [
        ("'JdJc'",
         "Player's face-down cards. "
         "These cards need to be specified before any other cards "
         "on command line."),
        ("'5' '4' '2'",
         "Number of players in given stage. The number is decreasing "
         "as players fold."),
        ("'0.2' '1.0' '1.5' '3.2'",
         "Pot size in given stage. The number is increasing "
         "by continuous betting. The number must contain '.'"
         " to be distinguishable from number of players."),
        ("';'",
         "Separates game stages. The game stage means whenever game state "
         "changes with (new common card, pot increases by betting "
         "or some player folds). The user can go back in command line history "
         "with up arrow and continue on previous line by writing separator ';' "
         "and after separator writes only what changed since previous state."),
        ("'QdAc8h'", "Flop cards. Three common cards."),
        ("'Jh'", "Turn card. Fourth common card."),
        ("'2h'", "River card. Fifth common card."),
    ]
    for token, explanation in rows:
        table.add_row([token, explanation])
    return '\n'.join((intro_head, str(table), ''))


INTRO = _create_intro()
| 40.878049 | 86 | 0.603819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,002 | 0.597852 |
375d646ba6e1a05a1beb8bd9fc2faa1d4c02305c | 5,216 | py | Python | tests/archive/test_archive_value.py | heikomuller/histore | d600052514a1c5f672137f76a6e1388184b17cd4 | [
"BSD-3-Clause"
] | 2 | 2020-09-05T23:27:41.000Z | 2021-08-08T20:46:54.000Z | tests/archive/test_archive_value.py | heikomuller/histore | d600052514a1c5f672137f76a6e1388184b17cd4 | [
"BSD-3-Clause"
] | 22 | 2020-05-22T01:38:08.000Z | 2021-04-28T12:41:46.000Z | tests/archive/test_archive_value.py | heikomuller/histore | d600052514a1c5f672137f76a6e1388184b17cd4 | [
"BSD-3-Clause"
] | 1 | 2021-08-08T20:46:58.000Z | 2021-08-08T20:46:58.000Z | # This file is part of the History Store (histore).
#
# Copyright (C) 2018-2021 New York University.
#
# The History Store (histore) is released under the Revised BSD License. See
# file LICENSE for full license details.
"""Unit test for archived cell values."""
import pytest
from histore.archive.value import MultiVersionValue, SingleVersionValue
from histore.archive.timestamp import SingleVersion, Timestamp, TimeInterval
def test_cell_history():
    """Test adding values to the history of a dataset row cell."""
    # A value that exists only in version 1.
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    assert cell.at_version(version=1) == 1
    assert cell.is_single_version()
    assert not cell.is_multi_version()
    # Accessing a version outside the timestamp raises unless suppressed.
    with pytest.raises(ValueError):
        cell.at_version(version=2)
    assert cell.at_version(version=2, raise_error=False) is None
    # Merging an identical value extends the timestamp rather than forking.
    cell = cell.merge(value=1, version=2)
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == 1
    # Same value in both versions -> no provenance difference.
    assert cell.diff(original_version=1, new_version=2) is None
    assert cell.at_version(version=3, raise_error=False) is None
    # Version 3 does not exist, so the diff records a deletion (new is None).
    prov = cell.diff(original_version=2, new_version=3)
    assert prov is not None
    assert prov.old_value == 1
    assert prov.new_value is None
    # Merging a *different* value (int 1 vs str '1') forks into two entries.
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    cell = cell.merge(value='1', version=2)
    assert len(cell.values) == 2
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == '1'
    prov = cell.diff(original_version=1, new_version=2)
    assert prov is not None
    assert prov.old_value == 1
    assert prov.new_value == '1'
    with pytest.raises(ValueError):
        cell.at_version(version=3)
    # Re-merging an already known value reuses its entry (still two entries).
    cell = cell.merge(value=1, version=3)
    assert len(cell.values) == 2
    assert cell.at_version(version=1) == 1
    assert cell.at_version(version=2) == '1'
    assert cell.at_version(version=3) == 1
    assert not cell.is_single_version()
    assert cell.is_multi_version()
def test_extend_cell_value_timestamp():
    """Test extending the timestamp of a cell value."""
    cell = SingleVersionValue(value=1, timestamp=SingleVersion(version=1))
    # Extending from a covered origin (1) adds the new version (2).
    cell = cell.extend(version=2, origin=1)
    assert not cell.timestamp.contains(0)
    assert cell.timestamp.contains(1)
    assert cell.timestamp.contains(2)
    assert not cell.timestamp.contains(3)
    # Extending from an origin the value does not cover (0) is a no-op:
    # version 4 is not added.
    cell = cell.extend(version=4, origin=0)
    assert not cell.timestamp.contains(0)
    assert cell.timestamp.contains(1)
    assert cell.timestamp.contains(2)
    assert not cell.timestamp.contains(3)
    assert not cell.timestamp.contains(4)
    # Fork the value history (1 vs '1') and extend each branch from the
    # version that carries the respective value.
    cell = cell.merge(value='1', version=3)
    cell = cell.merge(value=1, version=4)
    cell = cell.extend(version=5, origin=4)
    cell = cell.extend(version=6, origin=3)
    assert cell.at_version(1) == 1
    assert cell.at_version(2) == 1
    assert cell.at_version(3) == '1'
    assert cell.at_version(4) == 1
    assert cell.at_version(5) == 1
    assert cell.at_version(6) == '1'
    # Version 0 never existed.
    with pytest.raises(ValueError):
        cell.at_version(0)
def test_rollback_multi_value():
    """Test rollback for multi version values."""

    def two_version_value():
        # Value 1 lives in versions [2, 3]; value 2 in versions [4, 5].
        return MultiVersionValue([
            SingleVersionValue(
                value=1,
                timestamp=Timestamp(intervals=[TimeInterval(start=2, end=3)]),
            ),
            SingleVersionValue(
                value=2,
                timestamp=Timestamp(intervals=[TimeInterval(start=4, end=5)]),
            ),
        ])

    rolled = two_version_value().rollback(4)
    # Both values survive a rollback to version 4.
    assert isinstance(rolled, MultiVersionValue)
    assert len(rolled.values) == 2
    assert rolled.at_version(3) == 1
    assert rolled.at_version(4) == 2
    # Rolling back to version 2 collapses the history to a single value.
    rolled = rolled.rollback(2)
    assert isinstance(rolled, SingleVersionValue)
    assert rolled.value == 1
    # -- Rollback to version that did not contain the value -------------------
    assert two_version_value().rollback(1) is None
def test_rollback_single_value():
    """Test rollback for single version values."""
    archived = SingleVersionValue(
        value=1,
        timestamp=Timestamp(intervals=[TimeInterval(start=1, end=3)]),
    )
    rolled = archived.rollback(2)
    # The value survives, but its timestamp is truncated at version 2.
    assert rolled.value == 1
    assert rolled.timestamp.contains(1)
    assert rolled.timestamp.contains(2)
    assert not rolled.timestamp.contains(3)
    # Rolling back before the value existed yields nothing.
    assert rolled.rollback(0) is None
def test_value_repr():
    """Test string representations for archive values."""
    single = SingleVersionValue(
        value=1,
        timestamp=Timestamp(intervals=[TimeInterval(start=1, end=3)]),
    )
    assert str(single) == '(1 [[1, 3]])'
    # Multi-version repr is the comma-joined repr of its members.
    multi = MultiVersionValue([
        SingleVersionValue(
            value=val,
            timestamp=Timestamp(intervals=[TimeInterval(start=lo, end=hi)]),
        )
        for val, lo, hi in [(1, 2, 3), (2, 4, 5)]
    ])
    assert str(multi) == '((1 [[2, 3]]), (2 [[4, 5]]))'
| 35.243243 | 79 | 0.665836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 654 | 0.125383 |
375da91964c49efa8682191ed0413b8e77c32ee0 | 891 | py | Python | server/src/api/api_environment.py | SpongeyBob/ALFRED | f91a46c5f9ed0eb1cd37adcca4c9045b129e066d | [
"MIT"
] | null | null | null | server/src/api/api_environment.py | SpongeyBob/ALFRED | f91a46c5f9ed0eb1cd37adcca4c9045b129e066d | [
"MIT"
] | null | null | null | server/src/api/api_environment.py | SpongeyBob/ALFRED | f91a46c5f9ed0eb1cd37adcca4c9045b129e066d | [
"MIT"
] | null | null | null | # Add paths toward dependecies in different subdirectories
import os
import sys
sys.path.append(os.path.abspath('./drone'))
sys.path.append(os.path.abspath('./log'))
# Add dependencies
from drone_list import DroneList
from environment import Environment
from setup_logging import LogsConfig
logsConfig = LogsConfig()
logger = logsConfig.logger('EnvironmentApi')
def api_environment_set_mode(data):
    """Reset the drone fleet and (re)configure the environment mode.

    Expected keys in *data*:
        mode_chosen: the environment mode to activate.
        number_of_drone: how many drones to create (string or int).
    """
    mode = data['mode_chosen']
    number_drones = data['number_of_drone']
    DroneList.delete_drones()
    Environment.set_mode(mode)
    # Drones are created identically in both modes (the original duplicated
    # this call in each branch); only simulation additionally launches the
    # simulator.
    DroneList.createDrones(int(number_drones), mode)
    if Environment.is_in_simulation():
        Environment.launch_simulation(number_drones)
def api_environment_set_real_position(data):
    """Replace the stored initial drone positions with *data*, in place."""
    # The attribute name (including its typo) is defined by DroneList.
    positions = DroneList.initial_posisitions
    positions.clear()
    positions.extend(data)
375e217f752444584219ab50db3fdf6f47a97b25 | 1,275 | py | Python | jts/backend/event/migrations/0002_auto_20191009_1119.py | goupaz/babylon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-08-08T09:03:17.000Z | 2019-08-08T09:03:17.000Z | backend/event/migrations/0002_auto_20191009_1119.py | goupaz/website | ce1bc8b6c52ee0815a7b98842ec3bde0c20e0add | [
"Apache-2.0"
] | 2 | 2020-10-09T19:16:09.000Z | 2020-10-10T20:40:41.000Z | jts/backend/event/migrations/0002_auto_20191009_1119.py | goupaz/babylon-hackathon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-07-21T01:42:21.000Z | 2019-07-21T01:42:21.000Z | # Generated by Django 2.2 on 2019-10-09 18:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2) migration adding FK/M2M fields to the
    event models. Do not edit the operations by hand.

    NOTE(review): ``initial = True`` on a second migration (0002) is unusual
    — confirm this was intended when the migrations were split.
    """

    initial = True

    dependencies = [
        ('event', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0001_initial'),
    ]

    operations = [
        # eventattendee.user -> AUTH_USER_MODEL (cascade on delete).
        migrations.AddField(
            model_name='eventattendee',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='event_attendee', to=settings.AUTH_USER_MODEL),
        ),
        # event.event_type -> EventType, kept (set to NULL) if the type is deleted.
        migrations.AddField(
            model_name='event',
            name='event_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='event.EventType'),
        ),
        # event.host_user -> AUTH_USER_MODEL, optional, kept if the user is deleted.
        migrations.AddField(
            model_name='event',
            name='host_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
        # event.user_types: many-to-many to users.UserType.
        migrations.AddField(
            model_name='event',
            name='user_types',
            field=models.ManyToManyField(to='users.UserType'),
        ),
    ]
| 31.875 | 141 | 0.62902 | 1,118 | 0.876863 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.167059 |
37641cfd7f9be6481d06402662c737c2251b2be7 | 3,971 | py | Python | fennlp/layers/albert_transformer.py | transformerzhou/NLP | d3a50c7df735e97aeba70d40d1988ec4adb8f0af | [
"MIT"
] | 1 | 2020-08-15T09:32:23.000Z | 2020-08-15T09:32:23.000Z | fennlp/layers/albert_transformer.py | walker-liu/fennlp | 7432595342b2f2139a788187d3b46fd2097bb10a | [
"MIT"
] | null | null | null | fennlp/layers/albert_transformer.py | walker-liu/fennlp | 7432595342b2f2139a788187d3b46fd2097bb10a | [
"MIT"
] | null | null | null | #! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@Author:zhoukaiyin
"""
import tensorflow as tf
from fennlp.tools import create_initializer
from .attention import ALBERTAttention
from fennlp.layers import dense
class AlbertTransformer(tf.keras.layers.Layer):
    """One ALBERT transformer block: self-attention plus a feed-forward
    network, each followed by a residual connection and layer normalisation.
    """

    def __init__(self,
                 hidden_size=768,
                 num_attention_heads=1,
                 attention_head_size=64,
                 attention_probs_dropout_prob=0.0,
                 intermediate_size=3072,
                 intermediate_act_fn=None,
                 initializer_range=0.02,
                 hidden_dropout_prob=0.0,
                 use_einsum=True,
                 name=None,
                 **kwargs):
        super(AlbertTransformer, self).__init__(name=name, **kwargs)
        self.hidden_size = hidden_size
        self.use_einsum = use_einsum
        self.attention_head_size = attention_head_size
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.intermediate_act_fn = intermediate_act_fn
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
        self.attention = ALBERTAttention(
            num_attention_heads=self.num_attention_heads,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            # Bug fix: honour the configured flag instead of hard-coding
            # True — every other sub-layer already uses self.use_einsum.
            use_einsum=self.use_einsum,
            name='self',
        )
        # Projects the concatenated attention heads back to hidden_size.
        self.dense_layer_3d_proj = dense.DenseLayer3dProj(
            self.hidden_size,
            self.attention_head_size,
            create_initializer(self.initializer_range),
            None,
            use_einsum=self.use_einsum,
            name="dense"
        )
        # Feed-forward expansion to intermediate_size with the given activation.
        self.dense_layer_2d = dense.DenseLayer2d(
            self.intermediate_size,
            create_initializer(self.initializer_range),
            self.intermediate_act_fn,
            use_einsum=self.use_einsum,
            num_attention_heads=self.num_attention_heads,
            name="dense"
        )
        # Feed-forward contraction back to hidden_size (no activation).
        self.out_dense_layer_2d = dense.DenseLayer2d(
            self.hidden_size,
            create_initializer(self.initializer_range),
            None,
            use_einsum=self.use_einsum,
            num_attention_heads=self.num_attention_heads,
            name="dense"
        )
        self.attdropout = tf.keras.layers.Dropout(self.hidden_dropout_prob)
        self.ffdropout = tf.keras.layers.Dropout(self.hidden_dropout_prob)
        self.attlayer_norm = tf.keras.layers.LayerNormalization(axis=-1, name="LayerNorm")
        self.ffnlayer_norm = tf.keras.layers.LayerNormalization(axis=-1, name="LayerNorm")
        self.built = True

    def call(self, input_tensor, attention_mask=None, is_training=True):
        # Self-attention sub-layer with residual + layer norm.
        with tf.keras.backend.name_scope("attention_1"):
            attention_output = self.attention(input_tensor, input_tensor,
                                              attention_mask, True)
            with tf.keras.backend.name_scope("output"):
                attention_output = self.dense_layer_3d_proj(attention_output)
                attention_output = self.attdropout(attention_output, training=is_training)
        attention_output = self.attlayer_norm(attention_output + input_tensor)
        # Feed-forward sub-layer with residual + layer norm.
        with tf.keras.backend.name_scope("ffn_1"):
            with tf.keras.backend.name_scope("intermediate"):
                intermediate_output = self.dense_layer_2d(attention_output)
                with tf.keras.backend.name_scope("output"):
                    ffn_output = self.out_dense_layer_2d(intermediate_output)
                    ffn_output = self.ffdropout(ffn_output, training=is_training)
        ffn_output = self.ffnlayer_norm(ffn_output + attention_output)
        return ffn_output
| 41.364583 | 90 | 0.648703 | 3,756 | 0.945857 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.042559 |
376519ec24ba2ad35ffc9686805878b26230c5a8 | 897 | py | Python | ansiblemetrics/playbook/num_included_vars.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | 1 | 2020-04-24T16:09:14.000Z | 2020-04-24T16:09:14.000Z | ansiblemetrics/playbook/num_included_vars.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | ansiblemetrics/playbook/num_included_vars.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | import ansiblemetrics.utils as utils
from ansiblemetrics.ansible_metric import AnsibleMetric
class NumIncludedVars(AnsibleMetric):
    """Measure how many variables a playbook pulls in via ``include_vars``."""

    def count(self):
        """Count the ``include_vars`` keys found anywhere in the playbook.

        Example
        -------
        .. highlight:: python
        .. code-block:: python

            from ansiblemetrics.general.num_included_vars import NumIncludedVars

            playbook = '''
            - name: Include a play after another play
              include_vars: myvars.yaml
            '''

            NumIncludedVars(playbook).count()
            >> 1

        Returns
        -------
        int
            number of included variables
        """
        playbook_keys = utils.all_keys(self.playbook)
        return len([key for key in playbook_keys if key == 'include_vars'])
| 24.243243 | 80 | 0.57971 | 801 | 0.892977 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.684504 |
376705b0b8ad10c2fc0878dcf3a019ac3ddc7559 | 1,721 | py | Python | predict.py | smacawi/tweet-classifier | 948f7c4123e37f07071482e528d411203166e5f7 | [
"MIT"
] | null | null | null | predict.py | smacawi/tweet-classifier | 948f7c4123e37f07071482e528d411203166e5f7 | [
"MIT"
] | 10 | 2020-01-24T23:03:28.000Z | 2021-04-26T12:01:09.000Z | predict.py | smacawi/tweet-classifier | 948f7c4123e37f07071482e528d411203166e5f7 | [
"MIT"
] | 1 | 2019-12-23T23:46:47.000Z | 2019-12-23T23:46:47.000Z | from allennlp.data.vocabulary import Vocabulary
from content_analyzer.models.rnn_classifier import RnnClassifier
from allennlp.data.tokenizers.word_tokenizer import WordTokenizer
from content_analyzer.data.dataset_readers.twitter import TwitterNLPDatasetReader
from allennlp.data.token_indexers import PretrainedBertIndexer
from allennlp.modules.token_embedders import PretrainedBertEmbedder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder, PytorchSeq2VecWrapper
import torch
from allennlp.predictors import Predictor
from allennlp.predictors.text_classifier import TextClassifierPredictor
import overrides
from allennlp.common.util import JsonDict
# --- Reader / tokenizer setup -------------------------------------------
# BERT wordpiece indexer + word tokenizer, combined into the dataset reader
# that is also reused by the predictor below.
indexer = PretrainedBertIndexer('bert-base-uncased')
wt = WordTokenizer()
tdr = TwitterNLPDatasetReader({"tokens": indexer}, wt)
# Keyword arguments for the torch GRU encoder; input size matches the
# 768-dim BERT-base hidden states.
GRU_args = {
    "bidirectional": True,
    "input_size": 768,
    "hidden_size": 768,
    "num_layers": 1,
}
print("vocab")
# Vocabulary produced during training of the flood model.
vocab = Vocabulary.from_files("out/flood_model/vocabulary")
print("embedder")
token_embedder = PretrainedBertEmbedder("bert-base-uncased")
text_embedder = BasicTextFieldEmbedder({"tokens": token_embedder}, allow_unmatched_keys = True)
print("encoder")
seq2vec = PytorchSeq2VecWrapper(torch.nn.GRU(batch_first=True, **GRU_args))
print("model")
model = RnnClassifier(vocab, text_embedder, seq2vec)
print("model state")
# Restore the best checkpoint saved during training.
with open("out/flood_model/best.th", 'rb') as f:
    state_dict = torch.load(f)
model.load_state_dict(state_dict)
# Run a single example sentence through the trained classifier.
predictor = TextClassifierPredictor(model, tdr)
prediction = predictor.predict("five people missing according to state police. if you have any information please contact us.")
print(prediction) | 40.97619 | 127 | 0.818129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.176641 |
3769d73589d328a0932e33ad97e6ca3459c3dafc | 4,270 | py | Python | mmur/viz/generators.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | mmur/viz/generators.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | mmur/viz/generators.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | import math
import numpy as np
from scipy.special import expit, logit
import matplotlib.pyplot as plt
from mmur.viz import _set_plot_style
# Color palette used by the plotting helpers below (returned while the
# shared mmur plot style is applied).
COLORS = _set_plot_style()
def plot_logstic_dgp(N=500, figsize=None):
    """Plot example of DGP as used in mmur.generators.LogisticGenerator.

    NOTE(review): the name looks like a typo for ``plot_logistic_dgp``; it is
    kept unchanged because callers may rely on it.

    Uses the global NumPy RNG, so the figure differs between calls unless the
    seed is fixed by the caller.

    Parameters
    ----------
    N : int
        number of points to generate in plot
    figsize : tuple, default=None
        figure passed to plt.subplots, default size is (12, 7)

    Returns
    -------
    fig : matplotlib.figure.Figure
    ax : matplotlib.axes._subplots.AxesSubplot
    """
    # True model coefficients: intercept 0.5, slope 1.2.
    betas = np.array((0.5, 1.2))
    # Design matrix: intercept column plus one uniform covariate.
    X = np.ones((N, 2))
    X[:, 1] = np.random.uniform(-10., 10.1, size=N)
    L = X.dot(betas)
    # Noise-free probabilities vs. probabilities with latent Gaussian noise.
    gt_proba = expit(L)
    proba_noisy = expit(L + np.random.normal(0, 0.5, size=N))
    # Binary outcomes sampled from the noisy probabilities.
    y = np.random.binomial(1, proba_noisy)
    figsize = figsize or (12, 7)
    fig, ax = plt.subplots(figsize=figsize)
    # Sort by the covariate so the probability curve draws left-to-right.
    sidx = np.argsort(X[:, 1])
    x = X[sidx, 1]
    ax.plot(x, gt_proba[sidx], label='true P', lw=2)
    ax.scatter(x, proba_noisy[sidx], c='grey', marker='x', label='noisy P')
    ax.scatter(x, y[sidx], c=COLORS[2], marker='x', s=50, label='y')
    ax.legend(fontsize=14)
    ax.set_ylabel('probability', fontsize=14)
    ax.set_xlabel('X', fontsize=14)
    ax.set_title('Logistic data generating process', fontsize=16)
    return fig, ax
def plot_probas(
    probas, ground_truth, n_sets=None, alt_label=None, axs=None
):
    """Plot sorted probabilities compared to ground truth probability.

    Left panel: each (sorted) probability set against the DGP linear
    estimate.  Right panel: Q-Q style comparison against the sorted
    ground-truth probabilities.

    Parameters
    ----------
    probas : np.ndarray[float]
        the classifier probabilities of shape (holdout_samples, n_sets)
    ground_truth : np.ndarray[float]
        ground truth probabilities, 1d array
    n_sets : int, float, default=None
        number of columns in proba to plot. If int it is interpreted as the
        number of columns. If a float as a fraction of the columns. Either
        form is clamped to the number of available columns. Default is
        ``max(floor(0.1 * probas.shape[1]), min(30, n_cols))``
    alt_label : str, default=None
        label for the source of probabilities, default is 'holdout'
    axs : np.ndarray[matplotlib.axes._subplots.AxesSubplot], default=None
        an array containing the axes to plot on, must be 1d and of length >= 2

    Returns
    -------
    fig : matplotlib.figure.Figure, optional
        the figure is returned when ``axs`` is None
    axs : matplotlib.axes._subplots.AxesSubplot
        the created or passed axes object
    """
    if probas.ndim == 1:
        probas = probas[:, None]
    alt_label = alt_label or 'holdout'
    if axs is None:
        fig, axs = plt.subplots(figsize=(14, 7), nrows=1, ncols=2)
    else:
        fig = None
    n_cols = probas.shape[1]
    # BUG FIX: clamp explicit requests with ``min``.  The previous
    # ``max(n_cols, n_sets)`` ignored smaller requests and raised an
    # IndexError in the loop below whenever ``n_sets > n_cols``; the float
    # branch ``max(floor(frac * n_cols), n_cols)`` always collapsed to
    # ``n_cols``, silently ignoring the fraction.
    if isinstance(n_sets, int):
        n_sets = min(n_cols, max(1, n_sets))
    elif isinstance(n_sets, float):
        n_sets = min(n_cols, max(math.floor(n_sets * n_cols), 1))
    else:
        n_sets = max(math.floor(0.1 * n_cols), min(30, n_cols))
    sorted_gt = np.sort(ground_truth)
    xvals = logit(sorted_gt)
    # All but the last selected set are plotted without a legend label.
    for i in range(n_sets - 1):
        sarr = np.sort(probas[:, i])
        axs[0].plot(xvals, sarr, c='grey', alpha=0.5)
        axs[1].plot(sorted_gt, sarr, c='grey', alpha=0.5)
    # plot outside loop for easier labelling
    sarr = np.sort(probas[:, -1])
    axs[0].plot(xvals, sarr, c='grey', alpha=0.5, label=alt_label)
    axs[1].plot(sorted_gt, sarr, c='grey', alpha=0.5, label=alt_label)
    # plot DGP
    axs[0].plot(
        xvals,
        sorted_gt,
        c='red',
        ls='--',
        lw=2,
        zorder=10,
        label='DGP',
    )
    axs[0].set_title('Probabilities', fontsize=18)
    axs[0].set_ylabel('proba', fontsize=18)
    axs[0].set_xlabel('DGP linear estimate', fontsize=18)
    axs[0].tick_params(labelsize=16)
    axs[0].legend(fontsize=18)
    # plot DGP
    axs[1].plot(
        ground_truth,
        ground_truth,
        c='red',
        ls='--',
        lw=2,
        zorder=10,
        label='DGP'
    )
    axs[1].set_title('Q-Q ', fontsize=18)
    axs[1].set_ylabel('proba -- ground truth', fontsize=18)
    axs[1].set_xlabel('proba -- draws', fontsize=18)
    axs[1].tick_params(labelsize=16)
    axs[1].legend(fontsize=18)
    if fig is not None:
        fig.tight_layout()
        return fig, axs
    return axs
| 29.86014 | 78 | 0.616159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,649 | 0.386183 |
376b0aa30b86ce7e9604bcd1dba410e16ebe3476 | 3,456 | py | Python | src/robust_deid/sequence_tagging/models/hf/crf/crf_bert_model_for_token_classification.py | obi-ml-public/ehr_deidentification | c9deaf30b8317689d28a4267d15ec13baa9791cd | [
"MIT"
] | null | null | null | src/robust_deid/sequence_tagging/models/hf/crf/crf_bert_model_for_token_classification.py | obi-ml-public/ehr_deidentification | c9deaf30b8317689d28a4267d15ec13baa9791cd | [
"MIT"
] | null | null | null | src/robust_deid/sequence_tagging/models/hf/crf/crf_bert_model_for_token_classification.py | obi-ml-public/ehr_deidentification | c9deaf30b8317689d28a4267d15ec13baa9791cd | [
"MIT"
] | null | null | null | from transformers import (
BertConfig,
BertForTokenClassification,
)
from .conditional_random_field_sub import ConditionalRandomFieldSub
from .crf_token_classifier_output import CRFTokenClassifierOutput
class CRFBertModelForTokenClassification(BertForTokenClassification):
    """BERT token classifier with a linear-chain CRF on top.

    Training minimises the negative log-likelihood of the gold tag sequence
    under the CRF (restricted by ``crf_constraints``) instead of per-token
    cross entropy.  Tokens whose label is ``-100`` (padding / non-first
    WordPiece sub-tokens) are excluded from the CRF.
    """

    def __init__(
            self,
            config: BertConfig,
            crf_constraints
    ):
        """Build the BERT encoder/classifier and attach the CRF layer.

        Args:
            config: standard ``BertConfig``; ``num_labels`` defines the tag set.
            crf_constraints: allowed tag transitions passed to the CRF
                (presumably derived from the labelling scheme -- confirm
                against ``ConditionalRandomFieldSub``).
        """
        super().__init__(config)
        self.crf = ConditionalRandomFieldSub(num_labels=config.num_labels, constraints=crf_constraints)

    def forward(
            self,
            input_ids=None,
            attention_mask=None,
            token_type_ids=None,
            position_ids=None,
            head_mask=None,
            inputs_embeds=None,
            labels=None,
            output_attentions=None,
            output_hidden_states=None,
            return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``. Positions set to ``-100`` are excluded from the CRF loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Or we use self.base_model - might work with auto model class
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Token-level emission scores: (batch, seq_len, num_labels).
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        batch_size = logits.shape[0]
        sequence_length = logits.shape[1]  # NOTE(review): currently unused.
        loss = None
        if labels is not None:
            # Negative of the log likelihood.
            # Loop through the batch here because of 2 reasons:
            # 1- the CRF package assumes the mask tensor cannot have interleaved
            # zeros and ones. In other words, the mask should start with True
            # values, transition to False at some moment and never transition
            # back to True. That can only happen for simple padded sequences.
            # 2- The first column of mask tensor should be all True, and we
            # cannot guarantee that because we have to mask all non-first
            # subtokens of the WordPiece tokenization.
            loss = 0
            for seq_logits, seq_labels in zip(logits, labels):
                # Index logits and labels using prediction mask to pass only the
                # first subtoken of each word to CRF.
                seq_mask = seq_labels != -100
                seq_logits_crf = seq_logits[seq_mask].unsqueeze(0)
                seq_labels_crf = seq_labels[seq_mask].unsqueeze(0)
                # self.crf returns the sequence log-likelihood; negate it.
                loss -= self.crf(inputs=seq_logits_crf, tags=seq_labels_crf)
            # Average the per-sequence losses over the batch.
            loss /= batch_size
        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return CRFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
376dfd5a387065c547c2e044eaa01555cf75fa7a | 2,847 | py | Python | Python_ch8/Ch8_1_String_Op.py | ninhnguyen01/Python_Book | e5e372f1895b06e908cd0dd07dc68a260c34d7ad | [
"Apache-2.0"
] | null | null | null | Python_ch8/Ch8_1_String_Op.py | ninhnguyen01/Python_Book | e5e372f1895b06e908cd0dd07dc68a260c34d7ad | [
"Apache-2.0"
] | null | null | null | Python_ch8/Ch8_1_String_Op.py | ninhnguyen01/Python_Book | e5e372f1895b06e908cd0dd07dc68a260c34d7ad | [
"Apache-2.0"
] | null | null | null | # Basic String Operations (Title)
# Reading
# Iterating over a String with the 'for' Loop (section)
# General Format:
# for variable in string:
#     statement
#     statement
#     etc.
name = 'Juliet'
for ch in name:
    print(ch)

# This program counts the number of times the letter T
# (uppercase or lowercase) appears in a string.
# (with 'for' loop)
def main():
    count = 0
    my_string = input('Enter a sentence: ')
    for ch in my_string:
        if ch == 'T' or ch == 't':
            count += 1
    print(f'The letter T appears {count} times.')

if __name__ == '__main__':
    main()

# Indexing (section)
my_string = 'Roses are red'
ch = my_string[6]
my_string = 'Roses are red'
print(my_string[0], my_string[6], my_string[10])
# negative numbers
my_string = 'Roses are red'
print(my_string[-1], my_string[-2], my_string[-13])

# IndexError Exceptions (section)
# Occur if index out of range for a particular string
city = 'Boston'
# NOTE: deliberate error demonstration -- index 6 is out of range for
# 'Boston' and raises IndexError when this script runs top-to-bottom.
print(city[6])
city = 'Boston'
index = 0
# Deliberate off-by-one: the loop reaches index 6 and raises IndexError.
while index < 7:
    print(city[index])
    index += 1

# The 'len' Function (section)
# useful to prevent loops from iterating beyond the end
# of a string.
city = 'Boston'
size = len(city)
print(size)
city = 'Boston'
index = 0
while index < len(city):
    print(city[index])
    index += 1

# String Concatenation (section)
name = 'Kelly'
name += ' '
name += 'Yvonne'
name += ' '
name += 'Smith'
print(name)

# Strings are immutable (section)
# This program concatenates strings.
# NOTE: this redefines main() from above; only the latest definition is
# called by the __main__ guard below.
def main():
    name = 'Carmen'
    print(f'The name is: {name}')
    name = name + ' Brown'
    print(f'Now the name is: {name}')

if __name__ == '__main__':
    main()

# no string[index] on left side of an assignment operator
# Error below (deliberate): strings are immutable, so item assignment
# raises TypeError.
friend = 'Bill'
friend[0] = 'J'
# End

# Checkpoint
# 8.1 Assume the variable 'name' references a string. Write a
# 'for' loop that prints each character in the string.
name = 'name'
for letter in name:
    print(letter)
# 8.2 What is the index of the first character in a string?
# A. 0
# 8.3 If a string has 10 characters, what is the index of the
# last character?
# A. 9
# 8.4 What happens if you try to use an invalid index to
# access a character in a string?
# A. An IndexError exception will occur if you try to use an
# index that is out of range for a particular string.
# 8.5 How do you find the length of a string?
# A. Use the built-in len function.
# 8.6 What is wrong with the following code? (deliberate error)
animal = 'Tiger'
animal [0] = 'L'
# A. The second statement attempts to assign a value to an
# individual character in the string. Strings are immutable,
# however, so the expression animal [0] cannot appear on the
# left side of an assignment operator.
# End | 22.776 | 63 | 0.636459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,870 | 0.656832 |
376dfe0e1d26b493cacf40c7eb5f653447f4e5c8 | 204 | py | Python | moto/elb/__init__.py | argos83/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | 1 | 2021-03-06T22:01:41.000Z | 2021-03-06T22:01:41.000Z | moto/elb/__init__.py | marciogh/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | null | null | null | moto/elb/__init__.py | marciogh/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | 1 | 2017-10-19T00:53:28.000Z | 2017-10-19T00:53:28.000Z | from __future__ import unicode_literals
from .models import elb_backends
from ..core.models import MockAWS, base_decorator
# Default ELB backend (used when no region is specified explicitly).
elb_backend = elb_backends['us-east-1']
# Decorator / context manager that mocks ELB calls for all registered regions.
mock_elb = base_decorator(elb_backends)
| 29.142857 | 49 | 0.828431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.053922 |
3770da89f4542e5492348ec764138aaa4353f223 | 575 | py | Python | contenttype/tree/TestStandardTree_BlackBox.py | ytyaru/GitHub.Uploader.ContentType.201705020847__old | d20574ea8ed62672c1a89e9feef24da7f720f2de | [
"CC0-1.0"
] | null | null | null | contenttype/tree/TestStandardTree_BlackBox.py | ytyaru/GitHub.Uploader.ContentType.201705020847__old | d20574ea8ed62672c1a89e9feef24da7f720f2de | [
"CC0-1.0"
] | null | null | null | contenttype/tree/TestStandardTree_BlackBox.py | ytyaru/GitHub.Uploader.ContentType.201705020847__old | d20574ea8ed62672c1a89e9feef24da7f720f2de | [
"CC0-1.0"
] | null | null | null | import unittest
from SubTypeTree import SubTypeTreeFactory
from SubTypeTree import VenderTreeFactory
from SubTypeTree import SubTypeTree
from SubTypeTree import VenderTree
from SubTypeTree import GitHubVenderTree
from SubTypeTree import StandardTree
from SubTypeTree import ParsonalTree
from SubTypeTree import UnregisteredTree
class TestStandardTree_BlackBox(unittest.TestCase):
    """Black-box checks for ``StandardTree`` construction."""

    def test_Values(self):
        source_tree_list = ['html']
        standard_tree = StandardTree(source_tree_list)
        # A standard tree has no facet and keeps the list it was built from.
        self.assertEqual(None, standard_tree.GetFacet())
        self.assertEqual(source_tree_list, standard_tree.TreeList)
| 33.823529 | 51 | 0.805217 | 245 | 0.426087 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.010435 |
37726e7da8b2a5ad4ecc6708511432978e5980e1 | 5,331 | py | Python | ade25/widgets/widgets/image/image.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | ade25/widgets/widgets/image/image.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | ade25/widgets/widgets/image/image.py | ade25/ade25.widgets | 272cf1c74a3b97f4e25161c50f178ebe3c1a70d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Module providing base widget"""
import uuid
import uuid as uuid_tool
from Acquisition import aq_inner
from Products.Five import BrowserView
from ade25.widgets.interfaces import IContentWidgets
from plone import api
from plone.api.exc import MissingParameterError
class WidgetImageInline(BrowserView):
    """Placeholder content widget rendering an inline text/image block.

    The stored widget record is handed in via ``widget_data`` when the view
    is called.
    """

    def __call__(self,
                 widget_name='image-inline',
                 widget_type='base',
                 widget_mode='view',
                 widget_data=None,
                 **kw):
        self.params = {
            'widget_name': widget_name,
            'widget_type': widget_type,
            'widget_mode': widget_mode,
            'widget_data': widget_data
        }
        return self.render()

    def render(self):
        return self.index()

    @property
    def edit_mode(self):
        # True only while the widget is rendered in edit mode.
        return self.params['widget_mode'] == 'edit'

    @property
    def record(self):
        return self.params['widget_data']

    def has_content(self):
        # The widget counts as filled as soon as a text block is stored.
        return bool(self.widget_text_block())

    def widget_uid(self):
        """Return the stored widget id, or a freshly generated UUID string."""
        try:
            return self.record['id']
        except (KeyError, TypeError):
            return str(uuid_tool.uuid4())

    def widget_text_block(self):
        """Return the stored text block, or None when absent or malformed."""
        try:
            return self.record['data']['content']['text_column_0']
        except (KeyError, TypeError):
            return None
class WidgetImageCover(BrowserView):
    """Content widget rendering a cover image.

    Reads its stored configuration through the ``IContentWidgets`` storage
    adapter on the current context and renders a responsive ``<figure>`` via
    the ``@@figure`` view of the referenced image object.
    """

    def __call__(self,
                 widget_name='image-cover',
                 widget_type='base',
                 widget_mode='view',
                 widget_data=None,
                 **kw):
        # Stash the call parameters for the property accessors below.
        self.params = {
            'widget_name': widget_name,
            'widget_type': widget_type,
            'widget_mode': widget_mode,
            'widget_data': widget_data
        }
        return self.render()

    def render(self):
        return self.index()

    @property
    def edit_mode(self):
        # True only while the widget is rendered in edit mode.
        if self.params['widget_mode'] == 'edit':
            return True
        return False

    @property
    def record(self):
        return self.params['widget_data']

    def has_content(self):
        # Filled as soon as the storage holds a record for this widget.
        if self.widget_image_cover():
            return True
        return False

    def widget_uid(self):
        """Return the stored widget id, or a freshly generated UUID string."""
        try:
            widget_id = self.record['id']
        except (KeyError, TypeError):
            widget_id = str(uuid_tool.uuid4())
        return widget_id

    def has_lead_image(self):
        """True when the acquisition context itself carries a lead image."""
        context = aq_inner(self.context)
        try:
            lead_img = context.image
        except AttributeError:
            lead_img = None
        if lead_img is not None:
            return True
        return False

    @staticmethod
    def has_stored_image(image_object):
        """True when ``image_object`` has a non-empty ``image`` field."""
        context = image_object
        try:
            lead_img = context.image
        except AttributeError:
            lead_img = None
        if lead_img is not None:
            return True
        return False

    def image_scale(self):
        """Return the configured image scale, falling back to the registry.

        The widget-level ``image_scale`` (if stored) wins over the site-wide
        ``ade25.widgets.image_cover_scale`` registry record.
        """
        registry_record = api.portal.get_registry_record(
            'ade25.widgets.image_cover_scale'
        )
        widget_content = self.widget_stored_data()
        image_scale = widget_content.get('image_scale', registry_record)
        return image_scale

    @staticmethod
    def _compute_aspect_ratio(scale_name):
        """Map a 'ratio-X:Y' scale name to 'X/Y'; any other name maps to '1'."""
        if scale_name.startswith('ratio'):
            return scale_name.split('-')[1].replace(':', '/')
        return '1'

    def image_tag(self, image_uid):
        """Render the ``<figure>`` markup for the image with the given UID.

        Returns None when the UID does not resolve to an object carrying an
        image field.
        """
        image = api.content.get(UID=image_uid)
        if self.has_stored_image(image):
            figure = image.restrictedTraverse('@@figure')(
                image_field_name='image',
                caption_field_name='image_caption',
                scale=self.image_scale(),
                aspect_ratio=self._compute_aspect_ratio(self.image_scale()),
                lqip=True,
                lazy_load=True
            )
            return figure
        return None

    def widget_image_cover(self):
        # NOTE(review): near-duplicate of widget_stored_data() but without
        # the TypeError guard -- consider delegating to it.
        context = aq_inner(self.context)
        storage = IContentWidgets(context)
        content = storage.read_widget(self.widget_uid())
        return content

    def widget_stored_data(self):
        """Read this widget's record from storage; empty dict on failure."""
        context = aq_inner(self.context)
        try:
            storage = IContentWidgets(context)
            content = storage.read_widget(self.widget_uid())
        except TypeError:
            content = dict()
        return content

    def widget_content(self):
        """Assemble the template payload: rendered image tag + public flag."""
        widget_content = self.widget_stored_data()
        image_uid = widget_content['image']
        if 'image_related' in widget_content:
            related_image_record = widget_content.get('image_related')
            if related_image_record:
                try:
                    # NOTE(review): this hands a uuid.UUID instance (not a
                    # string) to image_tag/api.content.get -- confirm the
                    # catalog lookup accepts UUID objects.
                    related_uid = uuid.UUID(str(related_image_record))
                    image_uid = related_uid
                except ValueError:
                    # TODO: Catch edge cases here if necessary
                    pass
        data = {
            'image': self.image_tag(image_uid),
            'public': widget_content['is_public']
        }
        return data
| 28.66129 | 76 | 0.572688 | 5,034 | 0.944288 | 0 | 0 | 853 | 0.160008 | 0 | 0 | 594 | 0.111424 |
3773ee703ede0d70a929a64c5252f89281c909dd | 5,718 | py | Python | tests/test_tie_point_grid.py | Vasudha-AiDash/arosics | a5c32fab4b834938f646dc84979021e4969fdd86 | [
"Apache-2.0"
] | null | null | null | tests/test_tie_point_grid.py | Vasudha-AiDash/arosics | a5c32fab4b834938f646dc84979021e4969fdd86 | [
"Apache-2.0"
] | null | null | null | tests/test_tie_point_grid.py | Vasudha-AiDash/arosics | a5c32fab4b834938f646dc84979021e4969fdd86 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# AROSICS - Automated and Robust Open-Source Image Co-Registration Software
#
# Copyright (C) 2017-2021
# - Daniel Scheffler (GFZ Potsdam, daniel.scheffler@gfz-potsdam.de)
# - Helmholtz Centre Potsdam - GFZ German Research Centre for Geosciences Potsdam,
# Germany (https://www.gfz-potsdam.de/)
#
# This software was developed within the context of the GeoMultiSens project funded
# by the German Federal Ministry of Education and Research
# (project grant code: 01 IS 14 010 A-C).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the module arosics.Tie_Point_Grid."""
import unittest
import tempfile
import os
from pkgutil import find_loader
import shutil
import warnings
# custom
from .cases import test_cases
from arosics import COREG_LOCAL, Tie_Point_Grid
class Test_Tie_Point_Grid(unittest.TestCase):
    """Integration tests for ``arosics.Tie_Point_Grid``.

    A fresh grid is computed from the 'INTER1' test case before every test
    and the output directory is removed afterwards.
    """

    @classmethod
    def setUp(cls):
        # NOTE(review): setUp is declared as a classmethod, so the grid is
        # stored on the class; unittest still invokes it before every test,
        # recomputing the grid each time -- confirm setUpClass wasn't meant.
        CRL = COREG_LOCAL(test_cases['INTER1']['ref_path'], test_cases['INTER1']['tgt_path'],
                          **test_cases['INTER1']['kwargs_local'])
        cls.TPG = Tie_Point_Grid(CRL.COREG_obj, CRL.grid_res,
                                 max_points=100,  # limit to 100 to reduce computational load
                                 outFillVal=CRL.outFillVal,
                                 resamp_alg_calc=CRL.rspAlg_calc,
                                 tieP_filter_level=CRL.tieP_filter_level,
                                 outlDetect_settings=dict(
                                     min_reliability=CRL.min_reliability,
                                     rs_max_outlier=CRL.rs_max_outlier,
                                     rs_tolerance=CRL.rs_tolerance),
                                 dir_out=CRL.projectDir,
                                 CPUs=CRL.CPUs,
                                 progress=CRL.progress,
                                 v=CRL.v,
                                 q=CRL.q)

    def tearDown(self):
        # Remove the project/output directory created by the grid run.
        if os.path.isdir(self.TPG.dir_out):
            shutil.rmtree(self.TPG.dir_out)

    def test_mean_shifts(self):
        """The four mean-shift attributes are plain floats."""
        self.assertIsInstance(self.TPG.mean_x_shift_px, float)
        self.assertIsInstance(self.TPG.mean_y_shift_px, float)
        self.assertIsInstance(self.TPG.mean_x_shift_map, float)
        self.assertIsInstance(self.TPG.mean_y_shift_map, float)

    def test_get_CoRegPoints_table(self):
        """Computing the tie-point table runs without error."""
        self.TPG.get_CoRegPoints_table()

    def test_calc_rmse(self):
        """RMSE computation works with and without outliers."""
        self.TPG.calc_rmse(include_outliers=False)
        self.TPG.calc_rmse(include_outliers=True)

    def test_calc_overall_ssim(self):
        """Overall SSIM works for both outlier/correction combinations."""
        self.TPG.calc_overall_ssim(include_outliers=False, after_correction=True)
        self.TPG.calc_overall_ssim(include_outliers=True, after_correction=False)

    def test_calc_overall_stats(self):
        """Overall stats are non-empty dicts and differ with/without outliers."""
        stats_noOL = self.TPG.calc_overall_stats(include_outliers=False)
        stats_OL = self.TPG.calc_overall_stats(include_outliers=True)
        self.assertTrue(stats_noOL)
        self.assertTrue(stats_OL)
        self.assertIsInstance(stats_noOL, dict)
        self.assertIsInstance(stats_OL, dict)
        self.assertNotEqual(stats_noOL, stats_OL)

    def test_plot_shift_distribution(self):
        """Plotting runs headlessly (the Agg backend warning is silenced)."""
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore', category=UserWarning, message='Matplotlib is currently using agg, '
                                                        'which is a non-GUI backend, so cannot show the figure.')
            self.TPG.plot_shift_distribution()

    def test_dump_CoRegPoints_table(self):
        """The tie-point table can be pickled to disk."""
        with tempfile.TemporaryDirectory() as tmpdir:
            outpath = os.path.join(tmpdir, 'CoRegPoints_table.pkl')
            self.TPG.dump_CoRegPoints_table(outpath)
            self.assertTrue(os.path.isfile(outpath))

    def test_to_GCPList(self):
        """GCP list export runs without error."""
        self.TPG.to_GCPList()

    def test_to_PointShapefile(self):
        """Shapefile export works with and without nodata points."""
        with tempfile.TemporaryDirectory() as tmpdir:
            outpath = os.path.join(tmpdir, 'test_out_shapefile.shp')
            self.TPG.to_PointShapefile(outpath)
            self.assertTrue(os.path.isfile(outpath))
        with tempfile.TemporaryDirectory() as tmpdir:
            outpath = os.path.join(tmpdir, 'test_out_shapefile_incl_nodata.shp')
            self.TPG.to_PointShapefile(outpath, skip_nodata=False)
            self.assertTrue(os.path.isfile(outpath))

    def test_to_vectorfield(self):
        """Vector-field export works in both 'md' and 'uv' modes."""
        with tempfile.TemporaryDirectory() as tmpdir:
            outpath = os.path.join(tmpdir, 'test_vectorfield.bsq')
            self.TPG.to_vectorfield(outpath, fmt='ENVI', mode='md')
            self.assertTrue(os.path.isfile(outpath))
            self.TPG.to_vectorfield(outpath, fmt='ENVI', mode='uv')
            self.assertTrue(os.path.isfile(outpath))

    def test_to_Raster_using_Kriging(self):
        """Kriging interpolation runs when the optional pykrige is installed."""
        if find_loader('pykrige.ok'):
            with tempfile.TemporaryDirectory() as tmpdir:
                outpath = os.path.join(tmpdir, 'X_SHIFT_M__interpolated.bsq')
                self.TPG.to_Raster_using_Kriging(attrName='X_SHIFT_M', fName_out=outpath)
                self.assertTrue(os.path.isfile(outpath))
if __name__ == '__main__':
    # Allow running this test module directly through pytest.
    import pytest
    pytest.main()
| 41.136691 | 113 | 0.650927 | 4,330 | 0.757258 | 0 | 0 | 1,067 | 0.186604 | 0 | 0 | 1,497 | 0.261805 |
3774af41dcc95d857b67d1577a491813ead4a946 | 4,664 | py | Python | tests/system/action/meeting/test_delete_all_speakers_of_all_lists.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | tests/system/action/meeting/test_delete_all_speakers_of_all_lists.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 19 | 2021-11-22T16:25:54.000Z | 2021-11-25T13:38:13.000Z | tests/system/action/meeting/test_delete_all_speakers_of_all_lists.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | null | null | null | from openslides_backend.permissions.permissions import Permissions
from tests.system.action.base import BaseActionTestCase
class MeetingDeleteAllSpeakersOfAllListsActionTest(BaseActionTestCase):
    """System tests for the ``meeting.delete_all_speakers_of_all_lists`` action."""

    def setUp(self) -> None:
        super().setUp()
        # Minimal fixture (one list of speakers with one speaker) used by the
        # permission tests below.
        self.permission_test_model = {
            "list_of_speakers/11": {"meeting_id": 1, "speaker_ids": [1]},
            "speaker/1": {"list_of_speakers_id": 11, "meeting_id": 1},
            "meeting/1": {
                "name": "name_srtgb123",
                "list_of_speakers_ids": [11],
                "speaker_ids": [1],
                "is_active_in_organization_id": 1,
            },
        }

    def test_no_los(self) -> None:
        """Meeting without any list of speakers: action succeeds as a no-op."""
        self.create_model(
            "meeting/110",
            {
                "name": "name_srtgb123",
                "list_of_speakers_ids": [],
                "is_active_in_organization_id": 1,
            },
        )
        response = self.request("meeting.delete_all_speakers_of_all_lists", {"id": 110})
        self.assert_status_code(response, 200)

    def test_one_los_empty(self) -> None:
        """A single, empty list of speakers: nothing to delete, still 200."""
        self.set_models(
            {
                "list_of_speakers/11": {"meeting_id": 110, "speaker_ids": []},
                "meeting/110": {
                    "name": "name_srtgb123",
                    "list_of_speakers_ids": [11],
                    "is_active_in_organization_id": 1,
                },
            }
        )
        response = self.request("meeting.delete_all_speakers_of_all_lists", {"id": 110})
        self.assert_status_code(response, 200)

    def test_1_los_1_speaker(self) -> None:
        """One list with one speaker: that speaker is deleted."""
        self.set_models(
            {
                "list_of_speakers/11": {"meeting_id": 110, "speaker_ids": [1]},
                "speaker/1": {"list_of_speakers_id": 11, "meeting_id": 110},
                "meeting/110": {
                    "name": "name_srtgb123",
                    "list_of_speakers_ids": [11],
                    "speaker_ids": [1],
                    "is_active_in_organization_id": 1,
                },
            }
        )
        response = self.request("meeting.delete_all_speakers_of_all_lists", {"id": 110})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("speaker/1")

    def test_1_los_2_speakers(self) -> None:
        """One list with two speakers: both speakers are deleted."""
        self.set_models(
            {
                "list_of_speakers/11": {"meeting_id": 110, "speaker_ids": [1, 2]},
                "speaker/1": {"list_of_speakers_id": 11, "meeting_id": 110},
                "speaker/2": {"list_of_speakers_id": 11, "meeting_id": 110},
                "meeting/110": {
                    "name": "name_srtgb123",
                    "list_of_speakers_ids": [11],
                    "speaker_ids": [1, 2],
                    "is_active_in_organization_id": 1,
                },
            }
        )
        response = self.request("meeting.delete_all_speakers_of_all_lists", {"id": 110})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("speaker/1")
        self.assert_model_deleted("speaker/2")

    def test_3_los(self) -> None:
        """Three lists (full, empty, one speaker): all speakers are deleted."""
        self.set_models(
            {
                "list_of_speakers/11": {"meeting_id": 110, "speaker_ids": [1, 2]},
                "speaker/1": {"list_of_speakers_id": 11, "meeting_id": 110},
                "speaker/2": {"list_of_speakers_id": 11, "meeting_id": 110},
                "list_of_speakers/12": {"meeting_id": 110, "speaker_ids": []},
                "list_of_speakers/13": {"meeting_id": 110, "speaker_ids": [3]},
                "speaker/3": {"list_of_speakers_id": 13, "meeting_id": 110},
                "meeting/110": {
                    "name": "name_srtgb123",
                    "list_of_speakers_ids": [11, 12, 13],
                    "speaker_ids": [1, 2, 3],
                    "is_active_in_organization_id": 1,
                },
            }
        )
        response = self.request("meeting.delete_all_speakers_of_all_lists", {"id": 110})
        self.assert_status_code(response, 200)
        self.assert_model_deleted("speaker/1")
        self.assert_model_deleted("speaker/2")
        self.assert_model_deleted("speaker/3")

    def test_no_permissions(self) -> None:
        """Without the required permission the action must be rejected."""
        self.base_permission_test(
            self.permission_test_model,
            "meeting.delete_all_speakers_of_all_lists",
            {"id": 1},
        )

    def test_permissions(self) -> None:
        """With ListOfSpeakers.CAN_MANAGE the action must be allowed."""
        self.base_permission_test(
            self.permission_test_model,
            "meeting.delete_all_speakers_of_all_lists",
            {"id": 1},
            Permissions.ListOfSpeakers.CAN_MANAGE,
        )
| 39.193277 | 88 | 0.534091 | 4,538 | 0.972985 | 0 | 0 | 0 | 0 | 0 | 0 | 1,584 | 0.339623 |
3774c0f2587f5eb80eeb06a2ac9483214cae39df | 589 | py | Python | jpyextra/__init__.py | metal3d/jupyter-extra | ee7bd80bea397e516ff2cf44f177fb696dbfd3f1 | [
"MIT"
] | null | null | null | jpyextra/__init__.py | metal3d/jupyter-extra | ee7bd80bea397e516ff2cf44f177fb696dbfd3f1 | [
"MIT"
] | null | null | null | jpyextra/__init__.py | metal3d/jupyter-extra | ee7bd80bea397e516ff2cf44f177fb696dbfd3f1 | [
"MIT"
] | null | null | null | import subprocess
from IPython.display import HTML, display
# Package name constant.
name = "jpyextra"
def datadoc(data):
    """Display a sklearn dataset ``DESCR`` as nicely rendered HTML.

    The description is written in reStructuredText and converted to HTML
    with the external "pandoc" tool, which must be installed (with conda or
    your system package manager), then shown inline in the notebook.

    Parameters
    ----------
    data :
        Dataset object exposing a ``DESCR`` attribute (e.g. a sklearn Bunch).

    Raises
    ------
    FileNotFoundError
        If the pandoc executable is not installed.
    subprocess.CalledProcessError
        If pandoc exits with a non-zero status.
    """
    # '%PLU' markers are stripped from the description before conversion.
    doc = data.DESCR.replace('%PLU', '')
    # subprocess.run replaces the former Popen/communicate dance and, via
    # check=True, surfaces pandoc failures instead of silently displaying
    # empty output.
    result = subprocess.run(
        ['pandoc', '-t', 'html', '-f', 'rst', '--eol', 'lf'],
        input=doc.encode('utf-8'),
        stdout=subprocess.PIPE,
        check=True,
    )
    display(HTML(data=result.stdout.decode()))
| 23.56 | 66 | 0.589134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.363328 |
3777252434bf4837cbda3b2dcda94275b7648d0e | 654 | py | Python | backend/core/introduction_to_algorithms/logics/catalog.py | kehuo/myweb | 3c03cfb0e2380e5dc4e627e3d7decf9e07f7572f | [
"MIT"
] | null | null | null | backend/core/introduction_to_algorithms/logics/catalog.py | kehuo/myweb | 3c03cfb0e2380e5dc4e627e3d7decf9e07f7572f | [
"MIT"
] | null | null | null | backend/core/introduction_to_algorithms/logics/catalog.py | kehuo/myweb | 3c03cfb0e2380e5dc4e627e3d7decf9e07f7572f | [
"MIT"
] | null | null | null | # @File: catalog.py
# @Author: Kevin Huo
# @LastUpdate: 4/10/2020 10:13 AM
import json
from common.utils.http import load_json_file
from core.introduction_to_algorithms.logics.replace_n_to_br import replace_n_to_br_func
# Because `python manage.py runserver` is started from the backend root
# directory, DATA_PATH must be given relative to that root.
# BASE_DATA_PATH = "./data/introduction_to_algorithms/part1/chapter2/section1.json"
def get_catalog_json(args):
    """Load the book catalog JSON and wrap it in a response dict.

    ``args`` is accepted for API symmetry with sibling handlers but is
    unused. The path is relative to the backend root (the server's
    working directory).
    """
    catalog_path = "./data/introduction_to_algorithms/catalog.json"
    return {
        "code": "SUCCESS",
        "data": load_json_file(catalog_path),
    }
| 27.25 | 87 | 0.747706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.563584 |
377a0fd80ade074c00876ad92128f22a2396d2c3 | 2,772 | py | Python | editregions/tests/contrib/embeds/forms.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-01-11T18:21:27.000Z | 2015-01-11T18:21:27.000Z | editregions/tests/contrib/embeds/forms.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | editregions/tests/contrib/embeds/forms.py | kezabelle/django-editregions | 961ddeffb37d30d40fb4e3e9224bc3f956b7a5b5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from django.forms.models import modelform_factory
try:
from unittest.case import TestCase, expectedFailure
except ImportError:
from django.utils.unittest.case import TestCase, expectedFailure
from django_ace import AceWidget
from editregions.contrib.embeds.forms import JavaScriptEditorForm, StylesheetAssetForm, JavascriptAssetForm
from editregions.contrib.embeds.models import JavaScript, JavascriptAsset, StylesheetAsset
class JavaScriptEditorFormTestCase(TestCase):
    """The JavaScript editor form should expose an Ace widget for content."""

    def test_init(self):
        form_cls = modelform_factory(
            model=JavaScript, form=JavaScriptEditorForm, fields=['content']
        )
        widget = form_cls().fields['content'].widget
        self.assertIsInstance(widget, AceWidget)
        self.assertEqual(widget.mode, 'javascript')
        self.assertEqual(widget.theme, 'chrome')
class StylesheetAssetFormTestCase(TestCase):
    """Pattern discovery and default choices for stylesheet asset forms."""

    def test_init(self):
        # Default pattern plus a placeholder-only choice list.
        form = StylesheetAssetForm()
        self.assertEqual(('editregions/embeds/*.css',), form.only_patterns)
        self.assertEqual([('', '---------')], form.fields['local'].choices)

    def test_found_patterns(self):
        form = StylesheetAssetForm(only_patterns=('test-*.css',))
        self.assertEqual(('test-*.css',), form.only_patterns)
        found = sorted(form.fields['local'].choices)
        expected = [('test-1.css', 'test-1.css'), ('test-2.css', 'test-2.css')]
        self.assertEqual(expected, found)

    @expectedFailure
    def test_skipping_fields(self):
        """Known failure: 'local' remains present even when excluded."""
        form_cls = modelform_factory(
            model=StylesheetAsset,
            form=StylesheetAssetForm,
            fields=[],
            exclude=['local'],
        )
        self.assertNotIn('local', form_cls().fields)
class JavascriptAssetFormTestCase(TestCase):
    """Pattern discovery and default choices for javascript asset forms."""

    def test_init(self):
        # Default pattern plus a placeholder-only choice list.
        form = JavascriptAssetForm()
        self.assertEqual(('editregions/embeds/*.js',), form.only_patterns)
        self.assertEqual([('', '---------')], form.fields['local'].choices)

    def test_found_patterns(self):
        form = JavascriptAssetForm(only_patterns=('test-*.js',))
        self.assertEqual(('test-*.js',), form.only_patterns)
        found = sorted(form.fields['local'].choices)
        self.assertEqual([('test-1.js', 'test-1.js')], found)

    @expectedFailure
    def test_skipping_fields(self):
        """Known failure: 'local' remains present even when excluded."""
        form_cls = modelform_factory(
            model=JavascriptAsset,
            form=JavascriptAssetForm,
            fields=[],
            exclude=['local'],
        )
        self.assertNotIn('local', form_cls().fields)
| 42.646154 | 107 | 0.643218 | 2,307 | 0.832251 | 0 | 0 | 702 | 0.253247 | 0 | 0 | 454 | 0.163781 |
377a70f876fe9d1cf59a255b216f8b6eac6c5378 | 1,810 | py | Python | overplot.py | EmlynG/LowFMode | 7a575433f633e709ce67a6bb1bf12a22eff0d0c9 | [
"MIT"
] | null | null | null | overplot.py | EmlynG/LowFMode | 7a575433f633e709ce67a6bb1bf12a22eff0d0c9 | [
"MIT"
] | null | null | null | overplot.py | EmlynG/LowFMode | 7a575433f633e709ce67a6bb1bf12a22eff0d0c9 | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
import os
import re
import numpy as np
import subprocess
from matplotlib import pyplot as plt
# Overplot the continuous-spectrum frequency for several equilibria:
# for each input map, run the external `csphoenix` solver and scatter
# its OUTPUT/omega_csp results onto one shared axes.
inputpath = os.path.join(os.path.realpath('..'),'INPUT/')
print("Initialising")
fig, ax = plt.subplots()
n=0
for filenum in ['INPUT/0.txt','INPUT/1.txt','INPUT/2.txt']:
    # csphoenix reads a fixed filename, so temporarily rename the current
    # input into place, run the solver, then rename it back. Order matters:
    # an exception between the two renames would leave the file moved.
    os.rename(filenum, 'INPUT/equilibrium.map')
    subprocess.call(["csphoenix"])
    os.rename('INPUT/equilibrium.map', filenum)
    n_variable = 8
    n_multiplier = n_variable * 8
    omegafile = 'OUTPUT/omega_csp'
    # Axis limits used for the final plot below.
    omega_min = -2.0
    omega_max = 2.0
    gamma_min = -0.1
    gamma_max = 0.1
    with open(omegafile, 'r') as f:
        # Header line: number of poloidal harmonics M and radial points NR.
        line = f.readline()
        [m, nr] = map(int, line.split())
        print('M = ', m)
        print('NR = ', nr)
        # Pre-size output arrays from the header counts.
        # NOTE(review): assumes the file contains exactly n_output data
        # rows; fewer rows leave trailing zeros in the arrays — confirm.
        n_output = m * n_multiplier * nr
        r = np.zeros(n_output)
        q = np.zeros(n_output)
        gamma = np.zeros(n_output)
        omega = np.zeros(n_output)
        i = 0
        # Each data row: radius, safety factor q, frequency, growth rate.
        for line in f:
            [rf, qf, omegaf, gammaf] = map(float, line.split())
            #print(rf, qf, gammaf, omegaf)
            r[i] = rf
            q[i] = qf
            gamma[i] = gammaf
            omega[i] = omegaf
            i = i + 1
        f.close()
    plt.scatter(r, omega, s=0.5, marker='x', label='flow='+str(n))
    n=n+1
# inner/outer are currently unused leftovers from the saved-figure variant.
inner = 0.0
outer = 1.0
## NAME THE OUTPUT FILES
plt.xlim([np.min(r),np.max(r)])
plt.xlabel('s')
plt.ylim([omega_min,omega_max])
plt.ylabel('$\omega / \omega_{A0}$')
ax.legend()
plt.title('Continuous Spectrum Frequency')
# NOTE(review): plt.figure() here opens an extra empty figure before
# show(); possibly unintentional — confirm before removing.
plt.figure()
plt.show()
#inner = 0.0
#outer = 1.0
## NAME THE OUTPUT FILES
#plt.xlim([np.min(r),np.max(r)])
#plt.xlabel('s')
#plt.ylim([omega_min,omega_max])
#plt.ylabel('$\omega / \omega_{A0}$')
#ax.legend()
#plt.title('Continuous Spectrum Frequency')
#plt.savefig("/SecondDisk/PHOENIX_RUNS/NSTX/OVERPLOTnumeric012.png")
#print("Frequency continuum plot done")
377aac633d51d5823372233e1db44733b25a6883 | 4,052 | py | Python | build_tflite.py | fazil47/UnityCharRecog | c6470a9e927c1a242d52e642ebcb35b2773d0dea | [
"MIT"
] | null | null | null | build_tflite.py | fazil47/UnityCharRecog | c6470a9e927c1a242d52e642ebcb35b2773d0dea | [
"MIT"
] | null | null | null | build_tflite.py | fazil47/UnityCharRecog | c6470a9e927c1a242d52e642ebcb35b2773d0dea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import platform
import shlex
import subprocess
# Destination inside the Unity project for the built native plugins.
PLUGIN_PATH=f'{os.getcwd()}/Assets/TensorFlowLite/Plugins'
# Path to the TensorFlow checkout; filled in from --tfpath when run as a script.
TENSORFLOW_PATH=''
def run_cmd(cmd):
    """Run a shell-style command string with the TensorFlow checkout as cwd."""
    subprocess.call(shlex.split(cmd), cwd=TENSORFLOW_PATH)
def copy(from_tf, to_unity):
    """Copy a built artifact from the TensorFlow tree into the Unity plugin dir."""
    source = f'{TENSORFLOW_PATH}/{from_tf}'
    destination = f'{PLUGIN_PATH}/{to_unity}'
    subprocess.call(['cp', '-vf', source, destination])
def unzip(from_tf, to_unity):
    """Extract a zipped TensorFlow artifact into the Unity plugin dir."""
    archive = f'{TENSORFLOW_PATH}/{from_tf}'
    # unzip takes the target directory fused onto -d as a single argument.
    subprocess.call(['unzip', '-o', archive, '-d' + f'{PLUGIN_PATH}/{to_unity}'])
def build_mac():
    """Build the TFLite C API dylib and the Metal GPU delegate for macOS."""
    # C API library.
    run_cmd('bazel build -c opt --cxxopt=--std=c++11 tensorflow/lite/c:tensorflowlite_c')
    copy('bazel-bin/tensorflow/lite/c/libtensorflowlite_c.dylib', 'macOS/libtensorflowlite_c.dylib')
    # GPU (Metal) delegate; the dylib is renamed on copy.
    run_cmd('bazel build -c opt --copt -Os --copt -DTFLITE_GPU_BINARY_RELEASE --copt -fvisibility=hidden --linkopt -s --strip always --cxxopt=-std=c++14 --apple_platform_type=macos //tensorflow/lite/delegates/gpu:tensorflow_lite_gpu_dylib')
    copy('bazel-bin/tensorflow/lite/delegates/gpu/tensorflow_lite_gpu_dylib.dylib', 'macOS/libtensorflowlite_metal_delegate.dylib')
def build_windows():
    """Build the TFLite C API DLL for Windows."""
    build_cmd = 'bazel build -c opt --cxxopt=--std=c++11 tensorflow/lite/c:tensorflowlite_c'
    run_cmd(build_cmd)
    copy('bazel-bin/tensorflow/lite/c/tensorflowlite_c.dll', 'Windows/libtensorflowlite_c.dll')
def build_ios():
    """Build the TFLite C framework and the GPU delegate framework for iOS."""
    # C API framework (fat binary), delivered as a zip that is unpacked in place.
    run_cmd('bazel build --config=ios_fat -c opt //tensorflow/lite/experimental/ios:TensorFlowLiteC_framework')
    unzip('bazel-bin/tensorflow/lite/experimental/ios/TensorFlowLiteC_framework.zip', 'iOS')
    # GPU delegate framework with bitcode embedded.
    run_cmd('bazel build -c opt --config=ios_fat --copt -Os --copt -DTFLITE_GPU_BINARY_RELEASE --copt -fvisibility=hidden --copt=-fembed-bitcode --linkopt -s --strip always --cxxopt=-std=c++14 //tensorflow/lite/delegates/gpu:tensorflow_lite_gpu_framework --apple_platform_type=ios')
    unzip('bazel-bin/tensorflow/lite/delegates/gpu/tensorflow_lite_gpu_framework.zip', 'iOS')
def build_android():
    """Build the TFLite C shared library and GPU delegate for Android arm64."""
    run_cmd('bazel build -c opt --cxxopt=--std=c++11 --config=android_arm64 //tensorflow/lite/c:libtensorflowlite_c.so')
    copy('bazel-bin/tensorflow/lite/c/libtensorflowlite_c.so', 'Android')
    # GPU delegate .so for the same ABI.
    run_cmd('bazel build -c opt --config android_arm64 --copt -Os --copt -DTFLITE_GPU_BINARY_RELEASE --copt -fvisibility=hidden --linkopt -s --strip always //tensorflow/lite/delegates/gpu:libtensorflowlite_gpu_delegate.so')
    copy('bazel-bin/tensorflow/lite/delegates/gpu/libtensorflowlite_gpu_delegate.so', 'Android')
if __name__ == '__main__':
    # CLI entry point: select which platform libraries to build and where
    # the TensorFlow checkout lives.
    parser = argparse.ArgumentParser(description='Update TensorFlow Lite libraries for Unity')
    parser.add_argument('--tfpath', default='../tensorflow', type=str,
                        help='The path of the TensorFlow repository')
    parser.add_argument('-macos', action='store_true', default=False,
                        help='Build macOS')
    parser.add_argument('-windows', action='store_true', default=False,
                        help='Build Windows')
    parser.add_argument('-ios', action='store_true', default=False,
                        help='Build iOS')
    parser.add_argument('-android', action='store_true', default=False,
                        help='Build Android')
    args = parser.parse_args()

    TENSORFLOW_PATH = os.path.abspath(args.tfpath)
    platform_name = platform.system()

    # Bug fix: the original compared strings with `is` (identity), which is
    # not guaranteed to be true for equal strings; use `==` (equality).
    if args.macos:
        assert platform_name == 'Darwin', f'-macos not supported on the platform: {platform_name}'
        print('Build macOS')
        build_mac()

    if args.windows:
        assert platform_name == 'Windows', f'-windows not supported on the platform: {platform_name}'
        print('Build Windows')
        build_windows()

    if args.ios:
        assert platform_name == 'Darwin', f'-ios not supported on the platform: {platform_name}'
        # Need to set iOS build option in ./configure
        print('Build iOS')
        build_ios()

    if args.android:
        # Need to set Android build option in ./configure
        print('Build Android')
        build_android()
| 47.116279 | 282 | 0.695212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,457 | 0.606367 |
377d4a85e353609bca7adf180de45b20e5d3856d | 2,207 | py | Python | tests/trainers/test_chesapeake.py | nilsleh/torchgeo | 744078fcff7e48957ffa5d0cd6b8acf3f5767b0a | [
"MIT"
] | null | null | null | tests/trainers/test_chesapeake.py | nilsleh/torchgeo | 744078fcff7e48957ffa5d0cd6b8acf3f5767b0a | [
"MIT"
] | null | null | null | tests/trainers/test_chesapeake.py | nilsleh/torchgeo | 744078fcff7e48957ffa5d0cd6b8acf3f5767b0a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from typing import Any, Dict, Generator, cast
import pytest
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from omegaconf import OmegaConf
from torchgeo.datamodules import ChesapeakeCVPRDataModule
from torchgeo.trainers.chesapeake import ChesapeakeCVPRSegmentationTask
from .test_utils import FakeTrainer, mocked_log
class TestChesapeakeCVPRSegmentationTask:
    """Fixtures and a smoke test for the Chesapeake CVPR segmentation task."""

    @pytest.fixture(scope="class", params=[5, 7])
    def class_set(self, request: SubRequest) -> int:
        """Parametrize over both supported label class sets."""
        return cast(int, request.param)

    @pytest.fixture(scope="class")
    def datamodule(self, class_set: int) -> ChesapeakeCVPRDataModule:
        """A fully prepared datamodule over the bundled test data."""
        root = os.path.join("tests", "data", "chesapeake", "cvpr")
        module = ChesapeakeCVPRDataModule(
            root,
            ["de-test"],
            ["de-test"],
            ["de-test"],
            patch_size=32,
            patches_per_tile=2,
            batch_size=2,
            num_workers=0,
            class_set=class_set,
        )
        module.prepare_data()
        module.setup()
        return module

    @pytest.fixture
    def config(self, class_set: int) -> Dict[str, Any]:
        """Task kwargs loaded from the matching default config file."""
        conf_path = os.path.join(
            "conf", "task_defaults", f"chesapeake_cvpr_{class_set}.yaml"
        )
        task_conf = OmegaConf.load(conf_path)
        return cast(Dict[str, Any], OmegaConf.to_object(task_conf.experiment.module))

    @pytest.fixture
    def task(
        self, config: Dict[str, Any], monkeypatch: Generator[MonkeyPatch, None, None]
    ) -> ChesapeakeCVPRSegmentationTask:
        """A task instance wired to a fake trainer and logger."""
        instance = ChesapeakeCVPRSegmentationTask(**config)
        monkeypatch.setattr(instance, "trainer", FakeTrainer())  # type: ignore[attr-defined]
        monkeypatch.setattr(instance, "log", mocked_log)  # type: ignore[attr-defined]
        return instance

    def test_validation(
        self, datamodule: ChesapeakeCVPRDataModule, task: ChesapeakeCVPRSegmentationTask
    ) -> None:
        """A single validation step and epoch end run without error."""
        batch = next(iter(datamodule.val_dataloader()))
        task.validation_step(batch, 0)
        task.validation_epoch_end(0)
| 33.953846 | 88 | 0.666516 | 1,742 | 0.789307 | 0 | 0 | 1,416 | 0.641595 | 0 | 0 | 290 | 0.1314 |
377e726c09a9222d64f6dc7224202d694a239fb8 | 3,120 | py | Python | src/huggingmolecules/models/models_api.py | chrislybaer/huggingmolecules | 210239ac46b467e900a47e8f4520054636744ca6 | [
"Apache-2.0"
] | 60 | 2021-05-07T16:07:26.000Z | 2022-03-26T19:23:54.000Z | src/huggingmolecules/models/models_api.py | gabegomes/huggingmolecules | adc581c97fbc21d9967dd9334afa94b22fb77651 | [
"Apache-2.0"
] | 11 | 2021-05-07T16:01:35.000Z | 2022-03-09T13:06:05.000Z | src/huggingmolecules/models/models_api.py | gabegomes/huggingmolecules | adc581c97fbc21d9967dd9334afa94b22fb77651 | [
"Apache-2.0"
] | 12 | 2021-05-20T08:02:25.000Z | 2022-03-10T14:11:36.000Z | import logging
import os
from typing import Generic, List, Type, Any
import torch
import torch.nn as nn
from ..downloading.downloading_utils import from_cache
from ..featurization.featurization_api import T_BatchEncoding, T_Config, PretrainedFeaturizerMixin
class PretrainedModelBase(nn.Module, Generic[T_BatchEncoding, T_Config]):
def __init__(self, config: T_Config):
super().__init__()
self.config = config
def forward(self, batch: T_BatchEncoding):
raise NotImplementedError
@classmethod
def _get_archive_dict(cls) -> dict:
raise NotImplementedError
@classmethod
def get_config_cls(cls) -> Type[T_Config]:
raise NotImplementedError
@classmethod
def get_featurizer_cls(cls) -> Type[PretrainedFeaturizerMixin[Any, T_BatchEncoding, T_Config]]:
raise NotImplementedError
@classmethod
def from_pretrained(cls,
pretrained_name: str, *,
excluded: List[str] = None,
config: T_Config = None) -> "PretrainedModelBase[T_BatchEncoding, T_Config]":
archive_dict = cls._get_archive_dict()
file_path = from_cache(pretrained_name, archive_dict, 'pt')
if not file_path:
file_path = os.path.expanduser(pretrained_name)
if not os.path.exists(file_path):
raise FileNotFoundError(file_path)
if not config:
raise AttributeError('Set \'config\' attribute when using local path to weights.')
if not config:
config_cls = cls.get_config_cls()
config = config_cls.from_pretrained(pretrained_name)
model = cls(config)
model.load_weights(file_path, excluded=excluded)
return model
def init_weights(self, init_type: str):
for p in self.parameters():
if p.dim() > 1:
if init_type == 'uniform':
nn.init.xavier_uniform_(p)
elif init_type == 'normal':
nn.init.xavier_normal_(p)
else:
raise NotImplementedError()
def _remove_excluded(self, dictionary: dict, *, excluded: List[str] = None):
excluded = excluded if excluded else []
return {k: v for k, v in dictionary.items() if all(k.split('.')[0] != p for p in excluded)}
def load_weights(self, file_path: str, *, excluded: List[str] = None):
state_dict = torch.load(file_path, map_location='cpu')
state_dict = self._remove_excluded(state_dict, excluded=excluded)
result = self.load_state_dict(state_dict, strict=False)
if len(result.missing_keys) > 0:
logging.info(f'Missing keys when loading: {result.missing_keys}')
if len(result.unexpected_keys) > 0:
logging.warning(f'Unexpected keys when loading: {result.unexpected_keys}')
def save_weights(self, file_path: str, *, excluded: List[str] = None):
state_dict = self.state_dict()
state_dict = self._remove_excluded(state_dict, excluded=excluded)
torch.save(state_dict, file_path)
| 39.493671 | 101 | 0.644872 | 2,857 | 0.915705 | 0 | 0 | 1,233 | 0.395192 | 0 | 0 | 245 | 0.078526 |
377f113688c643f861a107b95cf2a20826771ea4 | 445 | py | Python | check_db.py | Yaremenko-R/python_training | f198b898bd9947afd4f71bc01f992909df392a57 | [
"Apache-2.0"
] | null | null | null | check_db.py | Yaremenko-R/python_training | f198b898bd9947afd4f71bc01f992909df392a57 | [
"Apache-2.0"
] | null | null | null | check_db.py | Yaremenko-R/python_training | f198b898bd9947afd4f71bc01f992909df392a57 | [
"Apache-2.0"
] | null | null | null | from fixture.orm import ORMFixture
from fixture.db import DbFixture
from model.group import Group
from model.contact import Contact
# Ad-hoc smoke script: dump the contacts stored in group 174 via the ORM fixture.
database = ORMFixture(host="localhost", name="addressbook", user="root", password="")
try:
    # NOTE(review): `l` is an easily misread name; kept as-is in this doc-only pass.
    l = database.get_contacts_in_group(Group(id="174"))
    # l = sorted(database.get_groups_contact_added(Contact(id="1")), key=Group.id_or_max)
    for item in l:
        print(item)
    print(len(l))
finally:
    pass
| 27.8125 | 88 | 0.723596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.280899 |
3780509baacbecefbe37741bcabfeb4d8e9efcf0 | 5,161 | py | Python | meiduo_mall/meiduo_mall/apps/payment/views.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/views.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/payment/views.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | from django.http import JsonResponse
import os
from alipay import AliPay
from django.views import View
from django.conf import settings
from orders.models import OrderInfo
from payment.models import Payment
class PaymentsView(View):
    """Create an Alipay payment URL for an unpaid order."""

    def get(self, request, order_id):
        """Return the Alipay gateway URL for the given order.

        Only orders that belong to the requesting user and are still in
        status 1 (awaiting payment) may be paid.
        """
        try:
            # status=1: the order is awaiting payment.
            order = OrderInfo.objects.get(
                order_id=order_id, user=request.user, status=1
            )
        except Exception:
            return JsonResponse({'code': 400, 'errmsg': 'order_id有误'})

        # Build the Alipay SDK client from the local key files.
        key_dir = os.path.dirname(os.path.abspath(__file__))
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None,  # use the default callback URL
            app_private_key_path=os.path.join(key_dir, "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(key_dir, "keys/alipay_public_key.pem"),
            sign_type="RSA2",
            debug=settings.ALIPAY_DEBUG,
        )

        # Signed query string for the hosted payment page.
        order_string = alipay.api_alipay_trade_page_pay(
            out_trade_no=order_id,
            total_amount=str(order.total_amount),
            subject="美多商城%s" % order_id,
            return_url=settings.ALIPAY_RETURN_URL,
        )

        return JsonResponse({
            'code': 0,
            'errmsg': 'ok',
            'alipay_url': settings.ALIPAY_URL + '?' + order_string,
        })
class SavePaymentView(View):

    def put(self, request):
        '''Persist the Alipay payment result for an order.'''
        # 1. Read the query-string parameters sent back by Alipay.
        query_dict = request.GET
        # NOTE(review): `dict` shadows the builtin name — kept in this doc-only pass.
        dict = query_dict.dict()
        # 2. Remove the sign entry; the remaining params are what was signed.
        signature = dict.pop('sign')
        # 3. Build the Alipay SDK client from the local key files.
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None,  # default callback URL
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                "keys/alipay_public_key.pem"),
            sign_type="RSA2",
            debug=settings.ALIPAY_DEBUG
        )
        # 4. Verify the signature against the remaining parameters.
        isSuccess = alipay.verify(dict, signature)
        # 5. Only act on verified Alipay redirects.
        if isSuccess:
            # 6. Extract the order id and the Alipay trade (serial) number.
            order_id = dict.get('out_trade_no')
            trade_id = dict.get('trade_no')
            # 7. Record the payment; must happen before the status update below.
            try:
                Payment.objects.create(
                    order_id=order_id,
                    trade_id=trade_id
                )
                # 8. Move the order from "unpaid" (1) to "awaiting comment" (4).
                OrderInfo.objects.filter(order_id=order_id,
                                         status=1).update(status=4)
            except Exception as e:
                return JsonResponse({'code': 400,
                                     'errmsg': '保存失败'})
            # 9. Report success with the trade number.
            return JsonResponse({'code': 0,
                                 'errmsg': 'ok',
                                 'trade_id': trade_id})
        else:
            # 10. Signature check failed: reject the request.
            return JsonResponse({'code': 400,
                                 'errmsg': '非法请求'})
class PaymentStatusView(View):
    """Persist the order payment result (duplicate of SavePaymentView's flow)."""

    def put(self, request):
        # Read the query-string parameters sent back by the frontend/Alipay.
        query_dict = request.GET
        data = query_dict.dict()
        # Extract and remove the signature from the request parameters.
        signature = data.pop('sign')

        # Build the Alipay SDK client from the local key files.
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None,
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/alipay_public_key.pem"),
            sign_type="RSA2",
            debug=settings.ALIPAY_DEBUG
        )
        # Check that this redirect genuinely came from Alipay.
        success = alipay.verify(data, signature)
        if success:
            # Read the order id.
            order_id = data.get('out_trade_no')
            # Read the Alipay trade (serial) number.
            trade_id = data.get('trade_no')
            # Save the Payment record.
            # NOTE(review): unlike SavePaymentView, this path has no
            # try/except around the DB writes — confirm that is intended.
            Payment.objects.create(
                order_id=order_id,
                trade_id=trade_id
            )
            # Move the order status from UNPAID to UNCOMMENT (awaiting review).
            OrderInfo.objects.filter(order_id=order_id,
                                     status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']).update(
                status=OrderInfo.ORDER_STATUS_ENUM["UNCOMMENT"])

            return JsonResponse({'code':0,
                                 'errmsg':'ok',
                                 'trade_id':trade_id})
        else:
            # Payment verification failed: reject the request.
            return JsonResponse({'code':400,
                                 'errmsg':'非法请求'})
3780851862eb17225e7f115d2d0da648abc8841c | 1,579 | py | Python | viewer_process/ghcc/libs/config.py | lloesche/github_commit_crawler | 4d2f077d0835a5d2ea2587267e5afe5137b556d9 | [
"Apache-2.0"
] | null | null | null | viewer_process/ghcc/libs/config.py | lloesche/github_commit_crawler | 4d2f077d0835a5d2ea2587267e5afe5137b556d9 | [
"Apache-2.0"
] | 1 | 2021-03-26T00:23:26.000Z | 2021-03-26T00:23:26.000Z | viewer_process/ghcc/libs/config.py | lloesche/github_commit_crawler | 4d2f077d0835a5d2ea2587267e5afe5137b556d9 | [
"Apache-2.0"
] | null | null | null | import yaml
class ConfigChanger(object):
    ''' Read/write helper for the ghcc YAML config file. '''

    def __init__(self, location):
        # `location` is the filesystem path to the YAML config file.
        self.loc = location  # path to yaml file

    def config_file_ok(self):
        ''' Return True iff the config file exists, parses as YAML, and the
            placeholder default values have been replaced; False otherwise.
        '''
        # Bug fixes vs the original: the file handle is now closed (with),
        # the bare excepts are narrowed, and yaml.safe_load replaces
        # yaml.load (which without an explicit Loader is deprecated and can
        # execute arbitrary constructors on untrusted input).
        try:
            with open(self.loc, 'r') as f:
                config = yaml.safe_load(f)
        except (OSError, yaml.YAMLError):
            return False
        # Check for the untouched default placeholder values.
        try:
            github = config['github']
            if github['accesstoken'] == 'secret_access_token':
                return False
            if github['org_name'] == 'org_name':
                return False
            if github['username'] == 'githubhandle':
                return False
        except (KeyError, TypeError):
            # Missing keys or a malformed (non-mapping) document.
            return False
        return True

    def write_config(self, config):
        ''' Serialize `config` to the yaml file. '''
        with open(self.loc, 'w') as f:
            return yaml.dump(config, f)

    def load_config(self):
        ''' Parse and return the yaml file's contents. '''
        with open(self.loc, 'r') as f:
            return yaml.safe_load(f)

    def get_empty_config(self):
        # Placeholder config used to seed a fresh install.
        return {'github': {'accesstoken': 'secret_access_token',
                           'org_name': 'org_name',
                           'username': 'githubhandle'},
                'log': {'dateformat': '%Y-%m-%d %H:%M:%S',
                        'file': 'ghcc.log',
                        'format': '[%(asctime)s] [%(levelname)s] - %(message)s'}
                }
3782b5cf4bd2c686f5cfd2d7e7b639d2bb313ce6 | 128 | py | Python | fourtynine.py | glennandreph/learnpython | deeb48f9d2c38fcdb9f13119083f3cc7e4836e70 | [
"MIT"
] | 1 | 2017-12-16T16:44:05.000Z | 2017-12-16T16:44:05.000Z | fourtynine.py | glennandreph/learnpython | deeb48f9d2c38fcdb9f13119083f3cc7e4836e70 | [
"MIT"
] | null | null | null | fourtynine.py | glennandreph/learnpython | deeb48f9d2c38fcdb9f13119083f3cc7e4836e70 | [
"MIT"
] | null | null | null | def my_function_with_args(username, greeting):
print("Hello, %s , From My Function! I wish you %s" %(username, greeting))
| 42.666667 | 79 | 0.703125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.351563 |
3782d181d91c88f0ef3b74b5377a70db19324f60 | 3,938 | py | Python | utils.py | josephtjohnson/Meme_Generator | 227080aad0d22d249c6bf1893d726fd3e2b0ec84 | [
"MIT"
] | null | null | null | utils.py | josephtjohnson/Meme_Generator | 227080aad0d22d249c6bf1893d726fd3e2b0ec84 | [
"MIT"
] | null | null | null | utils.py | josephtjohnson/Meme_Generator | 227080aad0d22d249c6bf1893d726fd3e2b0ec84 | [
"MIT"
] | 1 | 2021-09-30T19:10:31.000Z | 2021-09-30T19:10:31.000Z | from QuoteEngine import Ingestor, QuoteModel
from MemeGenerator import MemeEngine
from PIL import Image
import argparse
import random
import os
import textwrap
import logging
# Module logger: DEBUG level overall, with two sinks — utils.log (INFO and
# above) and the console (all records), sharing one timestamped format.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('utils.log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def open_image(category):
    """
    Return the path of a random image from the given category.

    Parameters
    ----------
    category : str
        Image category: 'dog' selects the dog photos; anything else
        falls back to the book photos.

    Returns
    -------
    str
        Path of a randomly chosen image file.

    Raises
    ------
    IndexError
        If the category directory contains no files.
    """
    images = "./_data/photos/book/"
    if category == 'dog':
        images = "./_data/photos/dog/"
    imgs = []
    for root, dirs, files in os.walk(images):
        # Bug fix: extend instead of reassigning, so files from every
        # subdirectory are candidates (previously only the files of the
        # last directory visited by os.walk were kept).
        imgs.extend(os.path.join(root, name) for name in files)
    return random.choice(imgs)
def open_image_app():
    """
    Return the paths of all dog images available to the web app.

    Returns
    -------
    list of str
        Paths of every file found under ./_data/photos/dog/.
    """
    images = "./_data/photos/dog/"
    imgs = []
    for root, dirs, files in os.walk(images):
        # Bug fix: extend instead of reassigning, so files from every
        # subdirectory are collected (previously only the files of the
        # last directory visited by os.walk were kept).
        imgs.extend(os.path.join(root, name) for name in files)
    return imgs
def open_quote(category):
    """
    Return a random quote parsed from the given category's quote files.

    Parameters
    ----------
    category : str
        Quote category: 'dog' uses the dog quote files; anything else
        falls back to the book quote file.
    """
    if category == 'dog':
        quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',
                       './_data/DogQuotes/DogQuotesDOCX.docx',
                       './_data/DogQuotes/DogQuotesPDF.pdf',
                       './_data/DogQuotes/DogQuotesCSV.csv']
    else:
        quote_files = ['./_data/BookQuotes/BookQuotesDOCX.docx']
    quotes = []
    for path in quote_files:
        quotes.extend(Ingestor.parse(path))
    return random.choice(quotes)
def open_quote_app():
    """
    Return all quotes parsed from the dog quote files.
    """
    quote_files = ['./_data/DogQuotes/DogQuotesTXT.txt',
                   './_data/DogQuotes/DogQuotesDOCX.docx',
                   './_data/DogQuotes/DogQuotesPDF.pdf',
                   './_data/DogQuotes/DogQuotesCSV.csv']
    # Flatten the quotes of every file into one list.
    return [quote for path in quote_files for quote in Ingestor.parse(path)]
def image_resize(img_path, width=500):
    """
    Resize an image, preserving aspect ratio, for use by make_meme().

    Parameters
    ----------
    img_path : str
        image file path
    width : int
        target width in pixels, at most 500 (default = 500)

    Returns
    -------
    PIL.Image.Image
        The resized image.
    """
    MAX_WIDTH: int = 500
    assert width is not None, 'Width is None'
    # Bug fix: the original asserted `width >= MAX_WIDTH`, which rejected
    # every width BELOW the maximum; the constant name and the message
    # ('Width > 500') show the intent is an upper bound.
    assert width <= MAX_WIDTH, 'Width > 500'
    with Image.open(img_path) as img:
        ratio = width / float(img.size[0])
        height = int(ratio * img.size[1])
        img = img.resize((width, height))
    return img
def text_draw(draw, text, author, fill, font, width, height):
    """
    Draw an attributed quote at a random position on the image.

    Parameters
    ----------
    draw : ImageDraw-like object
        drawing surface; only its ``text`` method is used
    text : str
        quote body
    author : str
        quote author, rendered on a new line prefixed with '-'
    fill : tuple
        text fill colour
    font : font object
        text font
    width : int
        image width in pixels
    height : int
        image height in pixels
    """
    # Random anchor in the upper-left region; the two randint calls must
    # stay in this order so seeded runs are reproducible.
    x = random.randint(15, int(0.6 * width))
    y = random.randint(20, int(0.8 * height))
    # Wrap the quote to a width proportional to the horizontal room left.
    wrapped = textwrap.fill(text, (width - x) * 0.08)
    caption = wrapped + '\n' + '-' + author
    # Fall back to the top-left corner when the caption looks too long to
    # fit below the chosen anchor (rough character-count heuristic).
    position = (20, 20) if len(wrapped + author) > (height - y) * 0.5 else (x, y)
    draw.text(position, text=caption, fill=fill, font=font)
    return draw
| 23.301775 | 76 | 0.607669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,636 | 0.415439 |
378539d2d8f38f193773d342677f939cb42a4203 | 746 | py | Python | mdn_ik/test.py | uenian33/Franka_Panda_IK_Sensor | c9956fb7a7f1d570104296af72aa2a600085ae6e | [
"MIT"
] | null | null | null | mdn_ik/test.py | uenian33/Franka_Panda_IK_Sensor | c9956fb7a7f1d570104296af72aa2a600085ae6e | [
"MIT"
] | null | null | null | mdn_ik/test.py | uenian33/Franka_Panda_IK_Sensor | c9956fb7a7f1d570104296af72aa2a600085ae6e | [
"MIT"
] | 1 | 2021-12-07T11:47:03.000Z | 2021-12-07T11:47:03.000Z | import torch
# Scratch script exploring torch shape operations (stack / reshape /
# broadcasting); prints random tensors, so output differs per run.
a = torch.rand(3, 4)
#a = a.unsqueeze(0)
#print(a.reshape(3,4,1))
b = torch.rand(3, 4)
#b = b.unsqueeze(0)
print(b)
# c from stack is (3, 5, 4), but is immediately overwritten below.
c = torch.stack([a, b, b, b, b], dim=1)
c = torch.rand(3, 20)
print(c)
# (3, 20) -> (3, 5, 4)
c = c.reshape(3, 5, 4)
print(c.shape)
d = torch.rand(3, 5)
# (3, 5) -> (3, 5, 1) so it broadcasts against c along the last axis.
d = d.reshape(3,5,1)
print(d)
e = c*d
print(c*d)
# Mean over axis 1: (3, 5, 4) -> (3, 4); 12 elements reshape to (6, 2).
print(torch.mean(e, axis=1))
print(torch.mean(e, axis=1).reshape(6,2))
f = torch.mean(e, axis=1).reshape(6,2)
print(f)
#f = f.reshape(f.shape[0],1,f.shape[1])
#print(f)
# Stack three copies: (6, 2) -> (6, 3, 2), then flatten back to (18, 2).
f = torch.stack([f,f,f], dim=1)
print(f)
f = f.reshape(f.shape[0]*f.shape[1], f.shape[2])
print(f)
"""
a = torch.rand(1, 3, 4)
print(a.shape)
b = torch.rand(3, 4)
print(b.shape)
b = b.unsqueeze(0)
print(b.shape)
c = torch.cat([a, b], dim=0)
print(c.shape)
"""
3787795e7d1f35522f3f9e121d7efa5012cb8f56 | 2,846 | py | Python | src/plugins/yiqing/data_source.py | wizardCRain/mini_jx3_bot | ecd3c7c852438227e237157f66f5ccccaa328b8c | [
"MIT"
] | 27 | 2021-12-24T15:59:41.000Z | 2022-03-24T04:22:26.000Z | src/plugins/yiqing/data_source.py | wizardCRain/mini_jx3_bot | ecd3c7c852438227e237157f66f5ccccaa328b8c | [
"MIT"
] | 3 | 2022-02-17T13:28:10.000Z | 2022-03-01T08:55:33.000Z | src/plugins/yiqing/data_source.py | byscut/mini_jx3_bot | 610b43ac8f51b1b0f6041258ded1687c88eaaf5d | [
"MIT"
] | 10 | 2022-01-19T02:47:59.000Z | 2022-03-13T15:18:43.000Z | from datetime import date
from typing import Optional, Tuple
from httpx import AsyncClient
from nonebot.adapters.onebot.v11.message import MessageSegment
from src.utils.browser import browser
from src.utils.log import logger
from .config import CITY_MAP
def _get_city(name: str) -> Tuple[bool, Optional[str], Optional[str]]:
    '''Resolve a place name into (is_valid, province, city) via CITY_MAP.

    Returns
    -------
    * bool: whether ``name`` is a known place
    * str: province name (None when invalid)
    * str: city name (None when ``name`` is itself a province)
    '''
    mapped = CITY_MAP.get(name)
    if mapped is None:
        # Unknown place name.
        return False, None, None
    # An empty mapping marks `name` as a province itself; otherwise
    # `mapped` is the province that contains the city `name`.
    return (True, name, None) if mapped == "" else (True, mapped, name)
async def get_data(name: str) -> MessageSegment:
    '''Query epidemic statistics for a place and render them as an image.'''
    flag, province, city = _get_city(name)
    if not flag:
        return MessageSegment.text('查询失败,请检查参数!')

    # Query either a whole province or a single city within it.
    params = {"province": province}
    if city:
        params['city'] = city

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
        "Accept-Charset": "utf-8",
    }
    async with AsyncClient(headers=headers) as client:
        url = "https://api.yimian.xyz/coro/"
        try:
            req = await client.get(url=url, params=params)
            result = req.json()
            logger.debug(
                f"<y>疫情查询</y> | 返回:{result}"
            )
            # Normalize each field, substituting "-" for missing values.
            data = {}
            if city:
                data['name'] = result.get('cityName') if result.get('cityName') is not None else "-"
            else:
                data['name'] = result.get('provinceName') if result.get('provinceName') is not None else "-"
            # currently confirmed cases
            data['currentConfirmedCount'] = result.get(
                'currentConfirmedCount') if result.get('currentConfirmedCount') is not None else "-"
            data['confirmedCount'] = result.get('confirmedCount') if result.get(
                'confirmedCount') is not None else "-"  # cumulative confirmed
            data['suspectedCount'] = result.get('suspectedCount') if result.get(
                'suspectedCount') is not None else "-"  # suspected cases
            data['curedCount'] = result.get('curedCount') if result.get('curedCount') is not None else "-"  # cumulative cured
            data['deadCount'] = result.get('deadCount') if result.get('deadCount') is not None else "-"  # cumulative deaths
            data['highDangerCount'] = result.get('highDangerCount') if result.get(
                'highDangerCount') is not None else "-"  # severe cases
        except Exception as e:
            logger.error(
                f"<y>疫情查询</y> | 查询失败:{str(e)}"
            )
            return MessageSegment.text(f"查询失败,{str(e)}")
    # Stamp the result with today's date and render via the HTML template.
    time_now = date.today()
    data['time'] = time_now.strftime("%Y-%m-%d")
    pagename = "yiqing.html"
    img = await browser.template_to_image(pagename=pagename, data=data)
    return MessageSegment.image(img)
| 36.025316 | 139 | 0.584329 | 0 | 0 | 0 | 0 | 0 | 0 | 2,361 | 0.782306 | 1,007 | 0.333665 |
378ca6c4004eb7f05493e60723c6ea0ea4a59fbb | 5,136 | py | Python | server/src/sdistance.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | server/src/sdistance.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 7 | 2015-04-11T12:57:42.000Z | 2016-04-08T13:43:44.000Z | server/src/sdistance.py | bepnye/brat | 28acfb2d3cce20bd4d4ff1a67690e271675841f2 | [
"CC-BY-3.0"
] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z | #!/usr/bin/env python
'''
Various string distance measures.
Author: Pontus Stenetorp <pontus stenetorp se>
Version: 2011-08-09
'''
from string import digits, lowercase
from sys import maxint
DIGITS = set(digits)
LOWERCASE = set(lowercase)
TSURUOKA_2004_INS_CHEAP = set((' ', '-', ))
TSURUOKA_2004_DEL_CHEAP = TSURUOKA_2004_INS_CHEAP
TSURUOKA_2004_REPL_CHEAP = set([(a, b) for a in DIGITS for b in DIGITS] +
[(a, a.upper()) for a in LOWERCASE] +
[(a.upper(), a) for a in LOWERCASE] +
[(' ', '-'), ('-', '_')])
# Testing; not sure number replacements should be cheap.
NONNUM_T2004_REPL_CHEAP = set([(a, a.upper()) for a in LOWERCASE] +
[(a.upper(), a) for a in LOWERCASE] +
[(' ', '-'), ('-', '_')])
TSURUOKA_INS = dict([(c, 10) for c in TSURUOKA_2004_INS_CHEAP])
TSURUOKA_DEL = dict([(c, 10) for c in TSURUOKA_2004_DEL_CHEAP])
#TSURUOKA_REPL = dict([(c, 10) for c in TSURUOKA_2004_REPL_CHEAP])
TSURUOKA_REPL = dict([(c, 10) for c in NONNUM_T2004_REPL_CHEAP])
def tsuruoka(a, b):
# Special case for empties
if len(a) == 0 or len(b) == 0:
return 100*max(len(a),len(b))
# Initialise the first column
prev_min_col = [0]
for b_c in b:
prev_min_col.append(prev_min_col[-1] + TSURUOKA_INS.get(b_c, 100))
curr_min_col = prev_min_col
for a_c in a:
curr_min_col = [prev_min_col[0] + TSURUOKA_DEL.get(a_c, 100)]
for b_i, b_c in enumerate(b):
if b_c == a_c:
curr_min_col.append(prev_min_col[b_i])
else:
curr_min_col.append(min(
prev_min_col[b_i + 1] + TSURUOKA_DEL.get(a_c, 100),
curr_min_col[-1] + TSURUOKA_INS.get(b_c, 100),
prev_min_col[b_i] + TSURUOKA_REPL.get((a_c, b_c), 50)
))
prev_min_col = curr_min_col
return curr_min_col[-1]
def tsuruoka_local(a, b, edge_insert_cost=1, max_cost=maxint):
# Variant of the tsuruoka metric for local (substring) alignment:
# penalizes initial or final insertion for a by a different
# (normally small or zero) cost than middle insertion.
# If the current cost at any point exceeds max_cost, returns
# max_cost, which may allow early return.
# Special cases for empties
if len(a) == 0:
return len(b)*edge_insert_cost
if len(b) == 0:
return 100*len(b)
# Shortcut: strict containment
if a in b:
cost = (len(b)-len(a)) * edge_insert_cost
return cost if cost < max_cost else max_cost
# Initialise the first column. Any sequence of initial inserts
# have edge_insert_cost.
prev_min_col = [0]
for b_c in b:
prev_min_col.append(prev_min_col[-1] + edge_insert_cost)
curr_min_col = prev_min_col
for a_c in a:
curr_min_col = [prev_min_col[0] + TSURUOKA_DEL.get(a_c, 100)]
for b_i, b_c in enumerate(b):
if b_c == a_c:
curr_min_col.append(prev_min_col[b_i])
else:
curr_min_col.append(min(
prev_min_col[b_i + 1] + TSURUOKA_DEL.get(a_c, 100),
curr_min_col[-1] + TSURUOKA_INS.get(b_c, 100),
prev_min_col[b_i] + TSURUOKA_REPL.get((a_c, b_c), 50)
))
# early return
if min(curr_min_col) >= max_cost:
return max_cost
prev_min_col = curr_min_col
# Any number of trailing inserts have edge_insert_cost
min_cost = curr_min_col[-1]
for i in range(len(curr_min_col)):
cost = curr_min_col[i] + edge_insert_cost * (len(curr_min_col)-i-1)
min_cost = min(min_cost, cost)
if min_cost < max_cost:
return min_cost
else:
return max_cost
def tsuruoka_norm(a, b):
return 1 - (tsuruoka(a,b) / (max(len(a),len(b)) * 100.))
def levenshtein(a, b):
# Special case for empties
if len(a) == 0 or len(b) == 0:
return max(len(a),len(b))
# Initialise the first column
prev_min_col = [0]
for b_c in b:
prev_min_col.append(prev_min_col[-1] + 1)
curr_min_col = prev_min_col
for a_c in a:
curr_min_col = [prev_min_col[0] + 1]
for b_i, b_c in enumerate(b):
if b_c == a_c:
curr_min_col.append(prev_min_col[b_i])
else:
curr_min_col.append(min(
prev_min_col[b_i + 1] + 1,
curr_min_col[-1] + 1,
prev_min_col[b_i] + 1
))
prev_min_col = curr_min_col
return curr_min_col[-1]
if __name__ == '__main__':
for a, b in (('kitten', 'sitting'), ('Saturday', 'Sunday'), ('Caps', 'caps'), ('', 'bar'), ('dog', 'dog'), ('dog', '___dog__'), ('dog', '__d_o_g__')):
print 'levenshtein', a, b, levenshtein(a,b)
print 'tsuruoka', a, b, tsuruoka(a,b)
print 'tsuruoka_local', a, b, tsuruoka_local(a,b)
print 'tsuruoka_norm', a, b, tsuruoka_norm(a,b)
| 34.013245 | 154 | 0.573988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,052 | 0.204829 |
378cc1e59dd6dd0b75d637ce5507619c66eb1093 | 2,431 | py | Python | expressmanage/customers/views.py | abbas133/expressmanage-free | cd4b5a37fa012781c70ade933885b1c63bc7f2df | [
"MIT"
] | null | null | null | expressmanage/customers/views.py | abbas133/expressmanage-free | cd4b5a37fa012781c70ade933885b1c63bc7f2df | [
"MIT"
] | null | null | null | expressmanage/customers/views.py | abbas133/expressmanage-free | cd4b5a37fa012781c70ade933885b1c63bc7f2df | [
"MIT"
] | null | null | null | from django.views import generic
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from .forms import CustomerForm
from .models import Customer
from .helper import CustomerSummary
class Customer_IndexView(LoginRequiredMixin, generic.ListView):
template_name = 'customers/index.html'
def get_queryset(self):
return Customer.objects.all()
class Customer_DetailView(LoginRequiredMixin, PermissionRequiredMixin, generic.DetailView):
raise_exception = True
permission_required = ('customers.view_customer')
model = Customer
template_name = 'customers/detail.html'
object = None
def get(self, request, *args, **kwargs):
self.object = self.get_object()
recent_invoices = CustomerSummary.get_recent_invoices(self.object)[:3]
active_lots = CustomerSummary.get_active_lots(self.object)
active_invoices = CustomerSummary.get_active_invoices(self.object)
pending_amount = CustomerSummary.get_pending_amount(self.object)
return self.render_to_response(
self.get_context_data(
recent_invoices=recent_invoices,
active_lots=active_lots,
active_invoices=active_invoices,
pending_amount=pending_amount
)
)
class Customer_CreateView(LoginRequiredMixin, PermissionRequiredMixin, generic.CreateView):
raise_exception = True
permission_required = ('customers.add_customer')
model = Customer
form_class = CustomerForm
template_name = 'customers/edit.html'
def get_success_url(self):
return reverse_lazy('customers:customer_detail', kwargs={'pk': self.object.pk})
class Customer_UpdateView(LoginRequiredMixin, PermissionRequiredMixin, generic.UpdateView):
raise_exception = True
permission_required = ('customers.change_customer')
model = Customer
form_class = CustomerForm
template_name = 'customers/edit.html'
def get_success_url(self):
return reverse_lazy('customers:customer_detail', kwargs={'pk': self.object.pk})
class Customer_DeleteView(LoginRequiredMixin, PermissionRequiredMixin, generic.DeleteView):
raise_exception = True
permission_required = ('customers.delete_customer')
model = Customer
template_name = 'customers/delete.html'
success_url = reverse_lazy('customers:customer_index') | 33.763889 | 91 | 0.739202 | 2,167 | 0.891403 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.123817 |
378d2e428384e36ea1ae3ada251fec85dc8bfc58 | 1,945 | py | Python | orbitals.py | inconvergent/orbitals_speedup | 5a93e98dbb334946002df572d618d0d767a910a9 | [
"MIT"
] | 39 | 2015-01-21T17:39:25.000Z | 2022-03-12T21:05:31.000Z | orbitals.py | inconvergent/orbitals_speedup | 5a93e98dbb334946002df572d618d0d767a910a9 | [
"MIT"
] | null | null | null | orbitals.py | inconvergent/orbitals_speedup | 5a93e98dbb334946002df572d618d0d767a910a9 | [
"MIT"
] | 3 | 2016-02-24T22:35:46.000Z | 2020-12-15T20:19:05.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy.random import random, randint
from numpy import zeros, sin, cos
class Orbitals(object):
def __init__(self,num,stp,farl,nearl,friendship_ratio,
friendship_initiate_prob,maxfs):
self.num = num
self.stp = stp
self.farl = farl
self.nearl = nearl
self.friendship_ratio = friendship_ratio
self.friendship_initiate_prob = friendship_initiate_prob
self.maxfs = maxfs
self.X = zeros(num,'float')
self.Y = zeros(num,'float')
self.R = zeros((num,num),'float')
self.A = zeros((num,num),'float')
self.F = zeros((num,num),'int')
def make_friends(self,i):
cand_num = self.F.sum(axis=1)
maxfs = self.maxfs
F = self.F
if cand_num[i]>=maxfs:
return
cand_mask = cand_num<maxfs
cand_mask[i] = 0
cand_ind = cand_mask.nonzero()[0]
cand_dist = self.R[i,cand_ind].flatten()
cand_sorted_dist = cand_dist.argsort()
cand_ind = cand_ind[cand_sorted_dist]
cand_n = len(cand_ind)
if cand_n<1:
return
for k in xrange(cand_n):
if random()<self.friendship_ratio:
j = cand_ind[k]
F[[i,j],[j,i]] = 1
return
def init(self, rad):
from numpy import pi
for i in xrange(self.num):
the = random()*pi*2
phi = random()*pi*2
x = rad * sin(the)
y = rad * cos(the)
self.X[i] = 0.5+x + cos(phi)*rad*0.05
self.Y[i] = 0.5+y + sin(phi)*rad*0.05
def step(self):
from speedup.speedup import pyx_set_distances
from speedup.speedup import pyx_iteration
pyx_set_distances(self.X,self.Y,self.A,self.R,self.num)
pyx_iteration(self.X,self.Y,self.A,self.R,self.F,self.num,
self.stp,self.farl,self.nearl)
if random()<self.friendship_initiate_prob:
k = randint(self.num)
self.make_friends(k)
def get_render_data(self):
return self.X,self.Y,self.F,self.A,self.R
| 22.356322 | 62 | 0.624679 | 1,823 | 0.937275 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.037532 |
378e3c2cc2b4aed3dd1d5afa919592ab4bfb7f68 | 468 | py | Python | surfactant_example/micelle/micelle_factory.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | surfactant_example/micelle/micelle_factory.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | surfactant_example/micelle/micelle_factory.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | from force_bdss.api import BaseDataSourceFactory
from .micelle_model import MicelleDataSourceModel
from .micelle_data_source import MicelleDataSource
class MicelleFactory(BaseDataSourceFactory):
def get_identifier(self):
return "micelle"
def get_name(self):
return "Micelle Aggregation Calculator"
def get_model_class(self):
return MicelleDataSourceModel
def get_data_source_class(self):
return MicelleDataSource
| 23.4 | 50 | 0.767094 | 314 | 0.67094 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.087607 |
378e8237793b5c3217bc562c15e5d1954a382294 | 835 | py | Python | tests/test_image_upload.py | ephes/django-cast | 34b6aab98f7e9a750116ec2949e9cda4f2dcb127 | [
"BSD-3-Clause"
] | 11 | 2018-12-23T15:58:35.000Z | 2021-10-04T12:14:46.000Z | tests/test_image_upload.py | ephes/django-cast | 34b6aab98f7e9a750116ec2949e9cda4f2dcb127 | [
"BSD-3-Clause"
] | 9 | 2018-11-18T12:12:29.000Z | 2022-02-27T09:51:36.000Z | tests/test_image_upload.py | ephes/django-cast | 34b6aab98f7e9a750116ec2949e9cda4f2dcb127 | [
"BSD-3-Clause"
] | 12 | 2018-11-17T15:13:09.000Z | 2020-05-02T00:10:07.000Z | import pytest
from django.urls import reverse
class TestImageUpload:
@pytest.mark.django_db
def test_upload_image_not_authenticated(self, client, small_jpeg_io):
upload_url = reverse("cast:api:upload_image")
small_jpeg_io.seek(0)
r = client.post(upload_url, {"original": small_jpeg_io})
# redirect to login
assert r.status_code == 302
@pytest.mark.django_db
def test_upload_image_authenticated(self, client, user, small_jpeg_io):
# login
r = client.login(username=user.username, password=user._password)
# upload
upload_url = reverse("cast:api:upload_image")
small_jpeg_io.seek(0)
r = client.post(upload_url, {"original": small_jpeg_io})
assert r.status_code == 201
assert int(r.content.decode("utf-8")) > 0
| 29.821429 | 75 | 0.670659 | 785 | 0.94012 | 0 | 0 | 752 | 0.900599 | 0 | 0 | 107 | 0.128144 |
378ea332c2db853a04b4cbceabe21b235b6c359d | 925 | py | Python | commit.py | Delostik/gitlab-statistics | bda26bda9e4c0fb28dd2a48b65bd1047dc85f4b9 | [
"MIT"
] | null | null | null | commit.py | Delostik/gitlab-statistics | bda26bda9e4c0fb28dd2a48b65bd1047dc85f4b9 | [
"MIT"
] | null | null | null | commit.py | Delostik/gitlab-statistics | bda26bda9e4c0fb28dd2a48b65bd1047dc85f4b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import requests
def get_all_commits(base_url, token, project_id, filter_author=''):
res = []
next_page = 1
url_format = '{}/api/v4/projects/{}/repository/commits?ref=master&per_page=100&page={}'
while next_page != '':
url = url_format.format(base_url, project_id, next_page)
resp = requests.get(url, headers={'Private-Token': token})
next_page = resp.headers.get('X-Next-Page')
if filter_author == '':
res.extend(resp.json())
else:
for commit in resp.json():
if commit['author_name'] == filter_author:
res.append(commit)
return res
def get_commit_detail(base_url, token, project_id, commit_id):
url = '{}/api/v4/projects/{}/repository/commits/{}'.format(base_url, project_id, commit_id)
resp = requests.get(url, headers={'Private-Token': token})
return resp.json()
| 35.576923 | 95 | 0.620541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.220541 |
378eda99db9461519b18902ce8432550ad1b4efa | 80 | py | Python | read.py | sundeepsingh1984/openinsiderscrapper | b23d51139031380bcb34f10dcaac92e4d5803dd6 | [
"MIT"
] | null | null | null | read.py | sundeepsingh1984/openinsiderscrapper | b23d51139031380bcb34f10dcaac92e4d5803dd6 | [
"MIT"
] | null | null | null | read.py | sundeepsingh1984/openinsiderscrapper | b23d51139031380bcb34f10dcaac92e4d5803dd6 | [
"MIT"
] | null | null | null | import pandas as pd
df=pd.read_json("D:\eiaScrapper\eio.jl")
print(df.info()) | 13.333333 | 40 | 0.7125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.2875 |
378fa60e9ce1cd0fbc51113706daba7125c8fc17 | 163 | py | Python | src/adafruit_blinka/microcontroller/amlogic/s905x3/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | 294 | 2018-06-30T19:08:27.000Z | 2022-03-26T21:08:47.000Z | src/adafruit_blinka/microcontroller/amlogic/s905x3/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | 421 | 2018-06-30T20:54:46.000Z | 2022-03-31T15:08:37.000Z | src/adafruit_blinka/microcontroller/amlogic/s905x3/pin.py | Jcc99/Adafruit_Blinka | 41f8155bab83039ed9d45276addd3d501e83f3e6 | [
"MIT"
] | 234 | 2018-07-23T18:49:16.000Z | 2022-03-28T16:59:48.000Z | """AmLogic s905x3 pin names"""
# pylint: disable=wildcard-import,unused-wildcard-import
from adafruit_blinka.microcontroller.amlogic.meson_g12_common.pin import *
| 40.75 | 74 | 0.822086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.527607 |
37928600ec08774278fa5bb3307d12131ec68be2 | 30,697 | py | Python | qube/drivers/NI6733.py | ClementGeffroy/qube | 89de914656854aaec1590ce588eac6657a31e6b7 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T12:54:43.000Z | 2022-03-02T12:54:43.000Z | qube/drivers/NI6733.py | ClementGeffroy/qube | 89de914656854aaec1590ce588eac6657a31e6b7 | [
"BSD-3-Clause"
] | null | null | null | qube/drivers/NI6733.py | ClementGeffroy/qube | 89de914656854aaec1590ce588eac6657a31e6b7 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T12:54:45.000Z | 2022-03-02T12:54:45.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 20:00:50 2020
@author: takada
"""
import logging
import numpy as np
import functools
import operator
from typing import List, Dict, Callable
import time
import nidaqmx
from nidaqmx.stream_writers import (
DigitalSingleChannelWriter, AnalogMultiChannelWriter)
from qcodes import Instrument, VisaInstrument, validators as vals
from qcodes.instrument.channel import InstrumentChannel
from qcodes.instrument.parameter import ArrayParameter, Parameter
from qcodes.dataset.sqlite.database import connect
from qcodes.dataset.sqlite.queries import get_last_run
from qcodes.dataset.data_set import load_by_id
log = logging.getLogger(__name__)
class NI6733_ao_voltage_trace(ArrayParameter):
    """Array parameter representing an analog-output voltage trace.

    The trace is attached to one physical channel of an NI6733 card.
    ``get_raw`` is currently a stub: no acquisition is implemented and it
    always yields ``None``.
    """

    def __init__(self, name: str, instrument: InstrumentChannel,
                 channum: int) -> None:
        """Create the voltage-trace parameter.

        Args:
            name: Name of the trace parameter.
            instrument: Instrument channel that owns this trace.
            channum: Index of the analog-output channel the trace belongs to.
        """
        # All trace metadata is fixed; only the name varies per instance.
        trace_kwargs = dict(
            shape=(1,),
            label='voltage',
            unit='V',
            setpoint_names=('Count',),
            setpoint_labels=('Count',),
            setpoint_units=('pts',),
            setpoints=None,
            docstring='Holds analog output trace',
        )
        super().__init__(name=name, **trace_kwargs)
        self._instrument = instrument
        self.channum = channum

    def get_raw(self):
        """Stub getter: acquisition is not implemented, returns ``None``."""
        return None
class NI6733_ao_voltage_channel(InstrumentChannel):
    """One analog-output channel of the NI6733 card.

    The channel keeps a software copy of its output voltage in
    ``_current_val``.  Writing the ``cv`` parameter does NOT move the
    physical output: it only stores a *target* in ``_target_val``; the
    actual (ramped) move is performed later by ``NI6733.DAC_move()``,
    which also updates ``_current_val``.
    """
    def __init__(self, parent: Instrument, name:str,
                 slot_num:int, channum: int, min_val:float=-10.0,
                 fast_sequence:bool=False, fast_sequence_delta:float = -0.1,
                 max_val:float= 10.0) -> None:
        """
        Parameters
        ----------
        parent : Instrument
            Host instrument handler (the NI6733 driver instance).
        name : str
            Given name of the channel.
        slot_num : int
            Slot number of the channel.
        channum : int
            Channel number (0-7 within the slot).
        min_val : float, optional
            Minimum value of the channel voltage value. The default is -10.0.
        max_val : float, optional
            Maximum value of the channel voltage value. The default is 10.0.
        fast_sequence : bool, optional
            Whether this dac is used for fast sequence or not.
        fast_sequence_delta: float
            How far the voltage is moved by the fast sequence from its
            original position.

        Returns
        -------
        None
        """
        super().__init__(parent, name)
        self.instrument = parent  # host NI6733 driver, used to invalidate its fast sequence
        self.slot_num = slot_num
        self.channum = channum
        self._min_val = min_val
        self._max_val = max_val
        self._current_val = 0.0   # software copy of the physical output voltage
        self._target_val = None   # pending target; None means "no move requested"
        self._fast_sequence = fast_sequence
        self._fast_sequence_delta = fast_sequence_delta

        self.add_parameter('min_val',
                           label = 'Minimum value',
                           unit = 'V',
                           get_cmd=self.get_min_val,
                           set_cmd=self.set_min_val,
                           vals = vals.Numbers(-10.0, 10.0)
                           )
        self.add_parameter('max_val',
                           label = 'Maximum value',
                           unit = 'V',
                           get_cmd=self.get_max_val,
                           set_cmd=self.set_max_val,
                           vals = vals.Numbers(-10.0, 10.0)
                           )
        # NOTE(review): 'cv' is validated to +-5 V although min_val/max_val
        # default to +-10 V -- presumably a deliberate software safety limit;
        # confirm with the lab's wiring before widening.
        self.add_parameter('cv',
                           label = 'Current value',
                           unit = 'V',
                           get_cmd=self.get_current_val,
                           set_cmd=self.set_current_val,
                           vals = vals.Numbers(-5.0, 5.0)
                           )
        self.add_parameter('fs',
                           label='fast sequence',
                           get_cmd = self.get_fast_sequence,
                           set_cmd = self.set_fast_sequence,
                           )
        self.add_parameter('fs_delta',
                           label = 'fast sequence delta',
                           unit = 'V',
                           get_cmd = self.get_fast_sequence_delta,
                           set_cmd = self.set_fast_sequence_delta,
                           vals = vals.Numbers(-1.0, 1.0)
                           )

    def get_min_val(self):
        """Return the channel's minimum allowed voltage in V."""
        return self._min_val

    def set_min_val(self, val:float):
        """Set the channel's minimum allowed voltage in V."""
        self._min_val = val

    def get_max_val(self):
        """Return the channel's maximum allowed voltage in V."""
        return self._max_val

    def set_max_val(self, val:float):
        """Set the channel's maximum allowed voltage in V."""
        self._max_val = val

    def get_current_val(self):
        """Return the cached (software) output voltage in V."""
        return self._current_val

    def set_current_val(self, val:float):
        # Only records the target; the physical ramp happens in
        # NI6733.DAC_move(), which then syncs _current_val.
        """Request a move to ``val``; executed by NI6733.DAC_move()."""
        self._target_val = val

    def get_fast_sequence(self):
        """Return True if this channel participates in the fast sequence."""
        return self._fast_sequence

    def set_fast_sequence(self, val:bool):
        """Enable/disable fast-sequence participation; invalidates the prepared sequence."""
        self._fast_sequence = val
        self.instrument._fs_ready = False

    def get_fast_sequence_delta(self):
        """Return the fast-sequence excursion from the current value, in V."""
        return self._fast_sequence_delta

    def set_fast_sequence_delta(self, val:float):
        """Set the fast-sequence excursion in V; invalidates the prepared sequence."""
        self._fast_sequence_delta = val
        self.instrument._fs_ready = False
class NI6733(Instrument):
def __init__(self, name:str, device_name:str = 'PXI2',
slots:List[int]=[3,4,], ms2wait:float = 2.0,
fast_sequence_divider:float = 2.0, fs_pts:int = 101,
**kwargs):
"""
This is the qcodes driver for NI6733 16 bit Analog Output.
Args:
name (str): Given name of the DAC
device_name (str): Name of the PXI device. Default value is 'PXI2'.
slots(List[int]): List of DAC slots. Each slot has 8 DAC channels.
ms2wait (float): Wait time between minimum resolution DAC movement in [ms].
fast_sequence_divider (float): Time between fast sequence movement in [ms].
fs_pts (int): Length of the fast sequence.
"""
super().__init__(name, **kwargs)
self.device_name = device_name
self.slots = slots
self._ms2wait = ms2wait
self._fast_sequence_divider = fast_sequence_divider
self._fs_pts = fs_pts
self._fs_ready = False
self._fast_move_slot_list = list()
self._fast_move_channel_list = dict()
self._fast_move_list = dict()
self._move_points = None
self.write_task = dict()
self.fast_seq_task = dict()
for slot in self.slots:
self.write_task[slot] = nidaqmx.Task()
self.write_task['{:d}'.format(slot)] = False
self.fast_seq_task[slot] = nidaqmx.Task()
self.fast_seq_task['{:d}'.format(slot)] = False
self.ctr_task = nidaqmx.Task()
self.ctr_task_isClosed = False
self.do_task = nidaqmx.Task()
self.do_task_isClosed = False
self.add_parameter('ms2wait',
label = 'ms to wait',
unit = 'ms',
get_cmd = self.get_ms2wait,
set_cmd = self.set_ms2wait,
vals = vals.Numbers(0.0, 100.0))
self.add_parameter('fs_div',
label = 'fast sequence divider',
unit = 'ms',
get_cmd = self.get_fast_sequence_divider,
set_cmd = self.set_fast_sequence_divider,
vals = vals.Numbers(0.0, 100.0))
self.add_parameter('fs_pts',
label = 'fast sequence size',
unit = 'pts',
get_cmd = self.get_fs_pts,
set_cmd = self.set_fs_pts,
vals = vals.Ints(2, 100000)
)
######################
# Add channels to the instrument
for slot in self.slots:
for i in range(8):
chan = NI6733_ao_voltage_channel(self,
'analog_output_s{:d}c{:d}'.format(slot, i),
slot_num = slot,
channum = i)
self.add_submodule('s{:d}c{:d}'.format(slot, i), chan)
    ###########################
    # Functions for parameters
    ###########################
    def get_ms2wait(self):
        """Return the wait time per minimum-resolution DAC step, in ms."""
        return self._ms2wait

    def set_ms2wait(self, val:float):
        """Set the wait time per minimum-resolution DAC step, in ms."""
        self._ms2wait = val

    def get_fast_sequence_divider(self):
        """Return the time between fast-sequence steps, in ms."""
        return self._fast_sequence_divider

    def set_fast_sequence_divider(self, val:float):
        """Set the fast-sequence step time (ms); invalidates the prepared sequence."""
        self._fast_sequence_divider = val
        self._fs_ready = False

    def get_fs_pts(self):
        """Return the number of points in the fast sequence."""
        return self._fs_pts

    def set_fs_pts(self, val:int):
        """Set the fast-sequence length; invalidates the prepared sequence."""
        self._fs_pts = val
        self._fs_ready = False
###########################
# Utility functions
###########################
def move_all_dac(self, v:float = 0.0):
"""
Move all the dac to the given value.
Scaling factor for each dac is not applied in this operation.
Parameters
----------
v : float, optional
Target voltage in volt. The default is 0.0.
Returns
-------
None.
"""
for s in self.slots:
for i in range(8):
chan = getattr(self, 's{:d}c{:d}'.format(s, i))
chan._target_val = v
self.DAC_move()
def init2zero(self):
"""
Initialise all the DAC values to be 0.0 V after moving once to -10 mV.
"""
self.move_all_dac()(-0.01)
self.move_all_dac()(0.0)
    def load_current_values_from_database(self,
                                          db_path:str = './experiments.db',
                                          run_id:int = None,
                                          ):
        """
        Load current DAC values from the specified database and run_id.
        If run_id is not given, we load from the latest run_id.

        For each channel submodule only the software copy ``_current_val``
        is updated; no physical DAC movement is performed here.

        Args:
            db_path (str): Path to the qcodes database.
            run_id (int): run_id of the recovered run. None selects the
                last run in the database.
        """
        # Connect to the database
        conn = connect(db_path)
        if run_id == None:
            # Get last run id
            run_id = get_last_run(conn)
        # Load dataset
        dataset = load_by_id(run_id)
        # Whether return to initial sweep position after the measurment or not
        return2initial = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['return2initial']['value']
        # Collect information from sweeping parameters: flatten the nested
        # {dependent: {param_name: array}} structure, keeping the first
        # occurrence of each parameter name.
        data = dataset.get_parameter_data()
        data_dict = dict()
        for key in data.keys():
            d = data[key]
            for k in d.keys():
                if not k in data_dict.keys():
                    data_dict[k] = d[k]
        # Check whether measurement was completed or not from data size.
        # NOTE(review): 'd' and 'k' deliberately leak from the loop above,
        # so ar_size is the size of the last parameter array seen.
        ar_size = d[k].size
        fast_sweep = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['fast_sweep']['value']
        sweep_dims = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['sweep_dims']['value']
        if fast_sweep:
            first_dim_size = dataset.snapshot['station']['instruments'][self.name]['parameters']['fs_pts']['value']
        else:
            first_dim_size = 1
        # Expected number of points = product of sweep dims x fast-sweep length
        total_pts = int(functools.reduce(operator.mul, sweep_dims, 1) * first_dim_size)
        if not ar_size == total_pts:
            completed = False
        else:
            completed = True

        # Set current value of each dac from static values
        for sm in dataset.snapshot['station']['instruments'][self.name]['submodules'].keys():
            # Get raw value of each dac (snapshot fallback when the channel
            # was not swept during the run)
            cv = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['raw_value']
            chan = getattr(self, sm)
            sm_fullname = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['full_name']
            if sm_fullname in data_dict.keys():
                # Swept channel: recover either the initial value (completed
                # run that returned to start) or the last recorded value.
                if return2initial and completed:
                    cv = data_dict[sm_fullname][0]
                else:
                    cv = data_dict[sm_fullname][-1]
            chan._current_val = cv
        conn.close()
    def init_tasks(self):
        """
        Close every task that is currently open, then create it again.

        The task dicts store the nidaqmx.Task under the int slot key and an
        "is closed" flag under the string slot key; a flag value of False
        means the task is open and must be closed before re-creation.
        """
        if not self.do_task_isClosed:
            self.do_task.close()
            self.do_task = nidaqmx.Task()
        if not self.ctr_task_isClosed:
            self.ctr_task.close()
            self.ctr_task = nidaqmx.Task()
        for slot in self.slots:
            if not self.write_task['{:d}'.format(slot)]:
                self.write_task[slot].close()
                self.write_task[slot] = nidaqmx.Task()
            if not self.fast_seq_task['{:d}'.format(slot)]:
                self.fast_seq_task[slot].close()
                self.fast_seq_task[slot] = nidaqmx.Task()
###################################
# Base functions for voltage output
###################################
def ctr_setup(self,
task:nidaqmx.Task = None,
slot_num:int = 3,
no_of_samples:int = None,
trigger_delay:int = 0.0,
):
"""
This function setup a counter output for the counter 0 for the given slot.
Args:
task(nidaqmx.Task): Task counter is set.
slot_num(int): Slot number of the trigger out
no_of_samples (int): Number of trigger generated. If it is None, a trigger is generated continuously.
trigger_delay (int): Delay of the counter in seconds.
"""
# Create counter output channel
task.co_channels.add_co_pulse_chan_freq('{}Slot{:d}/ctr0'.format(self.device_name, slot_num),
units = nidaqmx.constants.FrequencyUnits.HZ,
idle_state = nidaqmx.constants.Level.LOW,
initial_delay = trigger_delay,
freq = 1000.0/self._fast_sequence_divider,
duty_cycle = 0.5,
)
# Set sample generation mode and number of samples to be generated.
# Comment: Incrase 'samps_per_chan' by 3 since some trigger is missed by analog output.
task.timing.cfg_implicit_timing(samps_per_chan = no_of_samples+3,
sample_mode = nidaqmx.constants.AcquisitionType.FINITE)
    def do_setup(self,
                 task:nidaqmx.Task = None,
                 slot_num:int = 3,
                 port_num:int = 0,
                 line_num:int = 0,
                 initial_delay:int = 1,
                 trigger_length:int = 2,
                 sample_clk_src:str = '/PXI2Slot3/Ctr0InternalOutput',
                 ):
        """
        This function setup digital output task used to trigger ADC.

        Parameters
        ----------
        task : nidaqmx.Task, optional
            Task, where the digital output channel is set.
        slot_num : int, optional
            Slot number. The default is 3.
        port_num : int, optional
            Port number of digital output. The default is 0.
        line_num : int, optional
            Line number of digital output. The default is 0.
        initial_delay : int, optional
            Initial delay of the generated start trigger in a unit of a
            clock sample. The default is 1.
        trigger_length : int, optional
            Length of the trigger in a unit of a clock sample. The default is 2.
        sample_clk_src : str, optional
            Sample clock source. The default is '/PXI2Slot3/Ctr0InternalOutput'.

        Returns
        -------
        None.
        """
        # Calculate number of points for the trigger (10 extra low samples
        # of padding after the pulse)
        points = initial_delay + trigger_length + 10
        # Create digital output channel
        task.do_channels.add_do_chan(lines = '{}Slot{:d}/port{:d}/line{:d}'.format(self.device_name, slot_num, port_num, line_num))
        # Setup timing; the pattern is clocked out on the external source
        # (the counter's internal output), so the rate value is nominal.
        task.timing.cfg_samp_clk_timing(rate = 100000,
                                        source = sample_clk_src,
                                        active_edge=nidaqmx.constants.Edge.RISING,
                                        sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
                                        samps_per_chan = points
                                        )
        # Write array information of the pulse: high (bit of line_num set)
        # for trigger_length samples after initial_delay, low elsewhere.
        writer = DigitalSingleChannelWriter(task.out_stream)
        ar = np.zeros((points,), dtype=np.uint8)
        ar[initial_delay:initial_delay+trigger_length] = 2 ** line_num
        writer.write_many_sample_port_byte(ar)
def set_sample_clock(self,
task:nidaqmx.Task = None,
no_of_samples:int=None,
sample_rate:float=500.0,
sample_clk_src:str=None,
):
"""
This function setup the sample clock timing.
Parameters
----------
task : nidaqmx.Task, optional
task, where the sample clock to be set.
no_of_samples : int, optional
Number of samples (data points) to be generated. If it is None, clock mode becomes
continuous.
sample_rate : float, optional
Sampling rate in Hz. The default is 500.0 Hz.
samle_clk_src : str, optional
Sample clock source. We can set extra source. If it is None,
we use a default onboard clock.
Returns
-------
None.
"""
if sample_clk_src == None:
sample_clk_src = 'OnboardClock'
task.timing.cfg_samp_clk_timing(sample_rate,
source = sample_clk_src,
active_edge=nidaqmx.constants.Edge.RISING,
sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
samps_per_chan = no_of_samples)
    def DAC_move(self,
                 task_preparation:bool=True,
                 clear_task:bool=True):
        """
        Move every DAC channel whose target value has been set (via the
        channel 'cv' parameter) with a synchronous linear ramp, then sync
        the channels' software copies and clear their targets.

        Args:
            task_preparation (bool): Whether to (re)create the analog output
                task and sample clock. Forced to True internally when the
                ramp length changes.
            clear_task (bool): Whether to close the task after the movement.
        """
        # Collect the channels that have a pending target, per slot, and
        # find the largest voltage excursion (it fixes the ramp length).
        move_slot_list = list()
        move_channel_list = dict()
        move_list = dict()
        largest_move = 0.0
        for slot in self.slots:
            move_channel_list[slot] = list()
            move_list[slot] = list()
            for i in range(8):
                chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
                if not chan._target_val == None:
                    move_channel_list[slot].append(chan)
                    move_slot_list.append(slot)
                    cv = chan._current_val # Current DAC value
                    tv = chan._target_val # Target DAC value
                    move_list[slot].append((cv, tv)) # Keep the value
                    delta = abs(tv - cv) # Size of the movement
                    if delta > largest_move:
                        # Check largest movement to determine number of points.
                        largest_move = delta
        # Convert move_slot_list to set (deduplicate slots)
        move_slot_list = set(move_slot_list)
        # Calculate points: one ramp step per 16-bit LSB of the 20 V output
        # range (20 V / 2**16), rounded down to an even number, minimum 2.
        points = max(2, int((largest_move/(20/2.0**16)//2.0)*2.0))
        # Keep points and re-define task when it changes
        if not self._move_points == points:
            self._move_points = points
            task_preparation = True
        # Create array for movement: one row per moving channel, linear
        # ramp from current to target value.
        ar = dict()
        for slot in move_slot_list:
            ar_list = list()
            for v in move_list[slot]:
                ar_list.append(np.linspace(v[0],v[1], self._move_points,dtype=float))
            ar[slot] = np.vstack(tuple(ar_list))

        if task_preparation:
            # Clear task (It takes a few ms.) -- close it only if the
            # string-keyed "is closed" flag says it is still open.
            for slot in move_slot_list:
                if not self.write_task['{:d}'.format(slot)]:
                    self.write_task[slot].close()
                self.write_task[slot] = nidaqmx.Task()
                self.write_task['{:d}'.format(slot)] = False
                # Create analog output channel in the task
                for chan in move_channel_list[slot]:
                    self.write_task[slot].ao_channels.add_ao_voltage_chan(physical_channel = '{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
                                                                          min_val = chan.min_val(),
                                                                          max_val = chan.max_val(),
                                                                          units = nidaqmx.constants.VoltageUnits.VOLTS)
                # Setup sample clock (one sample every ms2wait ms)
                self.set_sample_clock(task = self.write_task[slot],
                                      no_of_samples = self._move_points,
                                      sample_rate = 1000.0/self.ms2wait(),
                                      sample_clk_src = None,)
        writer = dict()
        for slot in move_slot_list:
            # Output voltage: preload the ramp into the task buffer
            writer[slot] = AnalogMultiChannelWriter(self.write_task[slot].out_stream)
            writer[slot].write_many_sample(ar[slot])
        # Start all slots first, then wait for each so they run together.
        for slot in move_slot_list:
            self.write_task[slot].start()
        for slot in move_slot_list:
            self.write_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
            self.write_task[slot].stop()
        if clear_task:
            # Clear task (It takes a few ms.) and mark it closed
            for slot in move_slot_list:
                self.write_task[slot].close()
                self.write_task['{:d}'.format(slot)] = True
        # Update information for the moved channels: sync the software copy
        # and clear the pending target.
        for slot in move_slot_list:
            for chan in move_channel_list[slot]:
                chan._current_val = chan._target_val
                chan._target_val = None
    def prepare_fast_move(self):
        """
        Prepare the fast-sequence hardware: collect the channels that have
        the 'fs' flag enabled, then (re)create and configure the counter
        task (step clock) and the digital-output task (ADC start trigger).
        Sets _fs_ready = True on success; the actual ramp is executed by
        DAC_fast_move().
        """
        # Rebuild the per-slot lists of fast-sequence channels and their
        # (start, start + delta) voltage pairs.
        self._fast_move_slot_list = list()
        self._fast_move_channel_list = dict()
        self._fast_move_list = dict()
        for slot in self.slots:
            self._fast_move_channel_list[slot] = list()
            self._fast_move_list[slot] = list()
            for i in range(8):
                chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
                if chan.fs():
                    self._fast_move_slot_list.append(slot)
                    self._fast_move_channel_list[slot].append(chan)
                    v0 = chan._current_val
                    v1 = chan._current_val + chan._fast_sequence_delta
                    self._fast_move_list[slot].append((v0, v1))
        # Convert fast_move_slot_list to set (deduplicate slots).
        self._fast_move_slot_list = set(self._fast_move_slot_list)

        # Clear the counter task (only if still open) and recreate it
        if not self.ctr_task_isClosed:
            self.ctr_task.close()
        self.ctr_task = nidaqmx.Task()
        self.ctr_task_isClosed = False
        # Setup counter: one pulse per fast-sequence point, on ctr0 of the
        # first configured slot.
        self.ctr_setup(task = self.ctr_task,
                       slot_num = self.slots[0],
                       no_of_samples = self.fs_pts(),
                       trigger_delay = 0.0,
                       )
        # Clear the digital out task (only if still open) and recreate it
        if not self.do_task_isClosed:
            self.do_task.close()
        self.do_task = nidaqmx.Task()
        self.do_task_isClosed = False
        # Setup digital output: single start-trigger pulse, clocked by the
        # counter's internal output.
        self.do_setup(task = self.do_task,
                      slot_num = self.slots[0],
                      port_num = 0,
                      line_num = 0,
                      initial_delay = 0,
                      trigger_length = 1,
                      sample_clk_src = '/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),
                      )
        self._fs_ready = True
def DAC_fast_move(self):
"""
This function makes fast sequence of the DAC.
--> This function gets a problem when we use in a QuCoDeS. It is not possible
to use DAC_move task and DAC_fast move task at the same time.
"""
if not self._fs_ready:
raise ValueError('Fase sequence is not ready. Please perform "prepare_fast_move".')
# Number of array points has to be even. I adjust for that.
if int(self.fs_pts()%2) == 0:
points = self.fs_pts()+1
else:
points = self.fs_pts()
# Set up channels
for slot in self._fast_move_slot_list:
# Define fast sequence task
if not self.fast_seq_task['{:d}'.format(slot)]:
self.fast_seq_task[slot].close()
self.fast_seq_task[slot] = nidaqmx.Task()
self.fast_seq_task['{:d}'.format(slot)] = False
# Create analog output channel in the task
for chan in self._fast_move_channel_list[slot]:
self.fast_seq_task[slot].ao_channels.add_ao_voltage_chan(physical_channel = '{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
min_val = chan.min_val(),
max_val = chan.max_val(),
units = nidaqmx.constants.VoltageUnits.VOLTS)
# Setup sample clock
self.set_sample_clock(task = self.fast_seq_task[slot],
no_of_samples=points+1,
sample_rate=1000.0/self._fast_sequence_divider,
sample_clk_src='/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),)
ar_dict = dict()
writer = dict()
for slot in self._fast_move_slot_list:
# Create array for fast movement
ar_list = list()
for chan in self._fast_move_channel_list[slot]:
v0 = chan._current_val
v1 = chan._current_val + chan._fast_sequence_delta
ar = np.empty((points+1,), dtype=float)
ar[0:self.fs_pts()] = np.linspace(v0, v1, self.fs_pts(), dtype=float)
ar[self.fs_pts()] = v0
if int(self.fs_pts()%2) == 0:
ar[self.fs_pts()+1] = v0
ar_list.append(ar)
ar_dict[slot] = np.vstack(tuple(ar_list))
# Output voltage
writer[slot] = AnalogMultiChannelWriter(self.fast_seq_task[slot].out_stream)
writer[slot].write_many_sample(ar_dict[slot])
for slot in self._fast_move_slot_list:
self.fast_seq_task[slot].start()
self.do_task.start()
self.ctr_task.start()
for slot in self._fast_move_slot_list:
self.fast_seq_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.fast_seq_task[slot].stop()
self.fast_seq_task[slot].close()
self.fast_seq_task['{:d}'.format(slot)] = True
self.do_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.do_task.stop()
self.ctr_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.ctr_task.stop()
if __name__ == '__main__':
    # Manual hardware bench test: build the driver and time a fast-sequence
    # run on a PXI chassis. Requires real NI hardware — not a unit test.
    t = time.time()
    dac = NI6733(name = 'dac',
                 device_name = 'PXI2',
                 slots=[3,4,],
                 ms2wait = 2.0,
                 fast_sequence_divider = 2.0,
                 fs_pts = 201,
                 )
    # # DAC movement test
    # dac.s3c0.cv(-0.1)
    # dac.s4c0.cv(-0.3)
    # dac.DAC_move(task_preparation = True,
    # clear_task = False)
    # dac.s3c0.cv(-0.3)
    # dac.s4c0.cv(-0.5)
    # dac.DAC_move(task_preparation = False,
    # clear_task = False)
    # dac.s3c0.cv(-0.5)
    # dac.s4c0.cv(-0.7)
    # dac.DAC_move(task_preparation = False,
    # clear_task = False)
    # dac.s3c0.cv(0.0)
    # dac.s4c0.cv(0.0)
    # dac.DAC_move(task_preparation = False,
    # clear_task = True)
    # # Trigger test
    # dac.ctr_setup(slot_num = 3,
    # no_of_samples = 20,
    # trigger_delay = 0.1)
    # dac.ctr_task.start()
    # dac.ctr_task.wait_until_done()
    # # time.sleep(5)
    # dac.ctr_task.stop()
    # Fast sequence test: ramp slot 3 / channel 0 down by 1 V over 201 points.
    dac.fs_pts(201)
    dac.fs_div(2.0)
    dac.s3c0.fs(True)
    dac.s3c0.fs_delta(-1.0)
    dac.prepare_fast_move()
    dac.DAC_fast_move()
print('Execution time {:f}'.format(time.time() - t)) | 39.814527 | 167 | 0.516207 | 28,653 | 0.933414 | 0 | 0 | 0 | 0 | 0 | 0 | 8,872 | 0.289018 |
3792acb568ef9d07771f1dfc4d370f93d704c7e2 | 402 | py | Python | v2_trip/urls.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | v2_trip/urls.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | v2_trip/urls.py | ruslan-ok/ruslan | fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views

# URL namespace for reverse() lookups, e.g. reverse('v2_trip:main').
app_name = 'v2_trip'

# Route table for the v2_trip app: main list, item form, person/trip pages,
# and a generic entity view keyed by entity name + primary key.
urlpatterns = [
    path('', views.main, name='main'),
    path('<int:pk>/', views.item_form, name='item_form'),
    path('persons/', views.go_persons, name='go_persons'),
    path('trips/', views.go_trips, name='go_trips'),
    path('entity/<str:name>/<int:pk>/', views.trip_entity, name = 'trip_entity'),
]
| 33.5 | 81 | 0.619403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.300995 |
3792efaebf3dc5a8b5c922f66b2df8883d7ff1ec | 1,055 | py | Python | Sorting Algorithms/quick_sort.py | Divyamop/Python-DSA | 43cc8ffddd632ba07ef91ac4d4daeede949341c6 | [
"MIT"
] | 13 | 2021-10-02T09:25:07.000Z | 2022-01-30T17:49:52.000Z | Sorting Algorithms/quick_sort.py | Divyamop/Python-DSA | 43cc8ffddd632ba07ef91ac4d4daeede949341c6 | [
"MIT"
] | 14 | 2021-10-01T12:58:14.000Z | 2021-10-05T15:42:52.000Z | Sorting Algorithms/quick_sort.py | Divyamop/Python-DSA | 43cc8ffddd632ba07ef91ac4d4daeede949341c6 | [
"MIT"
] | 32 | 2021-10-01T12:40:00.000Z | 2021-10-14T05:09:14.000Z | """
Quick sort is a divide and conquer algorithm
Steps:
1. We first select an element, which we call the pivot element. We can choose any element as the pivot,
but for consistency and performance purposes we select the middle element of the array as the pivot element.
2. Then we move all the elements lower than the pivot to the left of the pivot and all the elements higher than the pivot to its right.
3. Then we recursively apply the above 2 steps separately to each of the sub-arrays of
elements smaller and larger than the last pivot.
"""
Scanner sc=new Scanner(System.in);
int n=sc.nextInt();
int st=1;int sp=n/2;
for(int i=1;i<=n;i++) {
for(int j=1;j<=sp;j++) {
if(i==n/2+1) {
System.out.print("* ");
}
else {
System.out.print(" ");
}
}
for(int j=1;j<=st;j++) {
System.out.print("* ");
}
if(i<=n/2) {
st++;
}
else {
st--;
}
System.out.println();
}
}
| 25.119048 | 111 | 0.641706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 670 | 0.635071 |
3792f978ea808a632abc7ad8cfbb0bfad7875985 | 209 | py | Python | my_dataclasses/plays.py | GudniNatan/GSKI-PA6 | a0f9a38bc0d2f6710f803a77276e6a76cd6f4471 | [
"MIT"
] | null | null | null | my_dataclasses/plays.py | GudniNatan/GSKI-PA6 | a0f9a38bc0d2f6710f803a77276e6a76cd6f4471 | [
"MIT"
] | null | null | null | my_dataclasses/plays.py | GudniNatan/GSKI-PA6 | a0f9a38bc0d2f6710f803a77276e6a76cd6f4471 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from my_dataclasses.member import Member
from my_dataclasses.sport import Sport
@dataclass(order=True, frozen=True)
class Plays(object):
    """Immutable, orderable record that a club member plays a given sport."""
    member: Member  # the member who plays
    sport: Sport  # the sport being played
| 20.9 | 40 | 0.794258 | 56 | 0.267943 | 0 | 0 | 92 | 0.440191 | 0 | 0 | 0 | 0 |
379367e9f1febecd8e0efe7f49a5b29af96989dd | 2,747 | py | Python | src/methods/linear_scalarization_method.py | nbingo/sMOOth | aacdc5d24b931e534e984681923ec74f1103ca2f | [
"MIT"
] | null | null | null | src/methods/linear_scalarization_method.py | nbingo/sMOOth | aacdc5d24b931e534e984681923ec74f1103ca2f | [
"MIT"
] | null | null | null | src/methods/linear_scalarization_method.py | nbingo/sMOOth | aacdc5d24b931e534e984681923ec74f1103ca2f | [
"MIT"
] | null | null | null | import time
import torch
from torch.distributions.dirichlet import Dirichlet
from detectron2.engine.train_loop import SimpleTrainer
class LinearScalarizationTrainer(SimpleTrainer):
    """
    A simple trainer for the most common type of task:
    single-cost single-optimizer single-data-source iterative optimization,
    optionally using data-parallelism.

    The multiple task losses returned by the model are collapsed into a
    single scalar via a dot product with ``preference_vector`` (linear
    scalarization), which is then backpropagated.

    All other tasks during training (checkpointing, logging, evaluation,
    LR schedule) are maintained by hooks registered via
    :meth:`TrainerBase.register_hooks`. For anything fancier, subclass
    TrainerBase and implement your own `run_step`, or write your own
    training loop.
    """

    def __init__(self, model, data_loader, optimizer, preference_vector: torch.Tensor = torch.ones(2) / 2):
        """
        Args:
            model: a torch Module. Takes a data from data_loader and returns a
                dict of losses.
            data_loader: an iterable. Contains data to be used to call model.
            optimizer: a torch optimizer.
            preference_vector: weights applied to the individual losses; must
                have one entry per loss the model returns. The default tensor
                is created once at import time and is never mutated here.
        """
        super().__init__(model, data_loader, optimizer)
        self.preference_vector = preference_vector

    def run_step(self):
        """
        Run one iteration: fetch data, forward, scalarize losses, backward,
        log metrics, and step the optimizer.
        """
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        # If you want to do something with the data, wrap the dataloader.
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start
        # Single forward pass. (Bug fix: the original called self.model(data)
        # twice, doubling the cost and logging losses from a different pass
        # than the one actually backpropagated.)
        loss_dict = self.model(data)
        # Linear scalarization: weighted sum of the individual losses.
        losses = torch.matmul(torch.stack(list(loss_dict.values())), self.preference_vector)
        loss_dict['total_loss'] = losses
        # To accumulate gradients or similar, wrap the optimizer with a
        # custom `zero_grad()` method.
        self.optimizer.zero_grad()
        losses.backward()
        self._write_metrics(loss_dict, data_time)
        # For gradient clipping/scaling, wrap the optimizer with a custom
        # `step()` method (suboptimal — see https://arxiv.org/abs/2006.15704 Sec 3.2.4).
        self.optimizer.step()
| 36.144737 | 100 | 0.662541 | 2,610 | 0.950127 | 0 | 0 | 0 | 0 | 0 | 0 | 1,762 | 0.641427 |
3793ea87d43c2eaa6c5930add1e9632bc31a7439 | 968 | py | Python | test/proxyhttp_test.py | sancau/ivelum_test_task | c1fe0cbb2794e76f86a030a980eb16aa6a714e31 | [
"MIT"
] | null | null | null | test/proxyhttp_test.py | sancau/ivelum_test_task | c1fe0cbb2794e76f86a030a980eb16aa6a714e31 | [
"MIT"
] | null | null | null | test/proxyhttp_test.py | sancau/ivelum_test_task | c1fe0cbb2794e76f86a030a980eb16aa6a714e31 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import falcon
from proxyhttp import Proxy
from transformer import Transformer
def test_api_runs(client):
    """Smoke test: the proxy's root URL answers with HTTP 200."""
    resp = client.simulate_get('/')
    assert resp.status == falcon.HTTP_200
def test_proxy_middleware_instance_initializes_correctly():
    """Proxy should store its target and build a Transformer for that same domain."""
    p = Proxy(target='target')
    assert p.target_domain == 'target'
    assert isinstance(p.transformer, Transformer)
    assert p.transformer.target_domain == p.target_domain
def test_app_returns_expected_data_with_not_existing_url(client):
    """A missing upstream page should surface the 404 error and the attempted URL."""
    resp = client.simulate_get('/something_that_not_exists')
    content = resp.content.decode('utf-8')
    assert 'Exception: 404 Client Error' in content
    assert 'https://habrahabr.ru/something_that_not_exists' in content
def test_app_returns_expected_data_with_existing_url(client):
    """The proxied front page should come back with its original title intact.

    NOTE(review): depends on the live upstream site's markup — brittle by design.
    """
    resp = client.simulate_get('/')
    content = resp.content.decode('utf-8')
    assert '<title>Лучшие™ публикации за сутки / Хабрахабр</title>' in content
| 30.25 | 78 | 0.753099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.253493 |
3794ed2744f1e9cb2f1c9f008aeaf5a48cae917c | 79 | py | Python | singletons/mail.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | singletons/mail.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | singletons/mail.py | kwestpharedhat/quay | a0df895005bcd3e53847046f69f6a7add87c88fd | [
"Apache-2.0"
] | null | null | null | from flask_mail import Mail
from singletons.app import _app

# Module-level singleton: one shared Flask-Mail instance bound to the app.
mail = Mail(_app)
| 15.8 | 31 | 0.797468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
37959b025855b0a8b6550368174af7129e650d86 | 607 | py | Python | system-test/testnet-automation-json-parser.py | Flawm/solana | 551c24da5792f4452c3c555e562809e8c9e742e5 | [
"Apache-2.0"
] | 7,843 | 2018-03-27T22:56:27.000Z | 2022-03-31T17:37:41.000Z | system-test/testnet-automation-json-parser.py | Flawm/solana | 551c24da5792f4452c3c555e562809e8c9e742e5 | [
"Apache-2.0"
] | 18,799 | 2018-03-28T14:01:39.000Z | 2022-03-31T23:44:12.000Z | system-test/testnet-automation-json-parser.py | Flawm/solana | 551c24da5792f4452c3c555e562809e8c9e742e5 | [
"Apache-2.0"
] | 1,962 | 2018-03-30T17:02:41.000Z | 2022-03-31T19:53:09.000Z | #!/usr/bin/env python3
import sys, json, argparse

# --empty_error suppresses the human-readable error lines when expected data
# is absent, so callers that only want metric output see nothing extra.
parser = argparse.ArgumentParser()
parser.add_argument("--empty_error", action="store_true", help="If present, do not print error message")
args = parser.parse_args()

# Reads a JSON query response from stdin (looks like an InfluxDB-style
# results/series structure — confirm against the producing CURL request).
data=json.load(sys.stdin)
if 'results' in data:
    for result in data['results']:
        if 'series' in result:
            # Print "<second column name>: <its value in the first row>".
            print(result['series'][0]['columns'][1] + ': ' + str(result['series'][0]['values'][0][1]))
        elif not args.empty_error:
            print("An expected result from CURL request is missing")
elif not args.empty_error:
    print("No results returned from CURL request")
| 33.722222 | 104 | 0.682043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.395387 |
37964fe5ef397296275522b31af654802f9c7a91 | 3,337 | py | Python | perfkitbenchmarker/providers/ibmcloud/flags.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 3 | 2018-04-28T13:06:14.000Z | 2020-06-09T02:39:44.000Z | perfkitbenchmarker/providers/ibmcloud/flags.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 1 | 2018-03-15T21:01:27.000Z | 2018-03-15T21:01:27.000Z | perfkitbenchmarker/providers/ibmcloud/flags.py | Nowasky/PerfKitBenchmarker | cfa88e269eb373780910896ed4bdc8db09469753 | [
"Apache-2.0"
] | 6 | 2019-06-11T18:59:57.000Z | 2021-03-02T19:14:42.000Z | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on IBM Cloud."""
from absl import flags

# Flag registration order is unchanged from the original file; only the two
# misspelled help strings ('voume') are corrected below.
flags.DEFINE_string('ibmcloud_azone', None,
                    'IBMCloud internal DC name')
flags.DEFINE_integer('ibmcloud_volume_iops', 20000,
                     'Desired volume IOPS.')
flags.DEFINE_integer('ibmcloud_volume_bandwidth', None,
                     'Desired volume bandwidth in Mbps.')
flags.DEFINE_boolean('ibmcloud_volume_encrypted', False,
                     'Enable encryption on volume creates.')
flags.DEFINE_string('ibmcloud_image_username', 'root',
                    'Ssh username for cloud image.')
flags.DEFINE_integer('ibmcloud_polling_delay', 2,
                     'Delay between polling attempts in seconds.')
flags.DEFINE_integer('ibmcloud_timeout', 600,
                     'timeout in secs.')
flags.DEFINE_integer('ibmcloud_boot_disk_size', 10,
                     'boot volume disk size.')
flags.DEFINE_boolean('ibmcloud_debug', False,
                     'debug flag.')
flags.DEFINE_boolean('ibmcloud_resources_keep', False,
                     'keep resources.')
flags.DEFINE_string('ibmcloud_volume_profile', 'custom',
                    'volume profile')
flags.DEFINE_string('ibmcloud_bootvol_encryption_key', None,
                    'boot volume encryption key crn')
flags.DEFINE_string('ibmcloud_datavol_encryption_key', None,
                    'data volume encryption key crn')
flags.DEFINE_string('ibmcloud_vpcid', None,
                    'IBM Cloud vpc id')
flags.DEFINE_string('ibmcloud_subnet', None,
                    'primary subnet id')
flags.DEFINE_string('ibmcloud_networks', None,
                    'additional network ids, comma separated')
flags.DEFINE_string('ibmcloud_prefix', 'perfkit',
                    'resource name prefix')
flags.DEFINE_string('ibmcloud_rgid', None,
                    'Resource Group id for the account.')
# Typo fix: help text previously read 'boot voume iops'.
flags.DEFINE_integer('ibmcloud_boot_volume_iops', 3000,
                     'boot volume iops')
# Typo fix: help text previously read 'boot voume size in GB'.
flags.DEFINE_integer('ibmcloud_boot_volume_size', 0,
                     'boot volume size in GB')
flags.DEFINE_string('ibmcloud_pub_keyid', None,
                    'rias public sshkey id')
flags.DEFINE_integer('ibmcloud_network_mtu', 9000,
                     'MTU size on network interfaces.')
flags.DEFINE_integer('ibmcloud_subnets_extra', 0,
                     'extra subnets to lookup')
flags.DEFINE_integer('ibmcloud_vdisks_extra', 0,
                     'extra disks to create')
flags.DEFINE_string('ibmcloud_image_info', None,
                    'image info in json formatted file')
flags.DEFINE_boolean('ibmcloud_encrypted_image', False,
                     'encrypted image.')
| 35.126316 | 75 | 0.661073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,967 | 0.589452 |
37992a001eabfa8bb8a5b76b3449dc0f3bbdc33d | 30,183 | py | Python | belleflopt/optimize.py | ucd-cws/eflows_optimization | 2eb9f13a042ab81541488358ad0724555a5d57fc | [
"MIT"
] | 2 | 2020-04-19T04:05:51.000Z | 2021-04-19T02:47:40.000Z | belleflopt/optimize.py | ucd-cws/eflows_optimization | 2eb9f13a042ab81541488358ad0724555a5d57fc | [
"MIT"
] | 7 | 2019-08-31T05:57:30.000Z | 2019-11-27T23:58:13.000Z | belleflopt/optimize.py | ucd-cws/eflows_optimization | 2eb9f13a042ab81541488358ad0724555a5d57fc | [
"MIT"
] | null | null | null | import logging
import random
import collections
import os
from itertools import chain
import numpy
import pandas
from platypus import Problem, Real
from platypus.operators import Generator, Solution
from matplotlib import pyplot as plt
from belleflopt import models
from belleflopt import economic_components
from eflows_optimization.local_settings import PREGENERATE_COMPONENTS
log = logging.getLogger("eflows.optimization")
random.seed = 20200224
class SimpleInitialFlowsGenerator(Generator):
    """
    Seeds every initial solution with one fixed allocation proportion.

    Each decision variable of each generated solution is set to the constant
    proportion supplied at construction time.
    """

    def __init__(self, proportion):
        self.proportion = proportion
        super(SimpleInitialFlowsGenerator, self).__init__()

    def generate(self, problem):
        seeded = Solution(problem)
        # Same constant for every decision variable of the solution.
        seeded.variables = [self.proportion] * problem.decision_variables
        return seeded
class InitialFlowsGenerator(Generator):
    """
    Seeds each initial solution with a single random allocation proportion.

    One draw in [0.6, 1.0) is replicated across every decision variable, so
    each starting solution reserves most of the water for the environment.
    """

    def __init__(self):
        super(InitialFlowsGenerator, self).__init__()

    def generate(self, problem):
        candidate = Solution(problem)
        # One shared draw, biased toward the environmental end of the range.
        shared_proportion = (random.random() * 0.4) + 0.6
        candidate.variables = [shared_proportion] * problem.decision_variables
        return candidate
class SparseList(list):
    """
    A list that grows on demand (via https://stackoverflow.com/a/1857860/587938).

    Assigning past the end pads the gap with ``None``; reading past the end
    returns ``None`` instead of raising ``IndexError``. Handy for indexed
    accumulation and sums where most positions stay empty.
    """

    def __setitem__(self, index, value):
        # Pad with None up to the target index before delegating to list.
        shortfall = index - len(self) + 1
        if shortfall > 0:
            self.extend([None] * shortfall)
        super().__setitem__(index, value)

    def __getitem__(self, index):
        # Out-of-range reads yield None rather than raising.
        try:
            return super().__getitem__(index)
        except IndexError:
            return None
class ModelStreamSegment(object):
    """
    Lightweight tree node for a single stream segment.

    The network of these nodes is built once at model startup so that daily
    flows can be routed through the network without repeated Django lookups.
    Each node keeps its Django ``stream_segment`` attached (so benefit
    evaluation needs no extra query), plus ``downstream`` / ``upstream``
    links to neighboring nodes, wired up by ``StreamNetwork.build``.

    Decision variables arrive as ``eflows_proportion`` — one value per day of
    the water year giving the share of available water reserved in-stream for
    environmental flows; the complement is construed as extracted for
    economic use. Water arriving from upstream is accumulated recursively and
    cached per evaluation round (see ``upstream_available`` / ``reset``).
    """

    # Class-level defaults. Instances rebind (never mutate in place) these
    # arrays, so the shared objects are safe; _local_available is replaced
    # when __init__ runs get_local_flows().
    annual_allocation_proportion = None
    _local_available = numpy.zeros((365,))
    eflows_proportion = numpy.zeros((365,))

    def __init__(self, stream_segment, comid, network):
        """
        :param stream_segment: Django StreamSegment record for this reach
        :param comid: identifier (NHD COMID) of this segment
        :param network: owning StreamNetwork supplying model_run/water_year
        :raises RuntimeError: when the segment has no daily flow records
        """
        self.comid = comid
        self.downstream = None  # ModelStreamSegment just downstream (set by build)
        self.upstream = []  # ModelStreamSegment objects just upstream (set by build)
        self._upstream_available = None  # per-round cache; see upstream_available
        self.stream_segment = stream_segment
        self.full_network = network
        self.get_local_flows()

    def get_local_flows(self):
        """
        Load this segment's local (incremental) daily flows for the water year.

        Segments with no flow data at all are removed from the model by
        raising RuntimeError (caught by StreamNetwork.build); a warning
        explains the connectivity risk of the removal.
        """
        self._local_available = self._get_local_flows(use_property="estimated_local_flow")
        if self._local_available.shape[0] == 0:
            log.warning("No flows for segment {} - Removing from model because leaving it in means the model may fail! It may still fail if this removal results in a loss of connectivity".format(self.comid))
            raise RuntimeError("No flows for segment {}. Removing from model".format(self.comid))

    def _get_local_flows(self, use_property="estimated_local_flow"):
        """
        Return a 1-D float array of daily flows ordered by water-year day.

        :param use_property: DailyFlow field to read (local vs. total flow)
        """
        local_flows_objects = models.DailyFlow.objects.filter(model_run=self.full_network.model_run,
                                                              water_year=self.full_network.water_year,
                                                              stream_segment=self.stream_segment) \
            .order_by("water_year_day")
        return numpy.array([float(getattr(day_flow, use_property)) for day_flow in local_flows_objects])

    @property
    def eflows_benefit(self):
        # Environmental benefit of the in-stream water, scored by the Django
        # segment's benefit model (annual score, collapsed with numpy.max).
        return self.stream_segment.get_benefit_for_timeseries(self.eflows_water, daily=False, collapse_function=numpy.max)

    @property
    def eflows_water(self):
        # Daily volume reserved in-stream for environmental flows.
        return self.eflows_proportion * self.local_available

    @property
    def economic_water(self):
        # Daily volume extracted for economic use (the complement share).
        return (1 - self.eflows_proportion) * self.local_available

    @property
    def downstream_available(self):
        # Only the environmental share continues downstream; extracted
        # water leaves the network at this segment.
        return self.eflows_water

    @property
    def local_available(self):
        """
        Total water available at this segment: local inflow plus everything
        passed down from upstream.
        """
        return self._local_available + self.upstream_available

    @property
    def raw_available(self):
        """
        The raw total daily flow for this segment straight from the flow
        records, ignoring where the water comes from or how it is allocated.
        """
        return self._get_local_flows(use_property="estimated_total_flow")

    @property
    def upstream_available(self):
        """
        Water arriving from upstream: the sum of each upstream node's
        downstream contribution. Cached for the evaluation round; reset()
        clears the cache.
        """
        if self._upstream_available is not None:  # short circuit on the cache
            return self._upstream_available
        upstream_available = 0
        for upstream in self.upstream:
            upstream_available += upstream.downstream_available  # amount flowing down from each upstream node
        self._upstream_available = upstream_available
        return self._upstream_available

    def reset(self):
        """
        Clear the cached upstream total so the next evaluation recomputes it.
        """
        self._upstream_available = None

    def set_allocation(self, allocation):
        # allocation: array of 365 daily environmental-flow proportions.
        self.reset()
        self.eflows_proportion = allocation

    @property
    def Available(self):  # alias so plotting can look the series up by name
        return self.raw_available

    @property
    def EFlow(self):  # alias so plotting can look the series up by name
        return self.eflows_water

    def plot_results_with_components(self, screen=True, results=("Available", "EFlow"),
                                     skip_components=(), output_folder=None, name_prefix=None, autoremove=True,
                                     include_benefit=True):
        """
        Plot flow timeseries with a red box for each of the segment's flow
        components layered on top.

        :param screen: show the figure interactively when True
        :param results: attribute names of the timeseries to plot; by default
            the raw available flow and the eflows allocation
        :param skip_components: iterable of FlowComponent objects to omit
        :param output_folder: when set, also save the figure as a PNG there
        :param name_prefix: prefix used in the saved filename
        :param autoremove: default True - drop components whose lowest
            magnitude exceeds the highest plotted flow, so unreachable peak
            components don't rescale the plot
        :param include_benefit: default True - intended to control whether
            the environmental benefit goes into the filename (note: the
            filename currently always includes it; evaluating the benefit
            can be slow depending on the model)
        """
        components = self.stream_segment.segmentcomponent_set.all()
        fig, self.ax = plt.subplots(1)
        # Gather the data first so max_value is known before drawing the
        # component boxes; the series themselves are plotted further down.
        plot_data = {}
        for result in results:
            plot_data[result] = getattr(self, result)
        max_value = max(list(chain.from_iterable(plot_data.values())))
        plot_data["Days"] = range(1, 366)  # added after max_value is computed
        # Callers provide FlowComponent objects to skip; compare by their IDs.
        skip_components = [component.id for component in skip_components]
        for component in components:
            if component.component.id in skip_components:  # matches a component to skip
                continue
            # Wraparound: a component crossing the end of the water year is
            # drawn as two boxes (end-of-year piece, then start-of-year piece).
            if component.start_day_ramp + component.duration_ramp > 365:
                box_width = 365 - component.start_day_ramp
                extra_width = component.start_day_ramp + component.duration_ramp - 365
            else:
                box_width = component.duration_ramp
                extra_width = 0
            # Drop components the plotted flows never reach (see autoremove).
            if autoremove is True and component.minimum_magnitude_ramp > max_value:
                continue
            try:
                self.add_rectangle_to_plot(component=component, left=component.start_day_ramp, width=box_width)
                if extra_width > 0:
                    self.add_rectangle_to_plot(component=component, left=0, width=extra_width)
            except TypeError:
                # Components with missing ramp values can't be drawn — skip.
                continue
        # Plot the requested hydrographs.
        for result in results:
            self.ax.plot("Days", result, data=plot_data, label=result)
        self.ax.autoscale()
        eflows_water = sum(getattr(self, "eflows_water"))
        extracted = sum(getattr(self, "raw_available")) - eflows_water
        plt.title("{} {} - EF = {:.4}, Ext = {:.4}".format(self.stream_segment.com_id, self.stream_segment.name, eflows_water, extracted))
        plt.xlabel("Day of Water Year")
        plt.ylabel("Flow Magnitude Q (CFS)")
        self.ax.legend()
        if output_folder is not None:
            # Benefit comes first in the name so files sort by benefit.
            segment_name = "{}_{}_{}_{}.png".format(int(self.eflows_benefit), name_prefix, self.stream_segment.com_id, self.stream_segment.name)
            output_path = os.path.join(output_folder, segment_name)
            plt.savefig(output_path, dpi=300)
        if screen:
            plt.show()
        plt.close()

    def add_rectangle_to_plot(self, component, left, width):
        """
        Draw one unfilled red box spanning the component's magnitude range.

        :param component: SegmentComponent supplying the magnitude bounds
        :param left: starting day (x position) of the box
        :param width: box width in days
        """
        rect = plt.Rectangle((left, component.minimum_magnitude_ramp),
                             width,
                             component.maximum_magnitude_ramp - component.minimum_magnitude_ramp,
                             linewidth=1, edgecolor='r', facecolor='none', fill=False)
        self.ax.add_patch(rect)
class StreamNetwork(object):
    """
    In-memory routing network of ModelStreamSegment nodes for one model run
    and water year. Builds segment nodes, wires up up/downstream links,
    pushes candidate allocations onto the segments, and totals the resulting
    environmental and economic benefits.
    """

    # Kept for backward compatibility with any external references; __init__
    # now binds a fresh per-instance mapping (see bug note below).
    stream_segments = collections.OrderedDict()

    def __init__(self, django_segments, water_year, model_run, economic_benefit_instance=None):
        """
        :param django_segments: queryset of Django StreamSegment records
        :param water_year: water year whose daily flows get loaded
        :param model_run: Django model run object the flows belong to
        :param economic_benefit_instance: benefit calculator for extracted water
        """
        # Bug fix: the OrderedDict previously lived only on the class, so
        # every StreamNetwork instance shared (and clobbered) one mapping.
        self.stream_segments = collections.OrderedDict()
        self.water_year = water_year
        self.model_run = model_run  # Django model run object
        self.economic_benefit_calculator = economic_benefit_instance
        self.build(django_segments)

    def build(self, django_segments):
        """
        Construct ModelStreamSegment nodes and wire up network connectivity.

        Segments without flow data raise RuntimeError during construction and
        are dropped here (they log their own warning when raising).
        """
        log.info("Initiating network and pulling daily flow data")
        if PREGENERATE_COMPONENTS:
            log.info("PREGENERATE_COMPONENTS is True, so network build will be slow")
        for segment in django_segments.all():
            try:
                self.stream_segments[segment.com_id] = ModelStreamSegment(segment, segment.com_id, network=self)
            except RuntimeError:  # raised by segments with no flow data — skip them
                pass
        log.info("Making network connectivity")
        for segment in self.stream_segments.values():
            try:
                # Look up the in-network node for the downstream comid.
                segment.downstream = self.stream_segments[segment.stream_segment.downstream.com_id]
            except KeyError:
                # Message fix: added the missing space at the line join
                # (previously rendered as "mostlikely").
                log.warning("No downstream segment for comid {}. If this is mid-network, it's likely a problem, but it most "
                            "likely means this is the outlet".format(segment.stream_segment.com_id))
            for upstream in segment.stream_segment.directly_upstream.all():
                try:
                    segment.upstream.append(self.stream_segments[upstream.com_id])
                except KeyError:
                    # Message fix: added missing spaces at the line joins.
                    log.warning("Missing upstream segment with comid {}. Likely means no flow data for segment, so it's left out. "
                                "This could be a problem mid-network, but this most likely is a small headwaters tributary. You should "
                                "go look on a map.".format(upstream.com_id))
            segment.stream_segment.ready_run()  # attach benefit objects so benefit can be evaluated

    def set_segment_allocations(self, allocations, simplified=False):
        """
        Push a candidate solution's allocations onto the segments.

        :param allocations: flat decision-variable vector. When simplified is
            False it is reshaped to (n_segments, 365) and each row goes to
            the corresponding segment (OrderedDict insertion order). When
            True, the same 365-value allocation is applied to every segment.
        """
        # Each segment's set_allocation() also clears its upstream cache.
        if not simplified:
            allocations = numpy.reshape(allocations, (-1, 365))
            allocation_index = 0
            for segment in self.stream_segments.values():
                segment.set_allocation(allocations[allocation_index])
                allocation_index += 1
        else:
            for segment in self.stream_segments.values():
                segment.set_allocation(numpy.array(allocations))

    def get_benefits(self):
        """
        Evaluate the current allocation.

        :return: dict with the total environmental benefit (summed across
            segments) and the economic benefit of all extracted water.
        """
        environmental_benefits = [segment.eflows_benefit for segment in self.stream_segments.values()]
        eflow_benefit = numpy.sum(environmental_benefits)
        economic_water_total = numpy.sum([segment.economic_water for segment in self.stream_segments.values()])
        self.economic_benefit_calculator.units_of_water = economic_water_total
        economic_benefit = self.economic_benefit_calculator.get_benefit()
        # Individual segment benefits could be returned here too if needed.
        return {
            "environmental_benefit": eflow_benefit,
            "economic_benefit": economic_benefit,
        }

    def reset(self):
        """Clear every segment's cached upstream total before re-evaluating."""
        for segment in self.stream_segments.values():
            segment.reset()

    def dump_plots(self, output_folder, base_name, nfe, show_plots=False):
        """
        Save one component plot per segment plus a marker file recording the
        number of function evaluations (nfe) at dump time.
        """
        log.info("Dumping plots to {}".format(output_folder))
        os.makedirs(output_folder, exist_ok=True)
        for segment in self.stream_segments.values():
            segment.plot_results_with_components(screen=show_plots, output_folder=output_folder, name_prefix=base_name)
        with open(os.path.join(output_folder, "nfe_{}.txt".format(nfe)), 'w') as output_file:
            output_file.write(str(nfe))
class StreamNetworkProblem(Problem):
    """
    Platypus optimization problem that allocates water across a stream network.

    We subclass Problem because:
    1) We want to keep the stream network loaded between evaluations instead of
       rebuilding it every time, and
    2) We track the best objective values seen so far so that plots of new
       best/nondominated solutions can be dumped while the optimization runs.

    Each decision variable is a proportion (min_proportion..1) of flow for a
    segment on a given day; both objectives (environmental benefit and
    economic benefit) are maximized.
    """
    def __init__(self,
                 stream_network,
                 starting_water_price=800,
                 total_units_needed_factor=0.99,
                 objectives=2,
                 min_proportion=0,
                 simplified=False,
                 plot_output_folder=None,
                 *args):
        """
        :param stream_network: the stream network whose segments receive allocations
        :param starting_water_price: initial unit price for the economic benefit calculator
        :param total_units_needed_factor: proportion of the basin's total water assumed
            needed for economic use - converted to a quantity via get_needed_water
        :param objectives: default is two (environmental benefit and economic benefit)
        :param min_proportion: What is the minimum proportion of flow that we can allocate to any single segment? Raising
            this value (min 0, max 0.999999999) prevents the model from extracting all its water in one spot.
        :param simplified: when True, a single 365-day hydrograph is shared by all segments
            (365 decision variables) instead of one hydrograph per segment
        :param plot_output_folder: when set, plots are dumped here whenever a new best
            solution is found
        :param args: passed through to Problem.__init__
        """
        self.stream_network = stream_network
        self.stream_network.economic_benefit_calculator = economic_components.EconomicBenefit(
            starting_water_price,
            total_units_needed=self.get_needed_water(total_units_needed_factor))
        if simplified:
            self.decision_variables = 365
            self.simplified = True
        else:
            # we need a decision variable for every stream segment and day - we'll reshape them later
            self.decision_variables = len(stream_network.stream_segments) * 365
            self.simplified = False
        # per-evaluation tracking series (NFE index and both objective values)
        self.iterations = []
        self.objective_1 = []
        self.objective_2 = []
        # best-seen objective values; used below to decide when to dump plots
        self.best_obj1 = 0
        self._best_obj2_for_obj1 = 0  # best economic benefit among solutions tied for best environmental benefit
        self.best_obj2 = 0
        self.plot_output_folder = plot_output_folder
        log.info("Number of Decision Variables: {}".format(self.decision_variables))
        super(StreamNetworkProblem, self).__init__(self.decision_variables, objectives, *args)  # pass any arguments through
        self.directions[:] = Problem.MAXIMIZE  # we want to maximize all of our objectives
        self.types[:] = Real(min_proportion, 1)  # we now construe this as a proportion instead of a raw value
        self.eflows_nfe = 0

    def reset(self):
        """Clear per-run tracking state so the problem can be reused for another run."""
        self.iterations = []
        self.objective_1 = []
        self.objective_2 = []
        self.eflows_nfe = 0

    def get_needed_water(self, proportion):
        """
        Given a proportion of a basin's total water to extract, calculates the quantity
        :param proportion: proportion (0-1) of the basin's total available water
        :return: quantity of water corresponding to that proportion
        """
        log.info("Calculating total water to extract")
        total_water = 0
        all_flows = self.stream_network.model_run.daily_flows.filter(water_year=self.stream_network.water_year)
        for flow in all_flows:
            total_water += flow.estimated_local_flow
        print("Total Water Available: {}".format(total_water))
        return float(total_water) * proportion

    def evaluate(self, solution):
        """
        We want to evaluate a full hydrograph of values for an entire year.

        :param solution: Platypus solution; per-segment/day allocation proportions are
            read from solution.variables and the two benefits written to solution.objectives
        """
        if self.eflows_nfe % 5 == 0:
            log.info("NFE (inside): {}".format(self.eflows_nfe))
        self.eflows_nfe += 1
        # attach allocations to segments here - doesn't matter what order we do it in, so long as it's consistent
        self.stream_network.set_segment_allocations(allocations=solution.variables, simplified=self.simplified)
        benefits = self.stream_network.get_benefits()
        # set the outputs - platypus looks for these here.
        solution.objectives[0] = benefits["environmental_benefit"]
        solution.objectives[1] = benefits["economic_benefit"]
        # tracking values
        self.iterations.append(self.eflows_nfe)
        self.objective_1.append(benefits["environmental_benefit"])
        self.objective_2.append(benefits["economic_benefit"])
        if self.plot_output_folder:  # if we want to dump the best, then check the values and dump the network if it's better than what we've seen
            if int(benefits["environmental_benefit"]) >= self.best_obj1:
                # we can dump for an environmental value that's tied for the best we've seen before *if* the
                # economic value of it's better (AKA, it's nondominated)
                if int(benefits["environmental_benefit"]) > self.best_obj1 or int(benefits["economic_benefit"]) > self._best_obj2_for_obj1:
                    self.stream_network.dump_plots(output_folder=os.path.join(self.plot_output_folder, "best", "env_{}_econ_{}".format(int(benefits["environmental_benefit"]), int(benefits["economic_benefit"]))),
                                                   base_name="{}_".format(int(benefits["environmental_benefit"])),
                                                   nfe=self.eflows_nfe)
                    self.best_obj1 = int(benefits["environmental_benefit"])
                    # BUG FIX: this previously assigned to self.best_obj2_for_obj1 (no leading
                    # underscore), so the comparison above always saw the initial 0 and plots
                    # were dumped for every solution tied on the environmental objective.
                    self._best_obj2_for_obj1 = int(benefits["economic_benefit"])
            elif benefits["economic_benefit"] > (self.best_obj2 * 1.005):  # don't dump every economic output - it changes frequently. It needs to improve a bit before we dump it.
                self.stream_network.dump_plots(output_folder=os.path.join(self.plot_output_folder, "best", "econ_{}_env{}".format(int(benefits["economic_benefit"]), int(benefits["environmental_benefit"]))),
                                               base_name="{}_".format(int(benefits["economic_benefit"])),
                                               nfe=self.eflows_nfe)
                self.best_obj2 = benefits["economic_benefit"]
class HUCNetworkProblem(Problem):
    """
    Platypus optimization problem over a network of HUCs (hydrologic unit codes).

    We need to subclass this because:
    1) We want to save the HUCs so we don't load them every time - originally
    did this as a closure, but we *also* would like a class for
    2) Updating constraints for every solution. It's undocumented, but
    Platypus allows for *functions* as constraints, so we'll actually
    need a function that traverses the hydrologic network and returns 0 if
    the solution is feasible and 1 if it's not.
    Thinking that the constraint function will just traverse the network and make sure
    that flow value in each HUC is less than or equal to the sum of that HUC's initial flow
    plus everything coming from upstream.
    """
    def __init__(self, decision_variables=None, objectives=2, *args):
        """
        :param decision_variables: when this is set to None, it will use the number of HUCs as the number of decision
        variables
        :param objectives: default is two (total needs met, and min by species)
        :param args: NOTE(review): accepted but NOT forwarded to Problem.__init__ below,
            despite the "pass any arguments through" comment - confirm whether dropping
            extra positional args is intended
        """
        self.hucs = models.HUC.objects.all()
        if not decision_variables:
            self.decision_variables = models.HUC.objects.count()
        else:
            self.decision_variables = decision_variables
        # per-evaluation tracking series (NFE index and both objective values)
        self.iterations = []
        self.objective_1 = []
        self.objective_2 = []
        log.info("Number of Decision Variables: {}".format(self.decision_variables))
        super(HUCNetworkProblem, self).__init__(self.decision_variables, objectives, nconstrs=1)  # pass any arguments through
        self.directions[:] = Problem.MAXIMIZE  # we want to maximize all of our objectives
        self.feasible = 1  # 1 = infeasible, 0 = feasible - store the value here because we'll reference it later in a closure
        self.eflows_nfe = 0
        self.setUp()

    def setUp(self,):
        """
        On top of init, let's make something that actually does the setup when we're ready to.
        This would also be used when resetting a run or something
        :return:
        """
        self.make_constraint()
        self.set_types()
        self.feasible = 1  # 1 = infeasible, 0 = feasible - store the value here because we'll reference it later in a closure
        available_species = {}
        for huc in self.hucs:  # prepopulate all the species so we can skip a condition later - don't use all species because it's possible that some won't be present. Only use the species in all the hucs
            for species in huc.assemblage.all():
                available_species[species.common_name] = 1
        self.available_species = available_species.keys()
        log.debug("Total Species in Area: {}".format(len(available_species.keys())))
        self.eflows_nfe = 0

    def make_constraint(self):
        """Install the single constraint; its value is read from self.feasible at check time."""
        def constraint_function(value):
            """
            We want this here so it's a closure and the value from the class is in-scope without a "self"
            :return: current feasibility flag (0 = feasible, 1 = infeasible)
            """
            return self.feasible  # this will be set during objective evaluation later
        self.constraints[:] = constraint_function

    def set_types(self):
        """
        Sets the type of each decision variable and makes it the max, should be in the same order that we
        assign flows out later, so the max values should allign with the allocations that come in.
        :return:
        """
        allocation_index = 0
        hucs = self.hucs
        for huc in hucs:
            # each decision variable is a flow bounded by that HUC's max possible flow
            self.types[allocation_index] = Real(0, huc.max_possible_flow)
            allocation_index += 1

    def set_huc_allocations(self, allocations):
        """
        Attach each allocation value to its HUC, in the same traversal order as set_types.

        :param allocations: sequence of flow allocations, one per HUC
        :raises IndexError: if there are fewer allocations than HUCs
        """
        allocation_index = 0
        hucs = self.hucs
        for huc in hucs:
            try:
                huc.flow_allocation = allocations[allocation_index]
            except IndexError:
                # NOTE(review): these adjacent string literals concatenate without spaces
                # ("either""too many"), so the logged message runs words together
                log.error("Size mismatch between number of HUCs and number of allocations - either"
                          "too many HUCs are loaded in the database, or there are too few decision"
                          "variables receiving allocations")
                raise
            allocation_index += 1
            # huc.save()  # let's see if we can skip this - lots of overhead in it.from

    def evaluate(self, solution):
        """
        Alternatively, could build this so that it reports the number of hucs, per species and we construct our
        problem to be ready for that - we might not want that for actual use though, because that would lead to
        way too many resulting variables on the pareto front, etc, and would prevent a true tradeoff with economics.
        Options for this initial project :
        1) Average across the entire system and min needs met (min of the # hucs per species)
        - that way we can see the overall benefit, but also make sure it's not zeroing out species to get there
        :param solution: Platypus solution; allocations are read from solution.variables and
            the two objectives written to solution.objectives
        :return:
        """
        if self.eflows_nfe % 5 == 0:
            log.info("NFE (inside): {}".format(self.eflows_nfe))
        self.eflows_nfe += 1
        # attach allocations to HUCs here - doesn't matter what order we do it in,
        # so long as it's consistent
        self.set_huc_allocations(allocations=solution.variables)
        # initialize code to track how many flow needs are met per species
        met_needs = {}
        for species in self.available_species:
            met_needs[species] = 0
        ### TODO: REWORK THIS SLIGHTLY FOR BOTH MINIMUM AND MAXIMUM FLOW - DON'T THINK IT'LL WORK AS IS.
        # Iterate through assemblages for all HUCs and evaluate which flow needs have been met.
        for huc in self.hucs:
            for species in huc.assemblage.all():  # for every species
                needs = []
                for component in models.SpeciesComponent.objects.filter(species=species, component__name="min_flow"):
                    needs.append(component.value*component.threshold)
                needs = numpy.array(needs)
                # NOTE(review): strict < means an allocation exactly equal to a threshold
                # does NOT count as met - confirm that's intended
                met_needs[species.common_name] += (needs < huc.flow_allocation).sum()  # / species.components.count()
        # determine objective values
        all_met = sum([met_needs[species] for species in met_needs])
        # min across species of (needs met / number of HUCs where the species is present)
        min_met_needs = min([met_needs[species]/models.Species.objects.get(common_name=species).presence.count() for species in met_needs])
        self.check_constraints()  # run it now - it'll set a flag that'll get returned by the constraint function
        log.debug("Feasibility: {}".format("Feasible" if self.feasible == 0 else "Infeasible"))
        # set the outputs - platypus looks for these here.
        solution.objectives[0] = all_met
        solution.objectives[1] = min_met_needs  # the total number of needs met
        #solution.constraints[:self.decision_variables+1] = 99  # TODO: THIS MIGHT BE WRONG - THIS SET OF CONSTRAINTS MIGHT NOT
        # FOLLOW THE 0/1 feasible/infeasible pattern - should confirm
        # tracking values
        self.iterations.append(self.eflows_nfe)
        self.objective_1.append(all_met)
        self.objective_2.append(min_met_needs)

    def check_constraints(self):
        """
        Just pseudocode now. This function should take as a parameter a watershed network. That network should be created
        and just return the indexes of the item and its upstream hucs in the allocation list. Then it can just subset
        and sum the list to get the total allocation, and compare that to the initial total allocation available for that
        same set of HUCs (same process - subset and sum inital allocations).
        Constraints:
        1) Current HUC can't use more than unused total water upstream + current HUC water.
        2) Current HUC + all upstream HUCs can't use more than total water upstream + current HUC water
        Other approach would be to zero out allocations, then go through and actually calculate the water available
        by summing the upstream allocations minus the used water, then just check each HUC against its allocation.
        The above is maybe simpler (and faster?), but maybe more prone to a logic error and less explicit. Must be
        documented regardless. The second approach would scale better to future constraints, where we loop through,
        calculate some parameters on each HUC, and then check the values against each HUC's constraints. We'll need
        some other logic changes before we do that, but they shouldn't be too bad.
        indexing code can happen before this and follow prior patterns.
        Also, initial available values should just come from zonal stats on a BCM raster. Low testing could be a summer
        flow and high testing a winter flow
        Need to run the constraint here once because when we check constraints, we won't be able
        to tell which item it's for, and it'll be run many times. We'll evaluate the network here, then set
        the constraint function to be a closure with access to the instance's constraint validity
        variable.
        :return: 0 if the current allocations are feasible, 1 otherwise (also stored on self.feasible)
        """
        ## TODO: WHY ARE WE TREATING ENVIRONMENTAL FLOWS AS CONSUMPTIVE RELATIVE TO OTHER EFLOWS.
        ## TODO: THEY SHOULD BE CONSUMPTIVE RELATIVE TO ECONOMIC USES, BUT NOT TO OTHER EFLOWS.
        for huc in self.hucs:
            upstream_available = huc.upstream_total_flow
            upstream_used = sum([up_huc.flow_allocation for up_huc in huc.upstream.all() if up_huc.flow_allocation is not None])
            # first check - mass balance - did it allocate more water than is available somewhere in the system?
            if (upstream_used + huc.flow_allocation) > (upstream_available + huc.initial_available_water):
                log.debug("Infeasible HUC: {}".format(huc.huc_id))
                log.debug("HUC Initial Available: {}".format(huc.initial_available_water))
                log.debug("HUC Allocation: {}".format(huc.flow_allocation))
                log.debug("Upstream Available: {}".format(upstream_available))
                log.debug("Upstream Used: {}".format(upstream_used))
                self.feasible = 1  # infeasible
                return 1
            # second check - is the current huc using more than is available *right here*?
            # I think this condition, as written, is the same as above - never triggered
            #if huc.flow_allocation > (upstream_available + huc.initial_available_water - upstream_used):
            #    self.feasible = 1  # infeasible
            #    log.debug("infeasible 2")
            #    return
        # for now, if those two constraints are satisfied for all HUCs, then we're all set - set the contstraint
        # as valid (0)
        self.feasible = 0
        return 0
| 42.691655 | 204 | 0.736872 | 29,708 | 0.984263 | 0 | 0 | 1,510 | 0.050028 | 0 | 0 | 15,358 | 0.508829 |
379bf3571cc579f5ca16b10bc21ccfcf8dbbd6bb | 3,146 | py | Python | tests/test_booking.py | muthash/FlightBooking-Flask | 77b157098d618582737979382197e5302d347017 | [
"MIT"
] | 1 | 2022-03-28T16:37:17.000Z | 2022-03-28T16:37:17.000Z | tests/test_booking.py | muthash/FlightBooking-Flask | 77b157098d618582737979382197e5302d347017 | [
"MIT"
] | null | null | null | tests/test_booking.py | muthash/FlightBooking-Flask | 77b157098d618582737979382197e5302d347017 | [
"MIT"
] | 1 | 2019-10-08T17:48:50.000Z | 2019-10-08T17:48:50.000Z | """Test case for the booking creation functionality"""
import os
import json
from datetime import datetime
from tests.base_test import BaseTestCase
class TestBookingManipulation(BaseTestCase):
    """Tests for the flight booking manipulation endpoint."""

    def crate_flight(self):
        """Create the airports, airplane and flight required before any booking."""
        self.admin_login()
        setup_calls = [
            ('api/airport', self.airport_data),
            ('api/airport', self.arrival_airport_data),
            ('api/airplane', self.airplane_data),
            ('api/flight', self.flight_data),
        ]
        for endpoint, payload in setup_calls:
            self.client.post(endpoint,
                             headers=self.header,
                             data=json.dumps(payload))

    def make_booking(self, seat):
        """Log in as a regular user and book the given seat class on flight 1."""
        self.get_login_token()
        booking_payload = json.dumps(dict(seat=seat))
        return self.client.post('api/booking/1',
                                headers=self.header,
                                data=booking_payload)

    def test_economy_seat_booking(self):
        """Booking an economy seat on an existing flight succeeds."""
        self.crate_flight()
        res = self.make_booking(1)
        body = json.loads(res.data.decode())
        self.assertEqual(body['message'],
                         "Economy seat flight reservation successfull")
        self.assertEqual(res.status_code, 201)

    def test_business_seat_booking(self):
        """Booking a business seat on an existing flight succeeds."""
        self.crate_flight()
        res = self.make_booking(2)
        body = json.loads(res.data.decode())
        self.assertEqual(body['message'],
                         "Business seat flight reservation successfull")
        self.assertEqual(res.status_code, 201)

    def test_booking_unavailable_flight(self):
        """Booking a flight that was never created is rejected."""
        res = self.make_booking(1)
        body = json.loads(res.data.decode())
        self.assertEqual(body['message'],
                         "Selected flight not available")
        self.assertEqual(res.status_code, 400)

    def test_get_daily_bookings(self):
        """An admin can list all reservations made for a flight in a given day."""
        self.crate_flight()
        self.make_booking(1)
        self.make_booking(2)
        self.admin_login()
        res = self.client.get('api/booking/1',
                              headers=self.header)
        body = json.loads(res.data.decode())
        self.assertEqual(body['number_of_booking'], 2)
        self.assertEqual(res.status_code, 200)

    def test_get_unavaillable_flight_daily_bookings(self):
        """Listing bookings for a flight that does not exist is rejected."""
        self.admin_login()
        res = self.client.get('api/booking/1',
                              headers=self.header)
        body = json.loads(res.data.decode())
        self.assertEqual(body['message'],
                         "Selected flight not available")
        self.assertEqual(res.status_code, 400)
| 39.325 | 72 | 0.593452 | 2,993 | 0.951367 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.21583 |
379cf4a3f41a62bc5af342e9b10bafd901889714 | 1,875 | py | Python | src/constants.py | tomasmikeska/face-identification | 15a65c66f840e183f83119dba35488607a4ff0b2 | [
"MIT"
] | 5 | 2019-06-24T16:22:28.000Z | 2020-10-02T21:58:44.000Z | src/constants.py | tomasmikeska/face-identification | 15a65c66f840e183f83119dba35488607a4ff0b2 | [
"MIT"
] | 2 | 2020-11-09T09:24:42.000Z | 2020-11-09T09:24:52.000Z | src/constants.py | tomasmikeska/face-identification | 15a65c66f840e183f83119dba35488607a4ff0b2 | [
"MIT"
] | 2 | 2020-03-03T15:58:27.000Z | 2020-05-07T11:46:30.000Z | import os
from utils import relative_path
# Hyperparams
ARCFACE_M = 0.5  # presumably the ArcFace angular margin m - confirm against the loss implementation
ARCFACE_S = 10.  # presumably the ArcFace feature scale s - confirm against the loss implementation
CENTERLOSS_ALPHA = 0.008  # presumably the center-loss center update rate (alpha) - confirm
CENTERLOSS_LAMBDA = 0.5  # presumably the weight of the center-loss term (lambda) - confirm
EMBEDDING_SIZE = 256  # dimensionality of the face embedding vector
MIN_FACES_PER_PERSON = 5  # Min num of samples per class - or class is removed
MAX_FACES_PER_PERSON = 200  # Max num of samples per class - additional samples are removed
MIN_FACES_UNSAMPLE = 5  # All classes with lower num of samples are upscaled to this num of samples
DEV_FACES_PER_PERSON = 2  # Number of images per person in dev data
BATCH_SIZE = 256
EPOCHS = 50
# Input image geometry; INPUT_SHAPE below is (height, width, channels)
TARGET_IMG_WIDTH = 96
TARGET_IMG_HEIGHT = 112
MIN_IMG_WIDTH = TARGET_IMG_WIDTH  # no image upscale allowed
MIN_IMG_HEIGHT = TARGET_IMG_HEIGHT  # no image upscale allowed
INPUT_SHAPE = (TARGET_IMG_HEIGHT, TARGET_IMG_WIDTH, 3)
# Paths - each is overridable via the environment variable named in the first argument
MODEL_SAVE_PATH = os.environ.get('MODEL_SAVE_PATH', relative_path('../model/'))
VGG_TRAIN_PATH = os.environ.get('VGG_DATASET', relative_path('../data/VGGFace2/')) + '/train/'
VGG_TEST_PATH = os.environ.get('VGG_DATASET', relative_path('../data/VGGFace2/')) + '/test/'
VGG_BB_TRAIN_MAP = os.environ.get('BB_TRAIN', relative_path('../data/vggface_bb_landmark/loose_bb_train.csv'))
VGG_BB_TEST_MAP = os.environ.get('BB_TEST', relative_path('../data/vggface_bb_landmark/loose_bb_test.csv'))
CASIA_PATH = os.environ.get('CASIA_DATASET', relative_path('../data/CASIA-WebFace/'))
CASIA_BB_MAP = os.environ.get('CASIA_BB', relative_path('../data/casia_landmark.csv'))
LFW_PATH = os.environ.get('LFW_DATASET', relative_path('../data/lfw/'))
LFW_BB_MAP = os.environ.get('LFW_BB', relative_path('../data/lfw_landmark.csv'))
LFW_PAIRS_PATH = os.environ.get('LFW_PAIRS', relative_path('../data/lfw_pairs.txt'))
| 55.147059 | 121 | 0.678933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 698 | 0.372267 |
379e57a273cd4876cffb73afd2c43311830a75c5 | 86 | py | Python | consent/__init__.py | alekosot/django-consent | 545363978e5ef9e404e633f198d94295b5aa384b | [
"MIT"
] | 1 | 2019-09-25T06:37:45.000Z | 2019-09-25T06:37:45.000Z | consent/__init__.py | d0ugal/django-consent | 8b72b487ace0a09e59962646ddb63b95796ca55a | [
"MIT"
] | null | null | null | consent/__init__.py | d0ugal/django-consent | 8b72b487ace0a09e59962646ddb63b95796ca55a | [
"MIT"
] | null | null | null | # following PEP 386, versiontools will pick it up
__version__ = (0, 2, 0, "final", 0)
| 28.666667 | 49 | 0.686047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.651163 |
37a15290b1671fa8d820f9c2d35d5c79d583a607 | 229 | py | Python | sample_test.py | tynski/sample_package | ab0fac6ea8cd14ebd3cbdf2666dcb9560efe31ea | [
"MIT"
] | null | null | null | sample_test.py | tynski/sample_package | ab0fac6ea8cd14ebd3cbdf2666dcb9560efe31ea | [
"MIT"
] | null | null | null | sample_test.py | tynski/sample_package | ab0fac6ea8cd14ebd3cbdf2666dcb9560efe31ea | [
"MIT"
] | null | null | null | import unittest
from sample_package.sub_package1 import my_sum
class TestSamplePackage(unittest.TestCase):
    """Unit tests for the sample package's my_sum helper."""

    def test_my_sum(self):
        """my_sum should add up every number in the given list."""
        values = [7, 9, 1]
        self.assertEqual(my_sum(values), 17)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 20.818182 | 46 | 0.733624 | 115 | 0.502183 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.043668 |
37a1dff781641c0f7cb06a0378062d227d3b4139 | 1,441 | py | Python | tests/test_processor.py | manslogic/rasa_core | 17c82e6be052fc147caef9a9914d06f79a944687 | [
"Apache-2.0"
] | 1 | 2017-12-27T04:07:24.000Z | 2017-12-27T04:07:24.000Z | tests/test_processor.py | jenish-cj/botnlufoodrest | b41aa2c7a1f6e492e10f07e67562b612b5b13a53 | [
"Apache-2.0"
] | null | null | null | tests/test_processor.py | jenish-cj/botnlufoodrest | b41aa2c7a1f6e492e10f07e67562b612b5b13a53 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.channels import UserMessage
from rasa_core.channels.direct import CollectingOutputChannel
from rasa_core.featurizers import BinaryFeaturizer
from rasa_core.interpreter import RegexInterpreter
from rasa_core.channels.console import ConsoleOutputChannel
from rasa_core.policies import PolicyTrainer
from rasa_core.policies.ensemble import SimplePolicyEnsemble
from rasa_core.policies.scoring_policy import ScoringPolicy
from rasa_core.processor import MessageProcessor
from rasa_core.tracker_store import InMemoryTrackerStore
def test_message_processor(default_domain, capsys):
    """End-to-end check: train a policy ensemble, then handle one greeting message."""
    story_filename = "data/dsl_stories/stories_defaultdomain.md"
    interpreter = RegexInterpreter()
    policy_ensemble = SimplePolicyEnsemble([ScoringPolicy()])
    trainer = PolicyTrainer(policy_ensemble, default_domain, BinaryFeaturizer())
    trainer.train(story_filename, max_history=3)
    store = InMemoryTrackerStore(default_domain)
    processor = MessageProcessor(interpreter,
                                 policy_ensemble,
                                 default_domain,
                                 store)
    channel = CollectingOutputChannel()
    processor.handle_message(UserMessage("_greet[name=Core]", channel))
    assert ("default", "hey there Core!") == channel.latest_output()
| 40.027778 | 70 | 0.764053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.061069 |
37a2b2de5c6cf3d0a5e50389fe28c3eb4acbc10c | 4,609 | py | Python | metrics.py | juanmc2005/continual-cross-lingual-nlu | ce2a01ddaa8754404f3f6b5b0fe81953c8a6951f | [
"MIT"
] | null | null | null | metrics.py | juanmc2005/continual-cross-lingual-nlu | ce2a01ddaa8754404f3f6b5b0fe81953c8a6951f | [
"MIT"
] | null | null | null | metrics.py | juanmc2005/continual-cross-lingual-nlu | ce2a01ddaa8754404f3f6b5b0fe81953c8a6951f | [
"MIT"
] | null | null | null | import uuid
from typing import Dict, List, Text, Union
import pandas as pd
import torch
from datasets import load_metric
from pytorch_lightning.metrics import Metric
from seqeval.metrics import classification_report
# MIT License
#
# Copyright (c) 2021 Université Paris-Saclay
# Copyright (c) 2021 Laboratoire national de métrologie et d'essais (LNE)
# Copyright (c) 2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from dataset import LabelEncoding
# Per-process random id passed to load_metric as experiment_id below -
# presumably to keep concurrent runs from sharing metric cache state; confirm.
UNIQUE_RUN_ID = str(uuid.uuid4())
def cat_labels(old: List[List[Text]], new: List[List[Text]]) -> List[List[Text]]:
    """
    Merge two lists of label sequences in place, preserving the list-of-lists shape.

    Intended as a dist_reduce_fx: a naive reduction could flatten the nested
    lists, so the new sequences are appended onto ``old`` and the same
    (mutated) ``old`` list is returned.
    """
    old += new
    return old
class SlotF1(Metric):
    """
    A PyTorch Lightning metric to calculate slot filling F1 score using the seqeval script.
    The seqeval script is used via the Huggingface metrics interface.
    """

    def __init__(
        self,
        label_encoding: LabelEncoding,
        ignore_index: int,
        dist_sync_on_step: bool = False,
        name_or_path: str = 'seqeval',
        compute_report: bool = False
    ):
        """
        :param label_encoding: mapping used to turn integer slot ids back into label names
        :param ignore_index: target value marking tokens to exclude (e.g. special tokens)
        :param dist_sync_on_step: forwarded to Metric; sync state across processes each step
        :param name_or_path: name or path of the Huggingface metric to load
        :param compute_report: when True, compute() also returns a per-label report
        """
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.encoding = label_encoding
        self.ignore_index = ignore_index
        # experiment_id isolates this run's metric cache from other processes
        self.seqeval = load_metric(name_or_path, experiment_id=UNIQUE_RUN_ID)
        self.compute_report = compute_report
        # state is a list of label-name lists (one per example); cat_labels merges
        # them across processes without flattening the nesting
        self.add_state("predictions", default=[], dist_reduce_fx=cat_labels)
        self.add_state("targets", default=[], dist_reduce_fx=cat_labels)

    def update(self, predictions: torch.Tensor, targets: torch.Tensor):
        """
        Update internal state with a new batch of predictions and targets.
        This function is called automatically by PyTorch Lightning.
        :param predictions: Tensor, shape (batch_size, seq_len, num_slot_labels)
            Model predictions per token as (log) softmax scores.
        :param targets: Tensor, shape (batch_size, seq_len)
            Slot filling ground truth per token encoded as integers.
        """
        # Get hard predictions
        predictions = torch.argmax(predictions, dim=-1)
        # Transform to list since it needs to deal with different sequence lengths
        predictions = predictions.tolist()
        targets = targets.tolist()
        # Remove ignored predictions (special tokens and possibly subtokens),
        # converting the surviving ids back to label-name strings for seqeval
        true_predictions = [
            [self.encoding.get_slot_label_name(p) for (p, l) in zip(pred, label) if l != self.ignore_index]
            for pred, label in zip(predictions, targets)
        ]
        true_targets = [
            [self.encoding.get_slot_label_name(l) for (p, l) in zip(pred, label) if l != self.ignore_index]
            for pred, label in zip(predictions, targets)
        ]
        # Add predictions and labels to current state
        self.predictions += true_predictions
        self.targets += true_targets

    def compute(self) -> Union[torch.Tensor, Dict]:
        """
        Compute the Slot F1 score using the current state.

        :return: the overall F1 as a tensor, or - when compute_report is True -
            a dict with the F1 and a per-label classification report DataFrame
        """
        results = self.seqeval.compute(predictions=self.predictions, references=self.targets)
        # overall_precision, overall_recall and overall_accuracy are also available
        f1 = torch.tensor(results["overall_f1"])
        if self.compute_report:
            report = classification_report(
                y_true=self.targets, y_pred=self.predictions, output_dict=True
            )
            return {"f1": f1, "report": pd.DataFrame(report).transpose()}
        else:
            return f1
| 41.151786 | 107 | 0.69169 | 2,876 | 0.623726 | 0 | 0 | 0 | 0 | 0 | 0 | 2,317 | 0.502494 |
37a3fb8a1b065ee6603e032e39ed5aad8ab6c268 | 430 | py | Python | EulerFour.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 14 | 2020-10-15T21:47:18.000Z | 2021-12-01T06:06:51.000Z | EulerFour.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | null | null | null | EulerFour.py | vanigupta20024/Programming-Challenges | 578dba33e9f6b04052a503bcb5de9b32f33494a5 | [
"MIT"
] | 4 | 2020-06-15T14:40:45.000Z | 2021-06-15T06:22:03.000Z | # Project Euler - Problem 4
# Find the largest palindrome made from the product of two 3-digit numbers.
import time
start = time.time()  # wall-clock start; elapsed time is printed at the end of the script
def pal(s):
    """
    Return 1 if the string s is a palindrome, 0 otherwise.

    The int (rather than bool) return is kept for compatibility with the
    original implementation. The empty string counts as a palindrome.

    :param s: string to test
    """
    # Compare against the reversed string instead of walking two indices -
    # idiomatic and still O(n).
    return 1 if s == s[::-1] else 0
n1 = 100
n2 = 1000  # exclusive
mx = 0
# The product is symmetric (i * j == j * i), so starting the inner loop at i
# skips duplicate pairs - same answer in roughly half the iterations. The cheap
# mx comparison runs before the palindrome test to avoid needless str/pal work.
for i in range(n1, n2):
    for j in range(i, n2):
        product = i * j
        if product > mx and pal(str(product)):
            mx = product
print(mx)
print(time.time() - start, "sec")
| 15.925926 | 75 | 0.583721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.274419 |
37a4e47052a2d1253f3384427f9c90bfebcfbf03 | 4,393 | py | Python | tests/testing/helpers/test_assert_function_call_count.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | tests/testing/helpers/test_assert_function_call_count.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | tests/testing/helpers/test_assert_function_call_count.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import tubular
import tubular.testing.helpers as h
import tubular.testing.test_data as d
def test_arguments():
"""Test tubular.testing.helpers.assert_function_call_count has expected arguments."""
# use of contextmanager decorator means we need to use .__wrapped__ to get back to original function
h.test_function_arguments(
func=h.assert_function_call_count.__wrapped__,
expected_arguments=["mocker", "target", "attribute", "expected_n_calls"],
expected_default_values=None,
)
def test_mocker_arg_not_mocker_fixture_error():
"""Test an exception is raised if mocker is not pytest_mock.plugin.MockerFixture type."""
with pytest.raises(
TypeError, match="mocker should be the pytest_mock mocker fixture"
):
df = d.create_df_1()
x = tubular.base.BaseTransformer(columns="a")
with h.assert_function_call_count(
"aaaaaa", tubular.base.BaseTransformer, "columns_set_or_check", 1
):
x.fit(X=df)
def test_mocker_patch_object_call(mocker):
"""Test the mocker.patch.object call."""
mocked = mocker.spy(mocker.patch, "object")
with h.assert_function_call_count(
mocker,
tubular.base.BaseTransformer,
"__init__",
1,
return_value=None,
):
tubular.imputers.BaseImputer("a", other=1)
assert mocked.call_count == 1, "unexpected number of calls to mocker.patch.object"
mocker_patch_object_call = mocked.call_args_list[0]
call_pos_args = mocker_patch_object_call[0]
call_kwargs = mocker_patch_object_call[1]
assert call_pos_args == (
tubular.base.BaseTransformer,
"__init__",
), "unexpected positional args in mocker.patch.object call"
assert call_kwargs == {
"return_value": None
}, "unexpected kwargs in mocker.patch.object call"
def test_successful_usage(mocker):
"""Test an example of successful run of h.assert_function_call_count."""
df = d.create_df_1()
x = tubular.base.BaseTransformer(columns="a")
with h.assert_function_call_count(
mocker, tubular.base.BaseTransformer, "columns_set_or_check", 1
):
x.fit(X=df)
def test_exception_raised_more_calls_expected(mocker):
"""Test an exception is raised in the case more calls to a function are expected than happen."""
with pytest.raises(
AssertionError,
match="incorrect number of calls to columns_set_or_check, expected 2 but got 1",
):
df = d.create_df_1()
x = tubular.base.BaseTransformer(columns="a")
with h.assert_function_call_count(
mocker, tubular.base.BaseTransformer, "columns_set_or_check", 2
):
x.fit(X=df)
def test_exception_raised_more_calls_expected2(mocker):
"""Test an exception is raised in the case more calls to a function are expected than happen."""
with pytest.raises(
AssertionError,
match="incorrect number of calls to __init__, expected 4 but got 0",
):
df = d.create_df_1()
x = tubular.base.BaseTransformer(columns="a")
with h.assert_function_call_count(
mocker, tubular.base.BaseTransformer, "__init__", 4
):
x.fit(X=df)
def test_exception_raised_less_calls_expected(mocker):
"""Test an exception is raised in the case less calls to a function are expected than happen."""
with pytest.raises(
AssertionError,
match="incorrect number of calls to columns_set_or_check, expected 1 but got 2",
):
df = d.create_df_1()
x = tubular.base.BaseTransformer(columns="a")
with h.assert_function_call_count(
mocker, tubular.base.BaseTransformer, "columns_set_or_check", 1
):
x.fit(X=df)
x.fit(X=df)
def test_exception_raised_less_calls_expected2(mocker):
    """Check an AssertionError is raised when a call happens although none were expected."""
    expected_message = (
        "incorrect number of calls to columns_set_or_check, expected 0 but got 1"
    )
    input_df = d.create_df_1()
    transformer = tubular.base.BaseTransformer(columns="a")
    with pytest.raises(AssertionError, match=expected_message):
        with h.assert_function_call_count(
            mocker, tubular.base.BaseTransformer, "columns_set_or_check", 0
        ):
            transformer.fit(X=input_df)
| 28.160256 | 104 | 0.671068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,489 | 0.338948 |
37a645c289e6906ab538b59230d264d2e38c959a | 5,002 | py | Python | portfolio/Python/scrapy/petsafe/petstreetmallcom.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/petsafe/petstreetmallcom.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/petsafe/petstreetmallcom.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | from csv import DictReader
from petsafeconfig import CSV_FILENAME
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class PetstreetmallComSpider(BaseSpider):
    """Price-monitoring spider for petstreetmall.com.

    Products to monitor are read from the shared CSV file. A product page
    may contain a single main product, a list of main sub-products, or a
    list of secondary sub-products; the matching variant is located either
    by the primary SKU or by the secondary SKU stored in the CSV 'Notes'
    column.

    The item-loading code, previously copy-pasted across the three page
    layouts, is shared via the private helpers below.
    """
    name = 'petstreetmall.com'
    allowed_domains = ['petstreetmall.com']
    start_urls = ()
    site_name_csv = 'petstreetmall.com'

    def start_requests(self):
        """Yield one request per monitored product listed in the CSV file."""
        products = []
        with open(CSV_FILENAME, 'rb') as csv_file:
            csv_reader = DictReader(csv_file)
            for row in csv_reader:
                if row['Retailer'] == self.site_name_csv and row['Link'] != '':
                    products.append((row['SKU'].strip(),
                                     row['Link'].strip(),
                                     row['Notes'].strip(),
                                     row['Name of Product'].strip().decode('utf-8')))
        for sku, url, notes, name in products:
            # dont_filter: the same URL may be monitored for several SKUs.
            yield Request(url, self.parse,
                          meta={'sku': sku, 'notes': notes, 'name': name},
                          dont_filter=True)

    def _load_product(self, response, hxs, sku, name, price):
        """Build the Product item shared by all three page layouts."""
        loader = ProductLoader(item=Product(), response=response, selector=hxs)
        loader.add_value('url', response.url)
        loader.add_value('name', name)
        loader.add_value('price', price)
        loader.add_value('sku', sku)
        return loader.load_item()

    def _find_sub_product(self, response, hxs, nodes, sku_xpath, price_xpath,
                          missing_sku_label, sku, sec_sku, name):
        """Return a loaded item for the first sub-product node matching either
        SKU, or None when nothing matches or the price cannot be extracted."""
        url = response.url
        for node in nodes:
            numbers = node.select(sku_xpath).re("#(.+)")
            if not numbers:
                logging.error("%s %s" % (missing_sku_label, url))
                continue
            if numbers[0] != sku and numbers[0] != sec_sku:
                continue
            price = node.select(price_xpath).re("\$(.*)")
            if not price:
                logging.error('ERROR!! NO SEC PRICE!! %s "%s" "%s"' % (sku, name, url))
                return None
            return self._load_product(response, hxs, sku, name, price[0].strip())
        return None

    def parse(self, response):
        """Locate the monitored SKU on the product page and yield its item."""
        hxs = HtmlXPathSelector(response)
        url = response.url
        sku = response.meta['sku']
        sec_sku = response.meta['notes']
        name = response.meta['name'].encode('ascii', 'ignore')
        main_product = hxs.select("//div[@id='Product-MainProduct']")
        main_products = hxs.select("//div[@id='Product-MainProductContainer']//div[@class='Product-SubProduct']")
        secondary_products = hxs.select("//div[@id='Product-SubProductContainer']//div[@class='Product-SubProduct']")
        main_product_sku = main_product.select("div[@id='Product-lblItem']/span[@id='lblItem']/text()").extract()
        if not main_product_sku:
            logging.error("NO MAIN SKU! %s" % url)
        else:
            main_product_sku = main_product_sku[0]
        if main_product_sku == sku or main_product_sku == sec_sku:
            # Single main-product layout.
            price = main_product.select(".//div[@class='Product-Price']/span[@id='lblClubPrice']/b/font/text()").re("\$(.*)")
            if not price:
                logging.error('ERROR!! NO PRICE!! %s "%s" "%s"' % (sku, name, url))
                return
            yield self._load_product(response, hxs, sku, name, price[0].strip())
        elif main_products:
            # Main sub-product list layout (SKU inside a <font> tag).
            item = self._find_sub_product(
                response, hxs, main_products,
                "div[@class='Product-SubProductNumber']/font/text()",
                ".//span[contains(@id, 'lblClubPrice')]/b/font/text()",
                "NO MAIN SKU!", sku, sec_sku, name)
            if item is not None:
                yield item
        elif secondary_products:
            # Secondary sub-product list layout (different price span id).
            item = self._find_sub_product(
                response, hxs, secondary_products,
                "div[@class='Product-SubProductNumber']/text()",
                ".//span[contains(@id, 'lblClubPrice2')]/b/font/text()",
                "NO SECONDARY SKU!", sku, sec_sku, name)
            if item is not None:
                yield item
        else:
            logging.error("No products found!")
| 41.683333 | 148 | 0.541583 | 4,743 | 0.948221 | 4,554 | 0.910436 | 0 | 0 | 0 | 0 | 1,059 | 0.211715 |
37a716de9ac1554b60a3ff8e3c6b5f25ee3aacd8 | 897 | py | Python | app.py | elben10/corona-dashboard | ce3be765ee560b9cfec364f3dca32cc804776b8a | [
"MIT"
] | null | null | null | app.py | elben10/corona-dashboard | ce3be765ee560b9cfec364f3dca32cc804776b8a | [
"MIT"
] | 1 | 2021-05-11T07:29:24.000Z | 2021-05-11T07:29:24.000Z | app.py | elben10/corona-dashboard | ce3be765ee560b9cfec364f3dca32cc804776b8a | [
"MIT"
] | null | null | null | import dash
from flask_caching import Cache
# jQuery, Popper and Bootstrap JavaScript bundles served from public CDNs.
EXTERNAL_SCRIPTS = [
    "https://code.jquery.com/jquery-3.4.1.slim.min.js",
    "https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js",
    "https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js",
]
# Nunito web font, Bootstrap CSS and Font Awesome icons from public CDNs.
EXTERNAL_STYLESHEETS = [
    "https://fonts.googleapis.com/css?family=Nunito:200,200i,300,300i,400,400i,600,600i,700,700i,800,800i,900,900i",
    "https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css",
    "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.12.1/css/all.min.css",
]
# Dash application wired up with the external assets above.
app = dash.Dash(
    __name__,
    external_scripts=EXTERNAL_SCRIPTS,
    external_stylesheets=EXTERNAL_STYLESHEETS,
)
# Underlying Flask server instance (used by deployment tooling and the cache).
server = app.server
# Allow callbacks that reference components added to the layout after startup.
app.config.suppress_callback_exceptions = True
# Filesystem-backed cache shared by the app's callbacks.
cache = Cache(server, config={
    'CACHE_TYPE': 'filesystem',
    'CACHE_DIR': 'cache-directory'
})
# Cache entry lifetime in seconds (6 hours).
TIMEOUT = 60 * 60 * 6
| 29.9 | 116 | 0.721293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.562988 |
37a77af1f8414e70cf99ebbeef6b921b7ebf8a25 | 10,983 | py | Python | scripts/active_inference.py | tud-cor/jackal_active_inference_versus_kalman_filter | 406a110bec05967d2b158f9f0d0be703e473ab69 | [
"Apache-2.0"
] | 4 | 2020-03-29T01:41:28.000Z | 2021-05-29T06:04:29.000Z | scripts/active_inference.py | tud-cor/jackal_active_inference_versus_kalman_filter | 406a110bec05967d2b158f9f0d0be703e473ab69 | [
"Apache-2.0"
] | 2 | 2020-01-09T16:20:45.000Z | 2021-01-29T11:32:16.000Z | scripts/active_inference.py | tud-cor/jackal_active_inference_versus_kalman_filter | 406a110bec05967d2b158f9f0d0be703e473ab69 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
###########################################################################
# Active Inference algorithm
#
# Execute the AI algorithm using the data from the
# /filter/y_coloured_noise topic and publish the results to the
# /filter/ai/output topic.
# Note that only the filtering part of the AI algorithm is implemented yet.
#
# Author: Dennis Benders, TU Delft
# Last modified: 17.11.2019
#
###########################################################################
#Import all necessary packages
import rospy #needed to be able to program in Python
import numpy as np #needed to be able to work with numpy
import time #needed to be able to get the execution time of code parts
from scipy.linalg import toeplitz #needed to create derivative matrix in general way
from scipy.linalg import block_diag #needed to create the block-diagonal PI matrix
from jackal_active_inference_versus_kalman_filter.msg import gazebo_model_states_noise #needed to read the custom output messages gazebo_model_states_noise
from jackal_active_inference_versus_kalman_filter.msg import filt_output #needed to publish the custom output messages filt_output resulting from the filtering methods
#TODO:
#-finish the implementation with a correct usage of the learning rate, precision matrices and prior
#-implement the update rule for the next control input
#-extend the algorithm to work on all system model states
#-use IMU data in case of experiment with Jackal robot
#Active Inference class
#-------------------------------------------------------------------
class AI(object):
    """Active Inference filter for a single-state linear system.

    Maintains a belief ``mu`` over the state in generalized coordinates
    (the state and its first ``p`` temporal derivatives) and updates it
    from noisy measurements using precision-weighted prediction errors.
    Only the filtering part is implemented; the control update is stubbed.

    NOTE(review): this block was recovered from a whitespace-stripped dump;
    the reconstructed indentation of some statements (e.g. inside
    generate_PI) should be verified against the original repository.
    """
    def __init__(self, n_states, n_inputs, n_outputs, p, x_ref):
        """Set up all matrices.

        Args:
            n_states: number of system states (currently used with 1).
            n_inputs: number of control inputs.
            n_outputs: number of measured outputs.
            p: number of generalized derivative orders kept in mu.
            x_ref: reference state used to build the prior term xi.
        """
        super(AI, self).__init__()
        #Input processing
        self.p = p
        #Indicating the first time AI function is called
        self.first_time = True
        #System dimensions
        self.n_states = n_states
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        #Initial states (beliefs start at zero)
        self.x_0 = np.matrix(np.zeros(shape = (self.n_states, 1)))
        self.mu_0 = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
        self.mu = self.mu_0
        self.mu_dot = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
        #Initial system input (u) and output (z)
        self.u = np.matrix(np.zeros(shape = (self.n_inputs, 1)))
        self.z = np.matrix(np.zeros(shape = (self.n_outputs, 1)))
        #Derivative matrix: block shift operator mapping each generalized
        #order onto the next one (superdiagonal of identity blocks).
        self.Der = np.kron(np.eye((1 + self.p), k = 1), np.matrix(np.eye(self.n_states)))
        #Learning rates #TODO: tune these values when correct usage of precision matrices is known
        self.alpha_mu = 3.408*10**(-6)
        # self.alpha_u = 0.01
        #System matrices (identified model; see project report for values)
        self.A = -209.6785884514270
        self.A_tilde = np.kron(np.eye(1 + self.p), self.A)
        self.B = np.matrix('16.921645797507500 -16.921645797507500')
        self.C = 1
        self.C_tilde = np.kron(np.matrix(np.eye(1 + self.p)), self.C)
        #Initial reference path (needed for prior belief): assuming no prior belief should be given
        self.x_ref = x_ref
        temp = np.matrix(np.zeros(shape = ((1 + self.p), 1)))
        temp[0] = 1
        self.mu_ref = np.kron(temp, self.x_ref) #this assumes that reference acceleration of the robot will always be zero (the reference velocity constant)!
        self.xi = self.Der * self.mu_ref - self.A_tilde * self.mu_ref
        #Forward model #TODO: is this one always correct to use or should it actually be combined with alpha_u for update rule of u?
        # self.G = -1 * self.C * (1 / self.A) * self.B

    def construct_precision_matrices(self, sigma_w, s_w, sigma_z, s_z):
        '''Using the standard deviation information of the process output noise signals, construct the precision matrices'''
        #Process noise precision matrix (sigma_w: std dev, s_w: smoothness)
        self.sigma_w = sigma_w
        self.s_w = s_w
        self.SIGMA_w = np.matrix(np.eye(self.n_states)) * self.sigma_w**2
        self.PI_w = self.generate_PI(1 + self.p, self.SIGMA_w, self.s_w)
        #Output noise precision matrix (sigma_z: std dev, s_z: smoothness)
        self.sigma_z = sigma_z
        self.s_z = s_z
        self.SIGMA_z = np.matrix(np.eye(self.n_states)) * self.sigma_z**2
        self.PI_z = self.generate_PI(1 + self.p, self.SIGMA_z, self.s_z)
        #Total precision matrix (block-diagonal over process/output parts)
        self.PI = block_diag(self.PI_w, self.PI_z)

    def generate_PI(self, k, SIGMA, s):
        """Build a k-order generalized precision matrix from covariance SIGMA
        and temporal smoothness s (s == 0 means uncorrelated noise).

        Returns:
            (k*n, k*n) precision matrix, n being SIGMA's dimension.
        """
        if np.amax(SIGMA) == 0:
            # NOTE(review): only warns; execution continues and the inversion
            # below would then fail — confirm intended behaviour.
            print("PI cannot be generated if sigma is 0 or negative")
        n = SIGMA.shape[0]
        if s != 0:
            # Autocorrelation values of a Gaussian kernel at even derivative orders.
            l = np.array(range(0, 2*k-1, 2))
            rho = np.matrix(np.zeros(shape = (1, 2*k-1)))
            rho[0,l] = np.cumprod(1-l)/(np.sqrt(2)*s)**l
            V = np.matrix(np.zeros(shape = (k, k)))
            for r in range(k):
                V[r,:] = rho[0,r:r+k]
                # Alternate the sign for each successive row.
                rho = -rho
            SIGMA_tilde = np.kron(V, SIGMA)
            PI = np.linalg.inv(SIGMA_tilde)
        else:
            # No temporal correlations: only the zeroth order carries precision.
            PI = np.matrix(np.zeros(shape = (k*n, k*n)))
            PI[0:n, 0:n] = np.linalg.inv(SIGMA)
        return PI

    def compute_mu(self):
        '''Update belief mu by gradient descent on the precision-weighted
        prediction errors (dynamics error vs. xi, measurement error vs. z_gen),
        then integrate with an Euler step of size delta_t.'''
        self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z_gen - self.C_tilde * self.mu))
        # self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu))
        self.mu = self.mu + self.mu_dot * self.delta_t

    def compute_u(self):
        '''Update control action u (not implemented yet; kept as a stub).'''
        # self.u_dot = -1 * self.alpha_u * self.G.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)
        # self.u = self.u + self.u_dot * self.delta_t

    def debug(self):
        '''Debug function for AI functionality: print all kinds of desirable variables'''
        # NOTE(review): the last two prints reference a bare name `z`, which is
        # undefined in this scope (likely meant self.z) — calling debug() as-is
        # would raise a NameError at that line.
        print("Der:\n{}\n\nmu:\n{}\n\nmu_dot:\n{}\n\nA_tilde:\n{}\n\nPI_w:\n{}\n\nxi:\n{}\n\nC_tilde:\n{}\n\nPI_z:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.Der, self.mu, self.mu_dot, self.A_tilde, self.PI_w, self.xi, self.C_tilde, self.PI_z))
        print("Der*mu:\n{}\n\n2nd term:\n{}\n\n3rd term:\n{}\n\nmu_dot:\n{}\n\nmu:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.Der*self.mu, self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi)), self.alpha_mu * (self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)), self.mu_dot, self.mu))
        print("C_tildeT:\n{}\n\nPI_z:\n{}\n\nC_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT(), self.PI_z, self.C_tilde.getT()*self.PI_z, self.z, self.C_tilde, self.C_tilde*self.mu, self.z-self.C_tilde*self.mu))
        print("C_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n3rd term:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT()*self.PI_z, z, self.C_tilde*self.mu, z-self.C_tilde*self.mu, self.C_tilde.getT() * self.PI_z * (z - self.C_tilde * self.mu)))
#-------------------------------------------------------------------
#Subscriber class
#-------------------------------------------------------------------
class Subscriber(object):
    """Class providing all functionality needed to:
    - subscribe to the measurement data
    - run the AI equations
    - publish the result
    """
    def __init__(self):
        """Create the AI filter, set up the ROS node/publisher/subscriber and
        block in rospy.spin() until shutdown."""
        super(Subscriber, self).__init__()
        #Create AI object
        # Operating-point offsets used to linearize inputs/outputs
        # (presumably identified around the system's working point — verify).
        self.mean_u = np.matrix([[4.183917321479406], [1.942289357961973]])
        self.mean_y = 0.401988453296692
        self.debug = False
        self.n_states = 1
        self.p = 6
        self.x_ref = np.matrix(np.zeros(shape = (self.n_states, 1)))
        #---------------------------------------------
        self.ai = AI(n_states = self.n_states, n_inputs = 1, n_outputs = 1, p = self.p, x_ref = self.x_ref)
        #Initialize node, publisher and subscriber
        self.msg = filt_output() #construct the custom message filt_output
        rospy.init_node('ai', anonymous=True)
        self.publisher = rospy.Publisher('filter/ai/output', filt_output, queue_size=1)
        rospy.Subscriber('filter/y_coloured_noise', gazebo_model_states_noise, self.callback)
        rospy.spin()

    def callback(self, data):
        '''Get system output z and call AI functionality'''
        #The first time data comes in, the Gazebo model states update time is known and the precision matrices can be constructed
        if self.ai.first_time:
            self.ai.delta_t = data.delta_t #get time difference between two subsequent Gazebo model states data updates
            self.ai.construct_precision_matrices(data.sigma_w, data.s_w, data.sigma_z, data.s_z)
            self.ai.first_time = False
        #Transform system output from operating point to origin and provide to AI algorithm
        self.z = data.y_model_noise[2]
        self.ai.z = self.z - self.mean_y
        # Embed the scalar measurement into generalized coordinates
        # (only the zeroth order carries the measurement).
        temp = np.matrix(np.zeros(shape = (1 + self.p, 1)))
        temp[0,0] = 1
        self.ai.z_gen = np.kron(temp, self.ai.z)
        #Call AI functionality
        if self.debug:
            self.ai.debug()
        self.ai.compute_mu()
        self.ai.compute_u()
        # Map the zeroth-order belief back to the operating point.
        self.x_filt = self.ai.mu[:self.n_states, 0] + 1/self.ai.C*self.mean_y
        #Publish result AI algorithm
        self.msg.x_filt = [float(self.x_filt)]
        # self.msg.u = [float(i) for i in self.ai.u]
        # self.msg.u_lin = []
        # for i,x in enumerate(self.msg.u):
        #     self.msg.u_lin.append(x - self.mean_u[i])
        self.msg.y = [float(self.z)]
        self.msg.y_lin = [float(self.ai.z)]
        self.publisher.publish(self.msg)
#-------------------------------------------------------------------
#Main function
if __name__ == '__main__':
    # Constructing the Subscriber starts the ROS node and blocks in rospy.spin().
    subscriber = Subscriber()
| 48.813333 | 534 | 0.558955 | 9,027 | 0.821907 | 0 | 0 | 0 | 0 | 0 | 0 | 4,978 | 0.453246 |
37a7ca05e91bd835826fe1d91d51fc3eec3454e9 | 828 | py | Python | alembic/versions/add66992d51f_add_user_model.py | shiroyuki/2019-cfp | 90c20ad01c19ddf17b0bfd1f96b264c715456c01 | [
"BSD-3-Clause"
] | null | null | null | alembic/versions/add66992d51f_add_user_model.py | shiroyuki/2019-cfp | 90c20ad01c19ddf17b0bfd1f96b264c715456c01 | [
"BSD-3-Clause"
] | 6 | 2019-04-27T16:48:33.000Z | 2019-08-06T20:28:23.000Z | alembic/versions/add66992d51f_add_user_model.py | shiroyuki/2019-cfp | 90c20ad01c19ddf17b0bfd1f96b264c715456c01 | [
"BSD-3-Clause"
] | 2 | 2019-08-06T15:23:57.000Z | 2019-08-21T23:16:01.000Z | """add user model
Revision ID: add66992d51f
Revises:
Create Date: 2018-05-29 20:47:40.890728
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'add66992d51f'
down_revision = None  # first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``user`` table (id primary key, unique email, name)."""
    user_columns = (
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=256), nullable=False),
        sa.Column('name', sa.String(length=256), nullable=False),
    )
    constraints = (
        sa.PrimaryKeyConstraint('user_id'),
        sa.UniqueConstraint('email'),
    )
    op.create_table('user', *user_columns, *constraints)
def downgrade():
    """Reverse :func:`upgrade` by dropping the ``user`` table."""
    table_name = 'user'
    op.drop_table(table_name)
| 23.657143 | 65 | 0.676329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 385 | 0.464976 |
37a8a2a6f7db9c2e98f1c2a71bb16ab7202f778e | 1,049 | py | Python | scrapeMembers.py | iversc/lb-conforums-scraper | a99448975d13e5bd1542a7f9684938129b533ce9 | [
"MIT"
] | null | null | null | scrapeMembers.py | iversc/lb-conforums-scraper | a99448975d13e5bd1542a7f9684938129b533ce9 | [
"MIT"
] | null | null | null | scrapeMembers.py | iversc/lb-conforums-scraper | a99448975d13e5bd1542a7f9684938129b533ce9 | [
"MIT"
] | null | null | null | import forumLogin
import os
from bs4 import BeautifulSoup
# Scrape all member-index pages of the forum into local HTML files.
# Fixes: files are now closed via context managers, and the page-number
# zero-padding uses str.zfill (the old ("000"+str(x))[-3:] silently
# truncated indexes above 999).
print("Logging in to conforums site...")
forumLogin.doLogin()

print("Creating member indexes folder...")
try:
    os.mkdir("member-indexes")
except OSError:
    # Folder already exists (or cannot be created) — keep best-effort behaviour.
    pass

members_url = forumLogin.board_url + "index.cgi?action=mlall"

print("Scraping members index page...")
resp = forumLogin.urllib2.urlopen(members_url)
contents = resp.read()
with open("member-indexes/member-index-000.html", "w+") as f:
    f.write(contents.decode("ISO-8859-1"))

print("Checking number of member pages...")
soup = BeautifulSoup(contents, "lxml")
# The last <option> of the page selector holds the total page count.
pages = int(soup.find_all("option")[-1].string)
print(str(pages) + " member pages found.")

# Page 0 was already saved above; fetch the remaining pages (20 members each).
for x in range(1, pages):
    print("Scraping page " + str(x + 1) + " of " + str(pages) + "...")
    members_subpage_url = members_url + "&start=" + str(x * 20)
    file_name = "member-indexes/member-index-" + str(x).zfill(3) + ".html"
    resp = forumLogin.urllib2.urlopen(members_subpage_url)
    with open(file_name, "w+") as f:
        f.write(resp.read().decode("ISO-8859-1"))
| 25.585366 | 77 | 0.692088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.343184 |
37aaacec4e931cb07cb16c1a2609ce1ddba1e5f7 | 1,158 | py | Python | tests/modules/idn/test_idn_update.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | 20 | 2016-06-02T20:29:29.000Z | 2022-01-31T07:47:02.000Z | tests/modules/idn/test_idn_update.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | 1 | 2018-10-09T16:09:24.000Z | 2018-10-10T08:17:42.000Z | tests/modules/idn/test_idn_update.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | 7 | 2018-04-11T16:05:06.000Z | 2020-01-28T16:30:40.000Z | #!/usr/bin/env python
import unittest
from ..TestCase import TestCase
class TestIdnUpdate(TestCase):
    """Tests rendering of EPP domain:update requests carrying the IDN extension."""

    def test_render_idn_update_request(self):
        """A domain update with an idn:update/idn:script change renders the expected XML."""
        # NOTE(review): the indentation inside the XML literal may have been
        # lost during extraction; assertRequest presumably normalizes
        # whitespace when comparing — verify against the original repository.
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
<command>
<update>
<domain:update xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
<domain:name>example.com</domain:name>
<domain:chg/>
</domain:update>
</update>
<extension>
<idn:update xmlns:idn="urn:afilias:params:xml:ns:idn-1.0">
<idn:chg>
<idn:script>fr</idn:script>
</idn:chg>
</idn:update>
</extension>
<clTRID>XXXX-11</clTRID>
</command>
</epp>
''', {
            'command': 'domain:update',
            'name': 'example.com',
            'chg': {},
            'extensions': [
                {
                    'command': 'idn:update',
                    'script': 'fr'
                }
            ],
            'clTRID': 'XXXX-11',
        })
if __name__ == '__main__':
    # Allow running this test module directly (verbose output).
    unittest.main(verbosity=2)
| 25.733333 | 76 | 0.474093 | 1,024 | 0.884283 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.646805 |
37aac198548f8d29bc2f8b8cc60f05e06816e5e8 | 506 | py | Python | Python-Basics/13.Nested Loops/06.Tower.py | Xamaneone/SoftUni-Intro | 985fe3249cd2adf021c2003372e840219811d989 | [
"MIT"
] | null | null | null | Python-Basics/13.Nested Loops/06.Tower.py | Xamaneone/SoftUni-Intro | 985fe3249cd2adf021c2003372e840219811d989 | [
"MIT"
] | null | null | null | Python-Basics/13.Nested Loops/06.Tower.py | Xamaneone/SoftUni-Intro | 985fe3249cd2adf021c2003372e840219811d989 | [
"MIT"
] | null | null | null | height = int(input())
apartments = int(input())

# Only the first printed floor — the top one — uses the "L" prefix; below it,
# even floors are labelled "O" and odd floors "A". Each cell is followed by a
# single space (so every line keeps a trailing space, as before).
top_floor = height
for floor in range(height, 0, -1):
    for apt in range(apartments):
        if floor == top_floor:
            prefix = "L"
        elif floor % 2 == 0:
            prefix = "O"
        else:
            prefix = "A"
        print(f"{prefix}{floor}{apt}", end=" ")
    print(end="\n")
| 26.631579 | 42 | 0.426877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.08498 |
37ad375b563b64c735e0509a6bc22b77f4aa298e | 2,633 | py | Python | arse/biclustering/deflation.py | marianotepper/comdet | ca55083fdcd555b3f80586423cbe8a09498993d2 | [
"BSD-3-Clause"
] | null | null | null | arse/biclustering/deflation.py | marianotepper/comdet | ca55083fdcd555b3f80586423cbe8a09498993d2 | [
"BSD-3-Clause"
] | null | null | null | arse/biclustering/deflation.py | marianotepper/comdet | ca55083fdcd555b3f80586423cbe8a09498993d2 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from . import compression
from . import utils
class DeflationError(RuntimeError):
    """Raised when an array cannot be compressed during deflation.

    The previous explicit ``__init__`` only forwarded its arguments to
    ``RuntimeError`` and was redundant; construction behaves identically
    without it.
    """
class Deflator(utils.Downdater):
    """Base deflator: a Downdater over an array.

    Subclasses are expected to provide an actual compressed representation
    via :attr:`compressed_array`; the base class has none.
    """
    def __init__(self, array):
        super(Deflator, self).__init__(array)

    @property
    def compressed_array(self):
        # The base class never holds a compressed representation.
        raise DeflationError('Could not compress the array.')
class L1CompressedDeflator(Deflator):
    """Deflator that keeps an L1-based column compression of the array.

    When ``n_samples`` already covers every column, a no-op DummyCompressor
    is used; otherwise an online column compressor selects a subset of
    columns. Every downdating operation re-runs the compression.
    """
    def __init__(self, array, n_samples):
        """Args:
            array: matrix to deflate (columns are candidate samples).
            n_samples: number of columns the compression should select.
        """
        super(L1CompressedDeflator, self).__init__(array)
        if n_samples >= array.shape[1]:
            # Nothing to compress: requested sample count covers all columns.
            self._compressor = DummyCompressor(array, n_samples)
        else:
            self._compressor = compression.OnlineColumnCompressor(array,
                                                                  n_samples)
        self._inner_compress()

    def _inner_compress(self):
        # Refresh the cached selection/compressed view, or drop them when
        # compression fails (None) or selects too few columns.
        selection = self._compressor.compress()
        if selection is None or self.n_samples > selection.size:
            try:
                del self._selection
                del self._compressed_array
            except AttributeError:
                # Nothing cached yet — nothing to drop.
                pass
        else:
            self._selection = selection
            self._compressed_array = self.array[:, self.selection]

    @property
    def compressed_array(self):
        """Compressed view of the array, or raise DeflationError if unavailable."""
        try:
            return self._compressed_array
        except AttributeError:
            raise DeflationError('Could not compress the array.')

    @property
    def selection(self):
        """Indices of the selected columns, or raise DeflationError if unavailable."""
        try:
            return self._selection
        except AttributeError:
            raise DeflationError('Could not compress the array.')

    @property
    def n_samples(self):
        # Delegated to the compressor, which owns the sample count.
        return self._compressor.n_samples

    def additive_downdate(self, u, v):
        """Apply a rank-one downdate to array and compressor, then recompress."""
        super(L1CompressedDeflator, self).additive_downdate(u, v)
        self._compressor.additive_downdate(u, v)
        self._inner_compress()

    def remove_columns(self, idx_cols):
        """Remove columns from array and compressor, then recompress."""
        super(L1CompressedDeflator, self).remove_columns(idx_cols)
        self._compressor.remove_columns(idx_cols)
        self._inner_compress()

    def remove_rows(self, idx_rows):
        """Remove rows from array and compressor, then recompress."""
        super(L1CompressedDeflator, self).remove_rows(idx_rows)
        self._compressor.remove_rows(idx_rows)
        self._inner_compress()
class DummyCompressor(object):
    """No-op compressor used when the requested sample count already covers
    every column, so there is nothing to compress."""

    def __init__(self, array, n_samples):
        # Only the sample count is retained; the array itself is ignored.
        self.n_samples = n_samples

    def compress(self):
        """Always report that no column selection was made."""
        return None

    def additive_downdate(self, u, v):
        """Nothing to update."""

    def remove_columns(self, idx):
        """Nothing to update."""

    def remove_rows(self, idx):
        """Nothing to update."""
| 28.934066 | 76 | 0.635777 | 2,536 | 0.96316 | 0 | 0 | 551 | 0.209267 | 0 | 0 | 93 | 0.035321 |
37ae8731e09fa6e9e10edeca8ddfee65b0deef43 | 12,789 | py | Python | test/test.py | Trick-17/clang-build | 9830f4bc18f5a082bd88b310965e974493508eab | [
"MIT"
] | 8 | 2018-03-09T20:02:12.000Z | 2021-08-21T21:38:13.000Z | test/test.py | Trick-17/clang-build | 9830f4bc18f5a082bd88b310965e974493508eab | [
"MIT"
] | 131 | 2018-03-09T20:40:30.000Z | 2022-02-16T23:20:59.000Z | test/test.py | Trick-17/clang-build | 9830f4bc18f5a082bd88b310965e974493508eab | [
"MIT"
] | 3 | 2018-04-15T12:55:39.000Z | 2021-07-07T00:23:55.000Z | import os, sys
import unittest
import subprocess
import shutil
import logging
import io
import stat
from pathlib import Path as _Path
from multiprocessing import freeze_support
from sys import platform as _platform
import json
from clang_build import cli
from clang_build import toolchain
from clang_build.errors import CompileError
from clang_build.errors import LinkError
from clang_build.logging_tools import TqdmHandler as TqdmHandler
def on_rm_error(func, path, exc_info):
    """shutil.rmtree onerror callback: retry removing a read-only file.

    Args:
        func: function that raised (unused, part of the callback signature).
        path: path of the file that couldn't be removed.
        exc_info: exception info from the failed removal attempt.
    """
    # path contains the path of the file that couldn't be removed
    # let's just assume that it's read-only and try to unlink it.
    try:
        os.chmod(path, stat.S_IWRITE)
        os.unlink(path)
    except OSError:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; only filesystem errors are expected here.
        print(f'Error trying to clean up file "{path}":\n{exc_info}')
def clang_build_try_except(args):
    """Run clang-build with *args*, logging compile/link failures instead of raising."""
    try:
        cli.build(cli.parse_args(args))
    except CompileError as compile_error:
        log = logging.getLogger('clang_build')
        log.error('Compilation was unsuccessful:')
        for target, errors in compile_error.error_dict.items():
            message = f'Target [{target}] did not compile. Errors:\n' + ' '.join(errors)
            log.error(message)
    except LinkError as link_error:
        log = logging.getLogger('clang_build')
        log.error('Linking was unsuccessful:')
        for target, errors in link_error.error_dict.items():
            log.error(f'Target [{target}] did not link. Errors:\n{errors}')
class TestClangBuild(unittest.TestCase):
    def test_hello_world_mwe(self):
        """Build the minimal example, run it, and validate compile_commands.json."""
        clang_build_try_except(['-d', 'test/mwe'])
        try:
            output = subprocess.check_output(['./build/default/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello!')
        # The build should also emit a compile database whose entries point
        # at the example source and its object/dependency outputs.
        compile_commands_file = _Path("build") / "compile_commands.json"
        compile_commands = []
        self.assertTrue(compile_commands_file.exists())
        compile_commands_str = compile_commands_file.read_text()
        logger = logging.getLogger('clang_build')
        logger.info(compile_commands_str)
        compile_commands = json.loads(compile_commands_str)
        for command in compile_commands:
            self.assertEqual(str(_Path('test/mwe/hello.cpp').resolve()), str(_Path(command["file"]).resolve()))
            self.assertTrue(
                str(_Path('./build/default/obj/hello.o').resolve()) == str(_Path(command["output"]).resolve()) or
                str(_Path('./build/default/dep/hello.d').resolve()) == str(_Path(command["output"]).resolve())
            )
    def test_build_types(self):
        """Each supported build type compiles and produces a runnable binary."""
        for build_type in ['release', 'relwithdebinfo', 'debug', 'coverage']:
            clang_build_try_except(['-d', 'test/mwe', '-b', build_type])
            try:
                output = subprocess.check_output([f'./build/{build_type}/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
            except subprocess.CalledProcessError as e:
                self.fail(f'Could not run compiled program with build type "{build_type}". Message:\n{e.output}')
            self.assertEqual(output, 'Hello!')
    def test_compile_error(self):
        """A broken source file surfaces as a CompileError."""
        with self.assertRaises(CompileError):
            cli.build(cli.parse_args(['-d', 'test/build_errors/compile_error', '-V']))
    def test_link_error(self):
        """A broken link step surfaces as a LinkError."""
        with self.assertRaises(LinkError):
            cli.build(cli.parse_args(['-d', 'test/build_errors/link_error', '-V']))
    def test_script_call(self):
        """The installed `clang-build` console script works end to end."""
        try:
            subprocess.check_output(['clang-build', '-d', 'test/mwe', '-V'], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.fail('Compilation failed')
        try:
            output = subprocess.check_output(['./build/default/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello!')
    def test_hello_world_rebuild(self):
        """Rebuilding the example works; incremental-rebuild log checks are disabled."""
        clang_build_try_except(['-d', 'test/mwe', '-V'])
        try:
            output = subprocess.check_output(['./build/default/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello!')
        ### TODO: the following does not seem to work under coverage runs...
        # logger = logging.getLogger('clang_build')
        # stream_capture = io.StringIO()
        # ch = logging.StreamHandler(stream_capture)
        # ch.setLevel(logging.DEBUG)
        # logger.addHandler(ch)
        # clang_build_try_except(['-d', 'test/mwe', '-V'])
        # logger.removeHandler(ch)
        # self.assertRegex(stream_capture.getvalue(), r'.*\[main\]: target is already compiled*')
        # stream_capture = io.StringIO()
        # ch = logging.StreamHandler(stream_capture)
        # ch.setLevel(logging.DEBUG)
        # logger.addHandler(ch)
        # clang_build_try_except(['-d', 'test/mwe', '-V', '-f'])
        # logger.removeHandler(ch)
        # self.assertRegex(stream_capture.getvalue(), r'.*\[main\]: target needs to build sources*')
    def test_automatic_include_folders(self):
        """Default include/source folder conventions are picked up automatically."""
        clang_build_try_except(['-d', 'test/mwe_with_default_folders', '-V'])
        try:
            output = subprocess.check_output(['./build/default/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Calculated Magic: 30')
    def test_toml_mwe(self):
        """A project described by a minimal TOML file builds and runs."""
        clang_build_try_except(['-d', 'test/toml_mwe'])
        try:
            output = subprocess.check_output(['./build/default/bin/runHello'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello!')
    def test_toml_custom_folder(self):
        """A TOML project with custom source folders builds and runs."""
        clang_build_try_except(['-d', 'test/toml_with_custom_folder'])
        try:
            output = subprocess.check_output(['./build/default/bin/runHello'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello!')
    def test_pyapi_directory(self):
        """A project configured through the Python API (directory variant) builds and runs."""
        clang_build_try_except(['-d', 'test/py-api/directory', '-V'])
        try:
            output = subprocess.check_output(['./build/default/bin/main'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'the version is 1.2.0')
    def test_subproject(self):
        """An executable depending on a library subproject builds and runs."""
        clang_build_try_except(['-d', 'test/subproject', '-V'])
        try:
            output = subprocess.check_output(['./build/myexe/default/bin/runLib'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello! mylib::triple(3) returned 9')
    def test_public_dependency(self):
        """Public (transitive) dependencies between libraries are propagated."""
        clang_build_try_except(['-d', 'test/public_dependency', '-V'])
        try:
            output = subprocess.check_output(['./build/myexe/default/bin/runLib'], stderr=subprocess.STDOUT).decode('utf-8').strip()
        except subprocess.CalledProcessError as e:
            self.fail(f'Could not run compiled program. Message:\n{e.output}')
        self.assertEqual(output, 'Hello! libC::half(libA::triple(4)) returned 6')
def test_pyapi_subproject(self):
clang_build_try_except(['-d', 'test/py-api/subproject', '-V'])
try:
output = subprocess.check_output(['./build/myexe/default/bin/runLib'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
self.assertEqual(output, 'Hello! mylib::triple(3) returned 9')
def test_boost_filesystem(self):
clang_build_try_except(['-d', 'test/boost-filesystem', '-V'])
try:
output = subprocess.check_output(['./build/myexe/default/bin/myexe', 'build'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
self.assertEqual(output, '"build" is a directory')
def test_c_library(self):
clang_build_try_except(['-d', 'test/c-library', '-V'])
try:
output = subprocess.check_output(['./build/myexe/default/bin/myexe'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
self.assertEqual(output, '3 2 0'+os.linesep+'3 1 0')
def test_build_all(self):
clang_build_try_except(['-d', 'test/c-library', '-V', '-a'])
try:
output = subprocess.check_output(['./build/qhull/qhull-executable/default/bin/qhull', '-V'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail('Could not run a target which should have been built')
self.assertEqual(output, 'qhull_r 7.2.0 (2015.2.r 2016/01/18)')
def test_platform_flags(self):
clang_build_try_except(['-d', 'test/platform_flags', '-V', '--debug'])
try:
output = subprocess.check_output(['./build/default/bin/myexe'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
if _platform == 'linux':
self.assertEqual(output, 'Hello Linux!')
elif _platform == 'darwin':
self.assertEqual(output, 'Hello OSX!')
elif _platform == 'win32':
self.assertEqual(output, 'Hello Windows!')
else:
raise RuntimeError('Tried to run test_platform_flags on unsupported platform ' + _platform)
def test_openmp(self):
clang_build_try_except(['-d', 'test/openmp', '-V'])
try:
output = subprocess.check_output(['./build/default/bin/runHello'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
self.assertRegex(output, r'Hello from thread 1, nthreads*')
def test_mwe_two_targets(self):
clang_build_try_except(['-d', 'test/multi_target_external', '-V', '--bundle'])
try:
output = subprocess.check_output(['./build/myexe/default/bin/runLib'], stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
self.fail(f'Could not run compiled program. Message:\n{e.output}')
self.assertEqual(output, 'Hello! mylib::calculate() returned 2')
def test_pybind11(self):
clang_build_try_except(['-d', 'test/pybind11', '-V'])
pylib_dir = os.path.abspath(os.path.join("build", "pylib", "default", toolchain.LLVM.PLATFORM_DEFAULTS[_platform]['SHARED_LIBRARY_OUTPUT_DIR']))
sys.path.insert(0, pylib_dir)
try:
import pylib
output = pylib.triple(3)
self.assertEqual(output, 9)
except ImportError:
if os.path.exists(pylib_dir):
print(f'Expected location "{pylib_dir}" contains: {os.listdir(pylib_dir)}')
else:
print(f'Expected location "{pylib_dir}" does not exist!')
self.fail('Import of pylib failed!')
def setUp(self):
logger = logging.getLogger('clang_build')
logger.setLevel(logging.INFO)
ch = TqdmHandler()
formatter = logging.Formatter('%(message)s')
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.handlers = []
logger.addHandler(ch)
def tearDown(self):
if _Path('build').exists():
shutil.rmtree('build', onerror = on_rm_error)
if __name__ == '__main__':
    # Required on platforms that spawn (rather than fork) worker processes,
    # so multiprocessing works when this test module is frozen/run directly.
    freeze_support()
    unittest.main()
37af071f8b5a30447b056e0b80399b4ec724776a | 27 | py | Python | exoatlas/populations/curation/TransitingExoplanets.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2020-06-24T16:38:27.000Z | 2022-01-23T01:57:19.000Z | exoatlas/populations/curation/TransitingExoplanets.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | 4 | 2018-09-20T23:12:30.000Z | 2019-05-15T15:31:58.000Z | exoatlas/populations/curation/TransitingExoplanets.py | zkbt/exopop | 5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8 | [
"MIT"
] | null | null | null |
def curate(pop):
    """Placeholder curation hook for a population; currently a no-op.

    NOTE(review): intended semantics of `pop` are not visible here —
    presumably a population object to be filtered/cleaned in place; confirm
    against callers before implementing.
    """
    pass