hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daa5713f591bda079dae43ed4b025ec5547d445b | 166 | py | Python | scripts/portal/timeSeal02.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/timeSeal02.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/timeSeal02.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | SEAL_CHECKER = 9300535
SEAL_OF_TIME = 2159367
if not sm.hasQuest(25672):
sm.createQuestWithQRValue(25672, "1")
sm.showFieldEffect("lightning/screenMsg/6", 0) | 27.666667 | 50 | 0.753012 | SEAL_CHECKER = 9300535
SEAL_OF_TIME = 2159367
if not sm.hasQuest(25672):
sm.createQuestWithQRValue(25672, "1")
sm.showFieldEffect("lightning/screenMsg/6", 0) | 0 | 0 | 0 |
6be40d46faaa138af2d309cdb15bbfccb0f0f64f | 475 | py | Python | Templates/Template-1.py | CodeKnight626/codeBot | d56224b79193c17f949e9e99b7ab0b990dfd6c92 | [
"Apache-2.0"
] | null | null | null | Templates/Template-1.py | CodeKnight626/codeBot | d56224b79193c17f949e9e99b7ab0b990dfd6c92 | [
"Apache-2.0"
] | null | null | null | Templates/Template-1.py | CodeKnight626/codeBot | d56224b79193c17f949e9e99b7ab0b990dfd6c92 | [
"Apache-2.0"
] | null | null | null | import discord #Libreria de discord
client = discord.Client()
# Evento que se llama cuando el bot inicia sesión
@client.event
# Evento que se llama cuando se recibe un mensaje
@client.event
client.run("Tu token aqui") | 25 | 57 | 0.711579 | import discord #Libreria de discord
client = discord.Client()
# Evento que se llama cuando el bot inicia sesión
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
# Evento que se llama cuando se recibe un mensaje
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('$hello'):
await message.channel.send('hello')
client.run("Tu token aqui") | 210 | 0 | 44 |
12a18aea017ffa0a14958524f458897493e2bfdf | 539 | py | Python | src/functions.py | zefe/python | 8373130bbfa35f2d1328288523db23c20ea4f55a | [
"MIT"
] | null | null | null | src/functions.py | zefe/python | 8373130bbfa35f2d1328288523db23c20ea4f55a | [
"MIT"
] | null | null | null | src/functions.py | zefe/python | 8373130bbfa35f2d1328288523db23c20ea4f55a | [
"MIT"
] | null | null | null | # Divisas
if __name__ == '__main__':
run() | 24.5 | 93 | 0.671614 | # Divisas
def run():
print('C A L C U L A D O R A D E D I V I S A S')
print('Convietr de pesos mexicanos a pesos colombianos')
print('')
amount = float(input('Ingreasa la cantidad de pesos mexicanos que quieres convertir: '))
result = foreing_exchange_calculator(amount)
print('${} pesos mexicanos son ${} pesos colombianos'.format(amount, result))
print('')
def foreing_exchange_calculator(amount):
mex_to_col_rate = 145.95
return mex_to_col_rate * amount
if __name__ == '__main__':
run() | 444 | 0 | 46 |
41f729f5fd23ec704d6194684d719cbc5c40b89a | 10,532 | py | Python | which.py | WeiFoo/WHICH | df8dbc50b219aefc54ba6371ef6788095b46a948 | [
"MIT"
] | null | null | null | which.py | WeiFoo/WHICH | df8dbc50b219aefc54ba6371ef6788095b46a948 | [
"MIT"
] | 20 | 2015-04-29T01:13:27.000Z | 2015-05-29T01:27:25.000Z | which.py | WeiFoo/WHICH | df8dbc50b219aefc54ba6371ef6788095b46a948 | [
"MIT"
] | null | null | null | # __author__ = 'WeiFu'
from __future__ import print_function, division
import sys, pdb, random
from ruler import *
from Abcd import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from tuner import *
from processarff import *
from sk import rdivDemo
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier
from scipy.integrate import trapz
import subprocess
@setting
def cart(**d):
"""
this is for tuning cart.
"""
return o(max_features=None, max_depth=None, min_samples_split=2, min_samples_leaf=1).update(**d)
@setting
def cppWHICH(**d):
"""
this is for tuning cppwhch
"""
return o(alpha=1, beta=1, gamma=0, bins=2, improvements=0.2
).update(**d)
def XY(t, mydata, flag=False):
'''generate X, Y coordinates for plotting'''
if len(mydata) == 0: return [np.array([]), np.array([])]
data = sorted(mydata, key=lambda z: z[the.DATA.loc], reverse=flag)
Loc, TP = 0, 0
xx, pd = [], []
for d in data:
if d.cells[-1] == 1:
TP += d.cells[-1]
Loc += d.cells[the.DATA.loc]
xx += [100 * Loc / t.total[the.DATA.loc]]
pd += [100 * TP / (t.defective + 0.00001)]
x = np.array(xx)
pd = np.array(pd)
return [x, pd]
def manual(t, up=False):
"""
false : ascending order ==> UP method
true : descending order ==> Down method
"""
# data = sorted(t.data, key=lambda z: z[the.DATA.loc], reverse=up)
return XY(t, t.data, up)
def gbest(t):
'''the best method which has highest score'''
mydata = [d for d in t.data if d[-1] == 1]
# data = sorted(data, key=lambda z: z[the.DATA.loc])
return XY(t, mydata)
if __name__ == "__main__":
# run(main())
crossEval()
# subprocesstest() | 33.864952 | 120 | 0.636346 | # __author__ = 'WeiFu'
from __future__ import print_function, division
import sys, pdb, random
from ruler import *
from Abcd import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from tuner import *
from processarff import *
from sk import rdivDemo
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier
from scipy.integrate import trapz
import subprocess
@setting
def cart(**d):
"""
this is for tuning cart.
"""
return o(max_features=None, max_depth=None, min_samples_split=2, min_samples_leaf=1).update(**d)
@setting
def cppWHICH(**d):
"""
this is for tuning cppwhch
"""
return o(alpha=1, beta=1, gamma=0, bins=2, improvements=0.2
).update(**d)
def readcsv(f="./data/ant/ant-1.7copy.csv"):
ff = open(f, "r")
# content = ff.readline().split("\r")
content = ff.readlines()
n = content[0].split(",")
d = [map(float, row.split(",")) for kk, row in enumerate(content[1:])]
return data(names=n, data=d)
def _range():
LIB(seed=1)
RULER(tiny=4)
def _ranges():
for z in ranges(csv()): print(z)
run(_ranges)
def _Abcd(predicted, actual):
abcd = Abcd(db='Traing', rx='Testing')
for act, pre in zip(actual, predicted):
abcd.tell(act, pre)
abcd.header()
score = abcd.ask()
def XY(t, mydata, flag=False):
'''generate X, Y coordinates for plotting'''
if len(mydata) == 0: return [np.array([]), np.array([])]
data = sorted(mydata, key=lambda z: z[the.DATA.loc], reverse=flag)
Loc, TP = 0, 0
xx, pd = [], []
for d in data:
if d.cells[-1] == 1:
TP += d.cells[-1]
Loc += d.cells[the.DATA.loc]
xx += [100 * Loc / t.total[the.DATA.loc]]
pd += [100 * TP / (t.defective + 0.00001)]
x = np.array(xx)
pd = np.array(pd)
return [x, pd]
def manual(t, up=False):
"""
false : ascending order ==> UP method
true : descending order ==> Down method
"""
# data = sorted(t.data, key=lambda z: z[the.DATA.loc], reverse=up)
return XY(t, t.data, up)
def gbest(t):
'''the best method which has highest score'''
mydata = [d for d in t.data if d[-1] == 1]
# data = sorted(data, key=lambda z: z[the.DATA.loc])
return XY(t, mydata)
def sklearn_data(train, test):
train_x = [t.cells[:-1] for t in train.data]
train_y = [(t.cells[-1]) for t in train.data]
test_x = [t.cells[:-1] for t in test.data]
test_y = [(t.cells[-1]) for t in test.data]
return [train_x, train_y, test_x, test_y]
def cart(train, test, tuning=True):
data = sklearn_data(train, test)
clf = DecisionTreeRegressor(random_state=1, max_features=the.cart.max_features, max_depth=the.cart.max_depth,
min_samples_split=the.cart.min_samples_split,
min_samples_leaf=the.cart.min_samples_leaf).fit(data[0], data[1])
if not tuning: # default cart
clf = DecisionTreeRegressor(random_state=1).fit(data[0], data[1])
predictresult = [i for i in clf.predict(data[2])] # to change the format from ndarray to list
predicted = [test.data[j] for j, p in enumerate(predictresult) if
p == 1] # all these data are predicted to be defective
return XY(predicted)
def C45(train, test):
return wekaCALL(train, test, "weka.classifiers.trees.J48")
def RIPPER(train, test):
return wekaCALL(train, test, "weka.classifiers.rules.JRip")
def NaiveBayes(train, test):
return wekaCALL(train, test, "weka.classifiers.bayes.NaiveBayes")
def wekaCALL(train, test, learner):
if not jvm.started: jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
train_data = loader.load_file(train)
test_data = loader.load_file(test)
train_data.class_is_last()
test_data.class_is_last()
cls = Classifier(classname=learner)
cls.build_classifier(train_data)
predicted, name = [], []
has_defects = False
for index, inst in enumerate(test_data):
pred = cls.classify_instance(inst)
if inst.values[-1] == 1: has_defects = True
if pred != 0:
predicted += [
[inst.values[i] for i in range(inst.num_attributes)]] # this API changes "false" to 0, and "true" to 1
name += ["0"] # this is a fake name for each column, which is made to use data() function in readdata.
if has_defects and len(predicted) == 0: return [np.array([]), np.array([])]
ss = data(names=name, data=predicted)
return XY(ss, ss.data)
def cppWhich(arfftrain, arfftest, options=None):
cmd = ["././which", "-t", arfftrain, "-T", arfftest, "-score", "effort"]
if options:
temp = options.split(" ")
cmd.extend(temp)
try:
printout = subprocess.check_output(cmd)
x = map(float, printout.split("\n")[0].split(" ")[:-1]) # this line is X
pd = map(float, printout.split("\n")[1].split(" ")[:-1]) # this line is pd, last element is null, ignored.
return [np.array(x), np.array(pd)]
except: # for some parameters, the cpp version can't return a valid results, showing returned exit status -8
return [np.array([]), np.array([])]
# p = subprocess.Popen(cmd,stdout = subprocess.PIPE)
# printout = p.communicate()[0]
def tunedwhich(arfftrain, arfftune, arfftest, csvtune):
tunner = WHICHCPP(arfftrain, arfftune, csvtune)
tunner.DE()
para = "-bins " + str(the.cppWHICH.bins) + " -alpha " + str(the.cppWHICH.alpha) + " -beta " + str(
the.cppWHICH.beta) + " -gamma " + str(the.cppWHICH.gamma) + " -imp " +str(the.cppWHICH.improvements)
print(para)
return cppWhich(arfftrain, arfftest, para)
def plot(result):
# color = ['r-','k-','b-','b^','g-','y-','c-','m-']
# labels = ['WHICH','Tuned_WHICH','manualUp','manualDown','minimum','best','Tuned_CART','CART']
color = ['r-', 'k-', 'b-', 'g-', 'y-', 'c-', 'm-']
labels = ['WHICH', 'manualUp', 'manualDown', 'minimum', 'best', 'CART', 'C4.5']
plt.figure(1)
for j, x in enumerate(result):
plt.plot(x[0], x[1], color[j], label=labels[j])
plt.xlabel("Effort(% LOC inspected)")
plt.ylabel("PD(% probability of detection)")
plt.title("Effort-vs-PD")
plt.ylim(0, 100)
plt.legend(loc='best')
plt.show()
def _rule(train):
LIB(seed=1)
# RULER(tiny=4,better=gt) initialize
# print(train.score, "baseline :",len(train.data))
for z in ruler(train):
print(z.score, z)
try:
best = ruler(train)[0]
except IndexError, e:
return None
return best
def postCalculation(result):
areaLst = []
for data in result:
if data == None:
continue # ignore the none.
areaLst += [area(data)]
return percentage(areaLst)
def preSK(stats):
names = ["manualUp", "manualDown", "C4.5", "RIPPER", "NaiveBayes", "MICRO-20", "WHICH-2", "WHICH-4", "WHICH-8",
"WHICH-Tuned"]
out = []
for key, value in stats.iteritems():
ordered = sorted(value)
ordered.insert(0, names[key])
out += [ordered]
return out
def area(result):
X = result[0]
Y = result[1]
if len(X) == 0 or len(Y) == 0: return 0
if 100 not in X:
X = np.append(X, [100]) # if this curve does not reach top right, we need to add it
Y = np.append(Y, Y[-1]) # just repeat last value in Y
return trapz(Y, X)
def percentage(lst): # lst[0] is the best which is the base.
val = []
if lst[0] == 0 or len(lst) == 0: return val # return empty list
for i in range(1, len(lst)):
val += [lst[i] / lst[0]]
return val
def crossEval(repeats=10, folds=3, src="../DATASET"):
def process(result):
mypercentage = postCalculation(result)
if len(mypercentage) == 0: return # this is the case, where the best is 0
if first_Time: # initialize: for each data set, stats contains all the results of methods for that data set.
for t, each in enumerate(mypercentage):
stats[t] = stats.get(t, []) + [each]
combine[j] = [stats]
else:
for t, each in enumerate(mypercentage):
combine[j][0][t] = combine.get(j)[0][t] + [each]
def learner(csvtest, csvtrain, csvtune, arfftest, arfftrain, arfftune):
result = [] # keep all learners' results for one evaluation
result += [gbest(csvtest)]
result += [manual(csvtest, False)] # up : ascending order
result += [manual(csvtest, True)] # down: descending order
# result += [cart(csvtrain, csvtest, False)] # default cart
result += [C45(arfftrain, arfftest)]
result += [RIPPER(arfftrain, arfftest)]
result += [NaiveBayes(arfftrain, arfftest)]
for para in which_settings:
result += [cppWhich(arfftrain, arfftest, para)]
result += [tunedwhich(arfftrain, arfftune, arfftest, csvtune)]
return result
combine = {}
first_Time = True
files_name = ["ar3", "ar4", "ar5", "cm1", "kc1", "kc2", "kc3", "wm1", "pc"]
which_settings = ["-micro 20 -bins 2", "-bins 2", "-bins 4", "-bins 8"]
# cmd for micro-20, which-2, which-4, which-8
for k in range(repeats):
All(src, folds) # prepare 3 cross-way evaluation data sets
datasets = [join(src, f) for f in listdir(src) if not isfile(join(src, f)) and ".git" not in f and ".idea" not in f]
for j in range(len(datasets)):
stats = {} # keep all learners' results for a complete 3 cross evaluation for one data set.
for i in range(folds):
csvtrain = readcsv(datasets[j] + '/csv/train' + str(i) + '.csv')
csvtest = readcsv(datasets[j] + '/csv/test' + str(i) + '.csv')
csvtune = readcsv(datasets[j] + '/csv/tune' + str(i) + '.csv')
arfftrain = datasets[j] + '/arff/train' + str(i) + '.arff'
arfftest = datasets[j] + '/arff/test' + str(i) + '.arff'
arfftune = datasets[j] + '/arff/tune' + str(i) + '.arff'
process(learner(csvtest, csvtrain, csvtune, arfftest, arfftrain, arfftune)) # calculate percentage and others.
first_Time = False
for key, stats in combine.iteritems(): # print results for each data set
print("*" * 15 + files_name[key] + "*" * 15)
out = preSK(stats[0])
rdivDemo(out)
print("DONE!")
def subprocesstest(options=""):
arfftrain = "/Users/WeiFu/Github/WHICH/CppVersion1.0/cpp/cm1Train.arff"
arfftest = "/Users/WeiFu/Github/WHICH/CppVersion1.0/cpp/cm1Train.arff"
printout = subprocess.check_output(["././which", "-t", arfftrain, "-T", arfftest, "-score", "effort"])
x = map(float, printout.split("\n")[0].split(" ")[:-1]) # this line is X
pd = map(float, printout.split("\n")[1].split(" ")[:-1]) # this line is pd, last element is null, ignored.
Q = [np.array(x), np.array(pd)]
print(printout)
pdb.set_trace()
if __name__ == "__main__":
# run(main())
crossEval()
# subprocesstest() | 8,317 | 0 | 437 |
7d1f1ec839e6ec50e8a4a860444246ac22141aca | 4,062 | py | Python | logster-parsers/SlapdLogster.py | kalaksi/sensu-plugins | a3121010cddd46fc5a5566fb9bd7938c1f9f2405 | [
"MIT"
] | null | null | null | logster-parsers/SlapdLogster.py | kalaksi/sensu-plugins | a3121010cddd46fc5a5566fb9bd7938c1f9f2405 | [
"MIT"
] | null | null | null | logster-parsers/SlapdLogster.py | kalaksi/sensu-plugins | a3121010cddd46fc5a5566fb9bd7938c1f9f2405 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 kalaksi@users.noreply.github.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import re
from logster.logster_helper import MetricObject, LogsterParser
from logster.logster_helper import LogsterParsingException
class SlapdLogster(LogsterParser):
""" A logster parser for slapd, the openldap daemon
"""
| 50.148148 | 208 | 0.55613 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 kalaksi@users.noreply.github.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import re
from logster.logster_helper import MetricObject, LogsterParser
from logster.logster_helper import LogsterParsingException
class SlapdLogster(LogsterParser):
""" A logster parser for slapd, the openldap daemon
"""
def __init__(self, option_string=None):
# Modify monitored_tags according to your needs.
# The key is the tag code and the value is a dict that has:
# - name: arbitrary name for the metric
# - success: match count for successes (err=0)
# - error: match count for other error codes
# In other words, the script doesn't differentiate between error codes,
# which should be enough for most use-cases.
#
# Reference for tag codes (I wish I had a shorter URL...):
# https://access.redhat.com/documentation/en-US/Red_Hat_Directory_Server/8.0/html/Configuration_and_Command_Reference/Configuration_Command_File_Reference-Access_Log_and_Connection_Code_Reference.html
self.monitored_tags = {
97: {'name': 'bind', 'success': 0, 'error': 0}
}
# The regular expression for every line
self.line_re = re.compile('.* RESULT tag=(?P<tag>\w+) err=(?P<error>\w+) .*')
def parse_line(self, line):
try:
match = self.line_re.match(line)
# Only care about the lines that match
if match:
values = match.groupdict()
tag = int(values['tag'])
error_code = int(values['error'])
if tag in self.monitored_tags:
if error_code == 0:
self.monitored_tags[tag]['success'] += 1
else:
self.monitored_tags[tag]['error'] += 1
except Exception, e:
raise LogsterParsingException, "Error while parsing a line: %s" % e
def get_state(self, duration):
result = []
for tag, data in self.monitored_tags.iteritems():
result.append(MetricObject(data['name']+'_success', data['success'], 'Tag '+data['name']+': successes'))
result.append(MetricObject(data['name']+'_error', data['error'], 'Tag '+data['name']+': errors'))
return result
| 1,922 | 0 | 81 |
6a3a039cf8076b41b5f4a96777e26a62ab17cca2 | 240 | py | Python | tests/context.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 1 | 2020-04-09T22:55:05.000Z | 2020-04-09T22:55:05.000Z | tests/context.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 10 | 2018-12-08T02:48:47.000Z | 2021-10-02T20:18:10.000Z | tests/context.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 2 | 2018-12-08T05:46:54.000Z | 2021-05-24T23:25:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tests.context
~~~~~~~~~~~~~~~~~~~~~~
Add the ygoprodeck module to the tests context.
"""
import sys
from settings import settings
sys.path.append(settings.base_dir)
import ygoprodeck
| 12.631579 | 47 | 0.65 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tests.context
~~~~~~~~~~~~~~~~~~~~~~
Add the ygoprodeck module to the tests context.
"""
import sys
from settings import settings
sys.path.append(settings.base_dir)
import ygoprodeck
| 0 | 0 | 0 |
849a5efdcd94f84937a26a5069763e91262f63a1 | 370 | py | Python | lucia/city/tasks/research.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | 1 | 2021-11-21T22:24:46.000Z | 2021-11-21T22:24:46.000Z | lucia/city/tasks/research.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | null | null | null | lucia/city/tasks/research.py | LuxAI-Gamers/LucIA | ac0dd691780f4af6a5cfb778ae1882c256020b04 | [
"MIT"
] | null | null | null | from ...bh_trees import Task
| 19.473684 | 70 | 0.616216 | from ...bh_trees import Task
class Research(Task):
def __init__(self):
super(Research, self).__init__()
def run(self):
object = self._blackboard.get_value('object')
research = object.research()
if research:
actions = self._blackboard.append_values(actions=research)
return True
return False
| 262 | 0 | 77 |
acabde74e58123908bb6db7007a6aa06d198218d | 793 | py | Python | ctpbee/json/__init__.py | yutiansut/ctpbee | 02ceb3d4456a54b1b4f8066a2662c4b8fac1027f | [
"MIT"
] | null | null | null | ctpbee/json/__init__.py | yutiansut/ctpbee | 02ceb3d4456a54b1b4f8066a2662c4b8fac1027f | [
"MIT"
] | null | null | null | ctpbee/json/__init__.py | yutiansut/ctpbee | 02ceb3d4456a54b1b4f8066a2662c4b8fac1027f | [
"MIT"
] | 3 | 2019-11-21T03:38:14.000Z | 2022-02-14T08:09:11.000Z | from ctpbee.json.pollen import Pollen
loads = Pollen.loads
dumps = Pollen.dumps
if __name__ == '__main__':
from ctpbee.constant import *
t = SubscribeRequest(symbol='1', exchange=Status.ALLTRADED, datetime=datetime.now())
tick = {'float2': 53.12321,
'int': (123, "123", t, Status.ALLTRADED, 23432),
' ': None,
'b': b'ssq',
'str': "hello",
'enum': Status.CANCELLED,
'list_enum': [2, "2", Interval.MINUTE],
'time': datetime.now(),
'timef': "2019-2-12 19:30:2.12312",
"dcit": {"enum": Offset.CLOSE,
'int': 999.9}
}
res = dumps(tick)
print(f'dumps-->{type(res)} {res}')
pp = loads(res)
print(f'loads-->{type(pp)} {pp}')
| 29.37037 | 88 | 0.510719 | from ctpbee.json.pollen import Pollen
loads = Pollen.loads
dumps = Pollen.dumps
if __name__ == '__main__':
from ctpbee.constant import *
t = SubscribeRequest(symbol='1', exchange=Status.ALLTRADED, datetime=datetime.now())
tick = {'float2': 53.12321,
'int': (123, "123", t, Status.ALLTRADED, 23432),
' ': None,
'b': b'ssq',
'str': "hello",
'enum': Status.CANCELLED,
'list_enum': [2, "2", Interval.MINUTE],
'time': datetime.now(),
'timef': "2019-2-12 19:30:2.12312",
"dcit": {"enum": Offset.CLOSE,
'int': 999.9}
}
res = dumps(tick)
print(f'dumps-->{type(res)} {res}')
pp = loads(res)
print(f'loads-->{type(pp)} {pp}')
| 0 | 0 | 0 |
ed83595a224bf334d5261e08f0dd4def4f8f1b7b | 19,980 | py | Python | egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py | IcyW/SMIIP | c05fcdcd77837e254998a899c4ae50cf1bfbe497 | [
"Apache-2.0"
] | 61 | 2017-10-03T17:30:36.000Z | 2020-08-17T16:01:26.000Z | egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py | Acidburn0zzz/kaldi | 134cf7ee5c9604a431080db14cf6dc19f6524047 | [
"Apache-2.0"
] | 1 | 2018-12-12T16:04:59.000Z | 2019-01-25T17:15:00.000Z | egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py | Acidburn0zzz/kaldi | 134cf7ee5c9604a431080db14cf6dc19f6524047 | [
"Apache-2.0"
] | 10 | 2017-10-04T05:46:19.000Z | 2021-04-08T12:54:20.000Z | #!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import argparse
import logging
import sys
from collections import defaultdict
"""
This script reads and writes the 'ctm-edits' file that is
produced by get_ctm_edits.py.
It modifies the ctm-edits so that non-scored words
are not counted as errors: for instance, if there are things like
[COUGH] and [NOISE] in the transcript, deletions, insertions and
substitutions involving them are allowed, and we modify the reference
to correspond to the hypothesis.
If you supply the <lang> directory (the one that corresponds to
how you decoded the data) to this script, it assumes that the <lang>
directory contains phones/align_lexicon.int, and it uses this to work
out a reasonable guess of the non-scored phones, based on which have
a single-word pronunciation that maps to a silence phone.
It then uses the words.txt to work out the written form of those words.
Alternatively, you may specify a file containing the non-scored words one
per line, with the --non-scored-words option.
Non-scored words that were deleted (i.e. they were in the ref but not the
hyp) are simply removed from the ctm. For non-scored words that
were inserted or substituted, we change the reference word to match the
hyp word, but instead of marking the operation as 'cor' (correct), we
mark it as 'fix' (fixed), so that it will not be positively counted as a correct
word for purposes of finding the optimal segment boundaries.
e.g.
<file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit-type>
[note: the <channel> will always be 1].
AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor
AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor
AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor
AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor
AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor
AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor
AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor
AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - '
'%(funcName)s - %(levelname)s ] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(
description = "This program modifies the reference in the ctm-edits which "
"is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and "
"substitutions of non-scored words, and [if --allow-repetitions=true], "
"duplications of single words or pairs of scored words (to account for dysfluencies "
"that were not transcribed). Note: deletions and substitutions of non-scored words "
"after the reference is corrected, will be marked as operation 'fix' rather than "
"'cor' (correct) so that the downstream processing knows that this was not in "
"the original reference. Also by defaults tags non-scored words as such when "
"they are correct; see the --tag-non-scored option.")
parser.add_argument("--verbose", type = int, default = 1,
choices=[0,1,2,3],
help = "Verbose level, higher = more verbose output")
parser.add_argument("--allow-repetitions", type = str, default = 'true',
choices=['true','false'],
help = "If true, allow repetitions in the transcript of one or "
"two-word sequences: for instance if the ref says 'i' but "
"the hyp says 'i i', or the ref says 'but then' and the hyp says "
"'but then but then', fix the reference accordingly. Intervening "
"non-scored words are allowed between the repetitions. These "
"fixes will be marked as 'cor', not as 'fix', since there is "
"generally no way to tell which repetition was the 'real' one "
"(and since we're generally confident that such things were "
"actually uttered).")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
help="Filename of file containing a list of non-scored words, "
"one per line. See steps/cleanup/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
help = "Filename of input ctm-edits file. "
"Use /dev/stdin for standard input.")
parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>",
help = "Filename of output ctm-edits file. "
"Use /dev/stdout for standard output.")
args = parser.parse_args()
# The ctm-edits file format is as follows [note: file-id is really utterance-id
# in this context].
# <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit>
# e.g.:
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# ...
# This function processes a single line of ctm-edits input for fixing
# "non-scored" words. The input 'a' is the split line as an array of fields.
# It modifies the object 'a'. This function returns the modified array,
# and please note that it is destructive of its input 'a'.
# If it returnso the empty array then the line is to be deleted.
# This function processes the split lines of one utterance (as a
# list of lists of fields), to allow repetitions of words, so if the
# reference says 'i' but the hyp says 'i i', or the ref says
# 'you know' and the hyp says 'you know you know', we change the
# ref to match.
# It returns the modified list-of-lists [but note that the input
# is actually modified].
# note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
# Returns the same format of data after processing.
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
num_lines = 0
num_correct_lines = 0
# ref_change_stats will be a map from a string like
# 'foo -> bar' to an integer count; it keeps track of how much we changed
# the reference.
ref_change_stats = defaultdict(int)
# repetition_stats will be a map from strings like
# 'a', or 'a b' (the repeated strings), to an integer count; like
# ref_change_stats, it keeps track of how many changes we made
# in allowing repetitions.
repetition_stats = defaultdict(int)
ProcessData()
PrintNonScoredStats()
PrintRepetitionStats()
| 45.306122 | 97 | 0.646446 | #!/usr/bin/env python
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import argparse
import logging
import sys
from collections import defaultdict
"""
This script reads and writes the 'ctm-edits' file that is
produced by get_ctm_edits.py.
It modifies the ctm-edits so that non-scored words
are not counted as errors: for instance, if there are things like
[COUGH] and [NOISE] in the transcript, deletions, insertions and
substitutions involving them are allowed, and we modify the reference
to correspond to the hypothesis.
If you supply the <lang> directory (the one that corresponds to
how you decoded the data) to this script, it assumes that the <lang>
directory contains phones/align_lexicon.int, and it uses this to work
out a reasonable guess of the non-scored phones, based on which have
a single-word pronunciation that maps to a silence phone.
It then uses the words.txt to work out the written form of those words.
Alternatively, you may specify a file containing the non-scored words one
per line, with the --non-scored-words option.
Non-scored words that were deleted (i.e. they were in the ref but not the
hyp) are simply removed from the ctm. For non-scored words that
were inserted or substituted, we change the reference word to match the
hyp word, but instead of marking the operation as 'cor' (correct), we
mark it as 'fix' (fixed), so that it will not be positively counted as a correct
word for purposes of finding the optimal segment boundaries.
e.g.
<file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit-type>
[note: the <channel> will always be 1].
AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor
AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor
AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor
AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor
AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor
AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor
AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor
AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor
"""
# Logging setup: INFO-level messages go to a stream handler (stderr by default).
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - '
                              '%(funcName)s - %(levelname)s ] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Command-line interface; parsed once at module level into the global 'args'.
parser = argparse.ArgumentParser(
    description = "This program modifies the reference in the ctm-edits which "
    "is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and "
    "substitutions of non-scored words, and [if --allow-repetitions=true], "
    "duplications of single words or pairs of scored words (to account for dysfluencies "
    "that were not transcribed). Note: deletions and substitutions of non-scored words "
    "after the reference is corrected, will be marked as operation 'fix' rather than "
    "'cor' (correct) so that the downstream processing knows that this was not in "
    "the original reference. Also by defaults tags non-scored words as such when "
    "they are correct; see the --tag-non-scored option.")
parser.add_argument("--verbose", type = int, default = 1,
                    choices=[0,1,2,3],
                    help = "Verbose level, higher = more verbose output")
parser.add_argument("--allow-repetitions", type = str, default = 'true',
                    choices=['true','false'],
                    help = "If true, allow repetitions in the transcript of one or "
                    "two-word sequences: for instance if the ref says 'i' but "
                    "the hyp says 'i i', or the ref says 'but then' and the hyp says "
                    "'but then but then', fix the reference accordingly. Intervening "
                    "non-scored words are allowed between the repetitions. These "
                    "fixes will be marked as 'cor', not as 'fix', since there is "
                    "generally no way to tell which repetition was the 'real' one "
                    "(and since we're generally confident that such things were "
                    "actually uttered).")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
                    help="Filename of file containing a list of non-scored words, "
                    "one per line. See steps/cleanup/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
                    help = "Filename of input ctm-edits file. "
                    "Use /dev/stdin for standard input.")
parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>",
                    help = "Filename of output ctm-edits file. "
                    "Use /dev/stdout for standard output.")
args = parser.parse_args()
def ReadNonScoredWords(non_scored_words_file):
    """Read the non-scored words (one word per line) into the module-level
    set 'non_scored_words'.

    Exits the program with an error message if the file cannot be opened or
    if any line does not contain exactly one word.
    """
    global non_scored_words
    try:
        f = open(non_scored_words_file)
    except OSError:
        # BUGFIX: narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit.
        sys.exit("modify_ctm_edits.py: error opening file: "
                 "--non-scored-words=" + non_scored_words_file)
    for line in f.readlines():
        a = line.split()
        # FIX: reuse the already-split line instead of splitting a second time.
        if len(a) != 1:
            sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
                     "file {0}: {1}".format(non_scored_words_file, line))
        non_scored_words.add(a[0])
    f.close()
# The ctm-edits file format is as follows [note: file-id is really utterance-id
# in this context].
# <file-id> <channel> <start-time> <duration> <conf> <hyp-word> <ref-word> <edit>
# e.g.:
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# ...
# This function processes a single line of ctm-edits input for fixing
# "non-scored" words. The input 'a' is the split line as an array of fields.
# It modifies the object 'a'. This function returns the modified array,
# and please note that it is destructive of its input 'a'.
# If it returns the empty array then the line is to be deleted.
def ProcessLineForNonScoredWords(a):
    """Fix one split ctm-edits line 'a' with respect to non-scored words.

    'a' is a list of 8 fields:
      [file, channel, start, duration, hyp_word, confidence, ref_word, edit].
    Inserted non-scored words are copied into the reference (edit -> 'fix');
    deleted non-scored words cause the line to be dropped (returns []);
    substitution of one non-scored word by another also becomes 'fix'.
    Note: this function is destructive of its input 'a' and returns it
    (or [] if the line is to be deleted).  Updates the global counters
    num_lines / num_correct_lines and ref_change_stats.
    """
    global num_lines, num_correct_lines, ref_change_stats
    try:
        assert len(a) == 8
        num_lines += 1
        # we could do:
        # [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a
        duration = a[3]
        hyp_word = a[4]
        ref_word = a[6]
        edit_type = a[7]
        if edit_type == 'ins':
            assert ref_word == '<eps>'
            if hyp_word in non_scored_words:
                # insert this non-scored word into the reference.
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                ref_word = hyp_word
                edit_type = 'fix'
        elif edit_type == 'del':
            assert hyp_word == '<eps>' and float(duration) == 0.0
            if ref_word in non_scored_words:
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                return []
        elif edit_type == 'sub':
            assert hyp_word != '<eps>'
            if hyp_word in non_scored_words and ref_word in non_scored_words:
                # we also allow replacing one non-scored word with another.
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                ref_word = hyp_word
                edit_type = 'fix'
        else:
            assert edit_type == 'cor' or edit_type == 'sil'
            num_correct_lines += 1
        a[4] = hyp_word
        a[6] = ref_word
        a[7] = edit_type
        return a
    except Exception:
        logger.error("bad line in ctm-edits input: "
                     "{0}".format(a))
        # BUGFIX: re-raise the original exception instead of 'raise
        # RuntimeError', which discarded the real error and its traceback.
        raise
# This function processes the split lines of one utterance (as a
# list of lists of fields), to allow repetitions of words, so if the
# reference says 'i' but the hyp says 'i i', or the ref says
# 'you know' and the hyp says 'you know you know', we change the
# ref to match.
# It returns the modified list-of-lists [but note that the input
# is actually modified].
def ProcessUtteranceForRepetitions(split_lines_of_utt):
    """Allow repetitions of one or two scored words within one utterance.

    If the ref says 'i' but the hyp says 'i i' (or the ref says 'you know'
    and the hyp says 'you know you know'), change the ref of the inserted
    region to match the hyp and mark those edits 'cor'.  Intervening
    non-scored words are allowed between the repetitions.  The input (a list
    of split ctm-edits lines) is modified in place and also returned.
    """
    global non_scored_words, repetition_stats
    # Indexes (into split_lines_of_utt) of the lines that take part in the
    # repetition logic.  Lines whose hyp and ref are both '<eps>' or
    # non-scored words are invisible to this algorithm.
    selected_line_indexes = []
    # Edit type of each selected line ('cor', 'ins', ...) as it was before
    # this call.  As a special case a 'sub' whose reference word is non-scored
    # is recorded as 'ins', because it behaves like an insertion here.
    # Entries are cleared to None once a region has been used in a fix, so a
    # region cannot take part in two overlapping fixes.
    selected_edits = []
    # Hyp word of each selected line.
    selected_hyp_words = []
    for i in range(len(split_lines_of_utt)):
        split_line = split_lines_of_utt[i]
        hyp_word = split_line[4]
        ref_word = split_line[6]
        # Keep the line unless both hyp and ref are epsilon or non-scored.
        # [note: epsilon in hyp position for non-empty segments indicates
        # optional-silence, which we also want to be 'invisible' here.]
        keep_this_line = True
        if (hyp_word == '<eps>' or hyp_word in non_scored_words) and \
           (ref_word == '<eps>' or ref_word in non_scored_words):
            keep_this_line = False
        if keep_this_line:
            selected_line_indexes.append(i)
            edit_type = split_line[7]
            if edit_type == 'sub' and ref_word in non_scored_words:
                assert not hyp_word in non_scored_words
                # Substitution of, say, '[COUGH]' by 'hello' behaves like an
                # insertion of 'hello', since we're willing to remove the
                # '[COUGH]' from the transcript.
                edit_type = 'ins'
            selected_edits.append(edit_type)
            selected_hyp_words.append(hyp_word)
    # Indexes into selected_line_indexes where we plan to fix the ref to
    # match the hyp.
    indexes_to_fix = []
    # Scan for, and fix, two-word insertions that follow or precede the
    # corresponding correct words, i.e. hyp word windows of the form
    # [ 'a', 'b', 'a', 'b' ].
    for i in range(0, len(selected_line_indexes) - 3):
        this_hyp_words = selected_hyp_words[i:i+4]
        if this_hyp_words[0] == this_hyp_words[2] and \
           this_hyp_words[1] == this_hyp_words[3] and \
           this_hyp_words[0] != this_hyp_words[1]:
            this_edits = selected_edits[i:i+4]
            if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \
               this_edits == [ 'ins', 'ins', 'cor', 'cor' ]:
                if this_edits[0] == 'cor':
                    indexes_to_fix += [ i+2, i+3 ]
                else:
                    indexes_to_fix += [ i, i+1 ]
                # Prevent this region of the text being used in any further
                # edits.  [FIX: this assignment was previously duplicated.]
                selected_edits[i:i+4] = [ None, None, None, None ]
                word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1]
                # e.g. word_pair = 'hi there'; add 2 because these stats
                # count words, not pairs.
                repetition_stats[word_pair] += 2
    # Scan for, and fix, one-word insertions that follow or precede the
    # corresponding correct word, i.e. hyp word windows of the form
    # [ 'a', 'a' ].
    for i in range(0, len(selected_line_indexes) - 1):
        this_hyp_words = selected_hyp_words[i:i+2]
        if this_hyp_words[0] == this_hyp_words[1]:
            this_edits = selected_edits[i:i+2]
            if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]:
                if this_edits[0] == 'cor':
                    indexes_to_fix.append(i+1)
                else:
                    indexes_to_fix.append(i)
                repetition_stats[this_hyp_words[0]] += 1
                # Prevent this region of the text being used in any further
                # edits.
                selected_edits[i:i+2] = [ None, None ]
    for i in indexes_to_fix:
        j = selected_line_indexes[i]
        split_line = split_lines_of_utt[j]
        ref_word = split_line[6]
        hyp_word = split_line[4]
        assert ref_word == '<eps>' or ref_word in non_scored_words
        # Replace the reference with the decoded word, which will be a
        # repetition; mark it 'cor' since we are confident it was uttered.
        split_line[6] = hyp_word
        split_line[7] = 'cor'
    return split_lines_of_utt
# note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
# Returns the same format of data after processing.
def ProcessUtterance(split_lines_of_utt):
    """Process all split ctm-edits lines of one utterance.

    First applies the non-scored-word fixes line by line (dropping lines
    that come back empty), then, if enabled, the repetition fixes.
    Returns the processed list of split lines.
    """
    fixed_lines = [ProcessLineForNonScoredWords(line)
                   for line in split_lines_of_utt]
    kept_lines = [line for line in fixed_lines if line != []]
    if args.allow_repetitions == 'true':
        kept_lines = ProcessUtteranceForRepetitions(kept_lines)
    return kept_lines
def ProcessData():
    """Main processing loop: read the ctm-edits input, group the lines per
    utterance, run ProcessUtterance() on each group and write the result.

    Exits the program with an error message on I/O problems or malformed
    input.
    """
    try:
        f_in = open(args.ctm_edits_in)
    except OSError:
        # BUGFIX: narrowed from a bare 'except:'.
        sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
                 "file {0}".format(args.ctm_edits_in))
    try:
        f_out = open(args.ctm_edits_out, 'w')
    except OSError:
        sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
                 "file {0}".format(args.ctm_edits_out))
    # Most of what we're doing in the lines below is splitting the input lines
    # and grouping them per utterance, before giving them to ProcessUtterance()
    # and then printing the modified lines.
    first_line = f_in.readline()
    if first_line == '':
        sys.exit("modify_ctm_edits.py: empty input")
    split_pending_line = first_line.split()
    if len(split_pending_line) == 0:
        sys.exit("modify_ctm_edits.py: bad input line " + first_line)
    cur_utterance = split_pending_line[0]
    split_lines_of_cur_utterance = []
    while True:
        # An empty pending line means end-of-file; a new utterance-id means
        # the current group is complete.  Either way, flush the group.
        if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
            split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance)
            for split_line in split_lines_of_cur_utterance:
                print(' '.join(split_line), file = f_out)
            split_lines_of_cur_utterance = []
            if len(split_pending_line) == 0:
                break
            else:
                cur_utterance = split_pending_line[0]
        split_lines_of_cur_utterance.append(split_pending_line)
        next_line = f_in.readline()
        split_pending_line = next_line.split()
        if len(split_pending_line) == 0:
            if next_line != '':
                sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
    # FIX: close the input file too (it was previously leaked).
    f_in.close()
    try:
        f_out.close()
    except OSError:
        sys.exit("modify_ctm_edits.py: error closing ctm-edits output "
                 "(broken pipe or full disk?)")
def PrintNonScoredStats():
    """Print to stderr summary statistics about the reference changes made
    for non-scored words (reads the module-level counters)."""
    if args.verbose < 1:
        return
    if num_lines == 0:
        print("modify_ctm_edits.py: processed no input.", file = sys.stderr)
        # BUGFIX: return early; the percentage computations below would
        # otherwise divide by zero.
        return
    num_lines_modified = sum(ref_change_stats.values())
    num_incorrect_lines = num_lines - num_correct_lines
    percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
    percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
    # BUGFIX: guard against a fully-correct ctm (zero incorrect lines).
    if num_incorrect_lines > 0:
        percent_of_incorrect_modified = '%.2f' % (
            num_lines_modified * 100.0 / num_incorrect_lines)
    else:
        percent_of_incorrect_modified = 'undefined'
    print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
          "of which {2} were changed fixing the reference for non-scored words "
          "({3}% of lines, or {4}% of incorrect lines)".format(
              num_lines, percent_lines_incorrect, num_lines_modified,
              percent_modified, percent_of_incorrect_modified),
          file = sys.stderr)
    keys = sorted(ref_change_stats.keys(), reverse=True,
                  key = lambda x: ref_change_stats[x])
    num_keys_to_print = 40 if args.verbose >= 2 else 10
    # BUGFIX: the trailing-'...' conditional expression is now parenthesized;
    # previously it bound the whole concatenated message, so nothing at all
    # was printed whenever every key fitted in the listing.
    print("modify_ctm_edits.py: most common edits (as percentages "
          "of all such edits) are:\n" +
          '\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified)
                      for k in keys[0:num_keys_to_print]]) +
          ('\n...' if num_keys_to_print < len(keys) else ''),
          file = sys.stderr)
def PrintRepetitionStats():
    """Print to stderr summary statistics about the reference changes made
    when allowing repetitions (reads the module-level counters)."""
    if args.verbose < 1 or sum(repetition_stats.values()) == 0:
        return
    num_lines_modified = sum(repetition_stats.values())
    num_incorrect_lines = num_lines - num_correct_lines
    percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
    percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
    # BUGFIX: guard the division (defensive; mirrors PrintNonScoredStats).
    if num_incorrect_lines > 0:
        percent_of_incorrect_modified = '%.2f' % (
            num_lines_modified * 100.0 / num_incorrect_lines)
    else:
        percent_of_incorrect_modified = 'undefined'
    print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
          "of which {2} were changed fixing the reference for repetitions ({3}% of "
          "lines, or {4}% of incorrect lines)".format(
              num_lines, percent_lines_incorrect, num_lines_modified,
              percent_modified, percent_of_incorrect_modified),
          file = sys.stderr)
    keys = sorted(repetition_stats.keys(), reverse=True,
                  key = lambda x: repetition_stats[x])
    num_keys_to_print = 40 if args.verbose >= 2 else 10
    # BUGFIX: parenthesize the trailing-'...' conditional expression;
    # previously it applied to the whole message, which therefore printed as
    # '' whenever every key fitted in the listing.
    print("modify_ctm_edits.py: most common repetitions inserted into reference (as percentages "
          "of all words fixed in this way) are:\n" +
          '\n'.join([ '%s [%.2f%%]' % (k, repetition_stats[k]*100.0/num_lines_modified)
                      for k in keys[0:num_keys_to_print]]) +
          ('\n...' if num_keys_to_print < len(keys) else ''),
          file = sys.stderr)
# --- Main program (module level) ---
# Global set of words that are not counted as scoring errors (e.g. [NOISE]).
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
# Global counters updated by ProcessLineForNonScoredWords().
num_lines = 0
num_correct_lines = 0
# ref_change_stats will be a map from a string like
# 'foo -> bar' to an integer count; it keeps track of how much we changed
# the reference.
ref_change_stats = defaultdict(int)
# repetition_stats will be a map from strings like
# 'a', or 'a b' (the repeated strings), to an integer count; like
# ref_change_stats, it keeps track of how many changes we made
# in allowing repetitions.
repetition_stats = defaultdict(int)
ProcessData()
PrintNonScoredStats()
PrintRepetitionStats()
| 12,943 | 0 | 158 |
c682a2796b145c76b83189c8765fa8816a7e59a6 | 4,618 | py | Python | src/spyd/game/room/room_map_mode_state.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 4 | 2015-05-05T16:44:42.000Z | 2020-10-27T09:45:23.000Z | src/spyd/game/room/room_map_mode_state.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | null | null | null | src/spyd/game/room/room_map_mode_state.py | DanSeraf/spyd | af893b7f9c67785613b25754eb2cf150523a9fe4 | [
"Zlib"
] | 2 | 2016-12-13T22:21:08.000Z | 2020-03-14T16:44:20.000Z | from twisted.internet import defer
from cube2common.constants import INTERMISSIONLEN
from spyd.game.client.exceptions import GenericError
from spyd.game.gamemode import gamemodes
from spyd.game.map.map_rotation import MapRotation
from spyd.protocol import swh
| 35.523077 | 128 | 0.66544 | from twisted.internet import defer
from cube2common.constants import INTERMISSIONLEN
from spyd.game.client.exceptions import GenericError
from spyd.game.gamemode import gamemodes
from spyd.game.map.map_rotation import MapRotation
from spyd.protocol import swh
class RoomMapModeState(object):
    """Holds a room's current map and game mode, and coordinates map/mode
    rotation plus the asynchronous (Twisted deferred) loading of map meta
    data."""
    def __init__(self, room, map_rotation=None, map_meta_data_accessor=None, game_clock=None, ready_up_controller_factory=None):
        self.room = room
        # Current map name; empty string until the first map change.
        self._map_name = ""
        # Current gamemode instance; None until the first map change.
        self._gamemode = None
        self._map_meta_data_accessor = map_meta_data_accessor
        self._map_rotation = map_rotation or MapRotation.from_test_data()
        self._game_clock = game_clock
        self._ready_up_controller_factory = ready_up_controller_factory
        self._initialized = False
        # True while a map/mode change is in flight.
        self._initializing = False
        # Deferreds of callers waiting for an in-flight initialization.
        self._initializing_deferreds = []
    @property
    def initialized(self):
        """Whether a map/mode has been loaded at least once."""
        return self._initialized
    @property
    def map_name(self):
        """Current map name, or (before initialization) a peek at the next
        map in the rotation."""
        if self.gamemode is None:
            map_name, _ = self._map_rotation.next_map_mode(peek=True)
            return map_name
        return self._map_name
    @property
    def gamemode(self):
        """Current gamemode instance, or None before initialization."""
        return self._gamemode
    @property
    def mode_num(self):
        """Client-protocol number of the current (or upcoming) mode."""
        if self.gamemode is None:
            _, mode_name = self._map_rotation.next_map_mode(peek=True)
            return gamemodes[mode_name].clientmodenum
        return self.gamemode.clientmodenum
    @property
    def mode_name(self):
        """Client-facing name of the current (or upcoming) mode."""
        if self.gamemode is None:
            _, mode_name = self._map_rotation.next_map_mode(peek=True)
            return gamemodes[mode_name].clientmodename
        return self.gamemode.clientmodename
    def get_map_names(self):
        """Return the known map names from the meta data accessor."""
        return self._map_meta_data_accessor.get_map_names()
    @property
    def rotate_on_first_player(self):
        """Whether the rotation advances when the first player joins."""
        return self._map_rotation.rotate_on_first_player
    @defer.inlineCallbacks
    def await_map_mode_initialized(self, player_count):
        """Ensure the map/mode is initialized, rotating if needed.

        If an initialization is already in flight, wait on it instead of
        starting another; waiters are fired with the map meta data.
        NOTE(review): when already initialized and no rotation is needed this
        yields nothing and implicitly returns None — confirm callers accept
        that.
        """
        if self._initializing:
            deferred = defer.Deferred()
            self._initializing_deferreds.append(deferred)
            yield deferred
        else:
            if not self.initialized or (self.rotate_on_first_player and player_count == 0):
                self._initializing = True
                map_meta_data = yield self.rotate_map_mode()
                self._initializing = False
                # Fire everyone who queued up while we were rotating.
                while len(self._initializing_deferreds):
                    deferred = self._initializing_deferreds.pop()
                    deferred.callback(map_meta_data)
                defer.returnValue(map_meta_data)
    def rotate_map_mode(self):
        """Advance the rotation and switch to its next map/mode."""
        map_name, mode_name = self._map_rotation.next_map_mode(peek=False)
        return self.change_map_mode(map_name, mode_name)
    @defer.inlineCallbacks
    def change_map_mode(self, map_name, mode_name):
        """Switch the room to (map_name, mode_name).

        Raises GenericError for an unknown mode; returns (via deferred) the
        map meta data, which may be an empty dict when none is available.
        """
        if mode_name not in gamemodes:
            raise GenericError("Unsupported game mode.")
        self._map_name = map_name
        map_meta_data = yield self._map_meta_data_accessor.get_map_data(self._map_name)
        map_meta_data = map_meta_data or {}
        self._gamemode = gamemodes[mode_name](room=self.room, map_meta_data=map_meta_data)
        self._initialized = True
        self._new_map_mode_initialize()
        defer.returnValue(map_meta_data)
    def _new_map_mode_initialize(self):
        """Broadcast the map change and reset clock, players and clients for
        the newly selected map/mode."""
        with self.room.broadcastbuffer(1, True) as cds:
            swh.put_mapchange(cds, self.map_name, self.gamemode.clientmodenum, hasitems=False)
            if self.gamemode.timed:
                self._game_clock.start(self.gamemode.timeout, INTERMISSIONLEN)
            else:
                self._game_clock.start_untimed()
            for player in self.room.players:
                self.gamemode.initialize_player(cds, player)
        self.room.ready_up_controller = self._ready_up_controller_factory.make_ready_up_controller(self.room)
        # Reset per-player state and respawn everyone under the new mode.
        for player in self.room.players:
            player.state.map_change_reset()
            player.state.respawn()
            self.gamemode.spawn_loadout(player)
        # Per-client messages: time left, pause state and spawn states.
        for client in self.room.clients:
            with client.sendbuffer(1, True) as cds:
                if self.gamemode.timed and self.room.timeleft is not None:
                    swh.put_timeup(cds, self.room.timeleft)
                if self.room.is_paused:
                    swh.put_pausegame(cds, 1)
                for player in client.player_iter():
                    if not player.state.is_spectator:
                        swh.put_spawnstate(cds, player)
        self.room._initialize_demo_recording()
| 3,862 | 471 | 23 |
527884993045334d555f2ba30503a0c47602c556 | 339 | py | Python | sdp/xiaoaiskill/admin.py | liutianfang/skill-dev-platform | 37e76232869ae6eaf23d88398d2e52e7e0a5a80f | [
"Apache-2.0"
] | 3 | 2018-08-31T02:43:39.000Z | 2018-12-23T23:59:23.000Z | sdp/xiaoaiskill/admin.py | liutianfang/skill-dev-platform | 37e76232869ae6eaf23d88398d2e52e7e0a5a80f | [
"Apache-2.0"
] | 5 | 2020-06-05T18:50:52.000Z | 2022-03-11T23:28:58.000Z | sdp/xiaoaiskill/admin.py | liutianfang/skill-dev-platform | 37e76232869ae6eaf23d88398d2e52e7e0a5a80f | [
"Apache-2.0"
] | 1 | 2018-09-13T08:51:41.000Z | 2018-09-13T08:51:41.000Z | from django.contrib import admin
from django.contrib import admin
from xiaoaiskill.models import *
# Register the models with the Django admin; MI_Log uses a customized
# changelist (MI_Whereis_LogAdmin), the device models use the defaults.
admin.site.register(MI_Log, MI_Whereis_LogAdmin)
admin.site.register(MI_Device)
admin.site.register(MI_DeviceOwner) | 26.076923 | 74 | 0.79056 | from django.contrib import admin
from django.contrib import admin
from xiaoaiskill.models import *
class MI_Whereis_LogAdmin(admin.ModelAdmin):
    """Admin changelist configuration for MI_Log records."""
    # Columns shown in the Django admin list view.
    list_display = ("pk", "create_time", "request", "response", "ip", "exmsg")
# Register the models with the Django admin; MI_Log uses a customized
# changelist (MI_Whereis_LogAdmin), the device models use the defaults.
admin.site.register(MI_Log, MI_Whereis_LogAdmin)
admin.site.register(MI_Device)
admin.site.register(MI_DeviceOwner) | 0 | 98 | 23 |
a909608250f6e1df44425da131396682cb259b93 | 1,050 | py | Python | peering/migrations/0083_move_email_to_messaging.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 127 | 2017-10-12T00:27:45.000Z | 2020-08-07T11:13:55.000Z | peering/migrations/0083_move_email_to_messaging.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 247 | 2017-12-26T12:55:34.000Z | 2020-08-08T11:57:35.000Z | peering/migrations/0083_move_email_to_messaging.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | [
"Apache-2.0"
] | 63 | 2017-10-13T06:46:05.000Z | 2020-08-08T00:41:57.000Z | # Generated by Django 3.2.11 on 2022-01-07 22:05
from django.db import migrations
| 37.5 | 86 | 0.728571 | # Generated by Django 3.2.11 on 2022-01-07 22:05
from django.db import migrations
class Migration(migrations.Migration):
    """Move the Email model's table to the messaging app.

    The table rename happens only at the database level while the model is
    deleted from this app's migration state, so the rows themselves are kept.
    """
    dependencies = [("peering", "0082_move_autonomoussystem_contacts")]
    database_operations = [migrations.AlterModelTable("Email", "messaging_email")]
    state_operations = [migrations.DeleteModel("Email")]
    def cleanup_object_changes(apps, schema_editor):
        """Delete ObjectChange rows pointing at the old Email content type."""
        ContentType = apps.get_model("contenttypes", "ContentType")
        Email = apps.get_model("peering.Email")
        EmailType = ContentType.objects.get_for_model(Email)
        ObjectChange = apps.get_model("utils.ObjectChange")
        # _raw_delete issues a single DELETE without loading rows or firing
        # signals; NOTE(review): confirm no cascade behaviour is relied upon.
        ObjectChange.objects.using(schema_editor.connection.alias).filter(
            changed_object_type=EmailType
        )._raw_delete(schema_editor.connection.alias)
    operations = [
        migrations.RunPython(cleanup_object_changes, migrations.RunPython.noop),
        migrations.SeparateDatabaseAndState(
            database_operations=database_operations, state_operations=state_operations
        ),
    ]
| 436 | 507 | 23 |
5cabbd73e2925a6b52abc03307e5edfb7b079d87 | 822 | py | Python | ska-tmc/ska-tmc-cspmasterleafnode-mid/src/ska_tmc_cspmasterleafnode_mid/device_data.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 3 | 2019-01-10T11:49:36.000Z | 2019-07-19T03:32:52.000Z | ska-tmc/ska-tmc-cspmasterleafnode-mid/src/ska_tmc_cspmasterleafnode_mid/device_data.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 19 | 2019-01-07T14:50:26.000Z | 2019-10-02T13:25:23.000Z | ska-tmc/ska-tmc-cspmasterleafnode-mid/src/ska_tmc_cspmasterleafnode_mid/device_data.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 1 | 2018-12-21T13:39:23.000Z | 2018-12-21T13:39:23.000Z | class DeviceData:
"""
This class represents the CSP master as functional device. It mainly comprise the data common
across various functions of a CSP master.
"""
__instance = None
def __init__(self):
"""Private constructor of the class"""
if DeviceData.__instance != None:
raise Exception("This is singletone class")
else:
DeviceData.__instance = self
self.cbf_health_updator = None
self.pss_health_updator = None
self.pst_health_updator = None
self._csp_cbf_health_state_log = ""
self._csp_pss_health_state_log = ""
self._csp_pst_health_state_log = ""
@staticmethod
| 30.444444 | 97 | 0.641119 | class DeviceData:
"""
This class represents the CSP master as functional device. It mainly comprise the data common
across various functions of a CSP master.
"""
__instance = None
def __init__(self):
"""Private constructor of the class"""
if DeviceData.__instance != None:
raise Exception("This is singletone class")
else:
DeviceData.__instance = self
self.cbf_health_updator = None
self.pss_health_updator = None
self.pst_health_updator = None
self._csp_cbf_health_state_log = ""
self._csp_pss_health_state_log = ""
self._csp_pst_health_state_log = ""
@staticmethod
def get_instance():
if DeviceData.__instance == None:
DeviceData()
return DeviceData.__instance
| 102 | 0 | 26 |
2523a0487bc75245a0f34d644b00cf44e670fc0e | 990 | py | Python | cgcrepair/core/corpus/cwe_parser.py | epicosy/cgc-repair | f347a29ef482019563402684e585080836d3d093 | [
"MIT"
] | null | null | null | cgcrepair/core/corpus/cwe_parser.py | epicosy/cgc-repair | f347a29ef482019563402684e585080836d3d093 | [
"MIT"
] | null | null | null | cgcrepair/core/corpus/cwe_parser.py | epicosy/cgc-repair | f347a29ef482019563402684e585080836d3d093 | [
"MIT"
] | null | null | null | from typing import List, Union
from cgcrepair.utils.parse.cwe import cwe_from_info, main_cwe, get_name, top_parent
| 30 | 90 | 0.6 | from typing import List, Union
from cgcrepair.utils.parse.cwe import cwe_from_info, main_cwe, get_name, top_parent
class CWEParser:
    """Extracts and normalises CWE identifiers from a vulnerability
    description.

    ``level`` controls how far helper queries walk the CWE hierarchy; it is
    forwarded as the ``count`` argument of ``main_cwe`` and ``top_parent``.
    """
    def __init__(self, description: str, level: int = 3):
        self.description = description
        # Cached main CWE; currently unset until computed by callers.
        self.main = None
        self.level = level
    def cwe_ids(self, number: bool = True) -> List[Union[int, str]]:
        """Return the CWE ids found in the description.

        With ``number=True`` only the numeric part is returned (e.g. 787),
        otherwise the raw 'CWE-<n>' strings.
        """
        if number:
            return [int(cwe.split('-')[1]) for cwe in cwe_from_info(self.description)]
        else:
            return [cwe for cwe in cwe_from_info(self.description)]
    def cwe_type(self):
        """Return a 'CWE-<id>: <name>' string for the main CWE."""
        ids = self.cwe_ids()
        # BUGFIX: use the configured self.level instead of a hard-coded 3;
        # previously the 'level' constructor argument was silently ignored.
        main = main_cwe(ids, count=self.level)
        return f"CWE-{main}: {get_name(main)}"
    def get_cwes(self, parent: bool = False, name: bool = False) -> List[Union[str, int]]:
        """Return CWE ids, optionally mapped to top parents and/or names."""
        ids = self.cwe_ids()
        if parent:
            # BUGFIX: forward self.level rather than the hard-coded default.
            ids = [top_parent(_id, None, count=self.level) for _id in ids]
        if name:
            ids = [f"CWE-{_id} {get_name(_id)}" for _id in ids]
        return ids
| 749 | -5 | 130 |
a3de2e002a5c726b74b23e2eb3b1f176feb6e207 | 1,366 | py | Python | arrow/middleware/auth/http.py | effordsbeard/arrowstack | 33f2eff3be07cf65e38610f0701743e775c1bbc6 | [
"MIT"
] | 1 | 2018-10-12T11:43:07.000Z | 2018-10-12T11:43:07.000Z | arrow/middleware/auth/http.py | effordsbeard/arrowstack | 33f2eff3be07cf65e38610f0701743e775c1bbc6 | [
"MIT"
] | null | null | null | arrow/middleware/auth/http.py | effordsbeard/arrowstack | 33f2eff3be07cf65e38610f0701743e775c1bbc6 | [
"MIT"
] | null | null | null | import base64
| 30.355556 | 96 | 0.571742 | import base64
class HttpAuth(object):
    """Middleware enforcing HTTP Basic authentication.

    Credentials may come from a single username/password pair, a dict of
    users, and/or a custom ``auth_func(username, password)`` callback.
    """
    def __init__(self, username=None, password=None, users=None, auth_func=None):
        if not users:
            users = {}
        # FIX: only record the single-credential pair when one was actually
        # given; previously a spurious {None: None} entry was always added.
        if username is not None:
            users[username] = password
        self.users = users
        self.auth_func = auth_func
    def __call__(self, req, res):
        """Check the request's Authorization header; on failure send a
        401 challenge, 403 (wrong scheme) or 400 (malformed header)."""
        auth_header = req.header('Authorization')
        if not auth_header:
            res.status(401)
            res.header('WWW-Authenticate', 'Basic realm="Provide credentials", charset="UTF-8"')
            return res.send()
        # FIX: explicit length check instead of a bare 'except:'.
        parts = auth_header.split(' ')
        if len(parts) != 2:
            return res.abort(400)
        authtype, token = parts
        if authtype != 'Basic':
            return res.abort(403)
        try:
            # BUGFIX: split only on the first ':' — RFC 7617 allows the
            # password itself to contain colons.
            username, password = base64.b64decode(token).decode("utf-8").split(':', 1)
        except ValueError:
            # Covers bad base64 (binascii.Error), bad UTF-8
            # (UnicodeDecodeError) and a missing ':' — all ValueError
            # subclasses or instances.
            return res.abort(400)
        if not self.check_user(username, password):
            res.status(401)
            res.header('WWW-Authenticate', 'Basic realm="Provide credentials", charset="UTF-8"')
            return res.send()
    def check_user(self, username, password):
        """Return True when the credentials match a configured user or are
        accepted by the auth_func callback."""
        for _username, _password in self.users.items():
            if username == _username and password == _password:
                return True
        if self.auth_func:
            if self.auth_func(username, password):
                return True
        return False
| 1,245 | 2 | 104 |
cb24bca0d283586de51576f9798c5ace2f535ce7 | 1,157 | py | Python | tests/test_transforms.py | kmodin/quflow | c12bf42929c349e059d85a8d0ff5830b838e8c91 | [
"MIT"
] | null | null | null | tests/test_transforms.py | kmodin/quflow | c12bf42929c349e059d85a8d0ff5830b838e8c91 | [
"MIT"
] | null | null | null | tests/test_transforms.py | kmodin/quflow | c12bf42929c349e059d85a8d0ff5830b838e8c91 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
import quflow as qf
@pytest.mark.parametrize("f", [get_fun(), get_fun(N=128), get_fun(N=256)[:128, :255]])
@pytest.mark.parametrize("omega_real", [get_random_omega_real(), get_random_omega_real(128)])
@pytest.mark.parametrize("omega_complex", [get_random_omega_complex(), get_random_omega_complex(128)])
| 28.925 | 102 | 0.743302 | import numpy as np
import pytest
import quflow as qf
def get_fun(N=501):
    """Smooth test function sampled on an N-point spherical grid."""
    theta, phi = qf.sphgrid(N)
    return np.sin(phi) ** 2 * np.sin(theta / 2)
def get_random_omega_real(N=17):
    """Random real spherical-harmonic coefficient vector of length N**2."""
    size = N * N
    return np.random.randn(size)
def get_random_omega_complex(N=17):
    """Random complex coefficient vector derived from a real one."""
    omega_real = get_random_omega_real(N)
    return qf.shr2shc(omega_real)
@pytest.mark.parametrize("f", [get_fun(), get_fun(N=128), get_fun(N=256)[:128, :255]])
def test_spherical_harmonics_transform_forward(f):
    """fun2shc/shc2fun round-trip reproduces the coefficients."""
    coeffs = qf.fun2shc(f)
    resampled = qf.shc2fun(coeffs)
    coeffs_again = qf.fun2shc(resampled)
    assert coeffs == pytest.approx(coeffs_again)
@pytest.mark.parametrize("omega_real", [get_random_omega_real(), get_random_omega_real(128)])
def test_real_to_complex_harmonics(omega_real):
    """shr2shc followed by shc2shr restores the real coefficients."""
    as_complex = qf.shr2shc(omega_real)
    back_to_real = qf.shc2shr(as_complex)
    assert omega_real == pytest.approx(back_to_real)
@pytest.mark.parametrize("omega_complex", [get_random_omega_complex(), get_random_omega_complex(128)])
def test_complex_to_real_harmonics(omega_complex):
    """shc2shr followed by shr2shc restores the complex coefficients."""
    as_real = qf.shc2shr(omega_complex)
    back_to_complex = qf.shr2shc(as_real)
    assert omega_complex == pytest.approx(back_to_complex)
| 676 | 0 | 135 |
c5878b6f3946fbbc760914ab0bb8dab5b27bb7ef | 5,311 | py | Python | lab_07/main.py | migregal/bmstu_iu7_cg | b336e6058329656863d6066d69aabeb5aaef8014 | [
"MIT"
] | null | null | null | lab_07/main.py | migregal/bmstu_iu7_cg | b336e6058329656863d6066d69aabeb5aaef8014 | [
"MIT"
] | null | null | null | lab_07/main.py | migregal/bmstu_iu7_cg | b336e6058329656863d6066d69aabeb5aaef8014 | [
"MIT"
] | null | null | null | import sys
from copy import deepcopy
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from funcs import *
from simple_cut import simple_cut
# Класс главного окна
# Отслеживание передвижения мыши
# Application entry point: create the Qt application, show the main
# window, and hand control to the Qt event loop.
if __name__ == '__main__':
    app = QApplication([])
    application = Window()
    application.show()
    sys.exit(app.exec())
| 29.505556 | 80 | 0.567313 | import sys
from copy import deepcopy
from PyQt5 import QtWidgets
from PyQt5 import uic
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from funcs import *
from simple_cut import simple_cut
# Класс главного окна
class Window(QMainWindow):
    """Main application window: a line-clipping demo (lines + rectangular cutter)."""
    def __init__(self):
        QWidget.__init__(self)
        # Load the UI layout from the Qt Designer file.
        uic.loadUi("design.ui", self)
        # State variables: colors, input lines, current cutter rectangle,
        # and the "rubber-band" preview items that follow the mouse.
        self.bg_color = QColor(Qt.white)
        self.line_color = QColor(Qt.black)
        self.cutter_color = QColor(Qt.green)
        self.cut_line_color = QColor(Qt.red)
        self.ctrl_pressed = False
        self.lines: list[Line] = []
        self.colored_lines = []
        self.cur_line = []
        self.follow_line = None
        self.cutter: Cutter = None
        self.drawing_cutter = False
        self.cur_cutter = []
        self.follow_cutter = None
        # Attach the drawing canvas (custom scene) to the graphics view.
        self.scene = MyScene(self, 0, 0, 1920, 1080)
        self.mainview.setScene(self.scene)
        self.pen: QPen = QPen()
        self.mainview.ensureVisible(0, 0, 0, 0)
        self.mainview.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.mainview.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.mainview.setRenderHint(QPainter.Antialiasing)
        self.but_add_line.clicked.connect(self.get_line)
        self.but_add_cutter.clicked.connect(self.get_cutter)
        self.but_cut.clicked.connect(self.cut)
        self.but_clear.clicked.connect(self.clear)
        # Remaining settings: enable mouse tracking so move events arrive
        # without a button pressed, and route viewport events through self.
        self.mainview.setMouseTracking(True)
        self.mainview.viewport().installEventFilter(self)
    # Track mouse movement over the canvas to update rubber-band previews.
    def eventFilter(self, src, event):
        """Forward viewport mouse-move events to the line/cutter preview helpers."""
        if event.type() == QEvent.MouseMove and src is self.mainview.viewport():
            x = event.x()
            y = event.y()
            following_line(self, x, y)
            following_cutter(self, x, y)
        return QWidget.eventFilter(self, src, event)
    def keyPressEvent(self, event):
        """Remember Shift being held (used elsewhere for axis-aligned input)."""
        key = event.key()
        if key == Qt.Key_Shift:
            self.ctrl_pressed = True
    def keyReleaseEvent(self, event):
        """Clear the Shift-held flag."""
        key = event.key()
        if key == Qt.Key_Shift:
            self.ctrl_pressed = False
    def get_line(self):
        """Add a line from the coordinate spin boxes."""
        add_line(
            self,
            self.inp_x1.value(),
            self.inp_y1.value(),
            self.inp_x2.value(),
            self.inp_y2.value(),
            self.line_color)
    def get_cutter(self):
        """Replace the cutter with a rectangle from the spin-box coordinates."""
        self.scene.removeItem(self.follow_cutter)
        self.drawing_cutter = False
        del_cutter(self)
        add_cutter(
            self,
            self.inp_x_left.value(),
            self.inp_y_up.value(),
            self.inp_x_right.value(),
            self.inp_y_down.value(),
            self.cutter_color
        )
    def cut(self):
        """Clip every stored line against the cutter and draw visible parts highlighted."""
        if self.cutter:
            # Remove previously highlighted segments before redrawing.
            for i in self.colored_lines:
                try:
                    self.scene.removeItem(i)
                except BaseException:
                    pass
            for line in self.lines:
                # deepcopy: simple_cut mutates the line it clips.
                visible, p1, p2 = simple_cut(self.cutter.rect,
                                             deepcopy(line.line))
                if visible:
                    # Thicker pen for the clipped (visible) portion.
                    self.pen.setWidth(2)
                    draw_line(self, p1, p2, self.cut_line_color)
                    self.pen.setWidth(1)
    def draw_apart_line(self, line: QLine, p1:QPoint, p2:QPoint):
        """Redraw the parts of *line* outside the clipped segment (p1, p2).

        NOTE(review): the ``p2 == line.p1()`` branch draws (line.p1(), p2),
        which is a zero-length segment in that case — looks like it should
        pass p1; confirm against the other branches. Also ``print(line)``
        appears to be leftover debug output.
        """
        print(line)
        if p1 == line.p1() and p2 == line.p2():
            return
        if p1 == line.p1():
            draw_line(self, line.p2(), p2, self.line_color)
        elif p1 == line.p2():
            draw_line(self, line.p1(), p2, self.line_color)
        elif p2 == line.p1():
            draw_line(self, line.p1(), p2, self.line_color)
        elif p2 == line.p2():
            draw_line(self, line.p1(), p1, self.line_color)
        else:
            draw_line(self, line.p1(), line.p2(), self.line_color)
    def clear(self):
        """Reset the scene and all drawing state to the initial empty condition."""
        self.scene.clear()
        self.lines.clear()
        self.cur_line.clear()
        self.follow_line = None
        self.cutter = None
        self.cur_cutter.clear()
        self.follow_cutter = None
        self.drawing_cutter = False
class MyScene(QtWidgets.QGraphicsScene):
    """Graphics scene that routes mouse clicks back to the main Window.

    Left click places a line endpoint (or a cutter corner while drawing
    the cutter); right click starts drawing a new cutter rectangle.
    """
    def __init__(self, win: Window, *args):
        super().__init__(*args)
        self.window = win
        self.last_x = None
        self.last_y = None
    def mousePressEvent(self,
                        event: QtWidgets.QGraphicsSceneMouseEvent) -> None:
        """Dispatch clicks: left → line/cutter point, right → begin new cutter."""
        if event.buttons() == Qt.LeftButton:
            if not self.window.drawing_cutter:
                line_on_screen(self.window, event.scenePos().x(),
                               event.scenePos().y())
            else:
                cutter_on_screen(self.window, event.scenePos().x(),
                                 event.scenePos().y())
        elif event.buttons() == Qt.RightButton:
            # Right click discards any existing cutter and starts a new one.
            self.window.drawing_cutter = True
            del_cutter(self.window)
            cutter_on_screen(self.window, event.scenePos().x(),
                             event.scenePos().y())
# Application entry point: create the Qt application, show the main
# window, and hand control to the Qt event loop.
if __name__ == '__main__':
    app = QApplication([])
    application = Window()
    application.show()
    sys.exit(app.exec())
| 4,588 | 24 | 339 |
959c9786634416f76e5af9a4e4d0557746d5d2df | 283 | py | Python | recommendations/users/urls.py | batalova90/recommendations | 6a006b792f5b8aae6ad1a5c54b7cd81e4c53ae80 | [
"MIT"
] | null | null | null | recommendations/users/urls.py | batalova90/recommendations | 6a006b792f5b8aae6ad1a5c54b7cd81e4c53ae80 | [
"MIT"
] | null | null | null | recommendations/users/urls.py | batalova90/recommendations | 6a006b792f5b8aae6ad1a5c54b7cd81e4c53ae80 | [
"MIT"
] | null | null | null | from django.contrib.auth.views import LogoutView
from django.urls import path
from . import views
# URL routes for the users app: logout (rendered with a custom template)
# and signup (class-based view defined in views.py).
urlpatterns = [
    path('logout/', LogoutView.as_view(template_name='users/logged_out.html'),
         name='logout'),
    path('signup/', views.SignUp.as_view(), name='signup')
]
| 21.769231 | 78 | 0.70318 | from django.contrib.auth.views import LogoutView
from django.urls import path
from . import views
# URL routes for the users app: logout (rendered with a custom template)
# and signup (class-based view defined in views.py).
urlpatterns = [
    path('logout/', LogoutView.as_view(template_name='users/logged_out.html'),
         name='logout'),
    path('signup/', views.SignUp.as_view(), name='signup')
]
| 0 | 0 | 0 |
0c6ef210d7da12a90e4d6ae5831bdbd4c6f0f39b | 12,962 | py | Python | moztrap/model/library/api.py | bobsilverberg/moztrap | 9f51f098b03f365b8101be2e2c9a38486a43314c | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/library/api.py | bobsilverberg/moztrap | 9f51f098b03f365b8101be2e2c9a38486a43314c | [
"BSD-2-Clause"
] | null | null | null | moztrap/model/library/api.py | bobsilverberg/moztrap | 9f51f098b03f365b8101be2e2c9a38486a43314c | [
"BSD-2-Clause"
] | null | null | null | from tastypie import http, fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from ..core.api import (ProductVersionResource, ProductResource,
UserResource)
from .models import CaseVersion, Case, Suite, CaseStep, SuiteCase
from ...model.core.models import ProductVersion
from ..mtapi import MTResource, MTAuthorization
from ..environments.api import EnvironmentResource
from ..tags.api import TagResource
import logging
logger = logging.getLogger(__name__)
class SuiteCaseAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "library.manage_suite_cases"
class CaseVersionAuthorization(MTAuthorization):
"""A permission of 'library.manage_caseversions does not exist,
use library.manage_cases instead."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "library.manage_cases"
class SuiteResource(MTResource):
"""
Create, Read, Update and Delete capabilities for Suite.
Filterable by name and product fields.
"""
product = fields.ToOneField(ProductResource, "product")
@property
def model(self):
"""Model class related to this resource."""
return Suite
@property
def read_create_fields(self):
"""List of fields that are required for create but read-only for update."""
return ["product"]
class CaseResource(MTResource):
"""
Create, Read, Update and Delete capabilities for Case.
Filterable by suites and product fields.
"""
suites = fields.ToManyField(
SuiteResource,
"suites",
readonly=True,
null=True,
)
product = fields.ForeignKey(ProductResource, "product")
@property
def model(self):
"""Model class related to this resource."""
return Case
@property
def read_create_fields(self):
"""List of fields that are required for create but read-only for update."""
return ["product"]
class CaseStepResource(MTResource):
"""
Create, Read, Update and Delete capabilities for CaseSteps.
Filterable by caseversion field.
"""
caseversion = fields.ForeignKey(
"moztrap.model.library.api.CaseVersionResource", "caseversion")
@property
def model(self):
"""Model class related to this resource."""
return CaseStep
@property
def read_create_fields(self):
"""caseversion is read-only"""
return ["caseversion"]
class SuiteCaseResource(MTResource):
"""
Create, Read, Update and Delete capabilities for SuiteCase.
Filterable by suite and case fields.
"""
case = fields.ForeignKey(CaseResource, 'case')
suite = fields.ForeignKey(SuiteResource, 'suite')
@property
@property
def read_create_fields(self):
"""case and suite are read-only"""
return ["suite", "case"]
def hydrate_case(self, bundle):
"""case is read-only on PUT
case.product must match suite.product on CREATE
"""
# CREATE
if bundle.request.META['REQUEST_METHOD'] == 'POST':
case_id = self._id_from_uri(bundle.data['case'])
case = Case.objects.get(id=case_id)
suite_id = self._id_from_uri(bundle.data['suite'])
suite = Suite.objects.get(id=suite_id)
if case.product.id != suite.product.id:
error_message = str(
"case's product must match suite's product."
)
logger.error(
"\n".join([error_message, "case prod: %s, suite prod: %s"]),
case.product.id, suite.product.id)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_message))
return bundle
class CaseVersionResource(MTResource):
"""
Create, Read, Update and Delete capabilities for CaseVersions.
Filterable by environments, productversion, case, and tags fields.
"""
case = fields.ForeignKey(CaseResource, "case")
steps = fields.ToManyField(
CaseStepResource, "steps", full=True, readonly=True)
environments = fields.ToManyField(
EnvironmentResource, "environments", full=True, readonly=True)
productversion = fields.ForeignKey(
ProductVersionResource, "productversion")
tags = fields.ToManyField(TagResource, "tags", full=True, readonly=True)
#@@@ attachments
@property
def model(self):
"""Model class related to this resource."""
return CaseVersion
@property
def read_create_fields(self):
"""List of fields that are required for create but read-only for update."""
return ["case", "productversion"]
def obj_update(self, bundle, request=None, **kwargs):
"""Set the modified_by field for the object to the request's user,
avoid ConcurrencyError by updating cc_version."""
# this try/except logging is more helpful than 500 / 404 errors on the
# client side
bundle = self.check_read_create(bundle)
try:
bundle = super(MTResource, self).obj_update(
bundle=bundle, request=request, **kwargs)
# avoid ConcurrencyError
bundle.obj.cc_version = self.model.objects.get(
id=bundle.obj.id).cc_version
bundle.obj.save(user=request.user)
return bundle
except Exception: # pragma: no cover
logger.exception("error updating %s", bundle) # pragma: no cover
raise # pragma: no cover
def hydrate_productversion(self, bundle):
"""case.product must match productversion.product on CREATE"""
# create
if bundle.request.META['REQUEST_METHOD'] == 'POST':
pv_id = self._id_from_uri(bundle.data['productversion'])
pv = ProductVersion.objects.get(id=pv_id)
case_id = self._id_from_uri(bundle.data['case'])
case = Case.objects.get(id=case_id)
if not case.product.id == pv.product.id:
message = str("productversion must match case's product")
logger.error("\n".join([message,
"productversion product id: %s case product id: %s"], ),
pv.product.id,
case.product.id)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(message))
return bundle
class BaseSelectionResource(ModelResource):
"""Adds filtering by negation for use with multi-select widget"""
#@@@ move this to mtapi.py when that code is merged in.
def apply_filters(self,
request, applicable_filters, applicable_excludes={}):
"""Apply included and excluded filters to query."""
return self.get_object_list(request).filter(
**applicable_filters).exclude(**applicable_excludes)
def obj_get_list(self, request=None, **kwargs):
"""Return the list with included and excluded filters, if they exist."""
filters = {}
if hasattr(request, 'GET'): # pragma: no cover
# Grab a mutable copy.
filters = request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
# Splitting out filtering and excluding items
new_filters = {}
excludes = {}
for key, value in filters.items():
# If the given key is filtered by ``not equal`` token, exclude it
if key.endswith('__ne'):
key = key[:-4] # Stripping out trailing ``__ne``
excludes[key] = value
else:
new_filters[key] = value
filters = new_filters
# Building filters
applicable_filters = self.build_filters(filters=filters)
applicable_excludes = self.build_filters(filters=excludes)
base_object_list = self.apply_filters(
request, applicable_filters, applicable_excludes)
return self.apply_authorization_limits(request, base_object_list)
class CaseSelectionResource(BaseSelectionResource):
"""
Specialty end-point for an AJAX call in the Suite form multi-select widget
for selecting cases.
"""
case = fields.ForeignKey(CaseResource, "case")
productversion = fields.ForeignKey(
ProductVersionResource, "productversion")
tags = fields.ToManyField(TagResource, "tags", full=True)
created_by = fields.ForeignKey(
UserResource,
"created_by",
full=True,
null=True,
)
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
case = bundle.obj.case
bundle.data["case_id"] = unicode(case.id)
bundle.data["product_id"] = unicode(case.product_id)
bundle.data["product"] = {"id": unicode(case.product_id)}
bundle.data["priority"] = unicode(case.priority)
return bundle
class CaseVersionSelectionResource(BaseSelectionResource):
"""
Specialty end-point for an AJAX call in the Tag form multi-select widget
for selecting caseversions.
"""
case = fields.ForeignKey(CaseResource, "case")
productversion = fields.ForeignKey(
ProductVersionResource, "productversion", full=True)
tags = fields.ToManyField(TagResource, "tags", full=True)
created_by = fields.ForeignKey(
UserResource,
"created_by",
full=True,
null=True,
)
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
case = bundle.obj.case
bundle.data["case_id"] = unicode(case.id)
bundle.data["product_id"] = unicode(case.product_id)
bundle.data["product"] = {"id": unicode(case.product_id)}
bundle.data["productversion_name"] = bundle.obj.productversion.name
bundle.data["priority"] = unicode(case.priority)
return bundle
| 31.158654 | 83 | 0.612637 | from tastypie import http, fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from ..core.api import (ProductVersionResource, ProductResource,
UserResource)
from .models import CaseVersion, Case, Suite, CaseStep, SuiteCase
from ...model.core.models import ProductVersion
from ..mtapi import MTResource, MTAuthorization
from ..environments.api import EnvironmentResource
from ..tags.api import TagResource
import logging
logger = logging.getLogger(__name__)
class SuiteCaseAuthorization(MTAuthorization):
    """Authorization whose permission name does not follow the model-name pattern."""
    @property
    def permission(self):
        """Permission string that is_authorized should check."""
        perm_name = "library.manage_suite_cases"
        return perm_name
class CaseVersionAuthorization(MTAuthorization):
    """There is no 'library.manage_caseversions' permission;
    caseversions are governed by 'library.manage_cases'."""
    @property
    def permission(self):
        """Permission string that is_authorized should check."""
        perm_name = "library.manage_cases"
        return perm_name
class SuiteResource(MTResource):
    """
    Create, Read, Update and Delete capabilities for Suite.
    Filterable by name and product fields.
    """
    product = fields.ToOneField(ProductResource, "product")
    # Tastypie configuration: queryset, exposed fields, filters, ordering.
    class Meta(MTResource.Meta):
        queryset = Suite.objects.all()
        fields = ["name", "product", "description", "status", "id"]
        filtering = {
            "name": ALL,
            "product": ALL_WITH_RELATIONS,
        }
        ordering = ['name', 'product__id', 'id']
    @property
    def model(self):
        """Model class related to this resource."""
        return Suite
    @property
    def read_create_fields(self):
        """List of fields that are required for create but read-only for update."""
        return ["product"]
class CaseResource(MTResource):
    """
    Create, Read, Update and Delete capabilities for Case.
    Filterable by suites and product fields.
    """
    # Suites are exposed for filtering only (readonly); membership is
    # managed through SuiteCaseResource.
    suites = fields.ToManyField(
        SuiteResource,
        "suites",
        readonly=True,
        null=True,
    )
    product = fields.ForeignKey(ProductResource, "product")
    # Tastypie configuration: queryset, exposed fields, filters.
    class Meta(MTResource.Meta):
        queryset = Case.objects.all()
        fields = ["id", "suites", "product", "idprefix"]
        filtering = {
            "suites": ALL_WITH_RELATIONS,
            "product": ALL_WITH_RELATIONS,
        }
    @property
    def model(self):
        """Model class related to this resource."""
        return Case
    @property
    def read_create_fields(self):
        """List of fields that are required for create but read-only for update."""
        return ["product"]
class CaseStepResource(MTResource):
    """
    Create, Read, Update and Delete capabilities for CaseSteps.
    Filterable by caseversion field.
    """
    # Dotted-string target: CaseVersionResource is defined later in this
    # module, so a forward reference is required.
    caseversion = fields.ForeignKey(
        "moztrap.model.library.api.CaseVersionResource", "caseversion")
    # Tastypie configuration: queryset, exposed fields, filters, ordering.
    class Meta(MTResource.Meta):
        queryset = CaseStep.objects.all()
        fields = ["id", "caseversion", "instruction", "expected", "number"]
        filtering = {
            "caseversion": ALL_WITH_RELATIONS,
        }
        ordering = ["number", "id"]
        authorization = CaseVersionAuthorization()
    @property
    def model(self):
        """Model class related to this resource."""
        return CaseStep
    @property
    def read_create_fields(self):
        """caseversion is read-only"""
        return ["caseversion"]
class SuiteCaseResource(MTResource):
    """
    Create, Read, Update and Delete capabilities for SuiteCase.
    Filterable by suite and case fields.
    """
    case = fields.ForeignKey(CaseResource, 'case')
    suite = fields.ForeignKey(SuiteResource, 'suite')
    # Tastypie configuration: queryset, exposed fields, filters.
    class Meta(MTResource.Meta):
        queryset = SuiteCase.objects.all()
        fields = ["suite", "case", "order", "id"]
        filtering = {
            "suite": ALL_WITH_RELATIONS,
            "case": ALL_WITH_RELATIONS
        }
        authorization = SuiteCaseAuthorization()
    @property
    def model(self):
        """Model class related to this resource."""
        return SuiteCase
    @property
    def read_create_fields(self):
        """case and suite are read-only"""
        return ["suite", "case"]
    def hydrate_case(self, bundle):
        """case is read-only on PUT
        case.product must match suite.product on CREATE
        """
        # CREATE: reject a case whose product differs from the suite's.
        if bundle.request.META['REQUEST_METHOD'] == 'POST':
            case_id = self._id_from_uri(bundle.data['case'])
            case = Case.objects.get(id=case_id)
            suite_id = self._id_from_uri(bundle.data['suite'])
            suite = Suite.objects.get(id=suite_id)
            if case.product.id != suite.product.id:
                error_message = str(
                    "case's product must match suite's product."
                )
                logger.error(
                    "\n".join([error_message, "case prod: %s, suite prod: %s"]),
                    case.product.id, suite.product.id)
                raise ImmediateHttpResponse(
                    response=http.HttpBadRequest(error_message))
        return bundle
class CaseVersionResource(MTResource):
    """
    Create, Read, Update and Delete capabilities for CaseVersions.
    Filterable by environments, productversion, case, and tags fields.
    """
    case = fields.ForeignKey(CaseResource, "case")
    # steps/environments/tags are embedded (full=True) but readonly;
    # they are managed through their own resources.
    steps = fields.ToManyField(
        CaseStepResource, "steps", full=True, readonly=True)
    environments = fields.ToManyField(
        EnvironmentResource, "environments", full=True, readonly=True)
    productversion = fields.ForeignKey(
        ProductVersionResource, "productversion")
    tags = fields.ToManyField(TagResource, "tags", full=True, readonly=True)
    #@@@ attachments
    # Tastypie configuration: queryset, exposed fields, filters.
    class Meta(MTResource.Meta):
        queryset = CaseVersion.objects.all()
        fields = ["id", "name", "description", "case", "status"]
        filtering = {
            "environments": ALL,
            "productversion": ALL_WITH_RELATIONS,
            "case": ALL_WITH_RELATIONS,
            "tags": ALL_WITH_RELATIONS,
            "latest": ALL,
        }
        authorization = CaseVersionAuthorization()
    @property
    def model(self):
        """Model class related to this resource."""
        return CaseVersion
    @property
    def read_create_fields(self):
        """List of fields that are required for create but read-only for update."""
        return ["case", "productversion"]
    def obj_update(self, bundle, request=None, **kwargs):
        """Set the modified_by field for the object to the request's user,
        avoid ConcurrencyError by updating cc_version."""
        # this try/except logging is more helpful than 500 / 404 errors on the
        # client side
        bundle = self.check_read_create(bundle)
        try:
            # super(MTResource, ...) skips MTResource.obj_update in the MRO
            # and calls the grandparent implementation directly.
            bundle = super(MTResource, self).obj_update(
                bundle=bundle, request=request, **kwargs)
            # avoid ConcurrencyError: re-read the current cc_version before
            # saving with the requesting user recorded.
            bundle.obj.cc_version = self.model.objects.get(
                id=bundle.obj.id).cc_version
            bundle.obj.save(user=request.user)
            return bundle
        except Exception: # pragma: no cover
            logger.exception("error updating %s", bundle) # pragma: no cover
            raise # pragma: no cover
    def hydrate_productversion(self, bundle):
        """case.product must match productversion.product on CREATE"""
        # create: reject a productversion from a different product.
        if bundle.request.META['REQUEST_METHOD'] == 'POST':
            pv_id = self._id_from_uri(bundle.data['productversion'])
            pv = ProductVersion.objects.get(id=pv_id)
            case_id = self._id_from_uri(bundle.data['case'])
            case = Case.objects.get(id=case_id)
            if not case.product.id == pv.product.id:
                message = str("productversion must match case's product")
                logger.error("\n".join([message,
                    "productversion product id: %s case product id: %s"], ),
                    pv.product.id,
                    case.product.id)
                raise ImmediateHttpResponse(
                    response=http.HttpBadRequest(message))
        return bundle
class BaseSelectionResource(ModelResource):
    """Adds filtering by negation for use with multi-select widget"""
    #@@@ move this to mtapi.py when that code is merged in.
    # NOTE(review): applicable_excludes={} is a mutable default argument;
    # harmless here because it is only unpacked, never mutated, but a
    # dict-valued default is a known footgun.
    def apply_filters(self,
                      request, applicable_filters, applicable_excludes={}):
        """Apply included and excluded filters to query."""
        return self.get_object_list(request).filter(
            **applicable_filters).exclude(**applicable_excludes)
    def obj_get_list(self, request=None, **kwargs):
        """Return the list with included and excluded filters, if they exist."""
        filters = {}
        if hasattr(request, 'GET'): # pragma: no cover
            # Grab a mutable copy.
            filters = request.GET.copy()
        # Update with the provided kwargs.
        filters.update(kwargs)
        # Splitting out filtering and excluding items
        new_filters = {}
        excludes = {}
        for key, value in filters.items():
            # If the given key is filtered by ``not equal`` token, exclude it
            if key.endswith('__ne'):
                key = key[:-4] # Stripping out trailing ``__ne``
                excludes[key] = value
            else:
                new_filters[key] = value
        filters = new_filters
        # Building filters
        applicable_filters = self.build_filters(filters=filters)
        applicable_excludes = self.build_filters(filters=excludes)
        base_object_list = self.apply_filters(
            request, applicable_filters, applicable_excludes)
        return self.apply_authorization_limits(request, base_object_list)
class CaseSelectionResource(BaseSelectionResource):
    """
    Specialty end-point for an AJAX call in the Suite form multi-select widget
    for selecting cases.
    """
    case = fields.ForeignKey(CaseResource, "case")
    productversion = fields.ForeignKey(
        ProductVersionResource, "productversion")
    tags = fields.ToManyField(TagResource, "tags", full=True)
    created_by = fields.ForeignKey(
        UserResource,
        "created_by",
        full=True,
        null=True,
        )
    # Read-only endpoint: only latest caseversions, with related objects
    # prefetched to avoid per-row queries when serializing.
    class Meta:
        queryset = CaseVersion.objects.filter(latest=True).select_related(
            "case",
            "productversion",
            "created_by",
            ).prefetch_related(
            "tags",
            "tags__product",
            )
        list_allowed_methods = ['get']
        fields = ["id", "name", "created_by"]
        filtering = {
            "productversion": ALL_WITH_RELATIONS,
            "tags": ALL_WITH_RELATIONS,
            "case": ALL_WITH_RELATIONS,
            "created_by": ALL_WITH_RELATIONS
        }
        ordering = ["case"]
    def dehydrate(self, bundle):
        """Add some convenience fields to the return JSON."""
        case = bundle.obj.case
        bundle.data["case_id"] = unicode(case.id)
        bundle.data["product_id"] = unicode(case.product_id)
        bundle.data["product"] = {"id": unicode(case.product_id)}
        bundle.data["priority"] = unicode(case.priority)
        return bundle
class CaseVersionSelectionResource(BaseSelectionResource):
    """
    Specialty end-point for an AJAX call in the Tag form multi-select widget
    for selecting caseversions.
    """
    case = fields.ForeignKey(CaseResource, "case")
    productversion = fields.ForeignKey(
        ProductVersionResource, "productversion", full=True)
    tags = fields.ToManyField(TagResource, "tags", full=True)
    created_by = fields.ForeignKey(
        UserResource,
        "created_by",
        full=True,
        null=True,
        )
    # Read-only endpoint: all caseversions (not just latest), with related
    # objects prefetched to avoid per-row queries when serializing.
    class Meta:
        queryset = CaseVersion.objects.all().select_related(
            "case",
            "productversion",
            "created_by",
            ).prefetch_related(
            "tags",
            )
        list_allowed_methods = ['get']
        fields = ["id", "name", "latest", "created_by_id"]
        filtering = {
            "productversion": ALL_WITH_RELATIONS,
            "tags": ALL_WITH_RELATIONS,
            "case": ALL_WITH_RELATIONS,
            "created_by": ALL_WITH_RELATIONS
        }
        ordering = ["name"]
    def dehydrate(self, bundle):
        """Add some convenience fields to the return JSON."""
        case = bundle.obj.case
        bundle.data["case_id"] = unicode(case.id)
        bundle.data["product_id"] = unicode(case.product_id)
        bundle.data["product"] = {"id": unicode(case.product_id)}
        bundle.data["productversion_name"] = bundle.obj.productversion.name
        bundle.data["priority"] = unicode(case.priority)
        return bundle
| 20 | 2,546 | 215 |
1bfe241b43b3ad59ad7f4c55395602c668570444 | 1,348 | py | Python | lbpi/wrappers/ensembles/ensembles_copy.py | nairvinayv/random_scripts | 6e1cc8d82cf61ae245108a69ffa1359f636f37f7 | [
"MIT"
] | null | null | null | lbpi/wrappers/ensembles/ensembles_copy.py | nairvinayv/random_scripts | 6e1cc8d82cf61ae245108a69ffa1359f636f37f7 | [
"MIT"
] | null | null | null | lbpi/wrappers/ensembles/ensembles_copy.py | nairvinayv/random_scripts | 6e1cc8d82cf61ae245108a69ffa1359f636f37f7 | [
"MIT"
] | null | null | null | import os
import shutil
import sys
dirr = sys.argv[1] # this should be the path for a common folder from forms
flags = sys.argv[2]
print(dirr)
def folders_target():
"""
Make multiple target folders for receptor
"""
if not os.path.exists('{}/ensembles'.format(dirr)):
os.path.join(os.mkdir('{}/ensembles'.format(dirr)))
elif os.path.exists('{}/ensembles'.format(dirr)):
dirr
source = '{}/protein/protein.pdb'.format(dirr)
dest = '{}/ensembles/folder{}'.format(dirr, len(os.listdir('{}/ensembles'.format(dirr))))
os.mkdir(os.path.join(dest))
shutil.move(source, dest)
def ligand_copy():
"""
Copy ligand file to each folder and set the folder ready
"""
folder_list = []
for i in os.listdir('{}/ensembles'.format(dirr)):
folder_list.append('{}/ensembles/{}/'.format(dirr, i))
ligand_path = '{}/ligand.pdbqt'.format(dirr)
for i in folder_list:
shutil.copy(ligand_path, i)
if __name__ == '__main__':
if flags == 'folderplace':
folders_target()
elif flags == 'ligcopy':
ligand_copy()
#folders_target()
#ligand_copy()
#source = 'source/test.dat'
#dest = 'dest/'
#os.mkdir('dest')
#
#shutil.move(source, dest)
| 16.439024 | 93 | 0.577151 | import os
import shutil
import sys
dirr = sys.argv[1] # this should be the path for a common folder from forms
flags = sys.argv[2]
print(dirr)
def folders_target():
    """
    Make a new numbered target folder under <dirr>/ensembles and move the
    receptor file (<dirr>/protein/protein.pdb) into it.

    Relies on the module-level ``dirr`` global (base path taken from argv).
    """
    ensembles = '{}/ensembles'.format(dirr)
    # BUG FIX: the original called os.path.join(os.mkdir(...)), which feeds
    # os.mkdir's None return value into os.path.join and raises TypeError
    # whenever the ensembles directory does not exist yet. The old
    # "elif os.path.exists(...): dirr" branch was a no-op and is dropped.
    if not os.path.exists(ensembles):
        os.mkdir(ensembles)
    source = '{}/protein/protein.pdb'.format(dirr)
    # Folder index = number of folders already present, so names never clash.
    dest = '{}/ensembles/folder{}'.format(dirr, len(os.listdir(ensembles)))
    os.mkdir(dest)
    shutil.move(source, dest)
def ligand_copy():
    """
    Copy the ligand file into every ensemble folder so each one is ready.

    Relies on the module-level ``dirr`` global (base path taken from argv).
    """
    targets = ['{}/ensembles/{}/'.format(dirr, name)
               for name in os.listdir('{}/ensembles'.format(dirr))]
    ligand_path = '{}/ligand.pdbqt'.format(dirr)
    for folder in targets:
        shutil.copy(ligand_path, folder)
# CLI dispatch: the second argv flag selects which preparation step to run.
if __name__ == '__main__':
    if flags == 'folderplace':
        folders_target()
    elif flags == 'ligcopy':
        ligand_copy()
#folders_target()
#ligand_copy()
#source = 'source/test.dat'
#dest = 'dest/'
#os.mkdir('dest')
#
#shutil.move(source, dest)
| 0 | 0 | 0 |
624018f1a61d00e55e13767dd664755a55e6d5b5 | 6,666 | py | Python | test.py | buriedms/-3D-ResNets-Paddle | 2b8e51faa03e86be7308cb0706b4d41d24570ebb | [
"MIT"
] | 1 | 2022-02-28T00:48:16.000Z | 2022-02-28T00:48:16.000Z | test.py | buriedms/-3D-ResNets-Paddle | 2b8e51faa03e86be7308cb0706b4d41d24570ebb | [
"MIT"
] | null | null | null | test.py | buriedms/-3D-ResNets-Paddle | 2b8e51faa03e86be7308cb0706b4d41d24570ebb | [
"MIT"
] | 1 | 2022-02-28T00:48:20.000Z | 2022-02-28T00:48:20.000Z | import paddle
import time
import os
import sys
import json
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_test_set,get_validation_set
from utils import AverageMeter,calculate_accuracy
from utils import Logger
if __name__ == '__main__':
main() | 34.184615 | 80 | 0.636064 | import paddle
import time
import os
import sys
import json
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_test_set,get_validation_set
from utils import AverageMeter,calculate_accuracy
from utils import Logger
def calculate_video_results(output_buffer, video_id, test_results, class_names,
                            top_k=10):
    """Average per-clip scores for one video and record its top-k predictions.

    output_buffer -- list of per-clip score tensors for the same video
    video_id      -- key under which results are stored
    test_results  -- dict with a 'results' sub-dict, mutated in place
    class_names   -- index -> label mapping
    top_k         -- number of predictions to keep (default 10, matching
                     the original hard-coded value)
    """
    video_outputs = paddle.stack(output_buffer)
    average_scores = paddle.mean(video_outputs, axis=0)
    sorted_scores, locs = paddle.topk(average_scores, k=top_k)
    # PERF FIX: convert tensors to Python lists once; the original called
    # .tolist() on both tensors inside the loop, doing a full tensor->list
    # conversion (and device sync) per iteration.
    scores = sorted_scores.tolist()
    labels = locs.tolist()
    video_results = [
        {'label': class_names[labels[i]], 'score': scores[i]}
        for i in range(len(scores))
    ]
    test_results['results'][video_id] = video_results
def test(data_loader, model, opt, class_names):
    """Run inference over the test loader, grouping clip outputs by video id
    and periodically dumping accumulated results to <result_path>/<subset>.json.

    NOTE(review): after the loop, the final video's accumulated
    output_buffer is never passed to calculate_video_results, so the last
    video appears to be missing from the dumped results — confirm.
    """
    print('test')
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    end_time = time.time()
    output_buffer = []
    previous_video_id = ''
    test_results = {'results': {}}
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)
        inputs = paddle.to_tensor(inputs)
        outputs = model(inputs)
        if not opt.no_softmax_in_test:
            outputs = paddle.nn.functional.softmax(outputs,axis=1)
        # targets here are video ids; flush the buffer whenever the id changes.
        for j in range(outputs.shape[0]):
            if not (i == 0 and j == 0) and targets[j] != previous_video_id:
                calculate_video_results(output_buffer, previous_video_id,
                                        test_results, class_names)
                output_buffer = []
            output_buffer.append(outputs[j])
            previous_video_id = targets[j]
        # Checkpoint partial results every 100 batches.
        if (i % 100) == 0:
            with open(
                    os.path.join(opt.result_path, '{}.json'.format(
                        opt.test_subset)), 'w') as f:
                json.dump(test_results, f)
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        print('[{}/{}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'.format(
                  i + 1,
                  len(data_loader),
                  batch_time=batch_time,
                  data_time=data_time))
    # Final dump of everything accumulated so far.
    with open(
            os.path.join(opt.result_path, '{}.json'.format(opt.test_subset)),
            'w') as f:
        json.dump(test_results, f)
def val_epoch(epoch, data_loader, model, criterion, opt,):
    """Evaluate the model for one epoch: average loss and accuracy over the
    validation loader, printing per-batch timings and a final accuracy line.
    """
    print('validation at epoch {}'.format(epoch))
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)
        inputs = paddle.to_tensor(inputs)
        targets = paddle.to_tensor(targets)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        acc = calculate_accuracy(outputs, targets)
        # Weight running averages by the batch size.
        losses.update(loss.numpy()[0], inputs.shape[0])
        accuracies.update(acc, inputs.shape[0])
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        print('Test :\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})'.format(
                  batch_time=batch_time,
                  data_time=data_time,
                  loss=losses,
                  acc=accuracies))
    print('The test Acc result : {acc.avg:.6f}'.format(acc=accuracies))
def main():
    """Parse options, build validation/test loaders and run evaluation."""
    opt = parse_opts()
    # Resolve all configured paths relative to root_path, when given.
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    # Geometric progression of crop scales: initial_scale * scale_step^k.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    os.makedirs(opt.result_path, exist_ok=True)
    # Persist the effective options for reproducibility.
    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
        json.dump(vars(opt), opt_file)
    criterion = paddle.nn.CrossEntropyLoss()
    paddle.seed(opt.manual_seed)
    model, parameters = generate_model(opt)
    # Pick the input normalization: none, mean-only, or mean+std.
    if opt.no_mean_norm and not opt.std_norm:
        norm_method = Normalize([0, 0, 0], [1, 1, 1])
    elif not opt.std_norm:
        norm_method = Normalize(opt.mean, [1, 1, 1])
    else:
        norm_method = Normalize(opt.mean, opt.std)
    # Validation pipeline: center crop.
    spatial_transform = Compose([
        Scale(opt.sample_size),
        CenterCrop(opt.sample_size),
        ToTensor(opt.norm_value), norm_method
    ])
    temporal_transform = LoopPadding(opt.sample_duration)
    target_transform = ClassLabel()
    validation_data = get_validation_set(
        opt, spatial_transform, temporal_transform, target_transform)
    val_loader = paddle.io.DataLoader(
        validation_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_threads)
    # Test pipeline: corner crop at a test-specific scale; targets are video ids.
    spatial_transform = Compose([
        Scale(int(opt.sample_size / opt.scale_in_test)),
        CornerCrop(opt.sample_size, opt.crop_position_in_test),
        ToTensor(opt.norm_value), norm_method
    ])
    temporal_transform = LoopPadding(opt.sample_duration)
    target_transform = VideoID()
    test_data = get_test_set(opt, spatial_transform, temporal_transform,
                             target_transform)
    test_loader = paddle.io.DataLoader(
        test_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_threads)
    val_epoch('Test', val_loader, model, criterion, opt)
    test(test_loader, model, opt, test_data.class_names)
# Script entry point. (The original line was corrupted with stray
# dataset-table residue after the call; restored to valid Python.)
if __name__ == '__main__':
    main()
6d841ec89fca6bdcf9485738eb500bfbfe999af1 | 346 | py | Python | blog/App/views/__init__.py | caoluyang8/blog | 98b702e7b038036986895fac4daefccd90c22bdc | [
"BSD-2-Clause"
] | null | null | null | blog/App/views/__init__.py | caoluyang8/blog | 98b702e7b038036986895fac4daefccd90c22bdc | [
"BSD-2-Clause"
] | null | null | null | blog/App/views/__init__.py | caoluyang8/blog | 98b702e7b038036986895fac4daefccd90c22bdc | [
"BSD-2-Clause"
] | null | null | null | from .main import main #首页蓝本
from .user import user # blueprint handling user accounts
from .posts import posts
from .owncenter import center
# (blueprint, url_prefix) pairs consumed when registering blueprints on the app.
# NOTE(review): `main` below presumably comes from `from .main import main`
# on the (corrupted) preceding line -- verify.
blueprintConfig = [
    (main,''),
    (user,''),
    (posts,''),
    (center,''),
]
#注册蓝本 | 19.222222 | 59 | 0.684971 | from .main import main #首页蓝本
from .user import user # blueprint handling user accounts
from .posts import posts
from .owncenter import center
# (blueprint, url_prefix) pairs; an empty prefix mounts the blueprint at the root.
# NOTE(review): `main` below presumably comes from `from .main import main`
# on the (corrupted) preceding line -- verify.
blueprintConfig = [
    (main,''),
    (user,''),
    (posts,''),
    (center,''),
]
# register blueprints
def register_blueprint(app):
    """Register every configured blueprint on *app* with its URL prefix.

    (The original closing line was corrupted with stray dataset-table
    residue `| 112 | 0 | 22`; restored to valid Python.)
    """
    for blueprint, prefix in blueprintConfig:
        app.register_blueprint(blueprint, url_prefix=prefix)
2cf89d6f0c52f8a3936d5f44f7dd186b241c5cce | 589 | py | Python | src/remote_blink.py | RochesterMakerSpace/Raspberry-Pi-class | eef4b5de958571eced00d00db18edca4d7d34c94 | [
"MIT"
] | null | null | null | src/remote_blink.py | RochesterMakerSpace/Raspberry-Pi-class | eef4b5de958571eced00d00db18edca4d7d34c94 | [
"MIT"
] | null | null | null | src/remote_blink.py | RochesterMakerSpace/Raspberry-Pi-class | eef4b5de958571eced00d00db18edca4d7d34c94 | [
"MIT"
] | null | null | null | from gpiozero import LED
from gpiozero.pins.pigpio import PiGPIOFactory
from time import sleep
# Change localhost to Raspberry Pi host and run
# this program on a remote computer
factory = PiGPIOFactory(host='localhost')
# NOTE(review): `LED` presumably comes from `from gpiozero import LED` on the
# (corrupted) preceding line -- verify. Pin 21 uses BCM numbering per gpiozero.
led = LED(21, pin_factory=factory)
try:
    # toggle the led forever
    while True:
        led.on()
        sleep(2)
        led.off()
        sleep(2)
# Prevent Traceback warning when using Ctrl-C to exit the program
except KeyboardInterrupt:
    pass
# Cleanup if program closes due to a signal
finally:
    led.close() # close the connection to the pin
| 23.56 | 65 | 0.706282 | from gpiozero import LED
from gpiozero.pins.pigpio import PiGPIOFactory
from time import sleep
# Change localhost to Raspberry Pi host and run
# this program on a remote computer
factory = PiGPIOFactory(host='localhost')
# NOTE(review): `LED` presumably comes from `from gpiozero import LED` on the
# (corrupted) preceding line -- verify. Pin 21 uses BCM numbering per gpiozero.
led = LED(21, pin_factory=factory)
try:
    # toggle the led forever
    while True:
        led.on()
        sleep(2)
        led.off()
        sleep(2)
# Prevent Traceback warning when using Ctrl-C to exit the program
except KeyboardInterrupt:
    pass
# Cleanup if program closes due to a signal
finally:
    led.close() # close the connection to the pin
| 0 | 0 | 0 |
5f76a2f4a84049efe77d3b7b628525f29b3f92b6 | 2,824 | py | Python | package/src/masonry/utils.py | MrKriss/stonemason | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | 2 | 2017-11-13T17:40:52.000Z | 2021-05-08T15:58:28.000Z | package/src/masonry/utils.py | MrKriss/masonry | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | 3 | 2017-09-03T22:58:37.000Z | 2017-09-12T21:45:27.000Z | package/src/masonry/utils.py | MrKriss/stonemason | d78becc9168c2566b31b48c9a951e2823bc98362 | [
"MIT"
] | null | null | null |
import json
import logging
import os
import shlex
import subprocess
from itertools import dropwhile
from pathlib import Path
from clint.textui import colored, indent, puts
def load_application_data(location=None):
    """Return the stored application data on where templates live"""
    # Default to the user's home directory when no location is given.
    base = location if location else os.getenv("HOME")
    metadata_file = Path(base, ".masonry", "templates.json")
    # A missing metadata file simply means no templates are registered yet.
    if not metadata_file.exists():
        return {}
    return json.loads(metadata_file.read_text())
def save_application_data(obj, location=None):
    """Save application data on where templates live.

    Args:
        obj: JSON-serializable object to persist.
        location: directory holding the ``.masonry`` folder; defaults to $HOME.
    """
    if not location:
        location = os.getenv("HOME")
    app_data_dir = Path(os.path.join(location, ".masonry"))
    app_data_dir.mkdir(exist_ok=True)
    template_metadata_path = app_data_dir / "templates.json"
    # Fix: the original passed path.open('w') straight to json.dump and never
    # closed the handle; use a context manager so the file is flushed/closed.
    with template_metadata_path.open('w') as fh:
        json.dump(obj, fh, indent=4)
def rindex(lst, item):
    """Return the index position of the last item in a list.

    Raises:
        ValueError: if *item* does not occur in *lst*.
    """
    # Fix: the predicate used by dropwhile was missing, so calling this
    # function raised NameError. Restore it: skip (from the right) every
    # index whose element differs from *item*.
    def index_ne(x):
        return lst[x] != item
    try:
        return next(dropwhile(index_ne, reversed(range(len(lst)))))
    except StopIteration:
        raise ValueError("rindex(lst, item): item not in list")
def setup_logger(name=__name__, logfile=None, level=logging.DEBUG):
    """
    A utility function that you can call to easily set up logging to the
    console and optionally to a file. No hassles.
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(level)
    # Drop any handlers left over from a previous call so that settings
    # can be updated by calling this function again.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
    formatter = logging.Formatter('%(name)s - %(asctime)-15s - %(levelname)s: %(message)s')
    # Console output, at the requested level.
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(formatter)
    logger.addHandler(console)
    # Optional file output that records every message.
    if logfile:
        file_handler = logging.FileHandler(logfile)
        file_handler.setLevel(logging.NOTSET)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    logger.debug("logger set up. level=%s", level)
    return logger
| 28.24 | 92 | 0.693343 |
import json
import logging
import os
import shlex
import subprocess
from itertools import dropwhile
from pathlib import Path
from clint.textui import colored, indent, puts
def load_application_data(location=None):
    """Return the stored application data on where templates live"""
    # Default to the user's home directory when no location is given.
    if not location:
        location = os.getenv("HOME")
    app_data_dir = Path(os.path.join(location, ".masonry"))
    template_metadata_path = app_data_dir / "templates.json"
    # A missing metadata file simply means no templates are registered yet.
    if template_metadata_path.exists():
        obj = json.loads(template_metadata_path.read_text())
    else:
        obj = {}
    return obj
def save_application_data(obj, location=None):
    """Save application data on where templates live.

    Args:
        obj: JSON-serializable object to persist.
        location: directory holding the ``.masonry`` folder; defaults to $HOME.
    """
    if not location:
        location = os.getenv("HOME")
    app_data_dir = Path(os.path.join(location, ".masonry"))
    app_data_dir.mkdir(exist_ok=True)
    template_metadata_path = app_data_dir / "templates.json"
    # Fix: the original passed path.open('w') straight to json.dump and never
    # closed the handle; use a context manager so the file is flushed/closed.
    with template_metadata_path.open('w') as fh:
        json.dump(obj, fh, indent=4)
def rindex(lst, item):
    """Return the index position of the last item in a list."""
    # Scan from the right; the first match is the last occurrence.
    for pos in range(len(lst) - 1, -1, -1):
        if lst[pos] == item:
            return pos
    raise ValueError("rindex(lst, item): item not in list")
def setup_logger(name=__name__, logfile=None, level=logging.DEBUG):
    """
    A utility function that you can call to easily set up logging to the
    console and optionally to a file. No hassles.

    Args:
        name: logger name (defaults to this module's name).
        logfile: optional path; when given, all records are also written there.
        level: threshold for the logger and its console handler.
    """
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.setLevel(level)
    # Remove old handlers to allow updating settings
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    # create console handler
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level) # propagate all messages
    # add the formatter to the handler
    formatter = logging.Formatter('%(name)s - %(asctime)-15s - %(levelname)s: %(message)s')
    stream_handler.setFormatter(formatter)
    # setup logger and add the handlers
    logger.addHandler(stream_handler)
    if logfile:
        # File handler records everything regardless of the console level.
        filehandler = logging.FileHandler(logfile)
        filehandler.setLevel(logging.NOTSET)
        filehandler.setFormatter(formatter)
        logger.addHandler(filehandler)
    logger.debug("logger set up. level=%s", level)
    return logger
def run_and_capture(command, give_feedback=True):
    """Run *command* (a shell-style string), capture stdout/stderr.

    Prints a colored success/failure message when *give_feedback* is True
    and returns the CompletedProcess for inspection by the caller.
    """
    p = subprocess.run(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if p.returncode != 0 and give_feedback:
        puts(colored.red('Error in executing "%s"' % command))
        puts(colored.red(p.stderr.decode().strip()))
    elif p.returncode == 0 and give_feedback:
        puts(colored.green('Executed "%s"' % command))
    return p
| 423 | 0 | 49 |
236f56ebee647da1e638c0c765b9af34368035f1 | 6,025 | py | Python | gfapy/alignment/cigar.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 44 | 2017-03-18T08:08:04.000Z | 2021-11-10T16:11:15.000Z | gfapy/alignment/cigar.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 22 | 2017-04-04T21:20:31.000Z | 2022-03-09T19:05:30.000Z | gfapy/alignment/cigar.py | ujjwalsh/gfapy | 891ef3df695f20c67809e5a54549c876d90690b4 | [
"ISC"
] | 5 | 2017-07-07T02:56:56.000Z | 2020-09-30T20:10:49.000Z | import re
import gfapy
class CIGAR(list):
"""
Representation of the contents of a CIGAR string.
Each operation is represented by a
:class:`CIGAR.Operation <gfapy.alignment.cigar.CIGAR.Operation>`,
which specifies an operation length and operation symbol.
Instances are usually created from their string representations, using the
:class:`~gfapy.alignment.alignment.Alignment` factory class constructor.
Warning:
Although the GFA1 specification does not forbid the
operation symbols NSHX=, these are not allowed in GFA2
and thus their use in GFA1 is discouraged.
"""
def complement(self):
"""The CIGAR when switching the role of the two aligned segments.
Example:
>>> import gfapy
>>> str(gfapy.Alignment("2M1D3M").complement())
'3M1I2M'
Returns:
CIGAR: the complement CIGAR
"""
comp = list(reversed(self))
for op in comp:
if op.code == "I": op.code = "D"
elif op.code == "S": op.code = "D"
elif op.code == "D": op.code = "I"
elif op.code == "N": op.code = "I"
return CIGAR(comp)
def validate(self, version = "gfa1"):
"""Validates the instance.
Parameters:
version (str): 'gfa1' or 'gfa2'
Raises:
~gfapy.error.VersionError: If a wrong **version** is specified.
~gfapy.error.TypeError: If a component of the list is not a
CIGAR Operation; If the CIGAR operation length is not an integer or
a string representing an integer.
~gfapy.error.ValueError: If the length of an operation is < 0; If an
operation code is invalid in general or for the specified GFA version.
"""
if version != "gfa1" and version != "gfa2":
raise gfapy.VersionError(
"Version error: {}".format(repr(version)))
for op in self:
if not isinstance(op, gfapy.CIGAR.Operation):
raise gfapy.TypeError(
"Element is not a CIGAR operation: {}\n".format(op)+
"CIGAR instance is invalid: {}".format(self))
op.validate(version = version)
def length_on_reference(self):
"""Length of the aligned substring on the reference sequence
(**from** sequence for GFA1 links/containments;
**sid1** sequence for GFA2 edges)
Returns:
int
"""
l = 0
for op in self:
if op.code in ["M", "=", "X", "D" , "N"]:
l += op.length
return l
def length_on_query(self):
"""
Lenght of the aligned substring on the query sequence
(**to** sequence for GFA1 links/containments;
**sid2** sequence for GFA2 edges)
Returns:
int
"""
l = 0
for op in self:
if op.code in ["M", "=", "X", "I", "S"]:
l += op.length
return l
@classmethod
def _from_string(cls, string, valid = False, version = "gfa1"):
"""Create a CIGAR instance from its string representation.
Parameters:
string (str)
valid (bool): If **True** the string is guaranteed to be valid.
(Defaults to **False**)
version (str): 'gfa1' or 'gfa2'
Returns:
~gfapy.alignment.cigar.CIGAR or
~gfapy.alignment.placeholder.AlignmentPlaceholder
Raises:
~gfapy.error.FormatError: If the string is not a valid CIGAR string.
"""
if string == "*":
return gfapy.AlignmentPlaceholder()
cigar = CIGAR()
if not valid:
if version == "gfa1":
if not re.match(r"^([0-9]+[MIDNSHPX=])+$", string):
raise gfapy.FormatError()
elif version == "gfa2":
if not re.match(r"^([0-9]+[MIDP])+$", string):
raise gfapy.FormatError()
for m in re.finditer("([0-9]+)([MIDNSHPX=])", string):
cigar.append(CIGAR.Operation(int(m.group(1)), m.group(2)))
return cigar
class Operation:
"""An operation in a CIGAR string.
Attributes:
~Operation.length (int): Operation length.
code (str): Operation code, one of
:attr:`~Operation.CODE`.
"""
CODE_GFA1_ONLY = ["S", "H", "N", "X", "="]
"""Operations only valid in GFA1"""
CODE_GFA1_GFA2 = ["M", "I", "D", "P"]
"""Operations valid in GFA1 and GFA2"""
CODE = CODE_GFA1_ONLY + CODE_GFA1_GFA2
"""CIGAR operation codes"""
def validate(self, version = "gfa1"):
"""Validates the CIGAR operation.
Parameters:
version (str): 'gfa1' or 'gfa2'
Raises:
~gfapy.error.VersionError: If a wrong **version** is specified.
~gfapy.error.TypeError: If the CIGAR operation length is not an integer
or a string representing an integer.
~gfapy.error.ValueError: If the length of an operation is < 0; If an
operation code is invalid in general or for the specified GFA
version.
"""
if version != "gfa1" and version != "gfa2":
raise gfapy.VersionError(
"Version error: {}".format(repr(version)))
if not isinstance(self.length, int) and not isinstance(self.length, str):
raise gfapy.TypeError(
"Type error: length of CIGAR is {}".format(self.length))
if(int(self.length) < 0):
raise gfapy.ValueError("Length of CIGAR is {}".format(self.length))
if version == "gfa2":
if not self.code in Operation.CODE_GFA1_GFA2:
raise gfapy.ValueError()
else:
if not self.code in Operation.CODE:
raise gfapy.ValueError()
Operation = CIGAR.Operation
| 30.583756 | 80 | 0.61195 | import re
import gfapy
class CIGAR(list):
  """
  Representation of the contents of a CIGAR string.
  Each operation is represented by a
  :class:`CIGAR.Operation <gfapy.alignment.cigar.CIGAR.Operation>`,
  which specifies an operation length and operation symbol.
  Instances are usually created from their string representations, using the
  :class:`~gfapy.alignment.alignment.Alignment` factory class constructor.
  Warning:
    Although the GFA1 specification does not forbid the
    operation symbols NSHX=, these are not allowed in GFA2
    and thus their use in GFA1 is discouraged.
  """
  def complement(self):
    """The CIGAR when switching the role of the two aligned segments.
    Example:
      >>> import gfapy
      >>> str(gfapy.Alignment("2M1D3M").complement())
      '3M1I2M'
    Returns:
      CIGAR: the complement CIGAR
    """
    comp = list(reversed(self))
    # Swap insertion-like and deletion-like codes; M/P are symmetric.
    for op in comp:
      if op.code == "I": op.code = "D"
      elif op.code == "S": op.code = "D"
      elif op.code == "D": op.code = "I"
      elif op.code == "N": op.code = "I"
    return CIGAR(comp)
  def validate(self, version = "gfa1"):
    """Validates the instance.
    Parameters:
      version (str): 'gfa1' or 'gfa2'
    Raises:
      ~gfapy.error.VersionError: If a wrong **version** is specified.
      ~gfapy.error.TypeError: If a component of the list is not a
          CIGAR Operation; If the CIGAR operation length is not an integer or
          a string representing an integer.
      ~gfapy.error.ValueError: If the length of an operation is < 0; If an
          operation code is invalid in general or for the specified GFA version.
    """
    if version != "gfa1" and version != "gfa2":
      raise gfapy.VersionError(
          "Version error: {}".format(repr(version)))
    for op in self:
      if not isinstance(op, gfapy.CIGAR.Operation):
        raise gfapy.TypeError(
            "Element is not a CIGAR operation: {}\n".format(op)+
            "CIGAR instance is invalid: {}".format(self))
      op.validate(version = version)
  def length_on_reference(self):
    """Length of the aligned substring on the reference sequence
    (**from** sequence for GFA1 links/containments;
    **sid1** sequence for GFA2 edges)
    Returns:
      int
    """
    l = 0
    for op in self:
      if op.code in ["M", "=", "X", "D" , "N"]:
        l += op.length
    return l
  def length_on_query(self):
    """
    Lenght of the aligned substring on the query sequence
    (**to** sequence for GFA1 links/containments;
    **sid2** sequence for GFA2 edges)
    Returns:
      int
    """
    l = 0
    for op in self:
      if op.code in ["M", "=", "X", "I", "S"]:
        l += op.length
    return l
  @classmethod
  def _from_string(cls, string, valid = False, version = "gfa1"):
    """Create a CIGAR instance from its string representation.
    Parameters:
      string (str)
      valid (bool): If **True** the string is guaranteed to be valid.
          (Defaults to **False**)
      version (str): 'gfa1' or 'gfa2'
    Returns:
      ~gfapy.alignment.cigar.CIGAR or
      ~gfapy.alignment.placeholder.AlignmentPlaceholder
    Raises:
      ~gfapy.error.FormatError: If the string is not a valid CIGAR string.
    """
    # "*" is the placeholder for an unavailable alignment.
    if string == "*":
      return gfapy.AlignmentPlaceholder()
    cigar = CIGAR()
    if not valid:
      if version == "gfa1":
        if not re.match(r"^([0-9]+[MIDNSHPX=])+$", string):
          raise gfapy.FormatError()
      elif version == "gfa2":
        if not re.match(r"^([0-9]+[MIDP])+$", string):
          raise gfapy.FormatError()
    for m in re.finditer("([0-9]+)([MIDNSHPX=])", string):
      cigar.append(CIGAR.Operation(int(m.group(1)), m.group(2)))
    return cigar
  def __str__(self):
    # An empty CIGAR renders as the "*" placeholder.
    if not self:
      return "*"
    else:
      return "".join([str(op) for op in self])
  def __repr__(self):
    return "gfapy.CIGAR([{}])".format(", ".join([repr(op) for op in self]))
  class Operation:
    """An operation in a CIGAR string.
    Attributes:
      ~Operation.length (int): Operation length.
      code (str): Operation code, one of
          :attr:`~Operation.CODE`.
    """
    CODE_GFA1_ONLY = ["S", "H", "N", "X", "="]
    """Operations only valid in GFA1"""
    CODE_GFA1_GFA2 = ["M", "I", "D", "P"]
    """Operations valid in GFA1 and GFA2"""
    CODE = CODE_GFA1_ONLY + CODE_GFA1_GFA2
    """CIGAR operation codes"""
    def validate(self, version = "gfa1"):
      """Validates the CIGAR operation.
      Parameters:
        version (str): 'gfa1' or 'gfa2'
      Raises:
        ~gfapy.error.VersionError: If a wrong **version** is specified.
        ~gfapy.error.TypeError: If the CIGAR operation length is not an integer
            or a string representing an integer.
        ~gfapy.error.ValueError: If the length of an operation is < 0; If an
            operation code is invalid in general or for the specified GFA
            version.
      """
      if version != "gfa1" and version != "gfa2":
        raise gfapy.VersionError(
            "Version error: {}".format(repr(version)))
      if not isinstance(self.length, int) and not isinstance(self.length, str):
        raise gfapy.TypeError(
            "Type error: length of CIGAR is {}".format(self.length))
      if(int(self.length) < 0):
        raise gfapy.ValueError("Length of CIGAR is {}".format(self.length))
      if version == "gfa2":
        if not self.code in Operation.CODE_GFA1_GFA2:
          raise gfapy.ValueError()
      else:
        if not self.code in Operation.CODE:
          raise gfapy.ValueError()
    def __init__(self, length, code):
      self.length = length
      self.code = code
    def __len__(self):
      return self.length
    def __str__(self):
      return "{}{}".format(self.length, self.code)
    def __repr__(self):
      return "gfapy.CIGAR.Operation({},{})".format(self.length, repr(self.code))
    def __eq__(self, other):
      return self.length == other.length and self.code == other.code
# Module-level convenience alias for the nested class.
Operation = CIGAR.Operation
| 445 | 0 | 185 |
442e7e55cc3c82406b780fb1efbd567afcfc80d5 | 3,653 | py | Python | go_nn.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | 1 | 2017-07-18T22:24:30.000Z | 2017-07-18T22:24:30.000Z | go_nn.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | null | null | null | go_nn.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
import time
import board3d as go_board
import matplotlib.pyplot as plt
import global_vars_go as gvg
import models.cnn
batch_size = gvg.process_batch_size # How many board states (not full games) to send to GPU at once, about 200 is the limit of this GPU's RAM
# Source: https://stackoverflow.com/questions/21989513/finding-index-of-maximum-value-in-array-with-numpy
| 48.065789 | 141 | 0.658363 | import tensorflow as tf
import numpy as np
import time
import board3d as go_board
import matplotlib.pyplot as plt
import global_vars_go as gvg
import models.cnn
batch_size = gvg.process_batch_size # How many board states (not full games) to send to GPU at once, about 200 is the limit of this GPU's RAM
def train_network(game_data, model):
    """Replay SGF game records and fit *model* on (board, next-move) pairs.

    Boards are streamed to the GPU in chunks of *batch_size* states; the
    final partial chunk is fitted after the loop.
    """
    vs = gvg.validation_split # What fraction of games are reserved for validation
    hm_batches = int(len(game_data)/batch_size)
    train_boards = []
    train_next_moves = []
    for game_index in range(len(game_data)):
        board = go_board.setup_board(game_data[game_index])
        for node in game_data[game_index].get_main_sequence():
            board = go_board.switch_player_perspec(board) # Changes player perspective, black becomes white and vice versa
            node_move = node.get_move()[1]
            if node_move is not None:
                train_boards.append(go_board.get_encoded_board(board))
                # One-hot target: a board-sized grid with the played point set.
                next_move = np.zeros(gvg.board_size * gvg.board_size).reshape(gvg.board_size, gvg.board_size)
                next_move[node_move[0], node_move[1]] = gvg.filled # y = an array in the form [board_x_position, board_y_position]
                train_next_moves.append(next_move.reshape(gvg.board_size * gvg.board_size))
                board = go_board.make_move(board, node_move, gvg.bot_channel, gvg.player_channel) # Update board with new move
                if board is None:
                    print("ERROR! Illegal move, {}, while training".format(node_move[1]))
            if len(train_boards) >= batch_size: # Send chunk to GPU at batch limit
                # The tail of each chunk (fraction vs) is used for validation.
                model.fit({"input": train_boards[:int(-len(train_boards)*vs)]},
                    {"target": train_next_moves[:int(-len(train_boards)*vs)]},
                    validation_set=({"input": train_boards[int(-len(train_boards)*vs):]},
                    {"target": train_next_moves[int(-len(train_boards)*vs):]}), n_epoch=1,
                    batch_size=gvg.train_batch_size, snapshot_step=7500, show_metric=True)
                train_boards = []
                train_next_moves = []
    # Finish of what is remaining in the batch
    model.fit({"input": train_boards[:int(-len(train_boards)*vs)]},
        {"target": train_next_moves[:int(-len(train_boards)*vs)]},
        validation_set=({"input": train_boards[int(-len(train_boards)*vs):]},
        {"target": train_next_moves[int(-len(train_boards)*vs):]}), n_epoch=1,
        batch_size=gvg.train_batch_size, snapshot_step=7500, show_metric=True)
    #model.save("test.tflearn")
def predict_move(orig_board, model, level=0, prob_board=None):
    """Return the highest-probability legal move for *orig_board*.

    Illegal candidates are masked with a large negative value and the
    next-best point is tried until a legal move is found.
    NOTE(review): the `level` parameter is unused in this body -- verify
    whether callers rely on it.
    """
    if prob_board is None:
        prob_board = np.array(model.predict(go_board.get_encoded_board(orig_board))).reshape(gvg.board_size, gvg.board_size)
    found_move = False
    while found_move == False:
        move = nanargmax(prob_board)
        # result_only=True only checks legality without mutating the board.
        if go_board.make_move(orig_board, move, gvg.bot_channel, gvg.player_channel, result_only=True) == False:
            prob_board[move[0]][move[1]] = -999999.0
        else:
            found_move = True
    return move
# Source: https://stackoverflow.com/questions/21989513/finding-index-of-maximum-value-in-array-with-numpy
def nanargmax(a):
    """Return the (row, col) index of the largest non-NaN value in *a*."""
    flat = np.argmax(a, axis=None)
    pos = np.unravel_index(flat, a.shape)
    if not np.isnan(a[pos]):
        return pos
    # argmax landed on a NaN: partition so the NaNs sort to the end and
    # pick the largest real entry just before them.
    # In numpy < 1.8 use flat = np.argsort(a, axis=None)[-n_nan-1]
    n_nan = np.sum(np.isnan(a))
    flat = np.argpartition(a, -n_nan - 1, axis=None)[-n_nan - 1]
    return np.unravel_index(flat, a.shape)
| 3,174 | 0 | 68 |
07c7ed0e82580e02f8fc7ae7c19aaf4b7078ddf1 | 9,607 | py | Python | main/views.py | jhdulaney/oh-dataselfie-source | ea40129ae01a14e86e8f87eb62948c23ae2dc30e | [
"MIT"
] | null | null | null | main/views.py | jhdulaney/oh-dataselfie-source | ea40129ae01a14e86e8f87eb62948c23ae2dc30e | [
"MIT"
] | 5 | 2019-05-09T14:44:40.000Z | 2021-06-10T20:39:04.000Z | main/views.py | jhdulaney/oh-dataselfie-source | ea40129ae01a14e86e8f87eb62948c23ae2dc30e | [
"MIT"
] | 2 | 2018-07-13T00:06:22.000Z | 2018-12-27T18:19:28.000Z | import json
import logging
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
from django.conf import settings
from django.contrib.auth import login, logout
from django.shortcuts import redirect, render
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.core.serializers import serialize
import ohapi
import requests
from project_admin.models import ProjectConfiguration, FileMetaData
from .helpers import oh_code_to_member
logger = logging.getLogger(__name__)
OH_BASE_URL = settings.OPENHUMANS_OH_BASE_URL
OH_API_BASE = OH_BASE_URL + '/api/direct-sharing'
OH_DIRECT_UPLOAD = OH_API_BASE + '/project/files/upload/direct/'
OH_DIRECT_UPLOAD_COMPLETE = OH_API_BASE + '/project/files/upload/complete/'
OH_OAUTH2_REDIRECT_URI = '{}/complete'.format(settings.OPENHUMANS_APP_BASE_URL)
def delete_file(request, file_id):
    """
    Delete specified file in Open Humans for this project member.
    """
    # 'admin' is the local project-admin account, not an OH member, so it
    # is excluded here.
    if request.user.is_authenticated and request.user.username != 'admin':
        oh_member = request.user.openhumansmember
        client_info = ProjectConfiguration.objects.get(id=1).client_info
        ohapi.api.delete_files(
            project_member_id=oh_member.oh_id,
            access_token=oh_member.get_access_token(**client_info),
            file_id=file_id,
            base_url=OH_BASE_URL)
        return redirect('list')
    # Unauthenticated (or admin) users are bounced back to the start page.
    return redirect('index')
def delete_all_oh_files(oh_member):
    """
    Delete all current project files in Open Humans for this project member.
    """
    client_info = ProjectConfiguration.objects.get(id=1).client_info
    ohapi.api.delete_files(
        project_member_id=oh_member.oh_id,
        access_token=oh_member.get_access_token(**client_info),
        all_files=True,
        base_url=OH_BASE_URL)
def upload_file_to_oh(oh_member, filehandle, metadata):
    """
    This demonstrates using the Open Humans "large file" upload process.
    The small file upload process is simpler, but it can time out. This
    alternate approach is required for large files, and still appropriate
    for small files.
    This process is "direct to S3" using three steps: 1. get S3 target URL from
    Open Humans, 2. Perform the upload, 3. Notify Open Humans when complete.
    """
    client_info = ProjectConfiguration.objects.get(id=1).client_info
    # Step 1: get the S3 target from Open Humans.
    upload_url = '{}?access_token={}'.format(
        OH_DIRECT_UPLOAD, oh_member.get_access_token(**client_info))
    req1 = requests.post(upload_url,
                         data={'project_member_id': oh_member.oh_id,
                               'filename': filehandle.name,
                               'metadata': json.dumps(metadata)})
    if req1.status_code != 201:
        # raise_http_error itself raises; the original's extra `raise`
        # prefix was unreachable and has been dropped.
        raise_http_error(upload_url, req1,
                         'Bad response when starting file upload.')
    # Step 2: upload to the S3 target.
    req2 = requests.put(url=req1.json()['url'], data=filehandle)
    if req2.status_code != 200:
        raise_http_error(req1.json()['url'], req2,
                         'Bad response when uploading to target.')
    # Step 3: report the completed upload to Open Humans.
    complete_url = ('{}?access_token={}'.format(
        OH_DIRECT_UPLOAD_COMPLETE, oh_member.get_access_token(**client_info)))
    req3 = requests.post(complete_url,
                         data={'project_member_id': oh_member.oh_id,
                               'file_id': req1.json()['id']})
    if req3.status_code != 200:
        # Bug fix: the original passed req2 here, so the error reported the
        # status of the S3 upload instead of the completion request.
        raise_http_error(complete_url, req3,
                         'Bad response when completing upload.')
def iterate_files_upload(request):
    """
    iterate over all files to upload them to OH.

    Form fields are keyed per FileMetaData id: tags_<id>, file_desc_<id>,
    file_<id>. Files missing from the POST are skipped.
    """
    files = FileMetaData.objects.all()
    for file in files:
        tags = request.POST.get('tags_{}'.format(file.id))
        # Tags arrive as a comma-separated string from the form.
        tags = [i.strip() for i in tags.split(',')]
        description = request.POST.get('file_desc_{}'.format(file.id))
        uploaded_file = request.FILES.get('file_{}'.format(file.id))
        if uploaded_file is not None:
            metadata = {'tags': tags,
                        'description': description}
            upload_file_to_oh(
                request.user.openhumansmember,
                uploaded_file,
                metadata)
def index(request):
    """
    Starting page for app.
    """
    proj_config = ProjectConfiguration.objects.get(id=1)
    file_num = FileMetaData.objects.all().count()
    # NOTE(review): set_auth_url is not defined in this file -- presumably a
    # helper building the OH OAuth2 authorization URL; verify.
    auth_url = set_auth_url(proj_config)
    # Prompt for setup while client credentials or file slots are missing.
    if not proj_config.oh_client_secret or \
       not proj_config.oh_client_id or \
       not file_num:
        messages.info(request,
                      mark_safe(
                          "<b><a href='/project-admin'>"
                          "Click here to set up the app."
                          "</a></b>"
                      ))
    context = {'auth_url': auth_url,
               'index_page': "".join(proj_config.homepage_text),
               'file_num': file_num}
    # Logged-in members go straight to their overview page.
    if request.user.is_authenticated and request.user.username != 'admin':
        return redirect('overview')
    return render(request, 'main/index.html', context=context)
def complete(request):
    """
    Receive user from Open Humans. Store data, start data upload task.

    GET: exchange the OAuth code, log the member in and show the upload form.
    POST: process the submitted upload form and return to the index page.
    """
    logger.debug("Received user returning from Open Humans.")
    proj_config = ProjectConfiguration.objects.get(id=1)
    if request.method == 'GET':
        # NOTE(review): login_member is not defined in this file -- presumably
        # it performs the OAuth code exchange and django login; verify.
        login_member(request)
        if not request.user.is_authenticated:
            logger.debug('Invalid code exchange. User returned to start page.')
            return redirect('/')
        else:
            oh_member = request.user.openhumansmember
            context = file_upload_prep_context(oh_member, proj_config)
            return render(request, 'main/complete.html',
                          context=context)
    elif request.method == 'POST':
        iterate_files_upload(request)
        return redirect('index')
def logout_user(request):
    """Log the user out on POST, then redirect to the index page."""
    # Only POST performs the logout; any other method just redirects.
    if request.method == 'POST':
        logout(request)
    return redirect('index')
def about(request):
    """
    Render about page using the about/faq text from the project config.
    """
    proj_config = ProjectConfiguration.objects.get(id=1)
    context = {'about': proj_config.about,
               'faq': proj_config.faq}
    return render(request, 'main/about.html',
                  context=context)
| 35.713755 | 79 | 0.640991 | import json
import logging
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
from django.conf import settings
from django.contrib.auth import login, logout
from django.shortcuts import redirect, render
from django.contrib import messages
from django.utils.safestring import mark_safe
from django.core.serializers import serialize
import ohapi
import requests
from project_admin.models import ProjectConfiguration, FileMetaData
from .helpers import oh_code_to_member
logger = logging.getLogger(__name__)
OH_BASE_URL = settings.OPENHUMANS_OH_BASE_URL
OH_API_BASE = OH_BASE_URL + '/api/direct-sharing'
OH_DIRECT_UPLOAD = OH_API_BASE + '/project/files/upload/direct/'
OH_DIRECT_UPLOAD_COMPLETE = OH_API_BASE + '/project/files/upload/complete/'
OH_OAUTH2_REDIRECT_URI = '{}/complete'.format(settings.OPENHUMANS_APP_BASE_URL)
def delete_file(request, file_id):
    """
    Delete specified file in Open Humans for this project member.
    """
    # 'admin' is the local project-admin account, not an OH member, so it
    # is excluded here.
    if request.user.is_authenticated and request.user.username != 'admin':
        oh_member = request.user.openhumansmember
        client_info = ProjectConfiguration.objects.get(id=1).client_info
        ohapi.api.delete_files(
            project_member_id=oh_member.oh_id,
            access_token=oh_member.get_access_token(**client_info),
            file_id=file_id,
            base_url=OH_BASE_URL)
        return redirect('list')
    # Unauthenticated (or admin) users are bounced back to the start page.
    return redirect('index')
def delete_all_oh_files(oh_member):
    """
    Delete all current project files in Open Humans for this project member.
    """
    client_info = ProjectConfiguration.objects.get(id=1).client_info
    ohapi.api.delete_files(
        project_member_id=oh_member.oh_id,
        access_token=oh_member.get_access_token(**client_info),
        all_files=True,
        base_url=OH_BASE_URL)
def raise_http_error(url, response, message):
    """Raise an HTTPError built from a failed ``requests`` response."""
    status = response.status_code
    raise HTTPError(url, status, message, hdrs=None, fp=None)
def upload_file_to_oh(oh_member, filehandle, metadata):
"""
This demonstrates using the Open Humans "large file" upload process.
The small file upload process is simpler, but it can time out. This
alternate approach is required for large files, and still appropriate
for small files.
This process is "direct to S3" using three steps: 1. get S3 target URL from
Open Humans, 2. Perform the upload, 3. Notify Open Humans when complete.
"""
client_info = ProjectConfiguration.objects.get(id=1).client_info
# Get the S3 target from Open Humans.
upload_url = '{}?access_token={}'.format(
OH_DIRECT_UPLOAD, oh_member.get_access_token(**client_info))
req1 = requests.post(upload_url,
data={'project_member_id': oh_member.oh_id,
'filename': filehandle.name,
'metadata': json.dumps(metadata)})
if req1.status_code != 201:
raise raise_http_error(upload_url, req1,
'Bad response when starting file upload.')
# Upload to S3 target.
req2 = requests.put(url=req1.json()['url'], data=filehandle)
if req2.status_code != 200:
raise raise_http_error(req1.json()['url'], req2,
'Bad response when uploading to target.')
# Report completed upload to Open Humans.
complete_url = ('{}?access_token={}'.format(
OH_DIRECT_UPLOAD_COMPLETE, oh_member.get_access_token(**client_info)))
req3 = requests.post(complete_url,
data={'project_member_id': oh_member.oh_id,
'file_id': req1.json()['id']})
if req3.status_code != 200:
raise raise_http_error(complete_url, req2,
'Bad response when completing upload.')
def iterate_files_upload(request):
"""
iterate over all files to upload them to OH.
"""
files = FileMetaData.objects.all()
for file in files:
tags = request.POST.get('tags_{}'.format(file.id))
tags = [i.strip() for i in tags.split(',')]
description = request.POST.get('file_desc_{}'.format(file.id))
uploaded_file = request.FILES.get('file_{}'.format(file.id))
if uploaded_file is not None:
metadata = {'tags': tags,
'description': description}
upload_file_to_oh(
request.user.openhumansmember,
uploaded_file,
metadata)
def file_upload_prep_context(oh_member, proj_config):
files = FileMetaData.objects.all()
files_js = serialize('json', files)
for file in files:
file.tags = file.get_tags()
context = {'oh_id': oh_member.oh_id,
'oh_member': oh_member,
'files': files,
'files_js': files_js,
'oh_direct_upload_url': OH_DIRECT_UPLOAD,
'oh_direct_upload_complete_url': OH_DIRECT_UPLOAD_COMPLETE,
'upload_description': proj_config.upload_description}
return context
def set_auth_url(proj_config):
if proj_config.oh_client_id:
auth_url = ohapi.api.oauth2_auth_url(
client_id=proj_config.oh_client_id,
redirect_uri=OH_OAUTH2_REDIRECT_URI,
base_url=OH_BASE_URL)
else:
auth_url = 'http://www.example.com'
return auth_url
def index(request):
"""
Starting page for app.
"""
proj_config = ProjectConfiguration.objects.get(id=1)
file_num = FileMetaData.objects.all().count()
auth_url = set_auth_url(proj_config)
if not proj_config.oh_client_secret or \
not proj_config.oh_client_id or \
not file_num:
messages.info(request,
mark_safe(
"<b><a href='/project-admin'>"
"Click here to set up the app."
"</a></b>"
))
context = {'auth_url': auth_url,
'index_page': "".join(proj_config.homepage_text),
'file_num': file_num}
if request.user.is_authenticated and request.user.username != 'admin':
return redirect('overview')
return render(request, 'main/index.html', context=context)
def overview(request):
client_info = ProjectConfiguration.objects.get(id=1).client_info
if request.user.is_authenticated and request.user.username != 'admin':
oh_member = request.user.openhumansmember
proj_config = ProjectConfiguration.objects.get(id=1)
files = FileMetaData.objects.all()
files_js = serialize('json', files)
for file in files:
file.tags = file.get_tags()
context = {'oh_id': oh_member.oh_id,
'oh_member': oh_member,
'files': files,
'files_js': files_js,
'access_token': oh_member.get_access_token(**client_info),
'oh_direct_upload_url': OH_DIRECT_UPLOAD,
'oh_direct_upload_complete_url': OH_DIRECT_UPLOAD_COMPLETE,
"overview": "".join(proj_config.overview)}
return render(request, 'main/overview.html', context=context)
return redirect('index')
def login_member(request):
code = request.GET.get('code', '')
oh_member = oh_code_to_member(code=code)
if oh_member:
# Log in the user.
user = oh_member.user
login(request, user,
backend='django.contrib.auth.backends.ModelBackend')
def complete(request):
"""
Receive user from Open Humans. Store data, start data upload task.
"""
logger.debug("Received user returning from Open Humans.")
proj_config = ProjectConfiguration.objects.get(id=1)
if request.method == 'GET':
login_member(request)
if not request.user.is_authenticated:
logger.debug('Invalid code exchange. User returned to start page.')
return redirect('/')
else:
oh_member = request.user.openhumansmember
context = file_upload_prep_context(oh_member, proj_config)
return render(request, 'main/complete.html',
context=context)
elif request.method == 'POST':
iterate_files_upload(request)
return redirect('index')
def logout_user(request):
"""
Logout user
"""
if request.method == 'POST':
logout(request)
return redirect('index')
def upload_old(request):
proj_config = ProjectConfiguration.objects.get(id=1)
files = FileMetaData.objects.all()
for file in files:
file.tags = file.get_tags()
if request.user.is_authenticated:
context = {'upload_description': proj_config.upload_description,
'files': files}
return render(request, 'main/upload_old.html',
context=context)
return redirect('index')
def about(request):
"""
Render about page
"""
proj_config = ProjectConfiguration.objects.get(id=1)
context = {'about': proj_config.about,
'faq': proj_config.faq}
return render(request, 'main/about.html',
context=context)
def list_files(request):
if request.user.is_authenticated and request.user.username != 'admin':
oh_member = request.user.openhumansmember
data = ohapi.api.exchange_oauth2_member(
oh_member.get_access_token(),
base_url=OH_BASE_URL)
context = {'files': data['data']}
return render(request, 'main/list.html',
context=context)
return redirect('index')
| 2,983 | 0 | 161 |
ef31d0cec9f6459d47be8bfaa11b135ea2c9a8f9 | 3,542 | py | Python | tests/test_workers.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 21 | 2015-01-01T02:06:25.000Z | 2022-03-19T13:57:42.000Z | tests/test_workers.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 2 | 2015-01-07T07:38:53.000Z | 2015-01-07T07:39:01.000Z | tests/test_workers.py | toastdriven/alligator | 2bb41e0cf0a57baf90a9c15b3bb01bd74438bc69 | [
"BSD-3-Clause"
] | 7 | 2015-01-03T19:45:14.000Z | 2022-03-19T13:57:44.000Z | import os
import unittest
from unittest import mock
from alligator.gator import Gator
from alligator.workers import Worker
ALLOW_SLOW = bool(os.environ.get("ALLIGATOR_SLOW", False))
CONN_STRING = os.environ.get("ALLIGATOR_CONN")
FILENAME = "/tmp/alligator_test_workers.txt"
@unittest.skipIf(not ALLOW_SLOW, "Skipping slow worker tests")
| 28.796748 | 71 | 0.65415 | import os
import unittest
from unittest import mock
from alligator.gator import Gator
from alligator.workers import Worker
ALLOW_SLOW = bool(os.environ.get("ALLIGATOR_SLOW", False))
CONN_STRING = os.environ.get("ALLIGATOR_CONN")
FILENAME = "/tmp/alligator_test_workers.txt"
def touch_file():
with open(FILENAME, "w") as write_file:
write_file.write("0")
def read_file():
with open(FILENAME, "r") as read_file:
return int(read_file.read().strip())
def incr_file(incr):
value = read_file()
with open(FILENAME, "w") as write_file:
value += incr
write_file.write(str(value))
def rm_file():
try:
os.unlink(FILENAME)
except OSError:
pass
def raise_error(val):
raise ValueError("You've chosen... poorly.")
@unittest.skipIf(not ALLOW_SLOW, "Skipping slow worker tests")
class WorkerTestCase(unittest.TestCase):
def setUp(self):
super(WorkerTestCase, self).setUp()
self.gator = Gator("locmem://")
self.worker = Worker(self.gator, max_tasks=2, nap_time=1)
self.gator.backend.drop_all("all")
rm_file()
touch_file()
def test_init(self):
self.assertEqual(self.worker.gator, self.gator)
self.assertEqual(self.worker.max_tasks, 2)
self.assertEqual(self.worker.to_consume, "all")
self.assertEqual(self.worker.nap_time, 1)
self.assertEqual(self.worker.tasks_complete, 0)
def test_ident(self):
ident = self.worker.ident()
self.assertTrue(ident.startswith("Alligator Worker (#"))
def test_check_and_run_task(self):
self.assertEqual(read_file(), 0)
self.gator.task(incr_file, 2)
self.gator.task(incr_file, 3)
self.assertEqual(self.gator.backend.len("all"), 2)
self.assertEqual(self.worker.tasks_complete, 0)
self.worker.check_and_run_task()
self.assertEqual(self.gator.backend.len("all"), 1)
self.assertEqual(self.worker.tasks_complete, 1)
self.assertEqual(read_file(), 2)
def test_check_and_run_task_trap_exception(self):
self.assertEqual(read_file(), 0)
self.gator.task(incr_file, 2)
self.gator.task(incr_file, 3)
self.gator.task(raise_error, 75)
self.gator.task(incr_file, 4)
self.assertEqual(self.gator.backend.len("all"), 4)
self.assertEqual(self.worker.tasks_complete, 0)
with mock.patch.object(
self.worker.log, "exception"
) as mock_exception:
self.assertTrue(self.worker.check_and_run_task())
self.assertTrue(self.worker.check_and_run_task())
# Here, it should hit the exception BUT not stop execution.
self.assertFalse(self.worker.check_and_run_task())
self.assertTrue(self.worker.check_and_run_task())
# Make sure we tried to log that exception.
mock_exception.called_once()
self.assertEqual(self.gator.backend.len("all"), 0)
self.assertEqual(self.worker.tasks_complete, 3)
self.assertEqual(read_file(), 9)
def test_run_forever(self):
self.assertEqual(read_file(), 0)
self.gator.task(incr_file, 2)
self.gator.task(incr_file, 3)
self.gator.task(incr_file, 4)
self.assertEqual(self.gator.backend.len("all"), 3)
# Should actually only run for two of the three tasks.
self.worker.run_forever()
self.assertEqual(self.gator.backend.len("all"), 1)
self.assertEqual(read_file(), 5)
| 2,878 | 19 | 298 |
3fa00e78910eee773dfd47ebe1a32119bd178d04 | 345 | py | Python | apps/webmarks/upload/urls.py | EricMuller/mynote-backend | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | 1 | 2017-04-26T10:24:21.000Z | 2017-04-26T10:24:21.000Z | apps/webmarks/upload/urls.py | EricMuller/mynotes-backend | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | 5 | 2020-06-05T18:16:39.000Z | 2022-01-13T00:45:49.000Z | apps/webmarks/upload/urls.py | EricMuller/webmarks-rest-api | 69bc39b8cfad52d6c42003cfa7bd629f3e8eccb7 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.conf.urls import include
from rest_framework import routers
from webmarks.bookmarks import viewsets
apiRouter = routers.DefaultRouter()
apiRouter.register(r'upload', viewsets.FileUploaderViewSet)
urlpatterns = [
# API V1
url(r'v1/', include(apiRouter.urls, namespace='external_apis')),
]
| 23 | 68 | 0.776812 | from django.conf.urls import url
from django.conf.urls import include
from rest_framework import routers
from webmarks.bookmarks import viewsets
apiRouter = routers.DefaultRouter()
apiRouter.register(r'upload', viewsets.FileUploaderViewSet)
urlpatterns = [
# API V1
url(r'v1/', include(apiRouter.urls, namespace='external_apis')),
]
| 0 | 0 | 0 |
b2f55a72355b229af932ac8ad82b7589a66ce05b | 26,979 | py | Python | orbitize/plot.py | sblunt/orbitize | 665ca4843d10ee1593665254354d934f37e1b5fc | [
"BSD-3-Clause-Clear"
] | 60 | 2018-01-12T17:16:53.000Z | 2022-02-14T01:39:39.000Z | orbitize/plot.py | sblunt/orbitize | 665ca4843d10ee1593665254354d934f37e1b5fc | [
"BSD-3-Clause-Clear"
] | 278 | 2018-01-12T17:25:47.000Z | 2022-03-31T21:28:27.000Z | orbitize/plot.py | sblunt/orbitize | 665ca4843d10ee1593665254354d934f37e1b5fc | [
"BSD-3-Clause-Clear"
] | 49 | 2018-10-30T19:34:05.000Z | 2021-10-31T13:28:45.000Z | import numpy as np
import corner
import warnings
import itertools
import astropy.units as u
import astropy.constants as consts
from astropy.time import Time
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.colors as colors
from erfa import ErfaWarning
import orbitize
import orbitize.kepler as kepler
# TODO: deprecatation warning for plots in results
# define modified color map for default use in orbit plots
cmap = mpl.cm.Purples_r
cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=0.0, b=0.7),
cmap(np.linspace(0.0, 0.7, 1000))
)
def plot_corner(results, param_list=None, **corner_kwargs):
"""
Make a corner plot of posterior on orbit fit from any sampler
Args:
param_list (list of strings): each entry is a name of a parameter to include.
Valid strings::
sma1: semimajor axis
ecc1: eccentricity
inc1: inclination
aop1: argument of periastron
pan1: position angle of nodes
tau1: epoch of periastron passage, expressed as fraction of orbital period
per1: period
K1: stellar radial velocity semi-amplitude
[repeat for 2, 3, 4, etc if multiple objects]
plx: parallax
pm_ra: RA proper motion
pm_dec: Dec proper motion
alpha0: primary offset from reported Hipparcos RA @ alphadec0_epoch (generally 1991.25)
delta0: primary offset from reported Hipparcos Dec @ alphadec0_epoch (generally 1991.25)
gamma: rv offset
sigma: rv jitter
mi: mass of individual body i, for i = 0, 1, 2, ... (only if fit_secondary_mass == True)
mtot: total mass (only if fit_secondary_mass == False)
**corner_kwargs: any remaining keyword args are sent to ``corner.corner``.
See `here <https://corner.readthedocs.io/>`_.
Note: default axis labels used unless overwritten by user input.
Return:
``matplotlib.pyplot.Figure``: corner plot
.. Note:: **Example**: Use ``param_list = ['sma1,ecc1,inc1,sma2,ecc2,inc2']`` to only
plot posteriors for semimajor axis, eccentricity and inclination
of the first two companions
Written: Henry Ngo, 2018
"""
# Define array of default axis labels (overwritten if user specifies list)
default_labels = {
'sma': '$a_{0}$ [au]',
'ecc': '$ecc_{0}$',
'inc': '$inc_{0}$ [$^\\circ$]',
'aop': '$\\omega_{0}$ [$^\\circ$]',
'pan': '$\\Omega_{0}$ [$^\\circ$]',
'tau': '$\\tau_{0}$',
'plx': '$\\pi$ [mas]',
'gam': '$\\gamma$ [km/s]',
'sig': '$\\sigma$ [km/s]',
'mtot': '$M_T$ [M$_{{\\odot}}$]',
'm0': '$M_0$ [M$_{{\\odot}}$]',
'm': '$M_{0}$ [M$_{{\\rm Jup}}$]',
'pm_ra': '$\\mu_{{\\alpha}}$ [mas/yr]',
'pm_dec': '$\\mu_{{\\delta}}$ [mas/yr]',
'alpha0': '$\\alpha^{{*}}_{{0}}$ [mas]',
'delta0': '$\\delta_0$ [mas]',
'm': '$M_{0}$ [M$_{{\\rm Jup}}$]',
'per' : '$P_{0}$ [yr]',
'K' : '$K_{0}$ [km/s]',
'x' : '$X_{0}$ [AU]',
'y' : '$Y_{0}$ [AU]',
'z' : '$Z_{0}$ [AU]',
'xdot' : '$xdot_{0}$ [km/s]',
'ydot' : '$ydot_{0}$ [km/s]',
'zdot' : '$zdot_{0}$ [km/s]'
}
if param_list is None:
param_list = results.labels
param_indices = []
angle_indices = []
secondary_mass_indices = []
for i, param in enumerate(param_list):
index_num = results.param_idx[param]
# only plot non-fixed parameters
if np.std(results.post[:, index_num]) > 0:
param_indices.append(index_num)
label_key = param
if label_key.startswith('aop') or label_key.startswith('pan') or label_key.startswith('inc'):
angle_indices.append(i)
if label_key.startswith('m') and label_key != 'm0' and label_key != 'mtot':
secondary_mass_indices.append(i)
samples = np.copy(results.post[:, param_indices]) # keep only chains for selected parameters
samples[:, angle_indices] = np.degrees(
samples[:, angle_indices]) # convert angles from rad to deg
samples[:, secondary_mass_indices] *= u.solMass.to(u.jupiterMass) # convert to Jupiter masses for companions
if 'labels' not in corner_kwargs: # use default labels if user didn't already supply them
reduced_labels_list = []
for i in np.arange(len(param_indices)):
label_key = param_list[i]
if label_key.startswith("m") and label_key != 'm0' and label_key != 'mtot':
body_num = label_key[1]
label_key = "m"
elif label_key == 'm0' or label_key == 'mtot' or label_key.startswith('plx'):
body_num = ""
# maintain original label key
elif label_key in ['pm_ra', 'pm_dec', 'alpha0', 'delta0']:
body_num = ""
elif label_key.startswith("gamma") or label_key.startswith("sigma"):
body_num = ""
label_key = label_key[0:3]
else:
body_num = label_key[-1]
label_key = label_key[0:-1]
reduced_labels_list.append(default_labels[label_key].format(body_num))
corner_kwargs['labels'] = reduced_labels_list
figure = corner.corner(samples, **corner_kwargs)
return figure
def plot_orbits(results, object_to_plot=1, start_mjd=51544.,
num_orbits_to_plot=100, num_epochs_to_plot=100,
square_plot=True, show_colorbar=True, cmap=cmap,
sep_pa_color='lightgrey', sep_pa_end_year=2025.0,
cbar_param='Epoch [year]', mod180=False, rv_time_series=False, plot_astrometry=True,
plot_astrometry_insts=False, fig=None):
"""
Plots one orbital period for a select number of fitted orbits
for a given object, with line segments colored according to time
Args:
object_to_plot (int): which object to plot (default: 1)
start_mjd (float): MJD in which to start plotting orbits (default: 51544,
the year 2000)
num_orbits_to_plot (int): number of orbits to plot (default: 100)
num_epochs_to_plot (int): number of points to plot per orbit (default: 100)
square_plot (Boolean): Aspect ratio is always equal, but if
square_plot is True (default), then the axes will be square,
otherwise, white space padding is used
show_colorbar (Boolean): Displays colorbar to the right of the plot [True]
cmap (matplotlib.cm.ColorMap): color map to use for making orbit tracks
(default: modified Purples_r)
sep_pa_color (string): any valid matplotlib color string, used to set the
color of the orbit tracks in the Sep/PA panels (default: 'lightgrey').
sep_pa_end_year (float): decimal year specifying when to stop plotting orbit
tracks in the Sep/PA panels (default: 2025.0).
cbar_param (string): options are the following: 'Epoch [year]', 'sma1', 'ecc1', 'inc1', 'aop1',
'pan1', 'tau1', 'plx. Number can be switched out. Default is Epoch [year].
mod180 (Bool): if True, PA will be plotted in range [180, 540]. Useful for plotting short
arcs with PAs that cross 360 deg during observations (default: False)
rv_time_series (Boolean): if fitting for secondary mass using MCMC for rv fitting and want to
display time series, set to True.
plot_astrometry (Boolean): set to True by default. Plots the astrometric data.
plot_astrometry_insts (Boolean): set to False by default. Plots the astrometric data by instruments.
fig (matplotlib.pyplot.Figure): optionally include a predefined Figure object to plot the orbit on.
Most users will not need this keyword.
Return:
``matplotlib.pyplot.Figure``: the orbit plot if input is valid, ``None`` otherwise
(written): Henry Ngo, Sarah Blunt, 2018
Additions by Malena Rice, 2019
"""
if Time(start_mjd, format='mjd').decimalyear >= sep_pa_end_year:
raise ValueError('start_mjd keyword date must be less than sep_pa_end_year keyword date.')
if object_to_plot > results.num_secondary_bodies:
raise ValueError("Only {0} secondary bodies being fit. Requested to plot body {1} which is out of range".format(results.num_secondary_bodies, object_to_plot))
if object_to_plot == 0:
raise ValueError("Plotting the primary's orbit is currently unsupported. Stay tuned.")
with warnings.catch_warnings():
warnings.simplefilter('ignore', ErfaWarning)
data = results.data[results.data['object'] == object_to_plot]
possible_cbar_params = [
'sma',
'ecc',
'inc',
'aop'
'pan',
'tau',
'plx'
]
if cbar_param == 'Epoch [year]':
pass
elif cbar_param[0:3] in possible_cbar_params:
index = results.param_idx[cbar_param]
else:
raise Exception(
"Invalid input; acceptable inputs include 'Epoch [year]', 'plx', 'sma1', 'ecc1', 'inc1', 'aop1', 'pan1', 'tau1', 'sma2', 'ecc2', ...)"
)
# Select random indices for plotted orbit
num_orbits = len(results.post[:, 0])
if num_orbits_to_plot > num_orbits:
num_orbits_to_plot = num_orbits
choose = np.random.randint(0, high=num_orbits, size=num_orbits_to_plot)
# Get posteriors from random indices
standard_post = []
if results.sampler_name == 'MCMC':
# Convert the randomly chosen posteriors to standard keplerian set
for i in np.arange(num_orbits_to_plot):
orb_ind = choose[i]
param_set = np.copy(results.post[orb_ind])
standard_post.append(results.basis.to_standard_basis(param_set))
else: # For OFTI, posteriors are already converted
for i in np.arange(num_orbits_to_plot):
orb_ind = choose[i]
standard_post.append(results.post[orb_ind])
standard_post = np.array(standard_post)
sma = standard_post[:, results.standard_param_idx['sma{}'.format(object_to_plot)]]
ecc = standard_post[:, results.standard_param_idx['ecc{}'.format(object_to_plot)]]
inc = standard_post[:, results.standard_param_idx['inc{}'.format(object_to_plot)]]
aop = standard_post[:, results.standard_param_idx['aop{}'.format(object_to_plot)]]
pan = standard_post[:, results.standard_param_idx['pan{}'.format(object_to_plot)]]
tau = standard_post[:, results.standard_param_idx['tau{}'.format(object_to_plot)]]
plx = standard_post[:, results.standard_param_idx['plx']]
# Then, get the other parameters
if 'mtot' in results.labels:
mtot = standard_post[:, results.standard_param_idx['mtot']]
elif 'm0' in results.labels:
m0 = standard_post[:, results.standard_param_idx['m0']]
m1 = standard_post[:, results.standard_param_idx['m{}'.format(object_to_plot)]]
mtot = m0 + m1
raoff = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
deoff = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
vz_star = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
epochs = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
# Loop through each orbit to plot and calcualte ra/dec offsets for all points in orbit
# Need this loops since epochs[] vary for each orbit, unless we want to just plot the same time period for all orbits
for i in np.arange(num_orbits_to_plot):
# Compute period (from Kepler's third law)
period = np.sqrt(4*np.pi**2.0*(sma*u.AU)**3/(consts.G*(mtot*u.Msun)))
period = period.to(u.day).value
# Create an epochs array to plot num_epochs_to_plot points over one orbital period
epochs[i, :] = np.linspace(start_mjd, float(
start_mjd+period[i]), num_epochs_to_plot)
# Calculate ra/dec offsets for all epochs of this orbit
raoff0, deoff0, _ = kepler.calc_orbit(
epochs[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch
)
raoff[i, :] = raoff0
deoff[i, :] = deoff0
# Create a linearly increasing colormap for our range of epochs
if cbar_param != 'Epoch [year]':
cbar_param_arr = results.post[:, index]
norm = mpl.colors.Normalize(vmin=np.min(cbar_param_arr),
vmax=np.max(cbar_param_arr))
norm_yr = mpl.colors.Normalize(vmin=np.min(
cbar_param_arr), vmax=np.max(cbar_param_arr))
elif cbar_param == 'Epoch [year]':
min_cbar_date = np.min(epochs)
max_cbar_date = np.max(epochs[-1, :])
# if we're plotting orbital periods greater than 1,000 yrs, limit the colorbar dynamic range
if max_cbar_date - min_cbar_date > 1000 * 365.25:
max_cbar_date = min_cbar_date + 1000 * 365.25
norm = mpl.colors.Normalize(vmin=min_cbar_date, vmax=max_cbar_date)
norm_yr = mpl.colors.Normalize(
vmin=Time(min_cbar_date, format='mjd').decimalyear,
vmax=Time(max_cbar_date, format='mjd').decimalyear
)
# Before starting to plot rv data, make sure rv data exists:
rv_indices = np.where(data['quant_type'] == 'rv')
if rv_time_series and len(rv_indices) == 0:
warnings.warn("Unable to plot radial velocity data.")
rv_time_series = False
# Create figure for orbit plots
if fig is None:
fig = plt.figure(figsize=(14, 6))
if rv_time_series:
fig = plt.figure(figsize=(14, 9))
ax = plt.subplot2grid((3, 14), (0, 0), rowspan=2, colspan=6)
else:
fig = plt.figure(figsize=(14, 6))
ax = plt.subplot2grid((2, 14), (0, 0), rowspan=2, colspan=6)
else:
plt.set_current_figure(fig)
if rv_time_series:
ax = plt.subplot2grid((3, 14), (0, 0), rowspan=2, colspan=6)
else:
ax = plt.subplot2grid((2, 14), (0, 0), rowspan=2, colspan=6)
astr_inds=np.where((~np.isnan(data['quant1'])) & (~np.isnan(data['quant2'])))
astr_epochs=data['epoch'][astr_inds]
radec_inds = np.where(data['quant_type'] == 'radec')
seppa_inds = np.where(data['quant_type'] == 'seppa')
sep_data, sep_err=data['quant1'][seppa_inds],data['quant1_err'][seppa_inds]
pa_data, pa_err=data['quant2'][seppa_inds],data['quant2_err'][seppa_inds]
if len(radec_inds[0] > 0):
sep_from_ra_data, pa_from_dec_data = orbitize.system.radec2seppa(
data['quant1'][radec_inds], data['quant2'][radec_inds]
)
num_radec_pts = len(radec_inds[0])
sep_err_from_ra_data = np.empty(num_radec_pts)
pa_err_from_dec_data = np.empty(num_radec_pts)
for j in np.arange(num_radec_pts):
sep_err_from_ra_data[j], pa_err_from_dec_data[j], _ = orbitize.system.transform_errors(
np.array(data['quant1'][radec_inds][j]), np.array(data['quant2'][radec_inds][j]),
np.array(data['quant1_err'][radec_inds][j]), np.array(data['quant2_err'][radec_inds][j]),
np.array(data['quant12_corr'][radec_inds][j]), orbitize.system.radec2seppa
)
sep_data = np.append(sep_data, sep_from_ra_data)
sep_err = np.append(sep_err, sep_err_from_ra_data)
pa_data = np.append(pa_data, pa_from_dec_data)
pa_err = np.append(pa_err, pa_err_from_dec_data)
# For plotting different astrometry instruments
if plot_astrometry_insts:
astr_colors = ('#FF7F11', '#11FFE3', '#14FF11', '#7A11FF', '#FF1919')
astr_symbols = ('*', 'o', 'p', 's')
ax_colors = itertools.cycle(astr_colors)
ax_symbols = itertools.cycle(astr_symbols)
astr_data = data[astr_inds]
astr_insts = np.unique(data[astr_inds]['instrument'])
# Indices corresponding to each instrument in datafile
astr_inst_inds = {}
for i in range(len(astr_insts)):
astr_inst_inds[astr_insts[i]]=np.where(astr_data['instrument']==astr_insts[i].encode())[0]
# Plot each orbit (each segment between two points coloured using colormap)
for i in np.arange(num_orbits_to_plot):
points = np.array([raoff[i, :], deoff[i, :]]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(
segments, cmap=cmap, norm=norm, linewidth=1.0
)
if cbar_param != 'Epoch [year]':
lc.set_array(np.ones(len(epochs[0]))*cbar_param_arr[i])
elif cbar_param == 'Epoch [year]':
lc.set_array(epochs[i, :])
ax.add_collection(lc)
if plot_astrometry:
ra_data,dec_data=orbitize.system.seppa2radec(sep_data,pa_data)
# Plot astrometry along with instruments
if plot_astrometry_insts:
for i in range(len(astr_insts)):
ra = ra_data[astr_inst_inds[astr_insts[i]]]
dec = dec_data[astr_inst_inds[astr_insts[i]]]
ax.scatter(ra, dec, marker=next(ax_symbols), c=next(ax_colors), zorder=10, s=60, label=astr_insts[i])
else:
ax.scatter(ra_data, dec_data, marker='*', c='#FF7F11', zorder=10, s=60)
# modify the axes
if square_plot:
adjustable_param = 'datalim'
else:
adjustable_param = 'box'
ax.set_aspect('equal', adjustable=adjustable_param)
ax.set_xlabel('$\\Delta$RA [mas]')
ax.set_ylabel('$\\Delta$Dec [mas]')
ax.locator_params(axis='x', nbins=6)
ax.locator_params(axis='y', nbins=6)
ax.invert_xaxis() # To go to a left-handed coordinate system
# plot sep/PA and/or rv zoom-in panels
if rv_time_series:
ax1 = plt.subplot2grid((3, 14), (0, 8), colspan=6)
ax2 = plt.subplot2grid((3, 14), (1, 8), colspan=6)
ax3 = plt.subplot2grid((3, 14), (2, 0), colspan=14, rowspan=1)
ax2.set_ylabel('PA [$^{{\\circ}}$]')
ax1.set_ylabel('$\\rho$ [mas]')
ax3.set_ylabel('RV [km/s]')
ax3.set_xlabel('Epoch')
ax2.set_xlabel('Epoch')
plt.subplots_adjust(hspace=0.3)
else:
ax1 = plt.subplot2grid((2, 14), (0, 9), colspan=6)
ax2 = plt.subplot2grid((2, 14), (1, 9), colspan=6)
ax2.set_ylabel('PA [$^{{\\circ}}$]')
ax1.set_ylabel('$\\rho$ [mas]')
ax2.set_xlabel('Epoch')
if plot_astrometry_insts:
ax1_colors = itertools.cycle(astr_colors)
ax1_symbols = itertools.cycle(astr_symbols)
ax2_colors = itertools.cycle(astr_colors)
ax2_symbols = itertools.cycle(astr_symbols)
epochs_seppa = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
for i in np.arange(num_orbits_to_plot):
epochs_seppa[i, :] = np.linspace(
start_mjd,
Time(sep_pa_end_year, format='decimalyear').mjd,
num_epochs_to_plot
)
# Calculate ra/dec offsets for all epochs of this orbit
if rv_time_series:
raoff0, deoff0, _ = kepler.calc_orbit(
epochs_seppa[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch,
mass_for_Kamp=m0[i]
)
raoff[i, :] = raoff0
deoff[i, :] = deoff0
else:
raoff0, deoff0, _ = kepler.calc_orbit(
epochs_seppa[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch
)
raoff[i, :] = raoff0
deoff[i, :] = deoff0
yr_epochs = Time(epochs_seppa[i, :], format='mjd').decimalyear
seps, pas = orbitize.system.radec2seppa(raoff[i, :], deoff[i, :], mod180=mod180)
plt.sca(ax1)
plt.plot(yr_epochs, seps, color=sep_pa_color)
plt.sca(ax2)
plt.plot(yr_epochs, pas, color=sep_pa_color)
# Plot sep/pa instruments
if plot_astrometry_insts:
for i in range(len(astr_insts)):
sep = sep_data[astr_inst_inds[astr_insts[i]]]
pa = pa_data[astr_inst_inds[astr_insts[i]]]
epochs = astr_epochs[astr_inst_inds[astr_insts[i]]]
plt.sca(ax1)
plt.scatter(Time(epochs,format='mjd').decimalyear,sep,s=10,marker=next(ax1_symbols),c=next(ax1_colors),zorder=10,label=astr_insts[i])
plt.sca(ax2)
plt.scatter(Time(epochs,format='mjd').decimalyear,pa,s=10,marker=next(ax2_symbols),c=next(ax2_colors),zorder=10)
plt.sca(ax1)
plt.legend(title='Instruments', bbox_to_anchor=(1.3, 1), loc='upper right')
else:
plt.sca(ax1)
plt.scatter(Time(astr_epochs,format='mjd').decimalyear,sep_data,s=10,marker='*',c='purple',zorder=10)
plt.sca(ax2)
plt.scatter(Time(astr_epochs,format='mjd').decimalyear,pa_data,s=10,marker='*',c='purple',zorder=10)
if rv_time_series:
rv_data = results.data[results.data['object'] == 0]
rv_data = rv_data[rv_data['quant_type'] == 'rv']
# switch current axis to rv panel
plt.sca(ax3)
# get list of rv instruments
insts = np.unique(rv_data['instrument'])
if len(insts) == 0:
insts = ['defrv']
# get gamma/sigma labels and corresponding positions in the posterior
gams=['gamma_'+inst for inst in insts]
if isinstance(results.labels,list):
labels=np.array(results.labels)
else:
labels=results.labels
# get the indices corresponding to each gamma within results.labels
gam_idx=[np.where(labels==inst_gamma)[0] for inst_gamma in gams]
# indices corresponding to each instrument in the datafile
inds={}
for i in range(len(insts)):
inds[insts[i]]=np.where(rv_data['instrument']==insts[i].encode())[0]
# choose the orbit with the best log probability
best_like=np.where(results.lnlike==np.amax(results.lnlike))[0][0]
med_ga=[results.post[best_like,i] for i in gam_idx]
# Get the posteriors for this index and convert to standard basis
best_post = results.basis.to_standard_basis(results.post[best_like].copy())
# Get the masses for the best posteriors:
best_m0 = best_post[results.standard_param_idx['m0']]
best_m1 = best_post[results.standard_param_idx['m{}'.format(object_to_plot)]]
best_mtot = best_m0 + best_m1
# colour/shape scheme scheme for rv data points
clrs=('#0496FF','#372554','#FF1053','#3A7CA5','#143109')
symbols=('o','^','v','s')
ax3_colors = itertools.cycle(clrs)
ax3_symbols = itertools.cycle(symbols)
# get rvs and plot them
for i,name in enumerate(inds.keys()):
inst_data=rv_data[inds[name]]
rvs=inst_data['quant1']
epochs=inst_data['epoch']
epochs=Time(epochs, format='mjd').decimalyear
rvs-=med_ga[i]
rvs -= best_post[results.param_idx[gams[i]]]
plt.scatter(epochs,rvs,s=5,marker=next(ax3_symbols),c=next(ax3_colors),label=name,zorder=5)
if len(inds.keys()) == 1 and 'defrv' in inds.keys():
pass
else:
plt.legend()
# calculate the predicted rv trend using the best orbit
_, _, vz = kepler.calc_orbit(
epochs_seppa[0, :],
best_post[results.standard_param_idx['sma{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['ecc{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['inc{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['aop{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['pan{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['tau{}'.format(object_to_plot)]],
best_post[results.standard_param_idx['plx']], best_mtot,
tau_ref_epoch=results.tau_ref_epoch, mass_for_Kamp=best_m0
)
vz=vz*-(best_m1)/np.median(best_m0)
# plot rv trend
plt.plot(Time(epochs_seppa[0, :],format='mjd').decimalyear, vz, color=sep_pa_color)
# add colorbar
if show_colorbar:
if rv_time_series:
# Create an axes for colorbar. The position of the axes is calculated based on the position of ax.
# You can change x1.0.05 to adjust the distance between the main image and the colorbar.
# You can change 0.02 to adjust the width of the colorbar.
cbar_ax = fig.add_axes(
[ax.get_position().x1+0.005, ax.get_position().y0, 0.02, ax.get_position().height])
cbar = mpl.colorbar.ColorbarBase(
cbar_ax, cmap=cmap, norm=norm_yr, orientation='vertical', label=cbar_param)
else:
# xpos, ypos, width, height, in fraction of figure size
cbar_ax = fig.add_axes([0.47, 0.15, 0.015, 0.7])
cbar = mpl.colorbar.ColorbarBase(
cbar_ax, cmap=cmap, norm=norm_yr, orientation='vertical', label=cbar_param)
ax1.locator_params(axis='x', nbins=6)
ax1.locator_params(axis='y', nbins=6)
ax2.locator_params(axis='x', nbins=6)
ax2.locator_params(axis='y', nbins=6)
return fig | 44.815615 | 166 | 0.585715 | import numpy as np
import corner
import warnings
import itertools
import astropy.units as u
import astropy.constants as consts
from astropy.time import Time
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import matplotlib.colors as colors
from erfa import ErfaWarning
import orbitize
import orbitize.kepler as kepler
# TODO: deprecation warning for plots in results
# Default color map for orbit plots: Purples_r truncated to its first 70%
# (sampled at 1000 points) so the lightest end of the map is dropped and
# orbit tracks remain visible against a white background.
cmap = mpl.cm.Purples_r
cmap = colors.LinearSegmentedColormap.from_list(
    'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=0.0, b=0.7),
    cmap(np.linspace(0.0, 0.7, 1000))
)
def plot_corner(results, param_list=None, **corner_kwargs):
    """
    Make a corner plot of posterior on orbit fit from any sampler

    Args:
        param_list (list of strings): each entry is a name of a parameter to include.
            Valid strings::

                sma1: semimajor axis
                ecc1: eccentricity
                inc1: inclination
                aop1: argument of periastron
                pan1: position angle of nodes
                tau1: epoch of periastron passage, expressed as fraction of orbital period
                per1: period
                K1: stellar radial velocity semi-amplitude
                [repeat for 2, 3, 4, etc if multiple objects]
                plx: parallax
                pm_ra: RA proper motion
                pm_dec: Dec proper motion
                alpha0: primary offset from reported Hipparcos RA @ alphadec0_epoch (generally 1991.25)
                delta0: primary offset from reported Hipparcos Dec @ alphadec0_epoch (generally 1991.25)
                gamma: rv offset
                sigma: rv jitter
                mi: mass of individual body i, for i = 0, 1, 2, ... (only if fit_secondary_mass == True)
                mtot: total mass (only if fit_secondary_mass == False)

        **corner_kwargs: any remaining keyword args are sent to ``corner.corner``.
            See `here <https://corner.readthedocs.io/>`_.
            Note: default axis labels used unless overwritten by user input.

    Return:
        ``matplotlib.pyplot.Figure``: corner plot

    .. Note:: **Example**: Use ``param_list = ['sma1', 'ecc1', 'inc1', 'sma2', 'ecc2', 'inc2']``
        to only plot posteriors for semimajor axis, eccentricity and inclination
        of the first two companions

    Written: Henry Ngo, 2018
    """
    # Default axis labels, keyed by the parameter name with the body number
    # stripped (overwritten if the user supplies corner_kwargs['labels']).
    default_labels = {
        'sma': '$a_{0}$ [au]',
        'ecc': '$ecc_{0}$',
        'inc': '$inc_{0}$ [$^\\circ$]',
        'aop': '$\\omega_{0}$ [$^\\circ$]',
        'pan': '$\\Omega_{0}$ [$^\\circ$]',
        'tau': '$\\tau_{0}$',
        'plx': '$\\pi$ [mas]',
        'gam': '$\\gamma$ [km/s]',
        'sig': '$\\sigma$ [km/s]',
        'mtot': '$M_T$ [M$_{{\\odot}}$]',
        'm0': '$M_0$ [M$_{{\\odot}}$]',
        # BUGFIX: this entry appeared twice in the literal; kept once.
        'm': '$M_{0}$ [M$_{{\\rm Jup}}$]',
        'pm_ra': '$\\mu_{{\\alpha}}$ [mas/yr]',
        'pm_dec': '$\\mu_{{\\delta}}$ [mas/yr]',
        'alpha0': '$\\alpha^{{*}}_{{0}}$ [mas]',
        'delta0': '$\\delta_0$ [mas]',
        'per': '$P_{0}$ [yr]',
        'K': '$K_{0}$ [km/s]',
        'x': '$X_{0}$ [AU]',
        'y': '$Y_{0}$ [AU]',
        'z': '$Z_{0}$ [AU]',
        'xdot': '$xdot_{0}$ [km/s]',
        'ydot': '$ydot_{0}$ [km/s]',
        'zdot': '$zdot_{0}$ [km/s]'
    }
    if param_list is None:
        param_list = results.labels

    param_indices = []           # columns of results.post to keep
    angle_indices = []           # columns of `samples` holding angles (rad -> deg)
    secondary_mass_indices = []  # columns of `samples` holding companion masses
    kept_params = []             # names of the kept (non-fixed) parameters, in order
    for param in param_list:
        index_num = results.param_idx[param]
        # only plot non-fixed parameters
        if np.std(results.post[:, index_num]) > 0:
            # BUGFIX: angle/mass/label indices must refer to the column
            # position inside the *trimmed* `samples` array, not the position
            # inside param_list — the two differ as soon as a fixed parameter
            # is skipped, which previously misaligned unit conversions/labels.
            col = len(param_indices)
            param_indices.append(index_num)
            kept_params.append(param)
            if param.startswith(('aop', 'pan', 'inc')):
                angle_indices.append(col)
            if param.startswith('m') and param not in ('m0', 'mtot'):
                secondary_mass_indices.append(col)

    samples = np.copy(results.post[:, param_indices])  # chains for selected parameters only
    samples[:, angle_indices] = np.degrees(samples[:, angle_indices])  # rad -> deg
    samples[:, secondary_mass_indices] *= u.solMass.to(u.jupiterMass)  # companion masses in M_Jup

    if 'labels' not in corner_kwargs:  # use default labels if user didn't already supply them
        reduced_labels_list = []
        for label_key in kept_params:
            if label_key.startswith('m') and label_key not in ('m0', 'mtot'):
                body_num = label_key[1]
                label_key = 'm'
            elif label_key in ('m0', 'mtot') or label_key.startswith('plx'):
                body_num = ""
                # maintain original label key
            elif label_key in ['pm_ra', 'pm_dec', 'alpha0', 'delta0']:
                body_num = ""
            elif label_key.startswith("gamma") or label_key.startswith("sigma"):
                body_num = ""
                label_key = label_key[0:3]
            else:
                body_num = label_key[-1]
                label_key = label_key[0:-1]
            reduced_labels_list.append(default_labels[label_key].format(body_num))
        corner_kwargs['labels'] = reduced_labels_list

    figure = corner.corner(samples, **corner_kwargs)
    return figure
def plot_orbits(results, object_to_plot=1, start_mjd=51544.,
                num_orbits_to_plot=100, num_epochs_to_plot=100,
                square_plot=True, show_colorbar=True, cmap=cmap,
                sep_pa_color='lightgrey', sep_pa_end_year=2025.0,
                cbar_param='Epoch [year]', mod180=False, rv_time_series=False, plot_astrometry=True,
                plot_astrometry_insts=False, fig=None):
    """
    Plots one orbital period for a select number of fitted orbits
    for a given object, with line segments colored according to time

    Args:
        object_to_plot (int): which object to plot (default: 1)
        start_mjd (float): MJD in which to start plotting orbits (default: 51544,
            the year 2000)
        num_orbits_to_plot (int): number of orbits to plot (default: 100)
        num_epochs_to_plot (int): number of points to plot per orbit (default: 100)
        square_plot (Boolean): Aspect ratio is always equal, but if
            square_plot is True (default), then the axes will be square,
            otherwise, white space padding is used
        show_colorbar (Boolean): Displays colorbar to the right of the plot [True]
        cmap (matplotlib.cm.ColorMap): color map to use for making orbit tracks
            (default: modified Purples_r)
        sep_pa_color (string): any valid matplotlib color string, used to set the
            color of the orbit tracks in the Sep/PA panels (default: 'lightgrey').
        sep_pa_end_year (float): decimal year specifying when to stop plotting orbit
            tracks in the Sep/PA panels (default: 2025.0).
        cbar_param (string): options are the following: 'Epoch [year]', 'sma1', 'ecc1', 'inc1', 'aop1',
            'pan1', 'tau1', 'plx'. Number can be switched out. Default is Epoch [year].
        mod180 (Bool): if True, PA will be plotted in range [180, 540]. Useful for plotting short
            arcs with PAs that cross 360 deg during observations (default: False)
        rv_time_series (Boolean): if fitting for secondary mass using MCMC for rv fitting and want to
            display time series, set to True.
        plot_astrometry (Boolean): set to True by default. Plots the astrometric data.
        plot_astrometry_insts (Boolean): set to False by default. Plots the astrometric data by instruments.
        fig (matplotlib.pyplot.Figure): optionally include a predefined Figure object to plot the orbit on.
            Most users will not need this keyword.

    Return:
        ``matplotlib.pyplot.Figure``: the orbit plot if input is valid, ``None`` otherwise


    (written): Henry Ngo, Sarah Blunt, 2018
    Additions by Malena Rice, 2019

    """
    if Time(start_mjd, format='mjd').decimalyear >= sep_pa_end_year:
        raise ValueError('start_mjd keyword date must be less than sep_pa_end_year keyword date.')
    if object_to_plot > results.num_secondary_bodies:
        raise ValueError("Only {0} secondary bodies being fit. Requested to plot body {1} which is out of range".format(results.num_secondary_bodies, object_to_plot))
    if object_to_plot == 0:
        raise ValueError("Plotting the primary's orbit is currently unsupported. Stay tuned.")
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', ErfaWarning)
        data = results.data[results.data['object'] == object_to_plot]
        possible_cbar_params = [
            'sma',
            'ecc',
            'inc',
            # BUGFIX: the comma after 'aop' was missing, so 'aop' 'pan'
            # concatenated to 'aoppan' and both parameters were rejected.
            'aop',
            'pan',
            'tau',
            'plx'
        ]
        if cbar_param == 'Epoch [year]':
            pass
        elif cbar_param[0:3] in possible_cbar_params:
            index = results.param_idx[cbar_param]
        else:
            raise Exception(
                "Invalid input; acceptable inputs include 'Epoch [year]', 'plx', 'sma1', 'ecc1', 'inc1', 'aop1', 'pan1', 'tau1', 'sma2', 'ecc2', ...)"
            )
        # Select random indices for plotted orbit
        num_orbits = len(results.post[:, 0])
        if num_orbits_to_plot > num_orbits:
            num_orbits_to_plot = num_orbits
        choose = np.random.randint(0, high=num_orbits, size=num_orbits_to_plot)
        # Get posteriors from random indices
        standard_post = []
        if results.sampler_name == 'MCMC':
            # Convert the randomly chosen posteriors to standard keplerian set
            for i in np.arange(num_orbits_to_plot):
                orb_ind = choose[i]
                param_set = np.copy(results.post[orb_ind])
                standard_post.append(results.basis.to_standard_basis(param_set))
        else:  # For OFTI, posteriors are already converted
            for i in np.arange(num_orbits_to_plot):
                orb_ind = choose[i]
                standard_post.append(results.post[orb_ind])
        standard_post = np.array(standard_post)
        sma = standard_post[:, results.standard_param_idx['sma{}'.format(object_to_plot)]]
        ecc = standard_post[:, results.standard_param_idx['ecc{}'.format(object_to_plot)]]
        inc = standard_post[:, results.standard_param_idx['inc{}'.format(object_to_plot)]]
        aop = standard_post[:, results.standard_param_idx['aop{}'.format(object_to_plot)]]
        pan = standard_post[:, results.standard_param_idx['pan{}'.format(object_to_plot)]]
        tau = standard_post[:, results.standard_param_idx['tau{}'.format(object_to_plot)]]
        plx = standard_post[:, results.standard_param_idx['plx']]
        # Then, get the other parameters
        if 'mtot' in results.labels:
            mtot = standard_post[:, results.standard_param_idx['mtot']]
        elif 'm0' in results.labels:
            m0 = standard_post[:, results.standard_param_idx['m0']]
            m1 = standard_post[:, results.standard_param_idx['m{}'.format(object_to_plot)]]
            mtot = m0 + m1
        raoff = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
        deoff = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
        epochs = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
        # Loop through each orbit to plot and calculate ra/dec offsets for all points in orbit
        # Need this loop since epochs[] vary for each orbit, unless we want to just plot the same time period for all orbits
        for i in np.arange(num_orbits_to_plot):
            # Compute period (from Kepler's third law)
            period = np.sqrt(4*np.pi**2.0*(sma*u.AU)**3/(consts.G*(mtot*u.Msun)))
            period = period.to(u.day).value
            # Create an epochs array to plot num_epochs_to_plot points over one orbital period
            epochs[i, :] = np.linspace(start_mjd, float(
                start_mjd+period[i]), num_epochs_to_plot)
            # Calculate ra/dec offsets for all epochs of this orbit
            raoff0, deoff0, _ = kepler.calc_orbit(
                epochs[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
                tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch
            )
            raoff[i, :] = raoff0
            deoff[i, :] = deoff0
        # Create a linearly increasing colormap for our range of epochs
        if cbar_param != 'Epoch [year]':
            cbar_param_arr = results.post[:, index]
            norm = mpl.colors.Normalize(vmin=np.min(cbar_param_arr),
                                        vmax=np.max(cbar_param_arr))
            norm_yr = mpl.colors.Normalize(vmin=np.min(
                cbar_param_arr), vmax=np.max(cbar_param_arr))
        elif cbar_param == 'Epoch [year]':
            min_cbar_date = np.min(epochs)
            max_cbar_date = np.max(epochs[-1, :])
            # if we're plotting orbital periods greater than 1,000 yrs, limit the colorbar dynamic range
            if max_cbar_date - min_cbar_date > 1000 * 365.25:
                max_cbar_date = min_cbar_date + 1000 * 365.25
            norm = mpl.colors.Normalize(vmin=min_cbar_date, vmax=max_cbar_date)
            norm_yr = mpl.colors.Normalize(
                vmin=Time(min_cbar_date, format='mjd').decimalyear,
                vmax=Time(max_cbar_date, format='mjd').decimalyear
            )
        # Before starting to plot rv data, make sure rv data exists:
        rv_indices = np.where(data['quant_type'] == 'rv')
        # BUGFIX: np.where returns a 1-tuple, so len(rv_indices) was always 1
        # and this warning could never fire; check the index array itself.
        if rv_time_series and len(rv_indices[0]) == 0:
            warnings.warn("Unable to plot radial velocity data.")
            rv_time_series = False
        # Create figure for orbit plots
        if fig is None:
            # BUGFIX: removed a stray plt.figure() call here that opened an
            # extra, empty figure before the sized one below.
            if rv_time_series:
                fig = plt.figure(figsize=(14, 9))
                ax = plt.subplot2grid((3, 14), (0, 0), rowspan=2, colspan=6)
            else:
                fig = plt.figure(figsize=(14, 6))
                ax = plt.subplot2grid((2, 14), (0, 0), rowspan=2, colspan=6)
        else:
            # BUGFIX: pyplot has no set_current_figure(); activate the given
            # figure by number instead.
            plt.figure(fig.number)
            if rv_time_series:
                ax = plt.subplot2grid((3, 14), (0, 0), rowspan=2, colspan=6)
            else:
                ax = plt.subplot2grid((2, 14), (0, 0), rowspan=2, colspan=6)
        astr_inds = np.where((~np.isnan(data['quant1'])) & (~np.isnan(data['quant2'])))
        astr_epochs = data['epoch'][astr_inds]
        radec_inds = np.where(data['quant_type'] == 'radec')
        seppa_inds = np.where(data['quant_type'] == 'seppa')
        sep_data, sep_err = data['quant1'][seppa_inds], data['quant1_err'][seppa_inds]
        pa_data, pa_err = data['quant2'][seppa_inds], data['quant2_err'][seppa_inds]
        # Convert RA/Dec points (and their errors) to sep/PA so all astrometry
        # shares one representation for the zoom-in panels.
        # BUGFIX: paren was misplaced (len(radec_inds[0] > 0)).
        if len(radec_inds[0]) > 0:
            sep_from_ra_data, pa_from_dec_data = orbitize.system.radec2seppa(
                data['quant1'][radec_inds], data['quant2'][radec_inds]
            )
            num_radec_pts = len(radec_inds[0])
            sep_err_from_ra_data = np.empty(num_radec_pts)
            pa_err_from_dec_data = np.empty(num_radec_pts)
            for j in np.arange(num_radec_pts):
                sep_err_from_ra_data[j], pa_err_from_dec_data[j], _ = orbitize.system.transform_errors(
                    np.array(data['quant1'][radec_inds][j]), np.array(data['quant2'][radec_inds][j]),
                    np.array(data['quant1_err'][radec_inds][j]), np.array(data['quant2_err'][radec_inds][j]),
                    np.array(data['quant12_corr'][radec_inds][j]), orbitize.system.radec2seppa
                )
            sep_data = np.append(sep_data, sep_from_ra_data)
            sep_err = np.append(sep_err, sep_err_from_ra_data)
            pa_data = np.append(pa_data, pa_from_dec_data)
            pa_err = np.append(pa_err, pa_err_from_dec_data)
        # For plotting different astrometry instruments
        if plot_astrometry_insts:
            astr_colors = ('#FF7F11', '#11FFE3', '#14FF11', '#7A11FF', '#FF1919')
            astr_symbols = ('*', 'o', 'p', 's')
            ax_colors = itertools.cycle(astr_colors)
            ax_symbols = itertools.cycle(astr_symbols)
            astr_data = data[astr_inds]
            astr_insts = np.unique(data[astr_inds]['instrument'])
            # Indices corresponding to each instrument in datafile
            astr_inst_inds = {}
            for i in range(len(astr_insts)):
                astr_inst_inds[astr_insts[i]] = np.where(astr_data['instrument'] == astr_insts[i].encode())[0]
        # Plot each orbit (each segment between two points coloured using colormap)
        for i in np.arange(num_orbits_to_plot):
            points = np.array([raoff[i, :], deoff[i, :]]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            lc = LineCollection(
                segments, cmap=cmap, norm=norm, linewidth=1.0
            )
            if cbar_param != 'Epoch [year]':
                lc.set_array(np.ones(len(epochs[0]))*cbar_param_arr[i])
            elif cbar_param == 'Epoch [year]':
                lc.set_array(epochs[i, :])
            ax.add_collection(lc)
        if plot_astrometry:
            ra_data, dec_data = orbitize.system.seppa2radec(sep_data, pa_data)
            # Plot astrometry along with instruments
            if plot_astrometry_insts:
                for i in range(len(astr_insts)):
                    ra = ra_data[astr_inst_inds[astr_insts[i]]]
                    dec = dec_data[astr_inst_inds[astr_insts[i]]]
                    ax.scatter(ra, dec, marker=next(ax_symbols), c=next(ax_colors), zorder=10, s=60, label=astr_insts[i])
            else:
                ax.scatter(ra_data, dec_data, marker='*', c='#FF7F11', zorder=10, s=60)
        # modify the axes
        if square_plot:
            adjustable_param = 'datalim'
        else:
            adjustable_param = 'box'
        ax.set_aspect('equal', adjustable=adjustable_param)
        ax.set_xlabel('$\\Delta$RA [mas]')
        ax.set_ylabel('$\\Delta$Dec [mas]')
        ax.locator_params(axis='x', nbins=6)
        ax.locator_params(axis='y', nbins=6)
        ax.invert_xaxis()  # To go to a left-handed coordinate system
        # plot sep/PA and/or rv zoom-in panels
        if rv_time_series:
            ax1 = plt.subplot2grid((3, 14), (0, 8), colspan=6)
            ax2 = plt.subplot2grid((3, 14), (1, 8), colspan=6)
            ax3 = plt.subplot2grid((3, 14), (2, 0), colspan=14, rowspan=1)
            ax2.set_ylabel('PA [$^{{\\circ}}$]')
            ax1.set_ylabel('$\\rho$ [mas]')
            ax3.set_ylabel('RV [km/s]')
            ax3.set_xlabel('Epoch')
            ax2.set_xlabel('Epoch')
            plt.subplots_adjust(hspace=0.3)
        else:
            ax1 = plt.subplot2grid((2, 14), (0, 9), colspan=6)
            ax2 = plt.subplot2grid((2, 14), (1, 9), colspan=6)
            ax2.set_ylabel('PA [$^{{\\circ}}$]')
            ax1.set_ylabel('$\\rho$ [mas]')
            ax2.set_xlabel('Epoch')
        if plot_astrometry_insts:
            ax1_colors = itertools.cycle(astr_colors)
            ax1_symbols = itertools.cycle(astr_symbols)
            ax2_colors = itertools.cycle(astr_colors)
            ax2_symbols = itertools.cycle(astr_symbols)
        epochs_seppa = np.zeros((num_orbits_to_plot, num_epochs_to_plot))
        for i in np.arange(num_orbits_to_plot):
            epochs_seppa[i, :] = np.linspace(
                start_mjd,
                Time(sep_pa_end_year, format='decimalyear').mjd,
                num_epochs_to_plot
            )
            # Calculate ra/dec offsets for all epochs of this orbit
            if rv_time_series:
                raoff0, deoff0, _ = kepler.calc_orbit(
                    epochs_seppa[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
                    tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch,
                    mass_for_Kamp=m0[i]
                )
                raoff[i, :] = raoff0
                deoff[i, :] = deoff0
            else:
                raoff0, deoff0, _ = kepler.calc_orbit(
                    epochs_seppa[i, :], sma[i], ecc[i], inc[i], aop[i], pan[i],
                    tau[i], plx[i], mtot[i], tau_ref_epoch=results.tau_ref_epoch
                )
                raoff[i, :] = raoff0
                deoff[i, :] = deoff0
            yr_epochs = Time(epochs_seppa[i, :], format='mjd').decimalyear
            seps, pas = orbitize.system.radec2seppa(raoff[i, :], deoff[i, :], mod180=mod180)
            plt.sca(ax1)
            plt.plot(yr_epochs, seps, color=sep_pa_color)
            plt.sca(ax2)
            plt.plot(yr_epochs, pas, color=sep_pa_color)
        # Plot sep/pa instruments
        if plot_astrometry_insts:
            for i in range(len(astr_insts)):
                sep = sep_data[astr_inst_inds[astr_insts[i]]]
                pa = pa_data[astr_inst_inds[astr_insts[i]]]
                epochs = astr_epochs[astr_inst_inds[astr_insts[i]]]
                plt.sca(ax1)
                plt.scatter(Time(epochs, format='mjd').decimalyear, sep, s=10, marker=next(ax1_symbols), c=next(ax1_colors), zorder=10, label=astr_insts[i])
                plt.sca(ax2)
                plt.scatter(Time(epochs, format='mjd').decimalyear, pa, s=10, marker=next(ax2_symbols), c=next(ax2_colors), zorder=10)
            plt.sca(ax1)
            plt.legend(title='Instruments', bbox_to_anchor=(1.3, 1), loc='upper right')
        else:
            plt.sca(ax1)
            plt.scatter(Time(astr_epochs, format='mjd').decimalyear, sep_data, s=10, marker='*', c='purple', zorder=10)
            plt.sca(ax2)
            plt.scatter(Time(astr_epochs, format='mjd').decimalyear, pa_data, s=10, marker='*', c='purple', zorder=10)
        if rv_time_series:
            rv_data = results.data[results.data['object'] == 0]
            rv_data = rv_data[rv_data['quant_type'] == 'rv']
            # switch current axis to rv panel
            plt.sca(ax3)
            # get list of rv instruments
            insts = np.unique(rv_data['instrument'])
            if len(insts) == 0:
                insts = ['defrv']
            # get gamma labels corresponding to each instrument in the posterior
            gams = ['gamma_' + inst for inst in insts]
            # indices corresponding to each instrument in the datafile
            inds = {}
            for i in range(len(insts)):
                inds[insts[i]] = np.where(rv_data['instrument'] == insts[i].encode())[0]
            # choose the orbit with the best log probability
            best_like = np.where(results.lnlike == np.amax(results.lnlike))[0][0]
            # Get the posteriors for this index and convert to standard basis
            best_post = results.basis.to_standard_basis(results.post[best_like].copy())
            # Get the masses for the best posteriors:
            best_m0 = best_post[results.standard_param_idx['m0']]
            best_m1 = best_post[results.standard_param_idx['m{}'.format(object_to_plot)]]
            best_mtot = best_m0 + best_m1
            # colour/shape scheme for rv data points
            clrs = ('#0496FF', '#372554', '#FF1053', '#3A7CA5', '#143109')
            symbols = ('o', '^', 'v', 's')
            ax3_colors = itertools.cycle(clrs)
            ax3_symbols = itertools.cycle(symbols)
            # get rvs and plot them
            for i, name in enumerate(inds.keys()):
                inst_data = rv_data[inds[name]]
                rvs = inst_data['quant1']
                epochs = inst_data['epoch']
                epochs = Time(epochs, format='mjd').decimalyear
                # BUGFIX: the per-instrument gamma offset was subtracted twice
                # (once from the raw posterior row via med_ga, once from the
                # standard-basis posterior); subtract it exactly once, without
                # mutating the underlying data array.
                rvs = rvs - best_post[results.param_idx[gams[i]]]
                plt.scatter(epochs, rvs, s=5, marker=next(ax3_symbols), c=next(ax3_colors), label=name, zorder=5)
            if len(inds.keys()) == 1 and 'defrv' in inds.keys():
                pass
            else:
                plt.legend()
            # calculate the predicted rv trend using the best orbit
            _, _, vz = kepler.calc_orbit(
                epochs_seppa[0, :],
                best_post[results.standard_param_idx['sma{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['ecc{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['inc{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['aop{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['pan{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['tau{}'.format(object_to_plot)]],
                best_post[results.standard_param_idx['plx']], best_mtot,
                tau_ref_epoch=results.tau_ref_epoch, mass_for_Kamp=best_m0
            )
            # scale the companion's velocity down to the star's reflex motion
            vz = vz*-(best_m1)/np.median(best_m0)
            # plot rv trend
            plt.plot(Time(epochs_seppa[0, :], format='mjd').decimalyear, vz, color=sep_pa_color)
        # add colorbar
        if show_colorbar:
            if rv_time_series:
                # Create an axes for colorbar. The position of the axes is calculated based on the position of ax.
                # You can change 0.005 to adjust the distance between the main image and the colorbar.
                # You can change 0.02 to adjust the width of the colorbar.
                cbar_ax = fig.add_axes(
                    [ax.get_position().x1+0.005, ax.get_position().y0, 0.02, ax.get_position().height])
                cbar = mpl.colorbar.ColorbarBase(
                    cbar_ax, cmap=cmap, norm=norm_yr, orientation='vertical', label=cbar_param)
            else:
                # xpos, ypos, width, height, in fraction of figure size
                cbar_ax = fig.add_axes([0.47, 0.15, 0.015, 0.7])
                cbar = mpl.colorbar.ColorbarBase(
                    cbar_ax, cmap=cmap, norm=norm_yr, orientation='vertical', label=cbar_param)
        ax1.locator_params(axis='x', nbins=6)
        ax1.locator_params(axis='y', nbins=6)
        ax2.locator_params(axis='x', nbins=6)
        ax2.locator_params(axis='y', nbins=6)
        return fig
29cf3ca08014999c12eddf1018f67e6c18064f76 | 155 | py | Python | src/assets/scene_config/console/5/solution.py | DataArt/kiddo | 58fc41bd8d1ee93c9af464179fe6e3c0d6150b50 | [
"Apache-2.0"
] | 12 | 2020-10-01T12:37:26.000Z | 2022-01-11T09:00:24.000Z | src/assets/scene_config/console/5/solution.py | ra1amx/kiddo | dfb1c9324152a14edc62e1e89f7b108bc69b7d63 | [
"Apache-2.0"
] | 1 | 2021-01-15T07:15:54.000Z | 2021-01-15T07:15:54.000Z | src/assets/scene_config/console/5/solution.py | ra1amx/kiddo | dfb1c9324152a14edc62e1e89f7b108bc69b7d63 | [
"Apache-2.0"
] | 2 | 2020-10-16T09:27:33.000Z | 2021-07-11T13:19:31.000Z | import console
x = console.get_value("x")
y = console.get_value("y")
if x >= 0:
console.set_value("c", x + y)
else:
console.set_value("c", x * y) | 17.222222 | 33 | 0.612903 | import console
# Read the two operands from the console environment.
x = console.get_value("x")
y = console.get_value("y")
# Store the sum in "c" when x is non-negative, otherwise the product.
if x >= 0:
    console.set_value("c", x + y)
else:
    console.set_value("c", x * y)
dd2ae237043f5511cbae8bbc3116d9709f19e8cc | 240 | py | Python | setup.py | davidmeijer/mol2html | 606d4cccc93fc0505779342e76a0166cef32377a | [
"MIT"
] | 1 | 2021-11-30T14:15:09.000Z | 2021-11-30T14:15:09.000Z | setup.py | davidmeijer/mol2html | 606d4cccc93fc0505779342e76a0166cef32377a | [
"MIT"
] | null | null | null | setup.py | davidmeijer/mol2html | 606d4cccc93fc0505779342e76a0166cef32377a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from mol2html.__version__ import __version__
setup(
name='mol2html',
version=__version__,
pacakges=find_packages(),
author='David Meijer',
author_email='david.meijer@wur.nl'
) | 24 | 44 | 0.745833 | from setuptools import setup, find_packages
from mol2html.__version__ import __version__
setup(
name='mol2html',
version=__version__,
pacakges=find_packages(),
author='David Meijer',
author_email='david.meijer@wur.nl'
) | 0 | 0 | 0 |
fa203b374d6b8852ccb15e6949867b415cb5fd6d | 1,349 | py | Python | alien_game/settings.py | Brucehanyf/python_tutorial | fc5c888ac818ad6e8fda9ee7fbcc3ecbca94436b | [
"Apache-2.0"
] | null | null | null | alien_game/settings.py | Brucehanyf/python_tutorial | fc5c888ac818ad6e8fda9ee7fbcc3ecbca94436b | [
"Apache-2.0"
] | null | null | null | alien_game/settings.py | Brucehanyf/python_tutorial | fc5c888ac818ad6e8fda9ee7fbcc3ecbca94436b | [
"Apache-2.0"
] | null | null | null | # 设置类
class Settings():
    '''A class that stores all settings for Alien Invasion.'''
    def __init__(self):
        """ Initialize the game's settings. """
        # Screen settings
        self.screen_width = 600
        self.screen_height = 600
        self.bg_color = (230, 230, 230)
        # Ship settings: movement speed and number of spare ships
        self.ship_speed_factor = 1.5
        self.ship_limit = 3
        # Bullet settings
        self.bullet_speed_factor = 2
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = 60,60,60
        self.bullet_allow = 3
        # Alien settings
        self.alien_speed_factor = 2
        self.feet_drop_speed = 10
        # feet_direction of 1 means moving right; -1 means moving left
        self.feet_direction = 1
        # How quickly the game speeds up
        self.sppedup_scale = 1.1
        # How quickly the alien point values increase
        self.score_scale = 1.5
        self.initialize_dynamic_settings()
    def initialize_dynamic_settings(self):
        """ Initialize settings that change throughout the game. """
        self.ship_speed_factor = 1.5
        self.bullet_speed_factor = 3
        self.alien_speed_factor = 1
        self.feet_direction = 1
        # Scoring
        self.alien_points = 5
    def increase_speed(self):
        """ Increase speed settings and alien point values. """
        self.ship_speed_factor *= self.sppedup_scale
        self.bullet_speed_factor *= self.sppedup_scale
        self.alien_speed_factor *= self.sppedup_scale
        self.alien_points = int(self.alien_points *self.score_scale)
| 26.45098 | 68 | 0.595256 | # 设置类
class Settings():
    """Container for all tunable Alien Invasion game settings."""
    def __init__(self):
        """Set up the static settings, then reset the dynamic ones."""
        # Screen
        self.screen_width = 600
        self.screen_height = 600
        self.bg_color = (230, 230, 230)
        # Ship
        self.ship_limit = 3
        self.ship_speed_factor = 1.5
        # Bullets
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = 60, 60, 60
        self.bullet_allow = 3
        self.bullet_speed_factor = 2
        # Aliens / fleet (feet_direction: 1 -> right, -1 -> left)
        self.alien_speed_factor = 2
        self.feet_drop_speed = 10
        self.feet_direction = 1
        # Difficulty ramp-up factors
        self.sppedup_scale = 1.1
        self.score_scale = 1.5
        self.initialize_dynamic_settings()
    def initialize_dynamic_settings(self):
        """Reset every setting that evolves as the game progresses."""
        self.ship_speed_factor = 1.5
        self.bullet_speed_factor = 3
        self.alien_speed_factor = 1
        self.feet_direction = 1
        # Points awarded per alien shot down
        self.alien_points = 5
    def increase_speed(self):
        """Ramp up the pace and raise the alien point reward."""
        for attr in ('ship_speed_factor', 'bullet_speed_factor', 'alien_speed_factor'):
            setattr(self, attr, getattr(self, attr) * self.sppedup_scale)
        self.alien_points = int(self.alien_points * self.score_scale)
| 0 | 0 | 0 |
26357af55397b9f718b1c3fddda93b2341102317 | 3,081 | py | Python | app/lib/qiniu/auth.py | zhongxinghong/pkuyouth | 9262346b3c88109ca731387095d5678071a05d60 | [
"Apache-2.0"
] | 1 | 2018-06-19T04:00:35.000Z | 2018-06-19T04:00:35.000Z | app/lib/qiniu/auth.py | zhongxinghong/PKUyouth | 9262346b3c88109ca731387095d5678071a05d60 | [
"Apache-2.0"
] | null | null | null | app/lib/qiniu/auth.py | zhongxinghong/PKUyouth | 9262346b3c88109ca731387095d5678071a05d60 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: app/lib/qiniu/auth.py
import time
import simplejson as json
import hmac
from hashlib import sha1
from urllib.parse import urlparse
from requests.auth import AuthBase
try:
from .util import to_bytes, urlsafe_base64_encode
except (ImportError,SystemError,ValueError):
from util import to_bytes, urlsafe_base64_encode
| 32.431579 | 106 | 0.635508 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: app/lib/qiniu/auth.py
import time
import simplejson as json
import hmac
from hashlib import sha1
from urllib.parse import urlparse
from requests.auth import AuthBase
try:
from .util import to_bytes, urlsafe_base64_encode
except (ImportError,SystemError,ValueError):
from util import to_bytes, urlsafe_base64_encode
class Auth(object):
def __init__(self, access_key, secret_key):
self.__access_key = access_key
self.__secret_key = to_bytes(secret_key)
def __get_sign(self, data):
return urlsafe_base64_encode(hmac.new(to_bytes(self.__secret_key), to_bytes(data), sha1).digest())
def token_without_body(self, data):
data = urlsafe_base64_encode(data)
return '{access_key}:{encoded_sign}'.format(
access_key = self.__access_key,
encoded_sign = self.__get_sign(data),
)
def token_with_body(self, data):
data = urlsafe_base64_encode(data)
return '{access_key}:{encoded_sign}:{encoded_body}'.format(
access_key = self.__access_key,
encoded_sign = self.__get_sign(data),
encoded_body = data,
)
def get_authorization(self, url, body=None, content_type=None):
parsed_url = urlparse(url)
query = parsed_url.query
path = parsed_url.path
if query != '':
data = '{path}?{query}\n'.format(path=path, query=query)
else:
data = '{path}\n'.format(path=path)
if body is not None and content_type == 'application/x-www-form-urlencoded':
data = body
return '{access_key}:{encoded_sign}'.format(
access_key = self.__access_key,
encoded_sign = self.__get_sign(data),
)
def upload_token(self, bucket, filename, policy=None, expire=3660):
return self.token_with_body(json.dumps({
'scope': '{bucket}:{key}'.format(bucket=bucket,key=filename),
'deadline': int(time.time() + expire),
}, separators=(',', ':')))
def encoded_entry_uri(self, bucket, key):
return urlsafe_base64_encode('{bucket}:{key}'.format(bucket=bucket, key=key))
class QiniuAuth(AuthBase):
def __init__(self, access_key, secret_key):
self.__access_key = access_key
self.__secret_key = secret_key
self.__auth = Auth(access_key, secret_key)
def upload_token(self, *args, **kwargs):
return self.__auth.upload_token(*args, **kwargs)
def encoded_entry_uri(self, *args, **kwargs):
return self.__auth.encoded_entry_uri(*args, **kwargs)
def __call__(self, r):
if r.body is not None and r.headers['Content-Type'] == 'application/x-www-form-urlencoded':
access_token = self.__auth.get_authorization(r.url, r.body, r.headers['Content-Type'])
else:
access_token = self.__auth.get_authorization(r.url)
r.headers['Authorization'] = 'QBox %s' % access_token
return r | 2,346 | 3 | 343 |
0ad4f0e064a2c13e6139134cba5c7ab58eb899c1 | 2,423 | py | Python | services/auth/app/payments/views.py | Counter0021/Anti-Freelancer-microservices-back-end | e55481e0a4353107036cd5ba664fee57e29c7597 | [
"MIT"
] | null | null | null | services/auth/app/payments/views.py | Counter0021/Anti-Freelancer-microservices-back-end | e55481e0a4353107036cd5ba664fee57e29c7597 | [
"MIT"
] | null | null | null | services/auth/app/payments/views.py | Counter0021/Anti-Freelancer-microservices-back-end | e55481e0a4353107036cd5ba664fee57e29c7597 | [
"MIT"
] | null | null | null | import uuid
from fastapi import status, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from app import requests
from app.crud import payment_crud, user_crud
from app.models import User
from config import PUBLIC_QIWI_KEY
async def pay(db: AsyncSession, user: User, amount: int) -> dict[str, str]:
    """
    Pay: create a QIWI invoice for the user.

    :param db: DB
    :type db: AsyncSession
    :param user: User
    :type user: User
    :param amount: Amount
    :type amount: int
    :return: URL
    :rtype: dict
    """
    # Only one pending invoice per user: drop any uncompleted payment first
    if await payment_crud.exist(db, user_id=user.id, is_completed=False):
        await payment_crud.remove(db, user_id=user.id, is_completed=False)
    payment = await payment_crud.create(
        db, uuid=str(uuid.uuid4()), amount=amount, comment=f'Buy level for user {user.username}', user_id=user.id
    )
    # QIWI invoice-creation endpoint; billId is the payment's unique uuid
    _url = f'https://oplata.qiwi.com/create?' \
           f'publicKey={PUBLIC_QIWI_KEY}&billId={payment.uuid}&amount={payment.amount}&comment={payment.comment}'
    payment_url = await requests.pay_request(_url)
    # Return the payment-form URL together with the stored payment fields
    return {'url': f'{payment_url}', **payment.__dict__}
async def check(db: AsyncSession, pk: int) -> dict[str, str]:
    """
    Check payment and level up

    :param db: DB
    :type db: AsyncSession
    :param pk: Payment ID
    :type pk: int
    :return: Message
    :rtype: dict
    :raise HTTPException 400: Payment not found
    :raise HTTPException 400: The purchase has already been credited
    :raise HTTPException 400: Payment not paid
    """
    if not await payment_crud.exist(db, id=pk):
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='Payment not found')
    payment = await payment_crud.get(db, id=pk)
    # Guard against crediting the same invoice twice
    if payment.is_completed:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='The purchase has already been credited')
    # Ask QIWI for the invoice status; anything other than PAID is rejected
    response = await requests.check_request(f'https://api.qiwi.com/partner/bill/v1/bills/{payment.uuid}')
    if response.get('status').get('value') != 'PAID':
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='Payment not paid')
    # Credit the purchased levels to the user and close out the invoice
    user = await user_crud.get(db, id=payment.user_id)
    await user_crud.update(db, {'id': user.id}, level=user.level + payment.amount)
    await payment_crud.update(db, {'id': payment.id}, is_completed=True)
    return {'msg': 'Level has been up'}
| 35.632353 | 117 | 0.675609 | import uuid
from fastapi import status, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from app import requests
from app.crud import payment_crud, user_crud
from app.models import User
from config import PUBLIC_QIWI_KEY
async def pay(db: AsyncSession, user: User, amount: int) -> dict[str, str]:
    """
    Pay
    :param db: DB
    :type db: AsyncSession
    :param user: User
    :type user: User
    :param amount: Amount
    :type amount: int
    :return: URL
    :rtype: dict
    """
    # A user may have at most one pending invoice: drop any incomplete one.
    if await payment_crud.exist(db, user_id=user.id, is_completed=False):
        await payment_crud.remove(db, user_id=user.id, is_completed=False)
    payment = await payment_crud.create(
        db, uuid=str(uuid.uuid4()), amount=amount, comment=f'Buy level for user {user.username}', user_id=user.id
    )
    # NOTE(review): the comment is interpolated unescaped into the query
    # string; spaces in it produce an invalid URL -- consider urllib.parse.urlencode.
    _url = f'https://oplata.qiwi.com/create?' \
           f'publicKey={PUBLIC_QIWI_KEY}&billId={payment.uuid}&amount={payment.amount}&comment={payment.comment}'
    payment_url = await requests.pay_request(_url)
    # Return the QIWI payment URL along with the ORM row's fields.
    return {'url': f'{payment_url}', **payment.__dict__}
async def check(db: AsyncSession, pk: int) -> dict[str, str]:
    """
    Check payment and level up
    :param db: DB
    :type db: AsyncSession
    :param pk: Payment ID
    :type pk: int
    :return: Message
    :rtype: dict
    :raise HTTPException 400: Payment not found
    :raise HTTPException 400: The purchase has already been credited
    :raise HTTPException 400: Payment not paid
    """
    if not await payment_crud.exist(db, id=pk):
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='Payment not found')
    payment = await payment_crud.get(db, id=pk)
    # An invoice can only be credited once.
    if payment.is_completed:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='The purchase has already been credited')
    response = await requests.check_request(f'https://api.qiwi.com/partner/bill/v1/bills/{payment.uuid}')
    # NOTE(review): if the response lacks 'status' this raises AttributeError;
    # response.get('status', {}) would fail safe as "not paid".
    if response.get('status').get('value') != 'PAID':
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail='Payment not paid')
    # Credit the purchased levels to the payer and close out the invoice.
    user = await user_crud.get(db, id=payment.user_id)
    await user_crud.update(db, {'id': user.id}, level=user.level + payment.amount)
    await payment_crud.update(db, {'id': payment.id}, is_completed=True)
    return {'msg': 'Level has been up'}
| 0 | 0 | 0 |
ef18948fdb78fa3068ca12b44f03bba8440d1b43 | 7,594 | py | Python | Testes com Pygame/scrolling_plataformer.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/scrolling_plataformer.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/scrolling_plataformer.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | #! /usr/bin/python
import pygame
from pygame import *
WIN_WIDTH = 800
WIN_HEIGHT = 640
HALF_WIDTH = int(WIN_WIDTH / 2)
HALF_HEIGHT = int(WIN_HEIGHT / 2)
DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
DEPTH = 32
FLAGS = 0
CAMERA_SLACK = 30
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): main() is not defined anywhere in this file as shown, so
    # running it raises NameError -- the definition appears to have been stripped.
    main()
| 32.732759 | 78 | 0.454306 | #! /usr/bin/python
import pygame
from pygame import *
WIN_WIDTH = 800
WIN_HEIGHT = 640
HALF_WIDTH = int(WIN_WIDTH / 2)
HALF_HEIGHT = int(WIN_HEIGHT / 2)
DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
DEPTH = 32
FLAGS = 0
CAMERA_SLACK = 30
def main():
    """Set up the window, build the level from the ASCII map, and run the game loop."""
    # NOTE(review): cameraX/cameraY are declared global but never assigned or
    # read anywhere in this file -- appears to be leftover.
    global cameraX, cameraY
    pygame.init()
    screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)
    pygame.display.set_caption("Use arrows to move!")
    timer = pygame.time.Clock()
    # Per-frame input state, toggled by KEYDOWN/KEYUP events below.
    up = down = left = right = running = False
    # 32x32 black tile used to repaint the background each frame.
    bg = Surface((32,32))
    bg.convert()
    bg.fill(Color("#000000"))
    entities = pygame.sprite.Group()
    player = Player(32, 32)
    platforms = []
    x = y = 0
    # ASCII level map: 'P' = platform tile, 'E' = exit tile, one char per 32px cell.
    level = [
        "PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",
        "P                                          P",
        "P                                          P",
        "P                                          P",
        "P                    PPPPPPPPPPP           P",
        "P                                          P",
        "P                                          P",
        "P                                          P",
        "P    PPPPPPPP                              P",
        "P                                          P",
        "P                          PPPPPPP         P",
        "P                 PPPPPP                   P",
        "P                                          P",
        "P         PPPPPPP                          P",
        "P                                          P",
        "P                     PPPPPP               P",
        "P                                          P",
        "P   PPPPPPPPPPP                            P",
        "P                                          P",
        "P                 PPPPPPPPPPP              P",
        "P                                          P",
        "P                                          P",
        "P                                          P",
        "P                                          P",
        "PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP",]
    # build the level
    for row in level:
        for col in row:
            if col == "P":
                p = Platform(x, y)
                platforms.append(p)
                entities.add(p)
            if col == "E":
                e = ExitBlock(x, y)
                platforms.append(e)
                entities.add(e)
            x += 32
        y += 32
        x = 0
    # Camera scroll range is the full pixel size of the level.
    total_level_width = len(level[0])*32
    total_level_height = len(level)*32
    camera = Camera(complex_camera, total_level_width, total_level_height)
    entities.add(player)
    while 1:
        # Cap the loop at 60 frames per second.
        timer.tick(60)
        for e in pygame.event.get():
            if e.type == QUIT: raise SystemExit("QUIT")
            if e.type == KEYDOWN and e.key == K_ESCAPE:
                raise SystemExit("ESCAPE")
            if e.type == KEYDOWN and e.key == K_UP:
                up = True
            if e.type == KEYDOWN and e.key == K_DOWN:
                down = True
            if e.type == KEYDOWN and e.key == K_LEFT:
                left = True
            if e.type == KEYDOWN and e.key == K_RIGHT:
                right = True
            if e.type == KEYDOWN and e.key == K_SPACE:
                running = True
            if e.type == KEYUP and e.key == K_UP:
                up = False
            if e.type == KEYUP and e.key == K_DOWN:
                down = False
            if e.type == KEYUP and e.key == K_RIGHT:
                right = False
            if e.type == KEYUP and e.key == K_LEFT:
                left = False
        # draw background
        # (x and y are reused here; harmless since the level-build loop is done)
        for y in range(32):
            for x in range(32):
                screen.blit(bg, (x * 32, y * 32))
        camera.update(player)
        # update player, draw everything else
        player.update(up, down, left, right, running, platforms)
        for e in entities:
            screen.blit(e.image, camera.apply(e))
        pygame.display.update()
class Camera(object):
    """Tracks the scroll offset and shifts world-space rects into screen space."""

    def __init__(self, camera_func, width, height):
        # camera_func(state, target_rect) -> new state Rect (e.g. complex_camera).
        self.camera_func = camera_func
        self.state = Rect(0, 0, width, height)

    def apply(self, target):
        """Return the target's rect translated by the current camera offset."""
        offset = self.state.topleft
        return target.rect.move(offset)

    def update(self, target):
        """Recompute the camera offset so ``target`` stays framed."""
        self.state = self.camera_func(self.state, target.rect)
def simple_camera(camera, target_rect):
    """Center the view on the target with no clamping at the level edges."""
    left, top, _, _ = target_rect
    _, _, view_w, view_h = camera
    return Rect(HALF_WIDTH - left, HALF_HEIGHT - top, view_w, view_h)
def complex_camera(camera, target_rect):
    """Center the view on the target, but never scroll past the level bounds."""
    left, top, _, _ = target_rect
    _, _, view_w, view_h = camera
    # Offsets that would put the target dead-center on screen.
    offset_x = HALF_WIDTH - left
    offset_y = HALF_HEIGHT - top
    # Clamp horizontally between the right-edge limit and the left edge (0);
    # clamp vertically between the bottom limit and the top edge (0).
    offset_x = max(-(camera.width - WIN_WIDTH), min(0, offset_x))
    offset_y = min(0, max(-(camera.height - WIN_HEIGHT), offset_y))
    return Rect(offset_x, offset_y, view_w, view_h)
class Entity(pygame.sprite.Sprite):
    """Common base sprite for the player and all platform tiles."""

    def __init__(self):
        super().__init__()
class Player(Entity):
    """The controllable 32x32 player sprite with simple gravity and collision."""

    def __init__(self, x, y):
        Entity.__init__(self)
        # Current velocity in pixels per frame.
        self.xvel = 0
        self.yvel = 0
        self.onGround = False
        self.image = Surface((32,32))
        self.image.fill(Color("#0000FF"))
        self.image.convert()
        self.rect = Rect(x, y, 32, 32)

    def update(self, up, down, left, right, running, platforms):
        """Apply input and gravity, then move and resolve collisions per axis."""
        if up:
            # only jump if on the ground
            if self.onGround: self.yvel -= 10
        if down:
            pass
        if running:
            self.xvel = 12
        if left:
            self.xvel = -8
        if right:
            self.xvel = 8
        if not self.onGround:
            # only accelerate with gravity if in the air
            self.yvel += 0.3
            # max falling speed
            if self.yvel > 100: self.yvel = 100
        if not(left or right):
            self.xvel = 0
        # increment in x direction
        self.rect.left += self.xvel
        # do x-axis collisions
        self.collide(self.xvel, 0, platforms)
        # increment in y direction
        self.rect.top += self.yvel
        # assuming we're in the air
        self.onGround = False;
        # do y-axis collisions
        self.collide(0, self.yvel, platforms)

    def collide(self, xvel, yvel, platforms):
        """Push the player out of any overlapping platform along the moving axis."""
        for p in platforms:
            if pygame.sprite.collide_rect(self, p):
                # Touching the exit tile ends the game via a QUIT event.
                if isinstance(p, ExitBlock):
                    pygame.event.post(pygame.event.Event(QUIT))
                if xvel > 0:
                    self.rect.right = p.rect.left
                    print("collide right")
                if xvel < 0:
                    self.rect.left = p.rect.right
                    print("collide left")
                if yvel > 0:
                    # Landed on top of a platform.
                    self.rect.bottom = p.rect.top
                    self.onGround = True
                    self.yvel = 0
                    # print("hit the ground")
                    print()
                if yvel < 0:
                    # Bumped the underside of a platform.
                    self.rect.top = p.rect.bottom
                    # print("touched the ceiling")
class Platform(Entity):
    """A static 32x32 level tile."""

    def __init__(self, x, y):
        super().__init__()
        tile = Surface((32, 32))
        tile.convert()
        tile.fill(Color("#DDDDDD"))
        self.image = tile
        self.rect = Rect(x, y, 32, 32)

    def update(self):
        # Platforms never move.
        pass
class ExitBlock(Platform):
    """A platform tile that ends the game when the player touches it."""

    def __init__(self, x, y):
        super().__init__(x, y)
        self.image.fill(Color("#0033FF"))
if __name__ == "__main__":
    # Run the game when executed as a script.
    main()
| 6,856 | 21 | 449 |
33f621e85f116a0cc71456be34809b20fbefd5d3 | 2,121 | py | Python | ssrm_test/test_jupyter_notebooks.py | michaellindon/ssrm | c44f5382471a2c2ddea4bf597072304745af8578 | [
"Apache-2.0"
] | null | null | null | ssrm_test/test_jupyter_notebooks.py | michaellindon/ssrm | c44f5382471a2c2ddea4bf597072304745af8578 | [
"Apache-2.0"
] | null | null | null | ssrm_test/test_jupyter_notebooks.py | michaellindon/ssrm | c44f5382471a2c2ddea4bf597072304745af8578 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Optimizely Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
from typing import Any, Union
import papermill as pm
import pytest
JUPYTER_NOTEBOOK_DIR: Union[bytes, str] = os.path.join(os.getcwd(), "notebooks")
INCLUDED_NOTEBOOK_GLOB = os.path.join(JUPYTER_NOTEBOOK_DIR, "*.ipynb")
JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR: Union[bytes, str] = os.path.join(
os.getcwd(), "ssrm_test", "jupyter_notebook_testing_output"
)
@pytest.fixture
def generate_papermill_output_dir(tmpdir_factory: object) -> object:
    """Ensures directory exists for output notebooks. This is one of the
    required parameters for papermill.execute_notebook()

    Yields the output directory path and always removes it afterwards.
    """
    os.makedirs(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR, exist_ok=True)
    try:
        yield JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR
    finally:
        # Teardown: delete testing output dir.
        # ROBUSTNESS FIX: try/finally guarantees cleanup even when the
        # consuming test raises; the original skipped teardown in that case
        # and its `except OSError as err: raise err` was a no-op wrapper.
        shutil.rmtree(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR)
| 35.35 | 82 | 0.715229 | # Copyright 2020 Optimizely Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
from typing import Any, Union
import papermill as pm
import pytest
JUPYTER_NOTEBOOK_DIR: Union[bytes, str] = os.path.join(os.getcwd(), "notebooks")
INCLUDED_NOTEBOOK_GLOB = os.path.join(JUPYTER_NOTEBOOK_DIR, "*.ipynb")
JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR: Union[bytes, str] = os.path.join(
os.getcwd(), "ssrm_test", "jupyter_notebook_testing_output"
)
@pytest.fixture
def generate_papermill_output_dir(tmpdir_factory: object) -> object:
    """Ensures directory exists for output notebooks. This is one of the
    required parameters for papermill.execute_notebook()
    """
    try:
        os.makedirs(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR, exist_ok=True)
        yield JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR
        # Teardown: delete testing output dir.
        # NOTE(review): teardown is skipped when the consuming test raises
        # (the exception propagates through the yield); a try/finally around
        # the yield would always clean up.
        shutil.rmtree(JUPYTER_NOTEBOOK_TESTING_OUTPUT_DIR)
    except OSError as err:
        # NOTE(review): re-raising the caught error unchanged makes this
        # except block a no-op.
        raise err
def test_all_jupyter_notebook(generate_papermill_output_dir, caplog):
    """Execute every notebook matching INCLUDED_NOTEBOOK_GLOB via papermill."""
    caplog.set_level("INFO", logger="papermill")
    for src_path in glob.glob(INCLUDED_NOTEBOOK_GLOB):
        # Write the executed copy into the fixture-provided output directory.
        dst_path = os.path.join(
            generate_papermill_output_dir, os.path.basename(src_path)
        )
        pm.execute_notebook(
            src_path,
            dst_path,
            cwd=JUPYTER_NOTEBOOK_DIR,
            log_output=True,
        )
| 572 | 0 | 23 |
82526c0dd834ab826582f8c46cdfe3cfd94bc82e | 6,390 | py | Python | pythonfiles/testphone_pythonfiles/test_phone/test/test.py | Huyuxuan-github/wica | f87b22ce4c773ce96b9e583ebcd5bf36469e32c7 | [
"WTFPL"
] | 1 | 2022-03-04T01:03:50.000Z | 2022-03-04T01:03:50.000Z | pythonfiles/testphone_pythonfiles/test_phone/test/test.py | Huyuxuan-github/wica | f87b22ce4c773ce96b9e583ebcd5bf36469e32c7 | [
"WTFPL"
] | null | null | null | pythonfiles/testphone_pythonfiles/test_phone/test/test.py | Huyuxuan-github/wica | f87b22ce4c773ce96b9e583ebcd5bf36469e32c7 | [
"WTFPL"
] | 1 | 2022-03-04T01:03:53.000Z | 2022-03-04T01:03:53.000Z | from tkinter import *
from tkinter import filedialog
# from tkinter.filedialog import FileDialog
from traceback import *
from win32com.client import Dispatch
import time,eyed3,threading
# NOTE(review): the callback functions referenced below (openfile, play,
# stop, pause, Volume_ctr, ...) are not defined in this file as shown --
# loading it raises NameError; their definitions appear to have been stripped.
name = []
# --- main window, WMP COM control, and album-art canvas ---
root =Tk()
root.geometry("1000x385")
root.title('本地音乐播放器')
wmp = Dispatch("WMPlayer.OCX")
canvas = Canvas(root,width =200,height = 200,bg = "white")
img = PhotoImage(file = 'C:\\\\Users\\\\1\\\\Desktop\\\\test_phone\\\\test\\\\111.gif')
canvas.create_image((200,200),image = img)
canvas.place(x=0,y=0)
canvas.coords(img,100,50)
canvas.grid(row =0,column = 0,sticky = "nw",rowspan =2)
# --- playback progress slider ---
progress_lab = LabelFrame(root,text = "播放进度")
progress_lab.grid(row =2,column =0,sticky = "ws",rowspan = 2)
var_scale = DoubleVar()
progress_scal = Scale(progress_lab,orient = HORIZONTAL,showvalue = 0,length =180,variable = var_scale)
progress_scal.bind("<Button-1>",pause)
# NOTE(review): an empty event-sequence bind -- looks like an unfinished line.
progress_scal.bind("")
progress_scal.bind("<ButtonRelease-1>",play)
progress_scal.grid(row =3,column =0)
# --- play-mode radio buttons and prev/next ---
modee_lab = LabelFrame(root,text = "播放模式")
modee_lab.grid(row =4,column =0,rowspan =2,sticky = "ws")
var_mode = IntVar()
randomradio = Radiobutton(modee_lab,variable = var_mode,value = 1,text ="随机播放",command =List_random )
randomradio.grid(row =4,column =2)
inturnradio = Radiobutton(modee_lab,variable = var_mode,value =2,text= "顺序播放",command = play)
inturnradio.grid(row=4,column =3)
alloop = Radiobutton(modee_lab,variable = var_mode,value =2,text = "全部循环播放",command = List_loop)
alloop.grid(row =5,column = 2)
sinloop = Radiobutton(modee_lab,variable = var_mode,value =3,text = "单曲循环播放")
sinloop.grid(row =5,column =3)
previous_play = Button(modee_lab,text = "上一曲",command = Previous_it)
previous_play.grid(row =6,column =2,rowspan =2,pady =10)
next_play = Button(modee_lab,text = "下一曲",command = Next_it)
next_play.grid(row =6,column =3,rowspan =2,pady =10)
# --- volume slider and +/- buttons ---
var_volume = IntVar()
vioce_lab = LabelFrame(root,text = "音量控制")
vioce_lab.grid(row =8,column =0,sticky = "wes")
vio_scale = Scale(vioce_lab,orient = HORIZONTAL,length =170,variable = var_volume,command =Volume_ctr)
vio_scale.set(30)
vio_scale.grid(row =8,column =0)
vio_plus = Button(vioce_lab,width =8,text = "增加音量+",command =Volume_add)
vio_plus.grid(row =9,column =0,sticky = "w")
vio_minus = Button(vioce_lab,width =8,text ="减少音量-",command = Volume_minus)
vio_minus.grid(row =9,column =0,sticky ="e")
# --- playback control buttons ---
ctr_lab = LabelFrame(root,text = "播放控制",height =100)
ctr_lab.grid(row =0,column =1,rowspan =12,sticky = "ns")
btn_open = Button(ctr_lab,text ="打开音乐文件",width =10,command = openfile)
btn_open.grid(row=0,column =1)
btn_play = Button(ctr_lab,text ="播放",width =10,command = play)
btn_play.grid(row =1,column =1,pady =5)
btn_stop = Button(ctr_lab,text ="停止",width =10,command = stop)
btn_stop.grid(row =2,column =1,pady =5)
btn_pause = Button(ctr_lab,text ="暂停",width =10,command = pause)
btn_pause.grid(row =3,column =1,pady =5)
# (commented-out UI kept from the original: playlist create/import/export,
#  delete-track, save-list, clock display)
# btn_playlist = Button(ctr_lab,text ="新建播放列表",width =10,command = uselist)
# btn_playlist.grid(row =4,column =1,pady =5)
# listimport = Button(ctr_lab,width =10,text = "导入列表")
# listimport.grid(row =6,column =1,sticky ="nw",pady =5)
# listexport = Button(ctr_lab,width =10,text = "导出列表")
# listexport.grid(row =7,column =1,sticky = "nw",pady =5)
listdel_all = Button(ctr_lab,width =10,text = "清空列表",command = Clear_list)
listdel_all.grid(row =8,column =1,sticky ="nw",pady =5)
# listdel_sel= Button(ctr_lab,width =10,text = "删除歌曲")
# listdel_sel.grid(row =12,column =1,sticky = "nw",pady =5)
# savelist_btn = Button(ctr_lab,text = "保存为列表")
# savelist_btn.grid(row=9,column =1)
min_btn = Button(ctr_lab,width =10,text = "最小化窗口",command = root.iconify)
min_btn.grid(row =8,column =1,sticky ="nw",pady =5)
##time_lab= Label(root,width =20,height =2,text ="现在时间为:")
##time_lab.grid(row =12,column =0,sticky = "nw",pady =5)
# time_text= Text(root,width =30,height =3,foreground ="green")
# time_text.grid(row =10,column =0,sticky = "nw",pady =5)
# --- playlist display and main loop ---
list_name = Text(root,height =30,width =50)
list_name.grid(row =0,column =2,sticky = "n",rowspan =6)
root.mainloop()
| 34.540541 | 125 | 0.700939 | from tkinter import *
from tkinter import filedialog
# from tkinter.filedialog import FileDialog
from traceback import *
from win32com.client import Dispatch
import time,eyed3,threading
name = []
def openfile(index = [1]):
    """Pick audio files, queue them in WMP, and list name + duration on screen.

    NOTE: the mutable default ``index=[1]`` is deliberately (ab)used as
    persistent numbering state that survives across calls.
    """
    global total,name
    filenames = filedialog.askopenfilenames(title = "音乐播放器",filetypes =[("mp3文件","*.mp3"),("WMA文件","*.wma"),("WAV文件","*.wav")])
    if filenames:
        for i in range(len(filenames)):
            media = wmp.newMedia(filenames[i])
            wmp.currentPlaylist.appendItem(media)
            print(filenames[i])
            coco = eyed3.load(filenames[i])  # read mp3 metadata with the eyed3 module
            total = int(coco.info.time_secs)
            minute = int(coco.info.time_secs)//60
            sec = int(coco.info.time_secs)%60
            # NOTE(review): `length` is computed but never used.
            length = int(coco.info.time_secs)
            name = filenames[i].split("/")
            # Reuse i as the running track number taken from the persistent list.
            i =index[-1]
            list_name.insert(END,str(i)+"."+name[-1])
            list_name.insert(END," "*6)
            if sec >=10:
                list_name.insert(END,"0%d:%d" %(minute,sec)+ "\n")
            else:
                list_name.insert(END,"0%s:0%d" %(minute,sec)+ "\n")
            i = i +1
            index.append(i)
def play(event = None):
    """Start (or resume) playback and launch the progress-updater thread."""
    # root.title("%s" % name[-1])  -- superseded: `per` sets the window title
    # from wmp.currentMedia.name instead.
    per_thread = threading.Thread(target = per)
    # BUG FIX: the original assigned `per_thread.daemnon` (typo), which only
    # created an unused attribute; the thread stayed non-daemonic and could
    # keep the process alive after the window closed.
    per_thread.daemon = True
    wmp.controls.play()
    per_thread.start()
    # print(wmp.currentMedia.duration)  # (works from pause, but not here)
def per():
    """Poll WMP once a second and mirror the playback position onto the scale."""
    global total
    # Runs until the player leaves the playing state (playState == 1 is "stopped").
    while wmp.playState !=1:
        progress_scal.set(int(wmp.controls.currentPosition))
        progress_scal.config(label = wmp.controls.currentPositionString)
        progress_scal.config(to = total,tickinterval = 50)
        time.sleep(1)
    root.title("%s" % wmp.currentMedia.name)
def stop():
    """Stop playback via the WMP COM control."""
    wmp.controls.stop()
def pause(event = None):
    """Pause playback (also bound as a widget event handler)."""
    wmp.controls.pause()
def uselist():
    # Placeholder for the (disabled) "new playlist" button.
    pass
def fullscr():
    # Placeholder for an unimplemented full-screen toggle.
    pass
def exitit():
    """Close the application window."""
    root.destroy()
def Previous_it():
    """Skip to the previous track in the WMP playlist."""
    wmp.controls.previous()
def Next_it():
    """Skip to the next track in the WMP playlist."""
    wmp.controls.next()
def Volume_ctr(none):
    """Scale callback: apply the slider position to the WMP volume.

    The unused ``none`` parameter absorbs the value Tk passes to Scale commands.
    """
    wmp.settings.Volume = vio_scale.get()
def Volume_add(i=[0]):
    """Raise the WMP volume by 5 and sync the slider.

    NOTE(review): the mutable default list ``i`` only accumulates values that
    are never read -- apparently leftover debugging state.
    """
    wmp.settings.Volume =wmp.settings.Volume+5
    i.append(wmp.settings.Volume)
    vio_scale.set(wmp.settings.Volume)
def Volume_minus(i=[0]):
    """Lower the WMP volume by 5 and sync the slider.

    NOTE(review): the mutable default list ``i`` only accumulates values that
    are never read -- apparently leftover debugging state.
    """
    wmp.settings.Volume = wmp.settings.Volume -5
    i.append(wmp.settings.Volume)
    vio_scale.set(wmp.settings.Volume)
def Scale_ctr(none):
    """Seek playback to the progress-scale position (Tk callback signature)."""
    wmp.controls.currentPosition = var_scale.get()
    print(wmp.currentMedia.duration)
def Clear_list():
    """Empty the WMP playlist and the on-screen track list."""
    wmp.currentPlaylist.clear()
    list_name.delete(1.0,END)
    # CLEANUP: the original also rebound locals `name = []` and `index = []`
    # here; both were dead assignments (they shadowed rather than reset any
    # module-level state), so they are removed. Track numbering kept in
    # `openfile`'s default argument still continues across clears.
def List_random():
    """Enable shuffle mode and start playback."""
    wmp.settings.setMode("shuffle",True)
    play()
def List_loop():
    """Enable loop-all mode and start playback."""
    wmp.settings.setMode("loop",True)
    play()
# --- main window, WMP COM control, and album-art canvas ---
root =Tk()
root.geometry("1000x385")
root.title('本地音乐播放器')
wmp = Dispatch("WMPlayer.OCX")
canvas = Canvas(root,width =200,height = 200,bg = "white")
img = PhotoImage(file = 'C:\\\\Users\\\\1\\\\Desktop\\\\test_phone\\\\test\\\\111.gif')
canvas.create_image((200,200),image = img)
canvas.place(x=0,y=0)
canvas.coords(img,100,50)
canvas.grid(row =0,column = 0,sticky = "nw",rowspan =2)
# --- playback progress slider ---
progress_lab = LabelFrame(root,text = "播放进度")
progress_lab.grid(row =2,column =0,sticky = "ws",rowspan = 2)
var_scale = DoubleVar()
progress_scal = Scale(progress_lab,orient = HORIZONTAL,showvalue = 0,length =180,variable = var_scale)
progress_scal.bind("<Button-1>",pause)
# NOTE(review): an empty event-sequence bind -- looks like an unfinished line.
progress_scal.bind("")
progress_scal.bind("<ButtonRelease-1>",play)
progress_scal.grid(row =3,column =0)
# --- play-mode radio buttons and prev/next ---
modee_lab = LabelFrame(root,text = "播放模式")
modee_lab.grid(row =4,column =0,rowspan =2,sticky = "ws")
var_mode = IntVar()
randomradio = Radiobutton(modee_lab,variable = var_mode,value = 1,text ="随机播放",command =List_random )
randomradio.grid(row =4,column =2)
inturnradio = Radiobutton(modee_lab,variable = var_mode,value =2,text= "顺序播放",command = play)
inturnradio.grid(row=4,column =3)
alloop = Radiobutton(modee_lab,variable = var_mode,value =2,text = "全部循环播放",command = List_loop)
alloop.grid(row =5,column = 2)
sinloop = Radiobutton(modee_lab,variable = var_mode,value =3,text = "单曲循环播放")
sinloop.grid(row =5,column =3)
previous_play = Button(modee_lab,text = "上一曲",command = Previous_it)
previous_play.grid(row =6,column =2,rowspan =2,pady =10)
next_play = Button(modee_lab,text = "下一曲",command = Next_it)
next_play.grid(row =6,column =3,rowspan =2,pady =10)
# --- volume slider and +/- buttons ---
var_volume = IntVar()
vioce_lab = LabelFrame(root,text = "音量控制")
vioce_lab.grid(row =8,column =0,sticky = "wes")
vio_scale = Scale(vioce_lab,orient = HORIZONTAL,length =170,variable = var_volume,command =Volume_ctr)
vio_scale.set(30)
vio_scale.grid(row =8,column =0)
vio_plus = Button(vioce_lab,width =8,text = "增加音量+",command =Volume_add)
vio_plus.grid(row =9,column =0,sticky = "w")
vio_minus = Button(vioce_lab,width =8,text ="减少音量-",command = Volume_minus)
vio_minus.grid(row =9,column =0,sticky ="e")
# --- playback control buttons ---
ctr_lab = LabelFrame(root,text = "播放控制",height =100)
ctr_lab.grid(row =0,column =1,rowspan =12,sticky = "ns")
btn_open = Button(ctr_lab,text ="打开音乐文件",width =10,command = openfile)
btn_open.grid(row=0,column =1)
btn_play = Button(ctr_lab,text ="播放",width =10,command = play)
btn_play.grid(row =1,column =1,pady =5)
btn_stop = Button(ctr_lab,text ="停止",width =10,command = stop)
btn_stop.grid(row =2,column =1,pady =5)
btn_pause = Button(ctr_lab,text ="暂停",width =10,command = pause)
btn_pause.grid(row =3,column =1,pady =5)
# (commented-out UI kept from the original: playlist create/import/export,
#  delete-track, save-list, clock display)
# btn_playlist = Button(ctr_lab,text ="新建播放列表",width =10,command = uselist)
# btn_playlist.grid(row =4,column =1,pady =5)
# listimport = Button(ctr_lab,width =10,text = "导入列表")
# listimport.grid(row =6,column =1,sticky ="nw",pady =5)
# listexport = Button(ctr_lab,width =10,text = "导出列表")
# listexport.grid(row =7,column =1,sticky = "nw",pady =5)
listdel_all = Button(ctr_lab,width =10,text = "清空列表",command = Clear_list)
listdel_all.grid(row =8,column =1,sticky ="nw",pady =5)
# listdel_sel= Button(ctr_lab,width =10,text = "删除歌曲")
# listdel_sel.grid(row =12,column =1,sticky = "nw",pady =5)
# savelist_btn = Button(ctr_lab,text = "保存为列表")
# savelist_btn.grid(row=9,column =1)
# NOTE(review): min_btn is gridded at the same cell as listdel_all (row 8,
# column 1) -- one of the two buttons will cover the other.
min_btn = Button(ctr_lab,width =10,text = "最小化窗口",command = root.iconify)
min_btn.grid(row =8,column =1,sticky ="nw",pady =5)
##time_lab= Label(root,width =20,height =2,text ="现在时间为:")
##time_lab.grid(row =12,column =0,sticky = "nw",pady =5)
# time_text= Text(root,width =30,height =3,foreground ="green")
# time_text.grid(row =10,column =0,sticky = "nw",pady =5)
# --- playlist display and main loop ---
list_name = Text(root,height =30,width =50)
list_name.grid(row =0,column =2,sticky = "n",rowspan =6)
root.mainloop()
| 2,065 | 0 | 378 |
44ee42bf9ff9030f86932367a57942656a472290 | 1,826 | py | Python | consolemenu/items/submenu_item.py | Tyler887/console-menu | 1f1df62524aa4a8be08c343802bce9dbc21cfa01 | [
"MIT"
] | 230 | 2018-04-30T18:03:31.000Z | 2022-03-26T17:35:22.000Z | consolemenu/items/submenu_item.py | Tyler887/console-menu | 1f1df62524aa4a8be08c343802bce9dbc21cfa01 | [
"MIT"
] | 53 | 2018-09-07T13:51:31.000Z | 2022-03-31T08:56:27.000Z | consolemenu/items/submenu_item.py | Tyler887/console-menu | 1f1df62524aa4a8be08c343802bce9dbc21cfa01 | [
"MIT"
] | 51 | 2018-10-28T07:48:06.000Z | 2022-03-06T12:57:16.000Z | from consolemenu.items import MenuItem
class SubmenuItem(MenuItem):
    """A menu item that, when selected, opens another (sub)menu."""

    def __init__(self, text, submenu, menu=None, should_exit=False):
        """
        :ivar str text: The text shown for this menu item
        :ivar ConsoleMenu submenu: The submenu to be opened when this item is selected
        :ivar ConsoleMenu menu: The menu to which this item belongs
        :ivar bool should_exit: Whether the menu should exit once this item's action is done
        """
        super(SubmenuItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
        self.submenu = submenu
        # Keep the submenu's parent pointer in sync with the owning menu.
        if menu:
            self.get_submenu().parent = menu

    def set_menu(self, menu):
        """Attach this item (and its submenu) to ``menu``.

        Use this instead of assigning the ``menu`` attribute directly so that
        the submenu's parent stays consistent.
        """
        self.menu = menu
        self.get_submenu().parent = menu

    def set_up(self):
        """Suspend the parent menu before the submenu takes over the screen."""
        self.menu.pause()
        self.menu.clear_screen()

    def action(self):
        """Start the submenu."""
        self.get_submenu().start()

    def clean_up(self):
        """Wait for the submenu to finish, then hand control back to the parent."""
        self.get_submenu().join()
        self.menu.clear_screen()
        self.menu.resume()

    def get_return(self):
        """:return: The returned value in the submenu"""
        return self.get_submenu().returned_value

    def get_submenu(self):
        """Resolve ``submenu``, which may be a menu or a factory callable."""
        if callable(self.submenu):
            return self.submenu()
        return self.submenu
| 28.53125 | 99 | 0.601862 | from consolemenu.items import MenuItem
class SubmenuItem(MenuItem):
    """
    A menu item to open a submenu
    """
    def __init__(self, text, submenu, menu=None, should_exit=False):
        """
        :ivar str text: The text shown for this menu item
        :ivar ConsoleMenu submenu: The submenu to be opened when this item is selected
        :ivar ConsoleMenu menu: The menu to which this item belongs
        :ivar bool should_exit: Whether the menu should exit once this item's action is done
        """
        super(SubmenuItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
        self.submenu = submenu
        # Keep the submenu's parent pointer in sync with the owning menu.
        if menu:
            self.get_submenu().parent = menu
    def set_menu(self, menu):
        """
        Sets the menu of this item.
        Should be used instead of directly accessing the menu attribute for this class.

        :param ConsoleMenu menu: the menu
        """
        self.menu = menu
        self.get_submenu().parent = menu
    def set_up(self):
        """
        This class overrides this method
        """
        # Suspend the parent menu before the submenu takes over the screen.
        self.menu.pause()
        self.menu.clear_screen()
    def action(self):
        """
        This class overrides this method
        """
        self.get_submenu().start()
    def clean_up(self):
        """
        This class overrides this method
        """
        # Wait for the submenu to finish, then hand control back to the parent.
        self.get_submenu().join()
        self.menu.clear_screen()
        self.menu.resume()
    def get_return(self):
        """
        :return: The returned value in the submenu
        """
        return self.get_submenu().returned_value
    def get_submenu(self):
        """
        We unwrap the submenu variable in case it is a reference to a method that returns a submenu
        """
        return self.submenu if not callable(self.submenu) else self.submenu()
| 0 | 0 | 0 |
58a2e095dd52fbf9150d60febf9321dd76634b0e | 14,932 | py | Python | code/scripts/dataset.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | 1 | 2021-11-28T09:29:33.000Z | 2021-11-28T09:29:33.000Z | code/scripts/dataset.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | null | null | null | code/scripts/dataset.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | null | null | null | import os
import torch
import torchvision.transforms as transforms
from PIL import Image
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import WeightedRandomSampler
# for cifar4, should be
# means = [0.496887 0.4964557 0.4691896]
# std = [0.23818445 0.23714599 0.25873092]
# for isic (without sonic): should be
# means = [0.7519596 0.5541971 0.5523066]
# std = [0.14961188 0.16191609 0.1755095 ]
# for isic (with sonic - uncommon): should be
# means = [0.71835655 0.5626882 0.5254832]
# std = [0.15869816 0.14007549 0.1677716]
| 40.797814 | 130 | 0.567975 | import os
import torch
import torchvision.transforms as transforms
from PIL import Image
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import WeightedRandomSampler
class GroupedDataset(Dataset):
    """Image dataset whose examples carry group labels for stratified analysis.

    Reads a CSV of per-image metadata (label, group memberships, train/test
    fold), keeps only the rows of the requested ``split``, and serves
    ``{'image', 'target', <group columns>}`` dicts.

    Reference per-channel normalization statistics (computed offline):
      cifar4:            mean [0.496887  0.4964557 0.4691896]
                         std  [0.23818445 0.23714599 0.25873092]
      isic (no sonic):   mean [0.7519596 0.5541971 0.5523066]
                         std  [0.14961188 0.16191609 0.1755095]
      isic (with sonic): mean [0.71835655 0.5626882 0.5254832]
                         std  [0.15869816 0.14007549 0.1677716]
    """

    def __init__(self,
                 data_dir,
                 csv_fp,                            # CSV path, relative to data_dir
                 image_fp,                          # image directory, relative to data_dir
                 split,                             # which 'fold' value to keep
                 label_colname,
                 group_colnames=[],                 # columns we may later stratify on (read-only default)
                 this_group_key=None,               # subsetting group; must be in group_colnames
                 transforms_basic=None,
                 transforms_augmented_subset=None,
                 image_colname='image_name'):
        group_id = this_group_key
        self.images_path = os.path.join(data_dir, image_fp)
        # full_data holds labels, groups, and image file names for every fold.
        self.full_data = pd.read_csv(os.path.join(data_dir, csv_fp))
        # Keep only the rows belonging to this split (train/eval/test).
        split_df = self.full_data[self.full_data['fold'] == split]
        self.df = split_df.set_index(image_colname)
        self.split = split
        self.transforms_basic = transforms_basic
        if transforms_augmented_subset is not None:
            # Per-row augmentation: the CSV must flag which rows to augment.
            self.augment_subsets = True
            self.transforms_augmented_subset = transforms_augmented_subset
            augment_col = 'augment'
            assert augment_col in self.df.columns, '{0} must be a column of the dataframe'.format(augment_col)
            self.whether_to_augment = torch.from_numpy(np.array(self.df[augment_col].values.reshape(-1)))
        else:
            self.augment_subsets = False
        # Per-entry bookkeeping for this split.
        self.image_names = list(self.df.index)
        self.image_splits = [split] * len(self.df)
        self.image_idxs = list(range(len(self.df)))
        self.targets = torch.from_numpy(np.array(self.df[label_colname].values))
        self.has_groups = (group_id is not None)
        # Store group memberships of each instance for downstream evaluation.
        if group_colnames == [] and self.has_groups:
            print('instantiating group_colname as {0}'.format([group_id]))
            group_colnames = [group_id]
        group_info = {}
        for group_colname in group_colnames:
            group_info[group_colname] = torch.from_numpy(np.array(self.df[group_colname].values.reshape(-1)))
        self.group_info = group_info
        # Per-instance group ids and per-group counts for the subsetting key.
        self.groups = []
        self.group_counts = []
        if self.has_groups:
            self.group_id = group_id
            # Unique values the subsetting group takes on, sorted.
            self.group_names = np.sort(np.unique(self.df[group_id].values))
            self.groups = group_info[group_id]
            self.group_counts = torch.zeros(len(self.group_names))
            for g, group_name in enumerate(self.group_names):
                self.group_counts[g] = (self.groups == group_name).sum()
        self.n = len(self.df)

    def __len__(self):
        """Number of examples in this split."""
        return self.n

    def __getitem__(self, idx):
        """Return ``{'image', 'target', <group columns>}`` for example ``idx``."""
        image_path = os.path.join(self.images_path, self.image_names[idx])
        try:
            image = Image.open(image_path)
        except Exception:
            # BUG FIX: the original bare `except` printed and fell through,
            # then crashed on an undefined `image`; report and re-raise instead.
            print('failed opening ', image_path)
            raise
        image = image.convert('RGB')
        if self.augment_subsets and self.whether_to_augment[idx]:
            image = self.transforms_augmented_subset(image)
        elif self.transforms_basic is not None:
            image = self.transforms_basic(image)
        image = np.array(image)
        label = self.targets[idx]
        sample_this_index = {'image': image, 'target': label}
        for group_id_key in self.group_info.keys():
            sample_this_index[group_id_key] = self.group_info[group_id_key][idx]
        return sample_this_index
def get_transform(dataset_name, augment=False):
    """Build the torchvision preprocessing pipeline for a dataset.

    Parameters
    ----------
    dataset_name : str
        One of 'isic', 'cifar' or 'isic_sonic'; selects the per-channel
        normalization statistics.
    augment : bool, optional
        If True, prepend a random horizontal flip (training-time
        augmentation) to the evaluation pipeline.

    Returns
    -------
    transforms.Compose
        Resize -> CenterCrop -> ToTensor -> Normalize, optionally
        preceded by RandomHorizontalFlip.

    Raises
    ------
    ValueError
        For an unknown ``dataset_name``.  (BUG FIX: the original only
        printed a TODO message and then crashed with a NameError on the
        undefined ``normalization_stats``.)
    """
    resolution = 224
    # Per-channel mean/std presumably computed over each training split —
    # TODO(review): confirm provenance of these constants.
    normalization_by_dataset = {
        'isic': {'mean': (0.7519596, 0.5541971, 0.5523066), 'std': (0.14961188, 0.16191609, 0.1755095)},
        'cifar': {'mean': (0.496887, 0.4964557, 0.4691896), 'std': (0.23818445, 0.23714599, 0.25873092)},
        'isic_sonic': {'mean': (0.71835655, 0.5626882, 0.5254832), 'std': (0.15869816, 0.14007549, 0.1677716)},
    }
    if dataset_name == 'isic':
        print('using normalization data for ISIC without sonic')
    elif dataset_name == 'cifar':
        print('using normalization data for CIFAR4')
    elif dataset_name == 'isic_sonic':
        print('using normalization data for ISIC with sonic')
    else:
        raise ValueError(
            'no normalization stats registered for dataset {0!r}'.format(dataset_name))
    normalization_stats = normalization_by_dataset[dataset_name]
    test_transforms = [
        transforms.Resize(resolution),
        transforms.CenterCrop(resolution),
        transforms.ToTensor(),
        transforms.Normalize(mean=normalization_stats['mean'],
                             std=normalization_stats['std'])
    ]
    if augment:
        # Training-time augmentation: horizontal flip only.
        return transforms.Compose([transforms.RandomHorizontalFlip(), ] + test_transforms)
    return transforms.Compose(test_transforms)
def get_data_loaders(data_dir,
                     csv_fp,
                     image_fp,
                     label_colname,
                     eval_key,
                     dataset_name,
                     all_group_colnames = None,
                     this_group_key = None,
                     sample_by_groups = False,
                     weight_to_eval_set_distribution = True,
                     augment_subsets = False,
                     train_batch_size = 16,
                     test_batch_size = 32,
                     num_workers = 32):
    """Construct the train / train-eval / val / test datasets and loaders.

    Parameters
    ----------
    data_dir, csv_fp, image_fp
        Paths forwarded to ``GroupedDataset``.
    label_colname : str
        Column holding the classification target.
    eval_key : str
        'test' or 'val': which split's group distribution to reweight the
        training sampler towards (only consulted when ``sample_by_groups``
        and ``weight_to_eval_set_distribution`` are both True).
    dataset_name : str
        Passed to ``get_transform`` to pick normalization statistics.
    all_group_colnames : list of str, optional
        Group columns carried through each sample dict.  (Default changed
        from a shared mutable ``[]`` to ``None``; behavior is identical.)
    this_group_key : str, optional
        Group column used for importance sampling / group counts.
    sample_by_groups : bool
        If True, train with a ``WeightedRandomSampler`` over groups.
    weight_to_eval_set_distribution : bool
        Reweight training groups to the eval split's proportions; if
        False, sample every group equally (e.g. for GDRO).
    augment_subsets : bool
        Apply extra augmentation to flagged training instances; mutually
        exclusive with ``sample_by_groups``.
    train_batch_size, test_batch_size, num_workers
        DataLoader settings.  BUG FIX: ``test_batch_size`` was accepted
        but never used; val/test loaders silently ran with
        ``train_batch_size``.

    Returns
    -------
    (train_loader, train_loader_eval, val_loader, test_loader, group_dict)
    """
    if all_group_colnames is None:
        all_group_colnames = []
    # for now, don't augment the basic training transform itself
    transform_basic = get_transform(dataset_name, augment=False)
    # train set will know to look for subsets to augment if transform_augment is not None
    if augment_subsets:
        print('augmenting')
        # can't do both right now
        assert not sample_by_groups, "can't augment and sample by groups rn"
        transform_augment = get_transform(dataset_name, augment=True)
    else:
        transform_augment = None
        print('not augmenting')
    print()
    # get the datasets
    print('data dir: ',data_dir)
    print()
    train_set = GroupedDataset(data_dir,
                               csv_fp,
                               image_fp,
                               split = 'train',
                               label_colname = label_colname,
                               group_colnames = all_group_colnames,
                               this_group_key = this_group_key,
                               transforms_basic = transform_basic,
                               transforms_augmented_subset = transform_augment
                               )
    # un-augmented copy of the training split, for clean evaluation passes
    train_set_eval = GroupedDataset(data_dir,
                                    csv_fp,
                                    image_fp,
                                    split = 'train',
                                    label_colname = label_colname,
                                    group_colnames = all_group_colnames,
                                    this_group_key = this_group_key,
                                    transforms_basic = transform_basic)
    val_set = GroupedDataset(data_dir,
                             csv_fp,
                             image_fp,
                             split = 'val',
                             label_colname = label_colname,
                             group_colnames = all_group_colnames,
                             this_group_key = this_group_key,
                             transforms_basic = transform_basic)
    test_set = GroupedDataset(data_dir,
                              csv_fp,
                              image_fp,
                              split = 'test',
                              label_colname = label_colname,
                              group_colnames = all_group_colnames,
                              this_group_key = this_group_key,
                              transforms_basic = transform_basic)
    if this_group_key is not None:
        print('group_names: ',train_set.group_names)
        print()
    group_dict = {}
    # sample by groups is importance sampling
    if sample_by_groups:
        assert this_group_key is not None
        shuffle_train = False
        print('training set group counts:',train_set.group_counts)
        if weight_to_eval_set_distribution:
            if (eval_key == 'test'):  # we use this case for the subsetting experiment
                print("using TEST to set weights")
                test_fracs = test_set.group_counts / test_set.group_counts.sum()
            elif (eval_key == 'val'):  # we use this case for the HP search experiment
                print("using VAL to set weights")
                test_fracs = val_set.group_counts / val_set.group_counts.sum()
            else:
                # BUG FIX: previously fell through and crashed later with a
                # NameError on test_fracs; fail fast with a clear message.
                raise ValueError(
                    "eval_key must be 'test' or 'val', got {0!r}".format(eval_key))
            train_fracs = train_set.group_counts / train_set.group_counts.sum()
            weights_by_group = test_fracs / train_fracs
        else:
            # if running GDRO, e.g., we want each group to be sampled equally,
            # not according to their test set percentages
            weights_by_group = train_set.group_counts.sum() / train_set.group_counts
        group_labels_by_instance = train_set.groups
        instance_weights = weights_by_group[group_labels_by_instance].type(torch.DoubleTensor)
        print('group training weights:',weights_by_group)
        print()
        train_sampler = WeightedRandomSampler(instance_weights,
                                              len(group_labels_by_instance),
                                              replacement=True)
        group_dict['group_counts_train'] = train_set.group_counts
        group_dict['num_groups'] = len(weights_by_group)
    else:
        shuffle_train = True
        train_sampler = None
    print('train batch size', train_batch_size)
    train_loader = DataLoader(train_set,
                              batch_size = train_batch_size,
                              shuffle = shuffle_train,
                              sampler = train_sampler,
                              num_workers = num_workers)
    # BUG FIX: evaluate the training split on the un-augmented copy;
    # train_set_eval was constructed for exactly this but never used.
    train_loader_eval = DataLoader(train_set_eval,
                                   batch_size = train_batch_size,
                                   shuffle = False,
                                   num_workers = num_workers)
    # BUG FIX: val/test loaders now honor test_batch_size.
    val_loader = DataLoader(val_set,
                            batch_size = test_batch_size,
                            shuffle = False,
                            num_workers = num_workers)
    test_loader = DataLoader(test_set,
                             batch_size = test_batch_size,
                             shuffle = False,
                             num_workers = num_workers)
    return train_loader, train_loader_eval, val_loader, test_loader, group_dict
| 13,976 | 9 | 174 |
a57f663fc645eda54af2f6bb152a3cf3577ccdc4 | 233 | py | Python | Spider/db.py | aspxcor/Ming-Dynasty-Knowledge-Graph | 576cbbd86ca61d415d18bb5d6c60feac56357693 | [
"MIT"
] | 3 | 2021-12-12T12:11:45.000Z | 2022-02-09T03:12:58.000Z | Spider/db.py | aspxcor/Ming-Dynasty-Knowledge-Graph | 576cbbd86ca61d415d18bb5d6c60feac56357693 | [
"MIT"
] | null | null | null | Spider/db.py | aspxcor/Ming-Dynasty-Knowledge-Graph | 576cbbd86ca61d415d18bb5d6c60feac56357693 | [
"MIT"
] | 1 | 2022-02-09T03:13:00.000Z | 2022-02-09T03:13:00.000Z | import pymysql
import dbExe
a="*"
b="kg.validRelationship"
db=dbExe.db()
# db.add(Table="validRelationship",names="(valid)",values=r'("父子4")')
# db.delete(Table="validRelationship",name="valid",value=r'"父子4"')
print(db.select(a,b)) | 23.3 | 69 | 0.703863 | import pymysql
import dbExe
a="*"
b="kg.validRelationship"
db=dbExe.db()
# db.add(Table="validRelationship",names="(valid)",values=r'("父子4")')
# db.delete(Table="validRelationship",name="valid",value=r'"父子4"')
print(db.select(a,b)) | 0 | 0 | 0 |
cda652031409758da610e6b0915e4a658f41312d | 1,794 | py | Python | 2020/01/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | [
"MIT"
] | null | null | null | 2020/01/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | [
"MIT"
] | null | null | null | 2020/01/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | [
"MIT"
] | null | null | null | # Advent of Code 2020
# Day 01
# Author: irobin591
import os
import doctest
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read()
# Prep Input
input_data = list(map(int, input_data.strip().split('\n')))
# print(input_data)
def part1(input_data):
    """
    >>> part1([1721,979,366,299,675,1456])
    514579
    """
    # Two-pointer scan over a sorted copy: raise the low pointer when the
    # pair sum is too small, lower the high pointer when it is too large.
    ordered = sorted(input_data)
    lo, hi = 0, len(ordered) - 1
    while lo <= hi:
        pair_sum = ordered[lo] + ordered[hi]
        if pair_sum == 2020:
            return ordered[lo] * ordered[hi]
        if pair_sum > 2020:
            hi -= 1
        else:
            lo += 1
    # No two entries sum to 2020.
    return None
def part2(input_data):
    """
    >>> part2([1721,979,366,299,675,1456])
    241861950
    """
    # O(n^2) instead of the original O(n^3) triple loop: for a fixed
    # (entry1, entry2) the third value must equal 2020 - entry1 - entry2,
    # so a set membership test replaces the innermost scan.  Iteration
    # order over (entry1, entry2) matches the original, so the returned
    # product is identical.
    candidates = set(input_data)
    for entry1 in input_data:
        for entry2 in input_data:
            entry3 = 2020 - entry1 - entry2
            if entry3 in candidates:
                return entry1 * entry2 * entry3
    return None
if __name__ == "__main__":
    # Verify the doctest examples first, then solve both puzzle parts on
    # the input loaded at import time.
    doctest.testmod()
    print("Part One: {}".format(part1(input_data)))
    print("Part Two: {}".format(part2(input_data)))
pass | 24.916667 | 83 | 0.584727 | # Advent of Code 2020
# Day 01
# Author: irobin591
import os
import doctest
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read()
# Prep Input
input_data = list(map(int, input_data.strip().split('\n')))
# print(input_data)
def part1(input_data):
"""
>>> part1([1721,979,366,299,675,1456])
514579
"""
# Easy nested loop solution:
# for entry1 in input_data:
# for entry2 in input_data:
# if entry1 + entry2 == 2020:
# return entry1 * entry2
# Juggling with numbers
# First: Sort the list
data = input_data.copy()
data.sort()
# Select the smallest and the largest number
left_el = 0
right_el = len(data)-1
# Decrease the largest number and increase the smallest number
while True:
if right_el < left_el:
# We have missed the numbers (or there are none)
return None
el_sum = data[left_el] + data[right_el]
if el_sum == 2020:
return data[left_el] * data[right_el]
if el_sum > 2020:
# Select a smaller number to decrease the sum
right_el -= 1
else:
# Select a bigger number to increase the sum
left_el += 1
def part2(input_data):
"""
>>> part2([1721,979,366,299,675,1456])
241861950
"""
for entry1 in input_data:
for entry2 in input_data:
for entry3 in input_data:
if entry1 + entry2 + entry3 == 2020:
return entry1 * entry2 * entry3
return None
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass | 0 | 0 | 0 |
6d67dce8b62b3d80e3629c6790b99aedc88608e3 | 2,034 | py | Python | tocador mp3 python/tocador_mp3_python.py | Koalapvh13/python_exercises | 5dc12dfa886edaccc54727d1866438f20a83d4d0 | [
"MIT"
] | null | null | null | tocador mp3 python/tocador_mp3_python.py | Koalapvh13/python_exercises | 5dc12dfa886edaccc54727d1866438f20a83d4d0 | [
"MIT"
] | null | null | null | tocador mp3 python/tocador_mp3_python.py | Koalapvh13/python_exercises | 5dc12dfa886edaccc54727d1866438f20a83d4d0 | [
"MIT"
] | null | null | null | import pygame
import os
version = '2.0 (Beta)'
update = 'March 6th, 2018'
playlist()
os.system("pause")
| 30.818182 | 120 | 0.333333 | import pygame
import os
version = '2.0 (Beta)'
update = 'March 6th, 2018'
def playlist():
print(
"+-----------------------------------------------------------------------------------------------------------+")
print(
"| |")
print(
"| KOALA'S JUKEBOX |")
print(
"| |")
print(
"| Version: {} Created by: Matheus Dias Vieira Last Update On: {} |".format(version,
update))
print(
"+-----------------------------------------------------------------------------------------------------------+")
print('''\nPLAYLIST:\n
001- Numb - Linkin Park
002- Sweet Child'O Mine - Guns and Roses
003- Payphone - Maroon 5 ft. Wiz Khalifa\n''')
    # Initialise the pygame mixer and map the user's menu choice to a track;
    # any unrecognised answer restarts the whole menu (recursively).
    pygame.mixer.init()
    music = pygame.mixer.music
    toca = input('Qual Música tocar?[Diga o número]: ')
    if toca == '001':
        music.load('numb.mp3')
        music.play()
    elif toca == '002':
        music.load('sweet_child_o_mine.mp3')
        music.play()
    elif toca == '003':
        music.load('payphone.mp3')
        music.play()
    else:
        playlist()
while True:
esc = input("Queres parar? Y/N: ")
if esc == 'Y':
music.fadeout(2000)
pygame.time.wait(2000)
pygame.quit()
os.system('cls')
playlist()
break
elif esc == 'y':
music.fadeout(2000)
pygame.time.wait(2000)
pygame.quit()
os.system('cls')
playlist()
break
else:
pass
playlist()
os.system("pause")
| 1,904 | 0 | 23 |
b5e3c96d790f5b5539d92549c83a1e60f902dba2 | 6,531 | py | Python | clubhouse/parser.py | j-martin/clubhouse | 45975e337905d701a2d4725133f0f9f85e5da076 | [
"MIT"
] | 2 | 2019-04-12T09:29:06.000Z | 2019-05-03T12:00:03.000Z | clubhouse/parser.py | j-martin/clubhouse | 45975e337905d701a2d4725133f0f9f85e5da076 | [
"MIT"
] | 1 | 2018-11-05T23:39:11.000Z | 2018-11-05T23:39:11.000Z | clubhouse/parser.py | mahmoudimus/clubhouse | adaf97197b557e9cb6bf4eecd2b80a581987c7dc | [
"MIT"
] | 4 | 2018-10-22T23:55:06.000Z | 2019-12-01T03:47:23.000Z | """
Parses https://clubhouse.io/api/rest/v2/ and generates a JSON payload
of their resources that can be used as basis of a client library.
"""
import argparse
import logging
import sys
import re
from typing import Dict
from collections import OrderedDict, deque
from lxml import html
from unidecode import unidecode
from jinja2 import BaseLoader, Environment
import dag
_jinja_env = Environment(loader=BaseLoader(), trim_blocks=True, lstrip_blocks=True)
logger = logging.getLogger(__name__)
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
if __name__ == '__main__':
main()
| 30.661972 | 94 | 0.59409 | """
Parses https://clubhouse.io/api/rest/v2/ and generates a JSON payload
of their resources that can be used as basis of a client library.
"""
import argparse
import logging
import sys
import re
from typing import Dict
from collections import OrderedDict, deque
from lxml import html
from unidecode import unidecode
from jinja2 import BaseLoader, Environment
import dag
_jinja_env = Environment(loader=BaseLoader(), trim_blocks=True, lstrip_blocks=True)
logger = logging.getLogger(__name__)
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def conf_logging(cli_arguments):
    """Attach a stdout handler to the root logger and set its level.

    ``cli_arguments.log_level`` is the case-insensitive name of a
    ``logging`` level (e.g. ``'info'``).
    """
    root_logger = logging.getLogger()
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s'))
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(getattr(logging, cli_arguments.log_level.upper()))
def add_logging_options(options):
    """Register the ``-l/--log-level`` flag on *options* and return it."""
    level_choices = [
        'debug',
        'info',
        'warn',
        'warning',
        'error',
        'critical',
        'fatal',
    ]
    options.add_argument('-l', '--log-level',
                         default='INFO',
                         help='Set the logging level',
                         choices=level_choices)
    return options
def add_options(options):
    """Register the positional input file and the ``-o`` output file on
    *options* and return it; both default to the standard streams."""
    options.add_argument('infile',
                         nargs='?',
                         default=sys.stdin,
                         type=argparse.FileType('r'),
                         help="path to file to parse, defaults to stdin")
    options.add_argument('-o', '--outfile',
                         default=sys.stdout,
                         type=argparse.FileType('w'),
                         help="path to dump the output, defaults to stdout")
    return options
def execute(parser):
    """Wire up the CLI options, then run the scrape -> munge -> render
    pipeline and write the result to the chosen output file."""
    parser = add_options(parser)
    args = parser.parse_args()
    conf_logging(args)
    # parse() walks the lxml tree of the fetched REST-docs page.
    parsed = parse(html.parse(args.infile))
    munged = munge(parsed)
    rendered = build(munged)
    with args.outfile as f:
        f.write(rendered)
def parse(tree):
    """Map each resource name (an <h2> following the "Resources" <h1>) to
    its extracted field table.

    Assumes the i-th <h2> heading pairs with the i-th <table> in document
    order; ``zip`` truncates to the shorter of the two lists.
    """
    resources = tree.xpath('//h1[text()="Resources"]/following-sibling::h2')
    tables = tree.xpath('//h1[text()="Resources"]/following-sibling::table')
    logger.debug('resources: %s', resources)
    parsed = {}
    for resource, rawtable in zip(resources, tables):
        # string(.) flattens the heading's full text content.
        resource_name = resource.xpath('string(.)')
        logger.info('resource: %s', resource_name)
        parsed[resource_name] = extract(rawtable)
    logger.debug(parsed)
    return parsed
def extract(table):
    """Extract ``{field_name: {"type", "description", "args"}}`` from one
    resource <table>.

    Field cells are the <td>s containing a <strong> (the name) plus a
    <span> (the type); the remaining <td>s hold the descriptions.
    """
    headers = table.xpath('thead/tr/th/text()')
    fields = table.xpath('./tbody/tr/td[strong]')
    descriptions = table.xpath('./tbody/tr/td[not(strong)]')
    logger.debug('headers: %s', [h for h in headers])
    extracted_fields = [
        (h.findtext('strong'), h.xpath('string(./span)'))
        for h in fields
    ]
    extracted_descriptions = []
    for h in descriptions:
        # NOTE(review): this round-trips text through windows-1252 ->
        # iso-8859-2 before ASCII-folding with unidecode — presumably to
        # undo smart-quote mojibake in the scraped page; confirm before
        # changing.  Raises TypeError if a description cell has no text.
        asbytes = bytes(h.text, ENCODINGS_WITH_SMART_QUOTES[0])
        extracted_descriptions.append(unidecode(str(asbytes, ENCODINGS_WITH_SMART_QUOTES[2])))
    logger.debug('fields: %s', extracted_fields)
    logger.debug('descriptions: %s', extracted_descriptions)
    rv = {f[0]: {"type": f[1], "description": d, "args": ''}
          for f, d in zip(extracted_fields, extracted_descriptions)}
    logger.debug(rv)
    return rv
def munge(datablob: Dict[str, Dict]) -> Dict[str, Dict]:
    """Normalise field types in-place (nullable, Enum and nested/Array
    types) and return the resources re-ordered so every schema appears
    after the schemas it nests (topological order over a dependency DAG).
    """
    #: searches for data between () or [] .. also matches [)..
    nested = re.compile(r'(?:\[|\()(?P<inside>.+)(?:\]|\))')
    def has_nested(type_):
        # True when the type is an Array or refers to another resource.
        return any(('Array' in type_, type_ in datablob,))
    graph = dag.DAG()
    for resource_name, resource in datablob.items():
        graph.add_node_if_not_exists(resource_name)
        for details in resource.values():
            if 'or null' in details['type']:
                # e.g. "UUID or null" -> base type + marshmallow allow_none
                details['type'] = details['type'].split(' ')[0]
                details['args'] = 'allow_none=True'
            elif 'Enum' in details['type']:
                # Enum(a, b, ...) -> String with a OneOf validator
                choices = nested.search(details['type']).group(1).split(',')
                details['type'] = 'String'
                details['args'] = 'validate=validate.OneOf({0})'.format(
                    ', '.join(['"{0}"'.format(c.strip()) for c in choices])
                )
            if not has_nested(details['type']):
                continue
            fieldtype = details['type']
            logger.info('%s: %s\n%s', resource_name, fieldtype, details)
            if 'Array' in fieldtype:
                details['args'] = ', many=True'
                fieldtype = nested.search(fieldtype).group(1)
                if fieldtype not in datablob:
                    # if the field type is not part of the resources, then
                    # we will use marshmallow default fields
                    details['type'] = 'fields.' + fieldtype
                    continue
            graph.add_node_if_not_exists(fieldtype)
            if fieldtype == resource_name:
                # marshmallow self-nesting schema
                logger.info(
                    'self referential cycle detected for %s on %s',
                    resource_name,
                    fieldtype
                )
                fieldtype = '"self"'
            else:
                logger.info('---- %s: ', fieldtype)
                graph.add_edge(fieldtype, resource_name)
            details['type'] = fieldtype
            details['args'] = ', many=False'
    ob = OrderedDict()
    # Emit resources in dependency order so generated classes are defined
    # before they are referenced.
    for r in graph.topological_sort():
        logger.info(r)
        ob[r] = datablob[r]
    return ob
def build(datablob: Dict[str, Dict]):
    """Render the (topologically ordered) resource dict into marshmallow
    ``Schema`` class definitions using the inline Jinja template."""
    _template = """\
from marshmallow import Schema, fields, pprint, validate
{% for resource_name, resource in resources.items(): %}
class {{ resource_name }}(Schema):
{% for field, details in resource.items() %}
    {{ '#: ' ~ details.description | wordwrap(73) | replace('\n', '\n#: ') | indent }}
    {# parses the types and understands how to map to schemas #}
    {%- if 'many' is in(details.args) %}
    {{ field }} = fields.Nested({{details.type}}{{details.args}})
    {% else %}
    {{ field }} = fields.{{details.type}}({{details.args}})
    {% endif %}
{% endfor %}
{% endfor %}
"""
    rtemplate = _jinja_env.from_string(_template)
    _rendered = rtemplate.render(resources=datablob)
    logger.debug('%s: ', _rendered)
    return _rendered
def main():
    """Command-line entry point: build the option parser and run."""
    cli_parser = argparse.ArgumentParser()
    execute(add_logging_options(cli_parser))
if __name__ == '__main__':
main()
| 5,686 | 0 | 207 |
123426310c510e0cb92a84aaba3b535adbdd3d41 | 202 | py | Python | List_files.py | HawkingLaugh/Whisky-Related-Project | 83fd415f9350eac214315d50387676766019b7f0 | [
"MIT"
] | null | null | null | List_files.py | HawkingLaugh/Whisky-Related-Project | 83fd415f9350eac214315d50387676766019b7f0 | [
"MIT"
] | null | null | null | List_files.py | HawkingLaugh/Whisky-Related-Project | 83fd415f9350eac214315d50387676766019b7f0 | [
"MIT"
] | null | null | null | import pathlib
from os import listdir
from os.path import isfile, join | 28.857143 | 76 | 0.712871 | import pathlib
from os import listdir
from os.path import isfile, join
def get_files(my_path):
    """Return the names of the regular files directly inside *my_path*
    (subdirectories are excluded; order follows ``os.listdir``).

    BUG FIX: the original built the list but never returned it, so the
    function always returned None.
    """
    return [f for f in listdir(path=my_path) if isfile(join(my_path, f))]
dc8143457a2ac5bfe1b4efd724c65cef8d03a12a | 349 | py | Python | app.py | edusan7/cdk-serverless-pipeline-starter-template | 299fa1dffc9b1dc2cf52a6703eb36dd56a3c49db | [
"Apache-2.0"
] | 9 | 2019-10-20T21:02:43.000Z | 2021-02-16T10:19:08.000Z | app.py | edusan7/cdk-serverless-pipeline-starter-template | 299fa1dffc9b1dc2cf52a6703eb36dd56a3c49db | [
"Apache-2.0"
] | null | null | null | app.py | edusan7/cdk-serverless-pipeline-starter-template | 299fa1dffc9b1dc2cf52a6703eb36dd56a3c49db | [
"Apache-2.0"
] | 9 | 2019-10-13T15:39:45.000Z | 2021-05-26T16:38:57.000Z | #!/usr/bin/env python3
from aws_cdk import core
import os
from serverless_pipeline.serverless_pipeline_stack import ServerlessPipelineStack
app = core.App()
ServerlessPipelineStack(app, "serverless-pipeline", env=core.Environment(
account=os.getenv('AWS_CDK_DEFAULT_ACCOUNT'),
region=os.getenv('AWS_CDK_DEFAULT_REGION'),
))
app.synth()
| 21.8125 | 81 | 0.790831 | #!/usr/bin/env python3
from aws_cdk import core
import os
from serverless_pipeline.serverless_pipeline_stack import ServerlessPipelineStack
app = core.App()
ServerlessPipelineStack(app, "serverless-pipeline", env=core.Environment(
account=os.getenv('AWS_CDK_DEFAULT_ACCOUNT'),
region=os.getenv('AWS_CDK_DEFAULT_REGION'),
))
app.synth()
| 0 | 0 | 0 |
afc0c3b7f24a969259ab6ea8035fa90bfaf7cfad | 39 | py | Python | cat/log/__init__.py | e12005490/CAT | f322f885360439327221fb51e23ed42d8fdeda00 | [
"BSD-3-Clause"
] | 2 | 2021-06-23T14:27:27.000Z | 2021-07-04T13:44:03.000Z | cat/log/__init__.py | e12005490/CAT | f322f885360439327221fb51e23ed42d8fdeda00 | [
"BSD-3-Clause"
] | null | null | null | cat/log/__init__.py | e12005490/CAT | f322f885360439327221fb51e23ed42d8fdeda00 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T21:03:32.000Z | 2020-12-02T21:03:32.000Z | from cat.log.log import enable_logging
| 19.5 | 38 | 0.846154 | from cat.log.log import enable_logging
| 0 | 0 | 0 |
4167bbe9554b86bc903c8c9aadc9881a3baf49c5 | 446 | py | Python | pra subir/pythonexercicios/ex42.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | pra subir/pythonexercicios/ex42.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | pra subir/pythonexercicios/ex42.py | daianebandeira88/curso-python | 763f5f36b6d7329549ad861c63acc3c84aade887 | [
"MIT"
] | null | null | null | r1=float(input('primeiro segmento'))
r2=float(input('segundo segmento'))
r3=float(input('terceiro segmento'))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('os segmentos acima podem formar um triangulo ' ,end='')
if r1 == r2 and r2 == r3:
print('EQUILATERO')
elif r1 != r2 != r3 != r1:
print('ESCALENO')
else:
print('ISÓSELES')
else:
print('os segmentos acima nao podem formar um triangulo') | 34.307692 | 66 | 0.616592 | r1=float(input('primeiro segmento'))
r2=float(input('segundo segmento'))
r3=float(input('terceiro segmento'))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print('os segmentos acima podem formar um triangulo ' ,end='')
if r1 == r2 and r2 == r3:
print('EQUILATERO')
elif r1 != r2 != r3 != r1:
print('ESCALENO')
else:
print('ISÓSELES')
else:
print('os segmentos acima nao podem formar um triangulo') | 0 | 0 | 0 |
05a2aabcad9d2b7c91c1ea7c34e4db3da03cbad5 | 1,322 | py | Python | Python3/1170.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | Python3/1170.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | Python3/1170.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 72 ms submission
__________________________________________________________________________________________________
sample 136 ms submission
__________________________________________________________________________________________________
| 48.962963 | 107 | 0.688351 | __________________________________________________________________________________________________
sample 72 ms submission
class Solution:
    def numSmallerByLeadingCount(self, queries: List[str], words: List[str]) -> List[int]:
        """For each query, count the words whose f-value (frequency of the
        lexicographically smallest character) is strictly greater than the
        query's f-value."""
        def f(s):
            # Frequency of the smallest character in s.
            return s.count(min(s))
        sorted_word_values = sorted(f(w) for w in words)
        total = len(words)
        answer = []
        for q in queries:
            # bisect-right gives how many word values are <= f(q); the rest
            # are strictly greater.
            answer.append(total - bisect.bisect(sorted_word_values, f(q)))
        return answer
__________________________________________________________________________________________________
sample 136 ms submission
class Solution:
    def numSmallerByLeadingCount(self, queries: List[str], words: List[str]) -> List[int]:
        # counter[v]: number of words whose f-value (count of the
        # lexicographically smallest character) equals v.  save is a
        # suffix-count table, seeded with a [+inf, 0] sentinel so the later
        # bisect never falls off the end.
        counter, save, ans = collections.defaultdict(int), [[float("inf"), 0]], []
        for w in words:
            cur = collections.Counter(w)
            counter[cur[min(cur, key = lambda x: ord(x))]] += 1
        # Walk f-values high -> low accumulating counts, then reverse so
        # save is ascending: save[i] = [value, #words with f >= value].
        for c in sorted(counter, reverse = True):
            save.append([c, save[-1][1] + counter[c]])
        save.reverse()
        for q in queries:
            cur = collections.Counter(q)
            # Locate the first table entry with value > f(q); its stored
            # count is the number of strictly-greater words.
            ans.append(save[bisect.bisect(save, [cur[min(cur, key = lambda x: ord(x))], float("inf")])][1])
        return ans
__________________________________________________________________________________________________
| 892 | -12 | 96 |
45df30d3c79310cce01ab793fba5834771916f6a | 362 | py | Python | kb/admin.py | movermeyer/django-kb | 3c3246761f4f6d60221049f46ce9702fd56384a8 | [
"BSD-3-Clause"
] | 7 | 2015-04-17T12:01:29.000Z | 2021-06-21T01:54:15.000Z | kb/admin.py | movermeyer/django-kb | 3c3246761f4f6d60221049f46ce9702fd56384a8 | [
"BSD-3-Clause"
] | null | null | null | kb/admin.py | movermeyer/django-kb | 3c3246761f4f6d60221049f46ce9702fd56384a8 | [
"BSD-3-Clause"
] | 3 | 2015-04-17T16:21:42.000Z | 2018-06-06T13:34:03.000Z | from __future__ import unicode_literals
from django.contrib import admin
from .article.admin import ArticleAdmin
from .category.admin import CategoryAdmin
from .vote.admin import VoteAdmin
from .models import Article, Category, Vote
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Vote, VoteAdmin)
| 25.857143 | 44 | 0.831492 | from __future__ import unicode_literals
from django.contrib import admin
from .article.admin import ArticleAdmin
from .category.admin import CategoryAdmin
from .vote.admin import VoteAdmin
from .models import Article, Category, Vote
admin.site.register(Article, ArticleAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Vote, VoteAdmin)
| 0 | 0 | 0 |
c7d46243836354e254c8dae539d0806a6b6d09a9 | 1,989 | py | Python | bin/nback_derivative.py | willstrawson/adie_ongoingthoughts | a9ba9e8e4326fd528db72eac8ed5cc932fc490b5 | [
"MIT"
] | 1 | 2021-06-04T15:39:14.000Z | 2021-06-04T15:39:14.000Z | bin/nback_derivative.py | willstrawson/adie_ongoingthoughts | a9ba9e8e4326fd528db72eac8ed5cc932fc490b5 | [
"MIT"
] | 27 | 2020-11-30T11:13:00.000Z | 2021-05-02T07:58:37.000Z | bin/nback_derivative.py | willstrawson/adie_ongoingthoughts | a9ba9e8e4326fd528db72eac8ed5cc932fc490b5 | [
"MIT"
] | 4 | 2020-11-27T13:49:31.000Z | 2021-04-28T15:51:09.000Z | from os import sep
from pathlib import Path
import json
import pandas as pd
from adie.nback import (
read_log,
compile_performance,
compile_probes,
compile_ses,
)
bids_dir = Path("__file__").parent # this file is in BIDS_data/code
sub_probes = []
sub_beh = []
sub_ses = []
for p in bids_dir.glob("sub-*/**/*task-nback*.tsv"):
# extract data
data = read_log(p)
sub_probes.append(compile_probes(data))
sub_beh.append(compile_performance(data))
sub_ses.append(compile_ses(data))
master_probes = pd.concat(sub_probes, axis=0)
master_beh = pd.concat(sub_beh, axis=0)
master_ses = pd.concat(sub_ses, axis=0)
master_probes.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_probes.tsv",
sep="\t",
)
master_beh.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_performance.tsv",
sep="\t",
index=False,
)
master_ses.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_sessionthough.tsv",
sep="\t",
)
# get the full question of thought probe
probe_json = bids_dir / "derivatives" / "nback_derivatives" / "probes.json"
session_json = (
bids_dir / "derivatives" / "nback_derivatives" / "sessionend.json"
)
if not probe_json.is_file:
es_probe = data.query('stimType == "MWQ"')
es_probe = es_probe.reset_index().loc[:13, ["mwType", "stimPic", "Ans"]]
probes = {}
for a, full in es_probe.set_index(["mwType"]).iterrows():
probes[a] = {"question": full["stimPic"], "scale": full["Ans"]}
with open(probe_json) as f:
json.dump(probes, f, indent=2)
if not session_json.is_file:
es_end = es_end.reset_index().loc[:, ["mwType", "stimPic", "Ans"]]
end = {}
for a, full in es_end.set_index(["mwType"]).iterrows():
end[a] = {"question": full["stimPic"], "scale": full["Ans"]}
with open(session_json) as f:
json.dump(end, f, indent=2)
| 25.177215 | 76 | 0.65812 | from os import sep
from pathlib import Path
import json
import pandas as pd
from adie.nback import (
read_log,
compile_performance,
compile_probes,
compile_ses,
)
bids_dir = Path("__file__").parent # this file is in BIDS_data/code
sub_probes = []
sub_beh = []
sub_ses = []
for p in bids_dir.glob("sub-*/**/*task-nback*.tsv"):
# extract data
data = read_log(p)
sub_probes.append(compile_probes(data))
sub_beh.append(compile_performance(data))
sub_ses.append(compile_ses(data))
master_probes = pd.concat(sub_probes, axis=0)
master_beh = pd.concat(sub_beh, axis=0)
master_ses = pd.concat(sub_ses, axis=0)
master_probes.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_probes.tsv",
sep="\t",
)
master_beh.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_performance.tsv",
sep="\t",
index=False,
)
master_ses.to_csv(
bids_dir
/ "derivatives"
/ "nback_derivatives"
/ "task-nbackmindwandering_sessionthough.tsv",
sep="\t",
)
# get the full question of thought probe
probe_json = bids_dir / "derivatives" / "nback_derivatives" / "probes.json"
session_json = (
bids_dir / "derivatives" / "nback_derivatives" / "sessionend.json"
)
# Dump the full thought-probe questions/scales once, if not already on disk.
# BUG FIX: Path.is_file is a *method*; the original `not probe_json.is_file`
# tested the bound-method object (always truthy), so both branches below
# were unreachable.
if not probe_json.is_file():
    es_probe = data.query('stimType == "MWQ"')
    es_probe = es_probe.reset_index().loc[:13, ["mwType", "stimPic", "Ans"]]
    probes = {}
    for a, full in es_probe.set_index(["mwType"]).iterrows():
        probes[a] = {"question": full["stimPic"], "scale": full["Ans"]}
    # BUG FIX: the file must be opened for writing before json.dump.
    with open(probe_json, "w") as f:
        json.dump(probes, f, indent=2)
if not session_json.is_file():
    # NOTE(review): `es_end` is referenced before it is ever assigned, so
    # this branch raises NameError as written; it presumably needs an
    # `es_end = data.query(...)` selection first — flagged, not guessed.
    es_end = es_end.reset_index().loc[:, ["mwType", "stimPic", "Ans"]]
    end = {}
    for a, full in es_end.set_index(["mwType"]).iterrows():
        end[a] = {"question": full["stimPic"], "scale": full["Ans"]}
    with open(session_json, "w") as f:
        json.dump(end, f, indent=2)
| 0 | 0 | 0 |
4a707d2752561152bbc9cff59f79e54377f195b6 | 17,209 | py | Python | lib/mk/autoconf/make_example_metamakefiles.py | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | [
"BSD-3-Clause-LBNL"
] | 8 | 2019-08-07T12:34:08.000Z | 2021-07-16T01:12:57.000Z | lib/mk/autoconf/make_example_metamakefiles.py | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-01-07T11:26:51.000Z | 2021-01-07T11:26:51.000Z | lib/mk/autoconf/make_example_metamakefiles.py | rmrsk/Chombo-3.3 | f2119e396460c1bb19638effd55eb71c2b35119e | [
"BSD-3-Clause-LBNL"
] | 6 | 2020-03-08T18:17:48.000Z | 2021-12-25T15:11:08.000Z | """
This script attempts to generate a complete autoconf/automake/libtool-based
build system for any project that wants to use the Chombo library. To use it,
type
python make_example_metamakefiles.py <rootdir>
where rootdir is the root directory of your project.
Your project can contain C++ sources (with suffixes .cpp and .H) and
Chombo-Fortran sources (suffix .ChF and .fh). The simplest sort of
project is a single directory with a single .cpp file. But this
script can handle more complicated projects as well -- projects
distributed over several directories, like many of those under
Chombo's example directory. Moreover, the build system we generate is
good for "make dist" as well as for out-of-source builds.
What we can't handle, though, are cases of:
1. Multiple use of the same filename, e.g. dirA/Foo.H and dirB/Foo.H.
2. Circular dependencies among directories, e.g. files in directory A
#include'ing files in directory B and vice versa.
3. Multiple definitions of a function, class, etc.
4. Pretty much anything weirder than 1-3 above.
5. Many cases where this script guesses wrong about the order in which
directories need to be built -- see below.
If a directory contains a file or files with a "main(int argc...)" file,
we try to build an executable for each such file, and all other .cpp and .ChF
files in the same directory are compiled, and their .o files linked into
each executable.
If a directory does not contain any files with "main(int argc...)", we
roll all the .cpp and .ChF files in that directory into a library. All
such libraries are linked into the executables mentioned in the paragraph
just above.
Coming now to #5 above: if we guess wrong about the order in which
directories need to be built, you will need to go into some of the
Makefile.am files and change the line that begins with "SUBDIRS". And
then run the bootstrap script, followed by configure.
You will know the order is wrong if compilation fails because one of
your header files could not be found. This will happen if a file in
directory A #include's a header from directory B -- counting on an
appropriate -I flag rather than specifying the relative path in the
#include directive -- and SUBDIRS lists directory A ahead of directory
B.
Another way SUBDIRS order will cause a build to fail is if the library
directories (the ones without main() functions) are not all built
before the directories that do have main() functions. Once again, you
can fix this by changing SUBDIRS in the appropriate Makefile.am, and then
rerun bootstrap and configure.
A more intelligent program than this one would minimize such
subdirectory-order problems; someday perhaps this program will be like
that. In the meantime, there are a few rules of thumb you can follow,
in the naming of your project's directories, that will increase the
chance of this script giving you a working build system:
1. Have just one library directory (i.e. a directory with no main()
functions), call it "src", and put it right under the root of
your tree.
2. If you must have more than one library directory, call the others
src<something> (e.g. srcBob, srcEdna) and make sure they don't
#include one another's headers (but they can #include headers from
the src directory).
"""
import sys
import os
import re
import glob
import string
def hasMainFunc( filename ):
    """
    Returns True if file contains something that looks like a main() or
    main( int argc, char argv ) function.
    """
    # The parentheses are escaped so they match literally.  Unescaped (as in
    # the original '.*main(.*argc.*argv).*') they form a regex *group*, so any
    # line merely mentioning "main" then "argc" then "argv" would match, even
    # outside a function signature.
    main_regex = re.compile(r'.*main\s*\(.*argc.*argv.*\)')
    f = open(filename)
    try:
        for line in f.readlines():
            if main_regex.search( line ):
                return True
        return False
    finally:
        # Close the handle instead of leaking it (the original never closed).
        f.close()
def describeDirStructure( extra, dirname, files ):
    """
    Walk the tree under arg rootdir and classify the directories as to
    whether they are source (used for building a library) or exec (contain
    a cpp file with main() function in it). A directory that has both is
    not considered a library directory; the source files that don't have
    a main() are simply considered binSOURCE's for the files that do.
    """
    # NOTE(review): Python 2 idioms throughout -- filter() returning a list,
    # and integer division in depth=len(extra.indent)/2.  Under Python 3 the
    # bool(filter(...)) tests below would always be True.
    bname = os.path.basename( dirname )
    if bname == 'CVS':
        return  # never descend into CVS bookkeeping directories
    subdirs = filter( lambda f: os.path.isdir(dirname+'/'+f), files )
    has_cpp = bool(filter( lambda f: f.find('.cpp') != -1, files ))
    has_ChF = bool(filter( lambda f: f.find('.ChF') != -1, files ))
    has_lib_src=False
    has_lib_cpp=False
    has_main_cpp=False
    main_cpps = []            # cpp files here that define a main()
    nonmain_nonlib_cpps = [] # cpp's in dir that has a main() cpp.
    lib_cpps = []             # cpp files destined for a convenience library
    lib_ChFs = []             # Chombo-Fortran sources in this directory
    if has_cpp:
        # First pass: find the cpp files that contain a main() function.
        for cpp in glob.glob( dirname + '/*.cpp' ):
            if hasMainFunc( cpp ):
                has_main_cpp = True
                main_cpps.append( os.path.basename(cpp) )
        # Second pass: the remaining cpp files either support the mains
        # (same directory) or, if no main exists here, form a library.
        for cpp in glob.glob( dirname + '/*.cpp' ):
            if not hasMainFunc( cpp ):
                if has_main_cpp:
                    nonmain_nonlib_cpps.append( os.path.basename(cpp) )
                else:
                    has_lib_cpp = True
                    lib_cpps.append( os.path.basename(cpp) )
    if has_lib_cpp:
        # Record the libtool library this directory will produce.
        extra.all_lib_names.append( makeLTLibName( dirname ) )
    if has_ChF:
        for chf in glob.glob( dirname + '/*.ChF' ):
            lib_ChFs.append( os.path.basename( chf ) )
    if has_main_cpp:
        extra.main_dirs.append( bname )
    if has_lib_cpp or has_ChF:
        has_lib_src = True
        extra.lib_dirs.append( bname )
    # Recurse one level at a time; the child accumulator shares all_lib_names
    # with the parent so library names collect globally across the walk.
    # (Two spaces per level: depth below is computed as len(indent)/2.)
    subdir_extra = WalkExtra( indent = extra.indent + '  ',
                              all_lib_names = extra.all_lib_names )
    for sd in subdirs:
        if sd == 'CVS': continue
        #print extra.indent, "Entering subdir", dirname+'/'+sd, "..."
        walk1Deep( dirname+'/'+sd, describeDirStructure, subdir_extra )
    if bool(subdir_extra.lib_dirs) or bool(subdir_extra.main_dirs):
        #print extra.indent, "lib subdirs of", bname, ":", subdir_extra.lib_dirs
        #print extra.indent, "main subdirs of", bname, ":", subdir_extra.main_dirs
        if not bname in extra.lib_dirs: # Source code >1 level down
            extra.lib_dirs.append( bname )
    # Emit a Makefile.am whenever this directory (or one below it)
    # contributes any sources or buildable subdirectories.
    if( len(main_cpps) + len(lib_cpps) + len(nonmain_nonlib_cpps) + len(lib_ChFs)
        + len(subdir_extra.lib_dirs) + len(subdir_extra.main_dirs) > 0):
        makeMakefileAm( dirname, subdir_extra.lib_dirs + subdir_extra.main_dirs,
                        main_cpps, lib_cpps, nonmain_nonlib_cpps, lib_ChFs,
                        depth=len(extra.indent)/2 )
def fixupMakefileAms( all_lib_names, dirname, files ):
    """
    Every time you find a Makefile.am, look for "bin_PROGRAMS". If it's there,
    then for every listed program, make an LDADD line that lists arg
    all_lib_names.

    Visitor signature (extra, dirname, files) as used by os.path.walk:
        all_lib_names: libtool library names to link into every program
        dirname:       directory currently being visited
        files:         directory entries of dirname
    """
    if 'Makefile.am' in files:
        ro = open( dirname + '/Makefile.am' )
        # Substring test done inline: this copy called a helper named grep()
        # that is not defined anywhere in the module, so reaching this code
        # raised NameError at runtime.
        bin_progs_line = [line for line in ro.readlines()
                          if 'bin_PROGRAMS' in line]
        ro.close()  # close promptly instead of leaking the handle
        if len(bin_progs_line) > 0:
            m = open( dirname + '/Makefile.am', 'a' )
            # Tokens after 'bin_PROGRAMS =' are the program names.
            for bin in bin_progs_line[0].split()[2:]:
                m.write( bin + '_LDADD = -L$(CHOMBO_INSTALLDIR)/lib/Chombo $(LIBSRC_LIBS) ' )
                for lib in all_lib_names:
                    m.write( '-l' + lib + ' ' )
                m.write( '-lg2c' )
                m.write( '\n' )
            m.close()
def makeConfigureIn( topdir ):
    """
    Generate the configure.in, starting from a template and just adding
    AC_CONFIG_FILES and AC_OUTPUT lines.
    It's assumed this script is in the same directory as configure.pre; they should
    both be at the root of the Chombo source tree, and in share/Chombo in any
    Chombo install tree.
    """
    confpre = os.path.dirname(sys.argv[0]) + '/../../../configure.pre'
    # NOTE(review): a leading '/' is stripped, turning an absolute path into a
    # cwd-relative one -- presumably tied to the working-directory requirement
    # stated in the usage message; confirm before changing.
    if confpre[0] == '/':
        confpre = confpre[1:]
    infile = open( confpre )
    outfile = open( topdir + '/configure.in', 'w' )
    in_lines = infile.readlines()
    curline = 0
    # Copy template lines verbatim up to and including the AC_INIT line.
    while True: # No do...while in Python.
        outfile.write( in_lines[curline] )
        curline += 1
        if in_lines[curline-1][0:7] == "AC_INIT":
            break
    #
    # Users of the configure.in we're writing will need to tell it where their
    # Chombo install tree is. That's where Make.defs and transformation scripts are.
    #
    outfile.write( 'if test x$CHOMBO_INSTALLDIR = x ; then\n' )
    outfile.write( ' echo " no CHOMBO_INSTALLDIR" \n' )
    outfile.write( 'fi\n' )
    outfile.write( 'if test ! -f $CHOMBO_INSTALLDIR/share/Chombo/Make.defs ; then\n' )
    outfile.write( ' echo " no $CHOMBO_INSTALLDIR/share/Chombo/Make.defs" \n' )
    outfile.write( 'fi\n' )
    outfile.write( 'if test x$CHOMBO_INSTALLDIR = x -o ! -f $CHOMBO_INSTALLDIR/share/Chombo/Make.defs ; then\n' )
    outfile.write( ' echo "*****************************************" \n' )
    outfile.write( ' echo "Error: you must pass configure a definition of CHOMBO_INSTALLDIR"\n' )
    outfile.write( ' echo "and it must indicate the root of your Chombo install tree" \n' )
    outfile.write( ' echo "e.g. \'./configure CHOMBO_INSTALLDIR=\$HOME/Chombo/install\'"\n' )
    outfile.write( ' echo ""\n' )
    outfile.write( ' echo "If you think this message is in error, check that under your"\n')
    outfile.write( ' echo "CHOMBO_INSTALLDIR you have a file called Make.defs. If you"\n' )
    outfile.write( ' echo "do not (but, say, you do seem to have some of the libraries"\n')
    outfile.write( ' echo "and header files), then it is possible your Chombo build just"\n' )
    outfile.write( ' echo "did not run to completion."\n' )
    outfile.write( ' echo "*****************************************" \n' )
    outfile.write( ' exit 1 \n' )
    outfile.write( 'fi\n' )
    #
    # Substitute something for the project name.
    #
    while in_lines[curline][0:16] != 'AM_INIT_AUTOMAKE':
        outfile.write( in_lines[curline] )
        curline += 1
    outfile.write( 'AM_INIT_AUTOMAKE('+os.path.basename(topdir)+', 0.1.0 )\n' )
    curline += 1  # skip the template's own AM_INIT_AUTOMAKE line
    #
    # Throw away configure.pre lines that control what is and isn't in a
    # "small build".
    #
    for line in in_lines[curline:]:
        if line[0:11] == "#SMALLBUILD" or line[0:12] == "#!SMALLBUILD" :
            continue
        outfile.write( line )
    #
    # Write out the paths to the Makefiles we want generated.
    #
    outfile.write( 'AC_CONFIG_FILES(\n' )
    makefile_ams = findAllMakefileAms( topdir )
    for m in makefile_ams:
        outfile.write( ' ' + m + '\n' )
    outfile.write( ')\n' )
    outfile.write( 'AC_OUTPUT\n' )
# Command-line driver: validate usage, walk the example tree emitting
# Makefile.am's, then generate configure.in and the helper scripts.
# NOTE(review): Python 2 only (print statement, string.rstrip, os.path.walk).
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.stderr.write( "**********************************************************\n")
        sys.stderr.write( "Usage: python make_example_metamakefiles.py example_dir \n")
        sys.stderr.write( " where example_dir is the name of the directory in which \n")
        sys.stderr.write( " you want to generate an autoconf build system.\n")
        sys.stderr.write( "\n" )
        sys.stderr.write( "It's important that when you run this script, your working\n")
        sys.stderr.write( "directory be one above example_dir.\n")
        sys.stderr.write( "**********************************************************\n")
        sys.exit(1)
    # Normalize away any trailing '/' so path concatenation below is clean.
    topdir = string.rstrip( sys.argv[1], '/' )
    chombodir = os.path.dirname(sys.argv[0]) + '/../../..'
    print "chombodir=", chombodir
    walk_extra = WalkExtra( indent='', all_lib_names=[] )
    walk1Deep( topdir, describeDirStructure, walk_extra )
    # Now that you know the full set of LT libraries, fix up the Makefile.am's
    # of the executables, so they all link to all those LT libraries.
    os.path.walk( topdir, fixupMakefileAms, walk_extra.all_lib_names )
    # Generate the configure.in, starting from a template and just adding
    # AC_CONFIG_FILES and AC_OUTPUT lines.
    makeConfigureIn( topdir )
    assert( os.path.exists( chombodir + '/lib/mk/autoconf/Automake.rules' ) )
    # NOTE(review): user-supplied paths are interpolated into shell commands
    # below; paths containing spaces or shell metacharacters will break.
    os.system( "sed 's/makefile\.anag//' " + chombodir + "/lib/mk/autoconf/Automake.rules > "
               + topdir + "/Automake.rules" )
    os.system( "cp " + chombodir + "/lib/mk/autoconf/bootstrap-generated-example.sh "
               + topdir + "/bootstrap\n" )
    os.system( "cp " + chombodir + "/lib/mk/autoconf/zap-generated-example.sh "
               + topdir + "/zap\n" )
    # Now that you've created configure.in and all the Makefile.am's, run
    # GNU autotools on them.
    # os.system( 'cd ' + topdir + '; ./bootstrap' )
| 41.46747 | 113 | 0.615085 | """
This script attempts to generate a complete autoconf/automake/libtool-based
build system for any project that wants to use the Chombo library. To use it,
type
python make_example_metamakefiles.py <rootdir>
where rootdir is the root directory of your project.
Your project can contain C++ sources (with suffixes .cpp and .H) and
Chombo-Fortran sources (suffix .ChF and .fh). The simplest sort of
project is a single directory with a single .cpp file. But this
script can handle more complicated projects as well -- projects
distributed over several directories, like many of those under
Chombo's example directory. Moreover, the build system we generate is
good for "make dist" as well as for out-of-source builds.
What we can't handle, though, are cases of:
1. Multiple use of the same filename, e.g. dirA/Foo.H and dirB/Foo.H.
2. Circular dependencies among directories, e.g. files in directory A
#include'ing files in directory B and vice versa.
3. Multiple definitions of a function, class, etc.
4. Pretty much anything weirder than 1-3 above.
5. Many cases where this script guesses wrong about the order in which
directories need to be built -- see below.
If a directory contains a file or files with a "main(int argc...)" file,
we try to build an executable for each such file, and all other .cpp and .ChF
files in the same directory are compiled, and their .o files linked into
each executable.
If a directory does not contain any files with "main(int argc...)", we
roll all the .cpp and .ChF files in that directory into a library. All
such libraries are linked into the executables mentioned in the paragraph
just above.
Coming now to #5 above: if we guess wrong about the order in which
directories need to be built, you will need to go into some of the
Makefile.am files and change the line that begins with "SUBDIRS". And
then run the bootstrap script, followed by configure.
You will know the order is wrong if compilation fails because one of
your header files could not be found. This will happen if a file in
directory A #include's a header from directory B -- counting on an
appropriate -I flag rather than specifying the relative path in the
#include directive -- and SUBDIRS lists directory A ahead of directory
B.
Another way SUBDIRS order will cause a build to fail is if the library
directories (the ones without main() functions) are not all built
before the directories that do have main() functions. Once again, you
can fix this by changing SUBDIRS in the appropriate Makefile.am, and then
rerun bootstrap and configure.
A more intelligent program than this one would minimize such
subdirectory-order problems; someday perhaps this program will be like
that. In the meantime, there are a few rules of thumb you can follow,
in the naming of your project's directories, that will increase the
chance of this script giving you a working build system:
1. Have just one library directory (i.e. a directory with no main()
functions), call it "src", and put it right under the root of
your tree.
2. If you must have more than one library directory, call the others
src<something> (e.g. srcBob, srcEdna) and make sure they don't
#include one another's headers (but they can #include headers from
the src directory).
"""
import sys
import os
import re
import glob
import string
class WalkExtra:
    """
    Mutable accumulator threaded through the directory walk: per-directory
    lib/main subdirectory lists, the current tree-depth indent string, and
    the walk-wide list of libtool library names.
    """
    def __init__(self, indent='', all_lib_names=None ):
        # A mutable default argument ([]) would be shared by every instance
        # constructed without an explicit list; create a fresh list instead.
        # Callers that deliberately share a list (the recursive walk does,
        # so library names accumulate globally) still pass it explicitly.
        if all_lib_names is None:
            all_lib_names = []
        self.lib_dirs = []    # subdirs holding library (no-main) sources
        self.main_dirs = []   # subdirs holding a cpp with main()
        self.indent = indent  # two spaces per tree level
        self.all_lib_names = all_lib_names
def walk1Deep( dirname, visit_func, extra_stuff ):
    """
    Visit a single directory (no recursion): call
    visit_func(extra_stuff, dirname, <directory entries>).
    """
    # Direct call instead of the long-deprecated apply() builtin (removed in
    # Python 3); semantics are identical.
    visit_func( extra_stuff, dirname, os.listdir(dirname) )
def hasMainFunc( filename ):
    """
    Returns True if file contains something that looks like a main() or
    main( int argc, char argv ) function.
    """
    # Escaped parentheses match literally.  Unescaped (as in the original
    # '.*main(.*argc.*argv).*') they form a regex *group*, so any line merely
    # mentioning "main" then "argc" then "argv" would match, even in a
    # comment far from a function signature.
    main_regex = re.compile(r'.*main\s*\(.*argc.*argv.*\)')
    f = open(filename)
    try:
        for line in f.readlines():
            if main_regex.search( line ):
                return True
        return False
    finally:
        # Close the handle instead of leaking it (the original never closed).
        f.close()
def makeMakefileAm( dirname, subdirs, main_cpps, lib_cpps, nonmain_nonlib_cpps,
                    lib_ChFs, depth ):
    """
    Write dirname/Makefile.am for one directory: a SUBDIRS line (src*
    directories first), Chombo-Fortran header/source variables, one
    bin_PROGRAMS entry per main() cpp, a libtool convenience library for the
    main-less cpp files, and headers_HEADERS entries for .H/.fh files.
    'depth' is the directory's distance from the project root and is used to
    locate the shared Automake.rules via '../' repetition.
    """
    # NOTE(review): Python 2 constructs throughout -- cmp(), list.sort(cmpfunc),
    # dict.keys() used as a sortable list, and string.join().
    makefile_am = open( dirname + '/Makefile.am', 'w' )
    # Directories names "src" or "src<something>" are good bets for
    # example-library code and should therefore be built first.
    if len(subdirs) > 0:
        # Comparator: 'src' first, then any 'src*', then alphabetical.
        def mysort(a,b):
            if a == 'src':
                return -1
            elif b == 'src':
                return 1
            elif a[:3] == 'src':
                return -1
            elif b[:3] == 'src':
                return 1
            return cmp(a,b)
        ordered_subdirs = subdirs[:]
        uniqer = {} # To remove duplicates...
        for s in ordered_subdirs : uniqer[s] = 0
        ordered_subdirs = uniqer.keys()
        ordered_subdirs.sort(mysort)
        makefile_am.write( 'SUBDIRS = ' + string.join( ordered_subdirs ) + '\n' )
    if len(lib_ChFs) + len(lib_cpps) + len(main_cpps) > 0:
        # Pull in the shared rules from the project root ('depth' levels up).
        makefile_am.write( 'include ' + '../'*depth + 'Automake.rules\n' )
    if len(lib_ChFs) > 0:
        # Chombo-Fortran: each Foo.ChF yields a generated Foo_F.H and Foo.f.
        makefile_am.write( 'nodist_fort_HEADERS = ' )
        for f in lib_ChFs:
            bfname = f[:f.index('.ChF')]
            makefile_am.write( '\\\n ' + bfname + '_F.H' )
        makefile_am.write( '\n' )
        makefile_am.write( 'fortdir = $(pkgincludedir)\n' )
        makefile_am.write( 'GENERATED_FORTRAN = ' )
        for f in lib_ChFs:
            bfname = f[:f.index('.ChF')]
            makefile_am.write( '\\\n ' + bfname + '.f' )
        makefile_am.write( '\n' )
        makefile_am.write( 'EXTRA_DIST += *.ChF\n' )
    if len(main_cpps) > 0:
        # One program per main() cpp; every non-main cpp in the same
        # directory is compiled into each program's sources.
        makefile_am.write( 'bin_PROGRAMS = ' )
        for f in main_cpps:
            bfname = f[:f.index('.cpp')]
            makefile_am.write( ' ' + bfname )
        makefile_am.write('\n')
        for f in main_cpps:
            bfname = f[:f.index('.cpp')]
            makefile_am.write( bfname + '_SOURCES = ' )
            if len(lib_ChFs) > 0:
                makefile_am.write( '$(GENERATED_FORTRAN) ' )
            makefile_am.write( bfname + '.cpp' )
            for g in nonmain_nonlib_cpps:
                bgname = g[:g.index('.cpp')]
                makefile_am.write( ' ' + bgname + '.cpp' )
            makefile_am.write( '\n' )
        makefile_am.write( 'AM_LDFLAGS += -L$(pkglibdir) \n' )
    if len(lib_cpps) > 0:
        # No main() here: roll the cpp (and generated Fortran) files into a
        # libtool library named after the directory path.
        normalized_dirname = makeLTLibName( dirname )
        makefile_am.write( normalized_dirname + '_LTLIBRARIES = '
                           + 'lib' + normalized_dirname + '.la\n' )
        makefile_am.write( 'nodist_lib' + normalized_dirname + '_la_SOURCES = ' )
        if len(lib_ChFs) > 0:
            makefile_am.write( '$(GENERATED_FORTRAN)' )
        for f in lib_cpps:
            makefile_am.write( '\\\n ' + f )
        makefile_am.write( '\n' )
        makefile_am.write( normalized_dirname + 'dir = $(pkglibdir)\n' )
        makefile_am.write( 'EXTRA_DIST += *.cpp\n' )
    if (len(lib_cpps) > 0) and (len(main_cpps) > 0):
        sys.stderr.write( "Warning: directory " + dirname + " contains both "
                          + "cpp files with main() and cpp files without main()\n" )
    # Install any headers found alongside the sources.
    has_H = False
    if glob.glob( dirname + '/*.H' ) != []:
        has_H = True
        makefile_am.write( 'headers_HEADERS = $(srcdir)/*.H\n' )
        makefile_am.write( 'headersdir = $(pkgincludedir)\n' )
    if glob.glob( dirname + '/*.fh' ) != []:
        if has_H:
            makefile_am.write( 'headers_HEADERS += $(srcdir)/*.fh\n' )
        else:
            makefile_am.write( 'headers_HEADERS = $(srcdir)/*.fh\n' )
            makefile_am.write( 'headersdir = $(pkgincludedir)\n' )
def makeLTLibName( dirname ):
    """
    Derive a libtool library name from a directory path: '/' becomes '_'
    and leading '.'/'_' characters are stripped, so './src/foo' maps to
    'src_foo'.
    """
    # str.lstrip(chars) is the direct, Python-2-and-3 form of the
    # Python-2-only string.lstrip(s, chars) module function.
    return dirname.replace( '/', '_' ).lstrip( '._' )
def describeDirStructure( extra, dirname, files ):
    """
    Walk the tree under arg rootdir and classify the directories as to
    whether they are source (used for building a library) or exec (contain
    a cpp file with main() function in it). A directory that has both is
    not considered a library directory; the source files that don't have
    a main() are simply considered binSOURCE's for the files that do.
    """
    # NOTE(review): Python 2 idioms throughout -- filter() returning a list,
    # and integer division in depth=len(extra.indent)/2.  Under Python 3 the
    # bool(filter(...)) tests below would always be True.
    bname = os.path.basename( dirname )
    if bname == 'CVS':
        return  # never descend into CVS bookkeeping directories
    subdirs = filter( lambda f: os.path.isdir(dirname+'/'+f), files )
    has_cpp = bool(filter( lambda f: f.find('.cpp') != -1, files ))
    has_ChF = bool(filter( lambda f: f.find('.ChF') != -1, files ))
    has_lib_src=False
    has_lib_cpp=False
    has_main_cpp=False
    main_cpps = []            # cpp files here that define a main()
    nonmain_nonlib_cpps = [] # cpp's in dir that has a main() cpp.
    lib_cpps = []             # cpp files destined for a convenience library
    lib_ChFs = []             # Chombo-Fortran sources in this directory
    if has_cpp:
        # First pass: find the cpp files that contain a main() function.
        for cpp in glob.glob( dirname + '/*.cpp' ):
            if hasMainFunc( cpp ):
                has_main_cpp = True
                main_cpps.append( os.path.basename(cpp) )
        # Second pass: the remaining cpp files either support the mains
        # (same directory) or, if no main exists here, form a library.
        for cpp in glob.glob( dirname + '/*.cpp' ):
            if not hasMainFunc( cpp ):
                if has_main_cpp:
                    nonmain_nonlib_cpps.append( os.path.basename(cpp) )
                else:
                    has_lib_cpp = True
                    lib_cpps.append( os.path.basename(cpp) )
    if has_lib_cpp:
        # Record the libtool library this directory will produce.
        extra.all_lib_names.append( makeLTLibName( dirname ) )
    if has_ChF:
        for chf in glob.glob( dirname + '/*.ChF' ):
            lib_ChFs.append( os.path.basename( chf ) )
    if has_main_cpp:
        extra.main_dirs.append( bname )
    if has_lib_cpp or has_ChF:
        has_lib_src = True
        extra.lib_dirs.append( bname )
    # Recurse one level at a time; the child accumulator shares all_lib_names
    # with the parent so library names collect globally across the walk.
    # (Two spaces per level: depth below is computed as len(indent)/2.)
    subdir_extra = WalkExtra( indent = extra.indent + '  ',
                              all_lib_names = extra.all_lib_names )
    for sd in subdirs:
        if sd == 'CVS': continue
        #print extra.indent, "Entering subdir", dirname+'/'+sd, "..."
        walk1Deep( dirname+'/'+sd, describeDirStructure, subdir_extra )
    if bool(subdir_extra.lib_dirs) or bool(subdir_extra.main_dirs):
        #print extra.indent, "lib subdirs of", bname, ":", subdir_extra.lib_dirs
        #print extra.indent, "main subdirs of", bname, ":", subdir_extra.main_dirs
        if not bname in extra.lib_dirs: # Source code >1 level down
            extra.lib_dirs.append( bname )
    # Emit a Makefile.am whenever this directory (or one below it)
    # contributes any sources or buildable subdirectories.
    if( len(main_cpps) + len(lib_cpps) + len(nonmain_nonlib_cpps) + len(lib_ChFs)
        + len(subdir_extra.lib_dirs) + len(subdir_extra.main_dirs) > 0):
        makeMakefileAm( dirname, subdir_extra.lib_dirs + subdir_extra.main_dirs,
                        main_cpps, lib_cpps, nonmain_nonlib_cpps, lib_ChFs,
                        depth=len(extra.indent)/2 )
def fixupMakefileAms( all_lib_names, dirname, files ):
    """
    Every time you find a Makefile.am, look for "bin_PROGRAMS". If it's there,
    then for every listed program, make an LDADD line that lists arg
    all_lib_names.

    Visitor signature (extra, dirname, files) as used by os.path.walk:
        all_lib_names: libtool library names to link into every program
        dirname:       directory currently being visited
        files:         directory entries of dirname
    """
    if 'Makefile.am' in files:
        ro = open( dirname + '/Makefile.am' )
        # Inline substring test replaces the nested grep() helper, whose
        # first parameter shadowed the builtin 'str'; a list comprehension
        # also works identically on Python 2 and 3, unlike filter()+len().
        bin_progs_line = [line for line in ro.readlines()
                          if 'bin_PROGRAMS' in line]
        ro.close()  # close promptly instead of leaking the handle
        if len(bin_progs_line) > 0:
            m = open( dirname + '/Makefile.am', 'a' )
            # Tokens after 'bin_PROGRAMS =' are the program names.
            for bin in bin_progs_line[0].split()[2:]:
                m.write( bin + '_LDADD = -L$(CHOMBO_INSTALLDIR)/lib/Chombo $(LIBSRC_LIBS) ' )
                for lib in all_lib_names:
                    m.write( '-l' + lib + ' ' )
                m.write( '-lg2c' )
                m.write( '\n' )
            m.close()
def findAllMakefileAms( topdir ):
    """
    For every directory under topdir containing a Makefile.am, collect the
    relative path '<dir>/Makefile' -- the form AC_CONFIG_FILES expects
    (used by makeConfigureIn).
    """
    # NOTE(review): os.path.walk is Python-2-only (removed in Python 3).
    def visit_func( extra, dirname, files ):
        if 'Makefile.am' in files:
            # Strip the topdir prefix (and any leading '/') so the recorded
            # path is relative, e.g. 'src/Makefile'.
            extra['dirs_with_makefileam'
                  ].append( string.lstrip(
                dirname[len(extra['topdir'])+1:] + '/Makefile', '/'))
    # The dict is mutated in place by the visitor on each directory.
    extra = {'dirs_with_makefileam':[], 'topdir':topdir}
    os.path.walk( topdir, visit_func, extra )
    return extra['dirs_with_makefileam']
def makeConfigureIn( topdir ):
    """
    Generate the configure.in, starting from a template and just adding
    AC_CONFIG_FILES and AC_OUTPUT lines.
    It's assumed this script is in the same directory as configure.pre; they should
    both be at the root of the Chombo source tree, and in share/Chombo in any
    Chombo install tree.
    """
    confpre = os.path.dirname(sys.argv[0]) + '/../../../configure.pre'
    # NOTE(review): a leading '/' is stripped, turning an absolute path into a
    # cwd-relative one -- presumably tied to the working-directory requirement
    # stated in the usage message; confirm before changing.
    if confpre[0] == '/':
        confpre = confpre[1:]
    infile = open( confpre )
    outfile = open( topdir + '/configure.in', 'w' )
    in_lines = infile.readlines()
    curline = 0
    # Copy template lines verbatim up to and including the AC_INIT line.
    while True: # No do...while in Python.
        outfile.write( in_lines[curline] )
        curline += 1
        if in_lines[curline-1][0:7] == "AC_INIT":
            break
    #
    # Users of the configure.in we're writing will need to tell it where their
    # Chombo install tree is. That's where Make.defs and transformation scripts are.
    #
    outfile.write( 'if test x$CHOMBO_INSTALLDIR = x ; then\n' )
    outfile.write( ' echo " no CHOMBO_INSTALLDIR" \n' )
    outfile.write( 'fi\n' )
    outfile.write( 'if test ! -f $CHOMBO_INSTALLDIR/share/Chombo/Make.defs ; then\n' )
    outfile.write( ' echo " no $CHOMBO_INSTALLDIR/share/Chombo/Make.defs" \n' )
    outfile.write( 'fi\n' )
    outfile.write( 'if test x$CHOMBO_INSTALLDIR = x -o ! -f $CHOMBO_INSTALLDIR/share/Chombo/Make.defs ; then\n' )
    outfile.write( ' echo "*****************************************" \n' )
    outfile.write( ' echo "Error: you must pass configure a definition of CHOMBO_INSTALLDIR"\n' )
    outfile.write( ' echo "and it must indicate the root of your Chombo install tree" \n' )
    outfile.write( ' echo "e.g. \'./configure CHOMBO_INSTALLDIR=\$HOME/Chombo/install\'"\n' )
    outfile.write( ' echo ""\n' )
    outfile.write( ' echo "If you think this message is in error, check that under your"\n')
    outfile.write( ' echo "CHOMBO_INSTALLDIR you have a file called Make.defs. If you"\n' )
    outfile.write( ' echo "do not (but, say, you do seem to have some of the libraries"\n')
    outfile.write( ' echo "and header files), then it is possible your Chombo build just"\n' )
    outfile.write( ' echo "did not run to completion."\n' )
    outfile.write( ' echo "*****************************************" \n' )
    outfile.write( ' exit 1 \n' )
    outfile.write( 'fi\n' )
    #
    # Substitute something for the project name.
    #
    while in_lines[curline][0:16] != 'AM_INIT_AUTOMAKE':
        outfile.write( in_lines[curline] )
        curline += 1
    outfile.write( 'AM_INIT_AUTOMAKE('+os.path.basename(topdir)+', 0.1.0 )\n' )
    curline += 1  # skip the template's own AM_INIT_AUTOMAKE line
    #
    # Throw away configure.pre lines that control what is and isn't in a
    # "small build".
    #
    for line in in_lines[curline:]:
        if line[0:11] == "#SMALLBUILD" or line[0:12] == "#!SMALLBUILD" :
            continue
        outfile.write( line )
    #
    # Write out the paths to the Makefiles we want generated.
    #
    outfile.write( 'AC_CONFIG_FILES(\n' )
    makefile_ams = findAllMakefileAms( topdir )
    for m in makefile_ams:
        outfile.write( ' ' + m + '\n' )
    outfile.write( ')\n' )
    outfile.write( 'AC_OUTPUT\n' )
# Command-line driver: validate usage, walk the example tree emitting
# Makefile.am's, then generate configure.in and the helper scripts.
# NOTE(review): Python 2 only (print statement, string.rstrip, os.path.walk).
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.stderr.write( "**********************************************************\n")
        sys.stderr.write( "Usage: python make_example_metamakefiles.py example_dir \n")
        sys.stderr.write( " where example_dir is the name of the directory in which \n")
        sys.stderr.write( " you want to generate an autoconf build system.\n")
        sys.stderr.write( "\n" )
        sys.stderr.write( "It's important that when you run this script, your working\n")
        sys.stderr.write( "directory be one above example_dir.\n")
        sys.stderr.write( "**********************************************************\n")
        sys.exit(1)
    # Normalize away any trailing '/' so path concatenation below is clean.
    topdir = string.rstrip( sys.argv[1], '/' )
    chombodir = os.path.dirname(sys.argv[0]) + '/../../..'
    print "chombodir=", chombodir
    walk_extra = WalkExtra( indent='', all_lib_names=[] )
    walk1Deep( topdir, describeDirStructure, walk_extra )
    # Now that you know the full set of LT libraries, fix up the Makefile.am's
    # of the executables, so they all link to all those LT libraries.
    os.path.walk( topdir, fixupMakefileAms, walk_extra.all_lib_names )
    # Generate the configure.in, starting from a template and just adding
    # AC_CONFIG_FILES and AC_OUTPUT lines.
    makeConfigureIn( topdir )
    assert( os.path.exists( chombodir + '/lib/mk/autoconf/Automake.rules' ) )
    # NOTE(review): user-supplied paths are interpolated into shell commands
    # below; paths containing spaces or shell metacharacters will break.
    os.system( "sed 's/makefile\.anag//' " + chombodir + "/lib/mk/autoconf/Automake.rules > "
               + topdir + "/Automake.rules" )
    os.system( "cp " + chombodir + "/lib/mk/autoconf/bootstrap-generated-example.sh "
               + topdir + "/bootstrap\n" )
    os.system( "cp " + chombodir + "/lib/mk/autoconf/zap-generated-example.sh "
               + topdir + "/zap\n" )
    # Now that you've created configure.in and all the Makefile.am's, run
    # GNU autotools on them.
    # os.system( 'cd ' + topdir + '; ./bootstrap' )
| 4,395 | -5 | 171 |
a08c38d6c8319f07b9477a4539b2eeedf4490472 | 1,375 | py | Python | radioepg/migrations/0023_auto_20210505_2116.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | radioepg/migrations/0023_auto_20210505_2116.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | 8 | 2021-05-17T10:54:28.000Z | 2021-06-08T12:02:37.000Z | radioepg/migrations/0023_auto_20210505_2116.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.1 on 2021-05-05 18:16
from django.db import migrations, models
| 31.25 | 91 | 0.579636 | # Generated by Django 3.2.1 on 2021-05-05 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add pre-rendered logo fields (one per pixel size) to Service and
    make serviceIdentifier a unique 16-character field."""

    dependencies = [
        ('radioepg', '0022_service_logo'),
    ]

    # One AddField per resized-logo size (order matches the generated
    # migration: 112, 128, 32, 320, 600), followed by the AlterField.
    operations = [
        migrations.AddField(
            model_name='service',
            name='logo%d' % size,
            field=models.ImageField(editable=False, null=True,
                                    upload_to='resized_logos/'),
        )
        for size in (112, 128, 32, 320, 600)
    ] + [
        migrations.AlterField(
            model_name='service',
            name='serviceIdentifier',
            field=models.CharField(max_length=16, unique=True),
        ),
    ]
| 0 | 1,261 | 23 |
ee1c8f7bb7be58d86f5522c7f5df1b0d12eaa0db | 1,329 | py | Python | test/testbase/dummy.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 6 | 2018-08-10T17:11:10.000Z | 2020-04-29T07:05:36.000Z | test/testbase/dummy.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 58 | 2018-08-13T08:36:08.000Z | 2021-07-07T08:32:52.000Z | test/testbase/dummy.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | [
"Apache-2.0"
] | 7 | 2018-08-10T12:53:18.000Z | 2021-11-08T05:15:42.000Z | """
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from icetea_lib.bench import Bench
from icetea_lib.bench import TestStepError
# pylint: disable=missing-docstring
| 32.414634 | 72 | 0.54778 | """
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from icetea_lib.bench import Bench
from icetea_lib.bench import TestStepError
# pylint: disable=missing-docstring
class Testcase(Bench):
    """Minimal dummy test bench requiring zero DUTs.

    The test step raises only when "--fail" appears among the
    unrecognized command-line arguments, letting the framework's
    failure handling be exercised on demand.
    """

    def __init__(self):
        metadata = dict(
            name="dummy",
            title="dummy",
            status="unknown",
            type="functional",
            purpose="dummy",
            requirements={"duts": {'*': {"count": 0}}},
        )
        Bench.__init__(self, **metadata)

    def case(self):
        # Succeed silently unless failure was explicitly requested.
        if "--fail" not in self.unknown:
            return
        raise TestStepError(str(self.unknown))
| 577 | 1 | 76 |
4bead50296bd25e856c7c5c341f7f4ccd6db5412 | 6,281 | py | Python | ficapi/fictoken.py | riceball-k/ficapi | 98387d98252f4e438a7c136b59dbefe1425718f0 | [
"Apache-2.0"
] | null | null | null | ficapi/fictoken.py | riceball-k/ficapi | 98387d98252f4e438a7c136b59dbefe1425718f0 | [
"Apache-2.0"
] | null | null | null | ficapi/fictoken.py | riceball-k/ficapi | 98387d98252f4e438a7c136b59dbefe1425718f0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 riceball-k
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token Management Module for FicAPI
Manage authentication tokens for the FIC API access.
The authentication token can be read from the token file or
retrieved from the Response object after accessing keystone.
"""
from __future__ import annotations
import json
from copy import copy, deepcopy
from datetime import datetime
from os import PathLike
from pathlib import Path
import requests
from . import mycipher
TOKENID = 'X-Subject-Token'
EXPIRES = 'expires_at'
class FicToken:
"""Tokenを保持するクラス"""
    def __init__(self,
                 token: PathLike | str | dict | None = None,
                 *,
                 password: str | None = None
                 ):
        """Create a token.

        Args:
            token (PathLike | str | dict | None):
                - PathLike | str: name of a token file to read
                - dict: token data to copy as-is
                - None: start with a placeholder, already-expired token
            password (str | None): password for the token-ID cipher

        Raises:
            TypeError: an argument has an unsupported type
            KeyError: the token lacks a required key (via _check_token)
        """
        if not isinstance(password, (str, type(None))):
            raise TypeError(f'invalid arg type {type(password)}, '
                            f'must be {str} or None.')
        self._cipher = mycipher.MyCipher(password)
        # Placeholder token: dummy ID and an expiry far in the past, so a
        # FicToken() constructed without data is immediately "expired".
        self._token = {
            TOKENID: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
            EXPIRES: '2001-01-01T00:00:00.000000Z'
        }
        if token is None:
            pass
        elif isinstance(token, (PathLike, str)):
            self.read(token)
        elif isinstance(token, dict):
            # Shallow copy so later mutation of the caller's dict does not
            # silently change this token's top-level entries.
            self._token = copy(token)
        else:
            raise TypeError(f'invalid arg type {type(token)}, '
                            f'must be {PathLike}, {str} or {dict}.')
        self._check_token()
    def __str__(self) -> str:
        """Return the token ID."""
        return self.id
    def __repr__(self) -> str:
        """Return the token ID in the form 'token_id=xxxxx'."""
        return repr(f'token_id={self.id}')
    def __bool__(self) -> bool:
        """Expiry check for the token.

        Returns:
            bool: True while still valid, False once expired.
        """
        return self.is_valid
    def _check_token(self) -> None:
        """Verify the token carries its required keys
        ('X-Subject-Token' and 'expires_at').

        Raises:
            KeyError: a required key is missing
        """
        # Subset test: both required keys must be present in the token dict.
        if not {TOKENID, EXPIRES} <= set(self._token):
            raise KeyError(f'"{TOKENID}" or "{EXPIRES}" is not found in token')
    @property
    def id(self) -> str:
        """Return the token ID.

        Returns:
            str: token ID
        """
        return self._token[TOKENID]
@property
def expire_time(self) -> datetime:
"""トークンの有効期限を返す
Returns:
datetime: 有効期限(タイムゾーン有り)
"""
return datetime.fromisoformat(
self._token[EXPIRES].replace('Z', '+00:00')
)
@property
def is_valid(self) -> bool:
"""トークンの有効期限判定
Returns:
bool: 有効期限内ならTrue、有効期限切れならFalse
"""
return self.expire_time > datetime.now().astimezone()
def read(self, file: PathLike | str, password: str | None = None) -> None:
"""ファイル(JSON形式)からトークンを読み込む
トークンIDは復号化する
Args:
file (PathLike | str): ファイル名
password (str | None): パスワード
Raises:
TypeError: 引数の型が不正
KeyError: 必須要素がない
"""
if not isinstance(file, (PathLike, str)):
raise TypeError(f'invalid arg type {type(file)}, '
f'must be {PathLike} or {str}.')
if isinstance(password, str):
cipher = mycipher.MyCipher(password)
elif password is None:
cipher = self._cipher
else:
raise TypeError(f'password must be {str}.')
self._token = json.loads(Path(file).read_text(encoding='utf-8'))
self._check_token()
self._token[TOKENID] = cipher.decrypt(self._token[TOKENID])
self._cipher = cipher
def write(self, file: PathLike | str, password: str | None = None) -> None:
"""ファイルへトークンを書き込む(JSON形式)
トークンIDは暗号化する
Args:
file (PathLike | str): ファイル名
password (str | None): パスワード
Raises:
TypeError: 引数の型が不正
"""
if not isinstance(file, (PathLike, str)):
raise TypeError(f'invalid arg type {type(file)}, '
f'must be {PathLike} or {str}.')
if isinstance(password, str):
cipher = mycipher.MyCipher(password)
elif password is None:
cipher = self._cipher
else:
raise TypeError(f'password must be {str}.')
token = deepcopy(self._token)
token[TOKENID] = cipher.encrypt(token[TOKENID])
Path(file).write_text(json.dumps(token, indent=4), encoding='utf-8')
self._cipher = cipher
def update(self, response: requests.Response) -> None:
"""レスポンスオブジェクトからトークンを読み込む
以下の必須情報がない場合には例外が発生する
- headerには 'X-Subject-Token' があること
- bodyには 'expires_at' があること
Args:
response (requests.Response): レスポンスオブジェクト
Raises:
TypeError: 引数の型が不正
ValueError: レスポンスオブジェクトに必須情報がない
"""
if not isinstance(response, requests.Response):
raise TypeError(f'invalid arg type {type(response)}, '
f'must be {requests.Response}.')
body = response.json()
if TOKENID not in response.headers:
raise ValueError(f'"{TOKENID}" is not found in response header.')
if EXPIRES not in body['token']:
raise ValueError(f'"{EXPIRES}" is not found in response body.')
# トークンの置き換え
self._token = dict(response.headers)
self._token[EXPIRES] = body['token'][EXPIRES]
| 29.767773 | 79 | 0.574431 | # Copyright 2021 riceball-k
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token Management Module for FicAPI
Manage authentication tokens for the FIC API access.
The authentication token can be read from the token file or
retrieved from the Response object after accessing keystone.
"""
from __future__ import annotations
import json
from copy import copy, deepcopy
from datetime import datetime
from os import PathLike
from pathlib import Path
import requests
from . import mycipher
TOKENID = 'X-Subject-Token'
EXPIRES = 'expires_at'
class FicToken:
    """Hold a keystone authentication token (ID plus expiry timestamp)."""
    def __init__(self,
                 token: PathLike | str | dict | None = None,
                 *,
                 password: str | None = None
                 ):
        """Create a token.
        Args:
            token (PathLike | str | dict | None):
                - PathLike | str: file name to load the token from
                - dict: token data
            password (str | None): password used to en/decrypt the token ID
        Raises:
            TypeError: invalid argument type
        """
        if not isinstance(password, (str, type(None))):
            raise TypeError(f'invalid arg type {type(password)}, '
                            f'must be {str} or None.')
        self._cipher = mycipher.MyCipher(password)
        # Placeholder token: the expiry is already in the past, so
        # is_valid stays False until a real token is loaded.
        self._token = {
            TOKENID: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
            EXPIRES: '2001-01-01T00:00:00.000000Z'
        }
        if token is None:
            pass
        elif isinstance(token, (PathLike, str)):
            self.read(token)
        elif isinstance(token, dict):
            self._token = copy(token)
        else:
            raise TypeError(f'invalid arg type {type(token)}, '
                            f'must be {PathLike}, {str} or {dict}.')
        self._check_token()
    def __str__(self) -> str:
        """Return the token ID."""
        return self.id
    def __repr__(self) -> str:
        """Return the token ID formatted as 'token_id=xxxxx'."""
        return repr(f'token_id={self.id}')
    def __bool__(self) -> bool:
        """Check the token expiry.
        Returns:
            bool: True while the token is valid, False once expired
        """
        return self.is_valid
    def _check_token(self) -> None:
        """Check that the required keys ('X-Subject-Token' and
        'expires_at') are present in the token.
        Raises:
            KeyError: a required key is missing
        """
        if not {TOKENID, EXPIRES} <= set(self._token):
            raise KeyError(f'"{TOKENID}" or "{EXPIRES}" is not found in token')
    @property
    def id(self) -> str:
        """Return the token ID.
        Returns:
            str: token ID
        """
        return self._token[TOKENID]
    @property
    def expire_time(self) -> datetime:
        """Return the token expiry.
        Returns:
            datetime: expiry (timezone-aware)
        """
        # The trailing 'Z' is rewritten to '+00:00' because
        # datetime.fromisoformat() does not accept 'Z' before Python 3.11.
        return datetime.fromisoformat(
            self._token[EXPIRES].replace('Z', '+00:00')
        )
    @property
    def is_valid(self) -> bool:
        """Check the token expiry.
        Returns:
            bool: True while the token is valid, False once expired
        """
        return self.expire_time > datetime.now().astimezone()
    def read(self, file: PathLike | str, password: str | None = None) -> None:
        """Load a token from a JSON file.
        The token ID stored in the file is decrypted.
        Args:
            file (PathLike | str): file name
            password (str | None): password; None reuses the current cipher
        Raises:
            TypeError: invalid argument type
            KeyError: a required key is missing
        """
        if not isinstance(file, (PathLike, str)):
            raise TypeError(f'invalid arg type {type(file)}, '
                            f'must be {PathLike} or {str}.')
        if isinstance(password, str):
            cipher = mycipher.MyCipher(password)
        elif password is None:
            cipher = self._cipher
        else:
            raise TypeError(f'password must be {str}.')
        self._token = json.loads(Path(file).read_text(encoding='utf-8'))
        self._check_token()
        self._token[TOKENID] = cipher.decrypt(self._token[TOKENID])
        # Only adopt the new cipher once the file parsed and decrypted.
        self._cipher = cipher
    def write(self, file: PathLike | str, password: str | None = None) -> None:
        """Write the token to a file as JSON.
        The token ID is encrypted before writing.
        Args:
            file (PathLike | str): file name
            password (str | None): password; None reuses the current cipher
        Raises:
            TypeError: invalid argument type
        """
        if not isinstance(file, (PathLike, str)):
            raise TypeError(f'invalid arg type {type(file)}, '
                            f'must be {PathLike} or {str}.')
        if isinstance(password, str):
            cipher = mycipher.MyCipher(password)
        elif password is None:
            cipher = self._cipher
        else:
            raise TypeError(f'password must be {str}.')
        # Encrypt a deep copy so the in-memory token keeps the plain ID.
        token = deepcopy(self._token)
        token[TOKENID] = cipher.encrypt(token[TOKENID])
        Path(file).write_text(json.dumps(token, indent=4), encoding='utf-8')
        self._cipher = cipher
    def update(self, response: requests.Response) -> None:
        """Load the token from a Response object.
        An exception is raised unless the following required data exists:
            - 'X-Subject-Token' in the response header
            - 'expires_at' in the response body
        Args:
            response (requests.Response): response object
        Raises:
            TypeError: invalid argument type
            ValueError: required data is missing from the response
        """
        if not isinstance(response, requests.Response):
            raise TypeError(f'invalid arg type {type(response)}, '
                            f'must be {requests.Response}.')
        body = response.json()
        if TOKENID not in response.headers:
            raise ValueError(f'"{TOKENID}" is not found in response header.')
        if EXPIRES not in body['token']:
            raise ValueError(f'"{EXPIRES}" is not found in response body.')
        # Replace the token wholesale: all response headers plus the expiry.
        self._token = dict(response.headers)
        self._token[EXPIRES] = body['token'][EXPIRES]
| 0 | 0 | 0 |
c3a4c4d5fa877feaf2babdef850a2c1ecda90814 | 4,769 | py | Python | WebKit/Tools/Scripts/webkitpy/common/system/executive_mock.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | 1 | 2019-06-18T06:52:54.000Z | 2019-06-18T06:52:54.000Z | WebKit/Tools/Scripts/webkitpy/common/system/executive_mock.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | null | null | null | WebKit/Tools/Scripts/webkitpy/common/system/executive_mock.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.system.executive import ScriptError
# FIXME: This should be unified with MockExecutive2
| 39.413223 | 104 | 0.651499 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.system.executive import ScriptError
# FIXME: This should be unified with MockExecutive2
class MockExecutive(object):
    """Test double for Executive that fakes child-process execution."""

    def __init__(self, should_log=False, should_throw=False, should_throw_when_run=None):
        self._should_log = should_log
        self._should_throw = should_throw
        self._should_throw_when_run = should_throw_when_run or set()
        # FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
        self._running_pids = [os.getpid()]

    def check_running_pid(self, pid):
        """Report whether *pid* is one of the known 'running' processes."""
        return pid in self._running_pids

    def run_and_throw_if_fail(self, args, quiet=False, cwd=None, env=None):
        """Pretend to run *args*; raise for commands configured to throw."""
        if self._should_log:
            env_string = ", env=%s" % env if env else ""
            log("MOCK run_and_throw_if_fail: %s, cwd=%s%s" % (args, cwd, env_string))
        if self._should_throw_when_run.intersection(args):
            raise ScriptError("Exception for %s" % args)
        return "MOCK output of child process"

    def run_command(self,
                    args,
                    cwd=None,
                    input=None,
                    error_handler=None,
                    return_exit_code=False,
                    return_stderr=True,
                    decode_output=False,
                    env=None):
        """Pretend to run *args* and return canned output."""
        assert isinstance(args, (list, tuple))
        if self._should_log:
            env_string = ", env=%s" % env if env else ""
            log("MOCK run_command: %s, cwd=%s%s" % (args, cwd, env_string))
        output = "MOCK output of child process"
        if self._should_throw:
            raise ScriptError("MOCK ScriptError", output=output)
        return output

    def cpu_count(self):
        """Fixed CPU count reported to callers."""
        return 2
class MockExecutive2(object):
    """Simpler Executive test double with fully configurable canned results."""

    def __init__(self, output='', exit_code=0, exception=None,
                 run_command_fn=None, stderr=''):
        self._output = output
        self._stderr = stderr
        self._exit_code = exit_code
        self._exception = exception
        self._run_command_fn = run_command_fn

    def cpu_count(self):
        """Fixed CPU count reported to callers."""
        return 2

    def kill_all(self, process_name):
        """No-op: the mock has nothing to kill."""
        pass

    def kill_process(self, pid):
        """No-op: the mock has nothing to kill."""
        pass

    def run_command(self,
                    args,
                    cwd=None,
                    input=None,
                    error_handler=None,
                    return_exit_code=False,
                    return_stderr=True,
                    decode_output=False,
                    env=None):
        """Return the configured result.

        Precedence mirrors the real Executive mock contract:
        raise the stored exception, then delegate to the callback,
        then honor return_exit_code, then invoke the error handler,
        and finally return output (+ stderr when requested).
        """
        assert isinstance(args, (list, tuple))
        if self._exception:
            raise self._exception
        if self._run_command_fn:
            return self._run_command_fn(args)
        if return_exit_code:
            return self._exit_code
        if self._exit_code and error_handler:
            script_error = ScriptError(script_args=args, exit_code=self._exit_code, output=self._output)
            error_handler(script_error)
        return self._output + self._stderr if return_stderr else self._output
| 2,733 | 15 | 313 |
fac72f5de4eca19b50e505af1e0929ed2f6b7d44 | 250 | py | Python | p15.py | daicang/Euler | c00114d588351b6f2c637937558a35738d56345b | [
"MIT"
] | null | null | null | p15.py | daicang/Euler | c00114d588351b6f2c637937558a35738d56345b | [
"MIT"
] | null | null | null | p15.py | daicang/Euler | c00114d588351b6f2c637937558a35738d56345b | [
"MIT"
] | null | null | null | # Lattice path through 20 * 20 grids
print solve(20, 20)
| 16.666667 | 43 | 0.452 | # Lattice path through 20 * 20 grids
def solve(x0, y0):
    """Count lattice paths from (0, 0) to (x0, y0) moving only right/down.

    The original built its DP table as ``[[1] * x] * y``, which aliases
    every row to the *same* list object; the answer only came out right
    because the aliased in-place updates happen to coincide with the
    rolling one-dimensional recurrence.  Use that 1-D DP explicitly.

    Args:
        x0: grid width (number of right-moves).
        y0: grid height (number of down-moves).

    Returns:
        The number of monotonic paths, equal to C(x0 + y0, x0).
    """
    # row[j] holds the path count for column j of the current DP row.
    row = [1] * (x0 + 1)
    for _ in range(y0):
        for j in range(1, x0 + 1):
            row[j] += row[j - 1]
    return row[x0]
print solve(20, 20)
| 167 | 0 | 23 |
6701dd775ed2c2e99d08413a997a7eec1946f37d | 1,103 | py | Python | src/arbiterd/common/nova.py | valleedelisle/arbiterd | 18d6babbc3eab9eac6f35fd0babf1658cab4863e | [
"Apache-2.0"
] | null | null | null | src/arbiterd/common/nova.py | valleedelisle/arbiterd | 18d6babbc3eab9eac6f35fd0babf1658cab4863e | [
"Apache-2.0"
] | 6 | 2021-12-01T13:18:21.000Z | 2022-01-31T23:50:24.000Z | src/arbiterd/common/nova.py | valleedelisle/arbiterd | 18d6babbc3eab9eac6f35fd0babf1658cab4863e | [
"Apache-2.0"
] | 1 | 2022-01-21T18:45:03.000Z | 2022-01-21T18:45:03.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 - 2021, Sean Mooney and the arbiterd contributors
# SPDX-License-Identifier: Apache-2.0
import configparser
import typing as ty
from arbiterd.common import cpu
| 26.902439 | 71 | 0.692656 | # -*- coding: utf-8 -*-
# Copyright 2021 - 2021, Sean Mooney and the arbiterd contributors
# SPDX-License-Identifier: Apache-2.0
import configparser
import typing as ty
from arbiterd.common import cpu
def parse_nova_conf(nova_conf: str) -> configparser.ConfigParser:
    """Parse a nova.conf file into a ConfigParser.

    Interpolation is disabled because nova option values may contain
    literal '%' characters.
    """
    parser = configparser.ConfigParser(interpolation=None)
    parser.read(nova_conf)
    return parser
def get_string(
    conf: configparser.ConfigParser, section, option, default=None,
    strip=True
) -> str:
    """Fetch an option value, optionally stripping surrounding quotes.

    Returns *default* when the section/option is absent; when *strip*
    is true, leading/trailing double then single quotes are removed.
    """
    value = conf.get(section, option, fallback=default)
    if strip and value is not None:
        value = value.strip('"').strip('\'')
    return value
def get_dedicated_cpus(nova_conf: str) -> ty.Set[int]:
    """Return the CPU ids listed in [compute]/cpu_dedicated_set.

    An empty set is returned when the option is absent.
    """
    spec = get_string(parse_nova_conf(nova_conf), 'compute',
                      'cpu_dedicated_set')
    if spec is None:
        return set()
    return cpu.parse_cpu_spec(spec)
def get_shared_cpus(nova_conf: str) -> ty.Set[int]:
    """Return the CPU ids listed in [compute]/cpu_shared_set.

    An empty set is returned when the option is absent.
    """
    spec = get_string(parse_nova_conf(nova_conf), 'compute',
                      'cpu_shared_set')
    if spec is None:
        return set()
    return cpu.parse_cpu_spec(spec)
| 805 | 0 | 92 |
92d497e92371584ebd1df6bf0aa6c2bb39ad029b | 9,266 | py | Python | retrograde/modules/gra_afch/__init__.py | Software-Knife-and-Tool/retrograde | 1c9be80eb42e3bf395feba2fd99b9a29bef15925 | [
"MIT"
] | null | null | null | retrograde/modules/gra_afch/__init__.py | Software-Knife-and-Tool/retrograde | 1c9be80eb42e3bf395feba2fd99b9a29bef15925 | [
"MIT"
] | 44 | 2020-11-22T19:56:59.000Z | 2021-06-15T22:42:01.000Z | retrograde/modules/gra_afch/__init__.py | Software-Knife-and-Tool/retrograde | 1c9be80eb42e3bf395feba2fd99b9a29bef15925 | [
"MIT"
] | null | null | null | ##########
##
## SPDX-License-Identifier: MIT
##
## Copyright (c) 2017-2022 James M. Putnam <putnamjm.design@gmail.com>
##
##########
##########
##
## gra-afch controller
##
###########
"""Manage GRA-AFCH NCS31X hardware
See module ncs31x for display/clock interface.
Classes:
GraAfch
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
buttons()
default_rotor()
display_string(digits)
exec_(op)
gra_afch()
run_rotor(rotor_def)
update_backlight(color)
Misc variables:
VERSION
_conf_dict
_dots
_lock
_rotor
_tube_mask
"""
import json
import time
import wiringpi
from time import localtime, strftime
from datetime import datetime
from threading import Thread, Lock, Timer
from .ncs31x import Ncs31x
class GraAfch:
    """Controller for the GRA-AFCH NCS31X display/clock board.

    NOTE(review): this copy is incomplete — it references helpers
    (scale_, tubes_, dots_, fmt_, debounce_mode/up/down, mask_,
    rotor_proc, toggle_) that are never defined here; their nested
    definitions appear to have been stripped by a filtering step.
    Restore them before using this code.
    """
    VERSION = '0.0.3'
    _DEBOUNCE_DELAY = 150  # button debounce delay passed to wiringpi.delay()
    _TOTAL_DELAY = 17
    _conf_dict = None  # parsed conf.json
    _rotor = None  # current rotor thread
    _lock = None
    _dots = None  # whether the dot lamps are lit
    _ncs31x = None  # display/RTC driver
    _tube_mask = [255 for _ in range(8)]  # per-byte AND mask for the display buffer
    _toggle = None  # display on/off flag
    # modules
    _retro = None
    _event = None
    # def string_to_color(str_):
    #     def ctoi_(nib):#
    #         nval = 0
    #
    #         if nib >= '0' & nib <= '9':
    #             nval = nib - '0'
    #         elif nib >= 'a' & nib <= 'f':
    #             nval = nib - 'a' + 10;
    #         elif (nib >= 'A' & nib <= 'F'):
    #             nval = nib - 'A' + 10
    #         else:
    #             nval = -1
    #         return nval
    #
    #     def channel_(msn, lsn):
    #         m = ctoi(msn);
    #         l = ctoi(lsn);
    #
    #         return (m < 0 | l < 0) if -1 else (m << 4) + l
    #
    #     r = channel(str[0], str[1])
    #     g = channel(str[2], str[3])
    #     b = channel(str[4], str[5])
    #
    #     return [r, g, b];
    def update_backlight(self, color):
        """change the backlight color
        """
        # NOTE(review): scale_ is undefined in this copy (stripped helper).
        self._ncs31x.backlight(
            [scale_(color[0]),
             scale_(color[1]),
             scale_(color[2])])
    def display_string(self, digits):
        """stuff the tubes from decimal string
        """
        # NOTE(review): tubes_, dots_ and fmt_ are undefined in this copy
        # (stripped helpers).
        buffer = [0 for _ in range(8)]
        left = tubes_(digits, Ncs31x.LEFT_REPR_START)
        left = dots_(left)
        fmt_(left, buffer, Ncs31x.LEFT_BUFFER_START, 0)
        right = tubes_(digits, Ncs31x.RIGHT_REPR_START)
        right = dots_(right)
        fmt_(right, buffer, Ncs31x.RIGHT_BUFFER_START, 4)
        self._ncs31x.display(buffer)
    def buttons(self):
        """button events
        """
        # NOTE(review): debounce_mode/up/down are undefined in this copy
        # (stripped interrupt handlers).
        self._ncs31x.init_pin(Ncs31x.UP_BUTTON_PIN)
        self._ncs31x.init_pin(Ncs31x.DOWN_BUTTON_PIN)
        self._ncs31x.init_pin(Ncs31x.MODE_BUTTON_PIN)
        wiringpi.wiringPiISR(Ncs31x.MODE_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_mode)
        wiringpi.wiringPiISR(Ncs31x.UP_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_up)
        wiringpi.wiringPiISR(Ncs31x.DOWN_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_down)
    def exec_(self, op):
        """gra-afch operations
        """
        step = op['exec']
        # NOTE(review): dots_ and mask_ are undefined in this copy
        # (stripped helpers).
        if not self._toggle:
            self._ncs31x.blank(not self._toggle)
        else:
            self._retro.switch_in(
                [('delay', lambda: wiringpi.delay(int(step['delay']))),
                 ('blank', lambda: self._ncs31x.blank(step['blank'])),
                 ('back', lambda: self.update_backlight(step['back'])),
                 ('dots', lambda: dots_),
                 ('date-time', lambda: self.display_string(
                     strftime(step['date-time'], self._ncs31x.read_rtc()))),
                 ('display', lambda: self.display_string(step['display'])),
                 ('sync', lambda: self._ncs31x.write_rtc(localtime())),
                 ('mask', mask_)], step)
    def _run_rotor(self, rotor_def):
        """run the rotor thread
        """
        # NOTE(review): rotor_proc is undefined in this copy (stripped
        # thread body).
        if self._rotor:
            self._rotor._exit = True
            self._rotor.join()
        self._rotor = Thread(target=rotor_proc, args=(rotor_def, ))
        self._rotor.start()
    def __init__(self, retro_):
        """initialize the gra-afch module
        register with the event module
        read the config file
        crank up the default rotor
        """
        def event_proc():
            """grab one of our events off the queue
            if it's an exec, do it.
            if it's an event, go look it up in our
            event config and send whatever it maps
            to back to the queue.
            """
            while True:
                event_ = self._event.find_event('gra-afch')['gra-afch']
                type_ = list(event_)[0]
                if type_ == 'exec':
                    self.exec_(event_)
                elif type_ == 'event':
                    arg_ = event_['event']
                    # NOTE(review): toggle_ is undefined in this copy
                    # (stripped helper).
                    v, _ = self._retro.switch([
                        ('mode-button', lambda: print('button mode')),
                        ('up-button', lambda: print('button up')),
                        ('down-button', lambda: print('button down')),
                        ('toggle', toggle_),
                        ('timer',
                         lambda: Timer(event_['timer'] / 1000, lambda: self.
                                       _event.send_event('timer')).start())
                    ], arg_)
                    if not v:
                        for ev in retro_.events('gra-afch'):
                            if arg_ == list(ev)[0]:
                                self._event.send_event(ev[arg_])
        self._retro = retro_
        self._event = retro_.event
        self._event.register('gra-afch', event_proc)
        self._conf_dict = []
        with open(retro_.path(__file__, 'conf.json'), 'r') as file:
            self._conf_dict = json.load(file)
        self._toggle = True
        # does ncs31x need the configuration dictionary?
        self._ncs31x = Ncs31x(self._conf_dict)
        self.buttons()
        self._run_rotor(retro_.find_rotor('default')['default'])
| 29.509554 | 79 | 0.514246 | ##########
##
## SPDX-License-Identifier: MIT
##
## Copyright (c) 2017-2022 James M. Putnam <putnamjm.design@gmail.com>
##
##########
##########
##
## gra-afch controller
##
###########
"""Manage GRA-AFCH NCS31X hardware
See module ncs31x for display/clock interface.
Classes:
GraAfch
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
buttons()
default_rotor()
display_string(digits)
exec_(op)
gra_afch()
run_rotor(rotor_def)
update_backlight(color)
Misc variables:
VERSION
_conf_dict
_dots
_lock
_rotor
_tube_mask
"""
import json
import time
import wiringpi
from time import localtime, strftime
from datetime import datetime
from threading import Thread, Lock, Timer
from .ncs31x import Ncs31x
class GraAfch:
    """Controller for the GRA-AFCH NCS31X display/clock board.

    Installs button interrupts, drives the tube display and backlight,
    and services 'gra-afch' events from the retrograde event queue.
    """
    VERSION = '0.0.3'
    _DEBOUNCE_DELAY = 150  # button debounce delay passed to wiringpi.delay()
    _TOTAL_DELAY = 17
    _conf_dict = None  # parsed conf.json
    _rotor = None  # current rotor thread
    _lock = None
    _dots = None  # whether the dot lamps are lit
    _ncs31x = None  # display/RTC driver
    _tube_mask = [255 for _ in range(8)]  # per-byte AND mask for the display buffer
    _toggle = None  # display on/off flag
    # modules
    _retro = None
    _event = None
    # def string_to_color(str_):
    #     def ctoi_(nib):#
    #         nval = 0
    #
    #         if nib >= '0' & nib <= '9':
    #             nval = nib - '0'
    #         elif nib >= 'a' & nib <= 'f':
    #             nval = nib - 'a' + 10;
    #         elif (nib >= 'A' & nib <= 'F'):
    #             nval = nib - 'A' + 10
    #         else:
    #             nval = -1
    #         return nval
    #
    #     def channel_(msn, lsn):
    #         m = ctoi(msn);
    #         l = ctoi(lsn);
    #
    #         return (m < 0 | l < 0) if -1 else (m << 4) + l
    #
    #     r = channel(str[0], str[1])
    #     g = channel(str[2], str[3])
    #     b = channel(str[4], str[5])
    #
    #     return [r, g, b];
    def update_backlight(self, color):
        """Change the backlight color.

        Args:
            color: [r, g, b] channel values; each is rescaled by 100/255
                before being handed to the driver.
        """
        def scale_(nval):
            return int(nval * (100 / 255))
        self._ncs31x.backlight(
            [scale_(color[0]),
             scale_(color[1]),
             scale_(color[2])])
    def display_string(self, digits):
        """Stuff the tubes from a decimal string.

        Packs two 3-digit groups of *digits* into the 8-byte display
        buffer, ORs in the dot lamps, applies the per-byte tube mask,
        and hands the buffer to the driver.
        """
        def tubes_(str_, start):
            # One-hot map: digit d lights bit d of its 10-bit tube field.
            tube_map_ = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
            def num_(ch):
                # A blank character is treated like digit 0.
                return 0 if ch == ' ' else int(ch)
            # Three digits packed 10 bits apiece, most significant first.
            bits = (tube_map_[num_(str_[start])]) << 20
            bits |= (tube_map_[num_(str_[start - 1])]) << 10
            bits |= (tube_map_[num_(str_[start - 2])])
            return bits
        def dots_(bits):
            # Light or clear both dot lamps according to self._dots.
            if self._dots:
                bits |= Ncs31x.LOWER_DOTS_MASK
                bits |= Ncs31x.UPPER_DOTS_MASK
            else:
                bits &= ~Ncs31x.LOWER_DOTS_MASK
                bits &= ~Ncs31x.UPPER_DOTS_MASK
            return bits
        def fmt_(nval, buffer, start, off):
            # Split the packed word into bytes, ANDing in the tube mask.
            buffer[start] = (nval >> 24 & 0xff) & self._tube_mask[off]
            buffer[start +
                   1] = ((nval >> 16) & 0xff) & self._tube_mask[off + 1]
            buffer[start + 2] = ((nval >> 8) & 0xff) & self._tube_mask[off + 2]
            buffer[start + 3] = (nval & 0xff) & self._tube_mask[off + 3]
            return buffer
        buffer = [0 for _ in range(8)]
        left = tubes_(digits, Ncs31x.LEFT_REPR_START)
        left = dots_(left)
        fmt_(left, buffer, Ncs31x.LEFT_BUFFER_START, 0)
        right = tubes_(digits, Ncs31x.RIGHT_REPR_START)
        right = dots_(right)
        fmt_(right, buffer, Ncs31x.RIGHT_BUFFER_START, 4)
        self._ncs31x.display(buffer)
    def buttons(self):
        """Install rising-edge interrupt handlers for the three buttons.

        Each handler disarms its pin (pointing it at a no-op), waits out
        the debounce delay, queues the button event, then re-arms itself.
        """
        def nope():
            # Placeholder ISR used while a button is being debounced.
            pass
        def debounce_mode():
            wiringpi.wiringPiISR(Ncs31x.MODE_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, nope)
            wiringpi.delay(self._DEBOUNCE_DELAY)
            self._event.make_event('gra-afch', 'event', 'mode-button')
            wiringpi.wiringPiISR(Ncs31x.MODE_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, debounce_mode)
        def debounce_up():
            wiringpi.wiringPiISR(Ncs31x.UP_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, nope)
            wiringpi.delay(self._DEBOUNCE_DELAY)
            self._event.make_event('gra-afch', 'event', 'up-button')
            wiringpi.wiringPiISR(Ncs31x.UP_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, debounce_up)
        def debounce_down():
            wiringpi.wiringPiISR(Ncs31x.DOWN_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, nope)
            wiringpi.delay(self._DEBOUNCE_DELAY)
            self._event.make_event('gra-afch', 'event', 'down-button')
            wiringpi.wiringPiISR(Ncs31x.DOWN_BUTTON_PIN,
                                 wiringpi.INT_EDGE_RISING, debounce_down)
        self._ncs31x.init_pin(Ncs31x.UP_BUTTON_PIN)
        self._ncs31x.init_pin(Ncs31x.DOWN_BUTTON_PIN)
        self._ncs31x.init_pin(Ncs31x.MODE_BUTTON_PIN)
        wiringpi.wiringPiISR(Ncs31x.MODE_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_mode)
        wiringpi.wiringPiISR(Ncs31x.UP_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_up)
        wiringpi.wiringPiISR(Ncs31x.DOWN_BUTTON_PIN, wiringpi.INT_EDGE_RISING,
                             debounce_down)
    def exec_(self, op):
        """Perform one 'exec' operation from the event queue.

        When the display toggle is off, everything collapses to a
        blanking call; otherwise the key present in *step* selects
        one of the actions in the dispatch list below.
        """
        step = op['exec']
        def mask_():
            # bits 0 and 6 are indicator lamps
            # rightmost number lamp is bit 1
            mask_ = step['mask']
            for i in range(8):
                self._tube_mask[i] = 255 if mask_ & (2**i) else 0
        def dots_():
            self._dots = step['dots']
        if not self._toggle:
            self._ncs31x.blank(not self._toggle)
        else:
            self._retro.switch_in(
                [('delay', lambda: wiringpi.delay(int(step['delay']))),
                 ('blank', lambda: self._ncs31x.blank(step['blank'])),
                 ('back', lambda: self.update_backlight(step['back'])),
                 # NOTE(review): this lambda returns dots_ without calling
                 # it, so a 'dots' step never updates self._dots — confirm
                 # whether it should read ('dots', dots_) like 'mask'.
                 ('dots', lambda: dots_),
                 ('date-time', lambda: self.display_string(
                     strftime(step['date-time'], self._ncs31x.read_rtc()))),
                 ('display', lambda: self.display_string(step['display'])),
                 ('sync', lambda: self._ncs31x.write_rtc(localtime())),
                 ('mask', mask_)], step)
    def _events(self):
        """Return the 'events' section of the config, or None when absent."""
        if 'events' in self._conf_dict:
            return self._conf_dict['events']
        return None
    def config(self):
        """Return the parsed configuration dictionary."""
        return self._conf_dict
    def _run_rotor(self, rotor_def):
        """Stop any running rotor thread and start one for *rotor_def*."""
        def rotor_proc(rotor):
            self._dots = self._conf_dict['dots']
            self._event.send_event(rotor)
        if self._rotor:
            # Signal the old rotor to exit and wait before replacing it.
            self._rotor._exit = True
            self._rotor.join()
        self._rotor = Thread(target=rotor_proc, args=(rotor_def, ))
        self._rotor.start()
    def __init__(self, retro_):
        """initialize the gra-afch module
        register with the event module
        read the config file
        crank up the default rotor
        """
        def event_proc():
            """grab one of our events off the queue
            if it's an exec, do it.
            if it's an event, go look it up in our
            event config and send whatever it maps
            to back to the queue.
            """
            while True:
                event_ = self._event.find_event('gra-afch')['gra-afch']
                type_ = list(event_)[0]
                if type_ == 'exec':
                    self.exec_(event_)
                elif type_ == 'event':
                    def toggle_():
                        self._toggle = not self._toggle
                    arg_ = event_['event']
                    v, _ = self._retro.switch([
                        ('mode-button', lambda: print('button mode')),
                        ('up-button', lambda: print('button up')),
                        ('down-button', lambda: print('button down')),
                        ('toggle', toggle_),
                        ('timer',
                         lambda: Timer(event_['timer'] / 1000, lambda: self.
                                       _event.send_event('timer')).start())
                    ], arg_)
                    if not v:
                        # Unhandled events are looked up in the config and
                        # their mapped event is re-queued.
                        for ev in retro_.events('gra-afch'):
                            if arg_ == list(ev)[0]:
                                self._event.send_event(ev[arg_])
        self._retro = retro_
        self._event = retro_.event
        self._event.register('gra-afch', event_proc)
        self._conf_dict = []
        with open(retro_.path(__file__, 'conf.json'), 'r') as file:
            self._conf_dict = json.load(file)
        self._toggle = True
        # does ncs31x need the configuration dictionary?
        self._ncs31x = Ncs31x(self._conf_dict)
        self.buttons()
        self._run_rotor(retro_.find_rotor('default')['default'])
| 2,628 | 0 | 434 |
85f464a00257f88848ce98d9706db18bf15eb15d | 2,998 | py | Python | meissner/__main__.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | 1 | 2022-01-27T10:16:46.000Z | 2022-01-27T10:16:46.000Z | meissner/__main__.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | null | null | null | meissner/__main__.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | null | null | null | """
.@@#
(@&*%@@@/,%@@@# #&@@@@&. .@@# /&@@@@&* /&@@@@&* (@&*%@@@( *%@@@@&/ .@@&*&@.
(@@&((&@@@(/&@@, #@@#/(&@@. .@@# #@@(///(, .@@%////, (@@&(/#@@# #@@&//#@@( .@@@@@%.
(@@. /@@* ,@@/ .&@@%%%&@@* .@@# (@@&&%#* .@@@&%#/ (@@. .&@% &@@&%%%@@% .@@@
(@@. /@@, ,@@/ .&@%,,,,,, .@@# ./#%&@@&. ./(%&@@&. (@@. .&@% &@@/,,,,,. .@@@
(@@. /@@, ,@@/ #@@#////* .@@# ./////&@@. /////&@@. (@@. .&@% #@@&/////. .@@@
(@@. /@@, ,@@/ #&@@@@@% .@@# ,&@@@@@%. &@@@@@&. (@@. .&@% *%@@@@@&* .@@@
MIT License
Copyright (c) 2017 epsimatt (https://github.com/epsimatt/meissner)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from meissner.client import MeissnerClient
import logging
import meissner.config
# NOTE(review): MeissnerFormatter is referenced below but not defined in
# this copy — its class definition appears to have been stripped.
# Configure the root logger: INFO level, stream handler, custom formatter.
log = logging.getLogger()
log.setLevel(logging.INFO)
log_handler = logging.StreamHandler()
# "%(asctime)s: [%(filename)s: %(levelname)s] > %(message)s (on line %(lineno)s)"
log_handler.setFormatter(
    MeissnerFormatter(
        '%(asctime)s: [%(filename)s: %(levelname)s] > %(message)s',
        '%H:%M:%S'
    )
)
log.addHandler(log_handler)
# Load the token and prefix values from the persisted configuration.
config_manager = meissner.config.ConfigManager()
token = config_manager.get('token')
prefix = config_manager.get('prefix')
# Refuse to start without a non-blank token.
if not (token and token.strip()):
    log.critical("Invalid value for token")
    raise SystemExit
# NOTE(review): bot=False logs in as a user account in discord.py —
# confirm this is intended.
meissner_client = MeissnerClient(prefix)
meissner_client.run(token, bot = False)
| 37.475 | 111 | 0.548699 | """
.@@#
(@&*%@@@/,%@@@# #&@@@@&. .@@# /&@@@@&* /&@@@@&* (@&*%@@@( *%@@@@&/ .@@&*&@.
(@@&((&@@@(/&@@, #@@#/(&@@. .@@# #@@(///(, .@@%////, (@@&(/#@@# #@@&//#@@( .@@@@@%.
(@@. /@@* ,@@/ .&@@%%%&@@* .@@# (@@&&%#* .@@@&%#/ (@@. .&@% &@@&%%%@@% .@@@
(@@. /@@, ,@@/ .&@%,,,,,, .@@# ./#%&@@&. ./(%&@@&. (@@. .&@% &@@/,,,,,. .@@@
(@@. /@@, ,@@/ #@@#////* .@@# ./////&@@. /////&@@. (@@. .&@% #@@&/////. .@@@
(@@. /@@, ,@@/ #&@@@@@% .@@# ,&@@@@@%. &@@@@@&. (@@. .&@% *%@@@@@&* .@@@
MIT License
Copyright (c) 2017 epsimatt (https://github.com/epsimatt/meissner)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from meissner.client import MeissnerClient
import logging
import meissner.config
class MeissnerFormatter(logging.Formatter):
    """Log formatter that strips each message and upper-cases its first character."""

    @staticmethod
    def capitalize_first_only(string: str) -> str:
        """Return *string* with only its first character upper-cased.

        Unlike ``str.capitalize`` the remainder of the string is left
        untouched; an empty string is returned unchanged.
        """
        if not string:
            return string
        return string[0].upper() + string[1:]

    def format(self, record):
        """Normalise the record's message, then defer to ``logging.Formatter``."""
        message = str(record.msg).strip()
        record.msg = self.capitalize_first_only(message)
        return super().format(record)
# Configure the root logger so all module loggers inherit the handler/format.
log = logging.getLogger()
log.setLevel(logging.INFO)
log_handler = logging.StreamHandler()
# Alternative format including the line number, kept for reference:
# "%(asctime)s: [%(filename)s: %(levelname)s] > %(message)s (on line %(lineno)s)"
log_handler.setFormatter(
    MeissnerFormatter(
        '%(asctime)s: [%(filename)s: %(levelname)s] > %(message)s',
        '%H:%M:%S'
    )
)
log.addHandler(log_handler)
# Read the bot token and command prefix from the on-disk configuration.
config_manager = meissner.config.ConfigManager()
token = config_manager.get('token')
prefix = config_manager.get('prefix')
# A missing/blank token is fatal: the client cannot log in without it.
if not (token and token.strip()):
    log.critical("Invalid value for token")
    raise SystemExit
# bot=False: logs in as a user account rather than a bot account
# (presumably a self-bot mode -- confirm against MeissnerClient docs).
meissner_client = MeissnerClient(prefix)
meissner_client.run(token, bot = False)
| 259 | 93 | 23 |
99161f4405050317dc8d31e92ba3ec2ce76eea68 | 2,235 | py | Python | certifiable/__init__.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | null | null | null | certifiable/__init__.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | 311 | 2017-09-14T22:34:21.000Z | 2022-03-27T18:30:17.000Z | certifiable/__init__.py | sys-git/certifiable | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# ____ ____ ____ ______ ______ ____ ______ ____ __ ____
# /\ _`\ /\ _`\ /\ _`\ /\__ _/\__ _\/\ _`\ /\ _ \/\ _`\ /\ \ /\ _`\
# \ \ \/\_\ \ \L\_\ \ \L\ \/_/\ \\/_/\ \/\ \ \L\_\ \ \L\ \ \ \L\ \ \ \ \ \ \L\_\
# \ \ \/_/\ \ _\L\ \ , / \ \ \ \ \ \ \ \ _\/\ \ __ \ \ _ <\ \ \ _\ \ _\L
# \ \ \L\ \ \ \L\ \ \ \\ \ \ \ \ \_\ \_\ \ \/ \ \ \/\ \ \ \L\ \ \ \L\ \ \ \L\ \
# \ \____/\ \____/\ \_\ \_\ \ \_\ /\_____\ \_\ \ \_\ \_\ \____/\ \____/\ \____/
# \/___/ \/___/ \/_/\/ / \/_/ \/_____/\/_/ \/_/\/_/\/___/ \/___/ \/___/
#
#
"""Top-level package for certifiable."""
from .__version__ import __author__, __email__, __keywords__, __short_description__, __version__
from .complex import certify_dict, certify_dict_schema, certify_email, certify_iterable_schema, \
certify_list, certify_set, certify_tuple
from .core import certify_bool, certify_bytes, certify_date, certify_enum, certify_enum_value, \
certify_int, certify_number, certify_object, certify_string, certify_text, certify_time, \
certify_timestamp
from .errors import CertifierError, CertifierParamError, CertifierTypeError, CertifierValueError
from .operators import (
ALL, AND, ANY, NAND, XOR, certify_all, certify_any, certify_none, certify_only_one,
)
from .utils import certify_required, disable, enable, enable_from_env, is_enabled, make_certifier
import certifiable.cli
__all__ = [
'certify_dict',
'certify_dict_schema',
'certify_iterable_schema',
'certify_list',
'certify_set',
'certify_tuple',
'certify_bool',
'certify_bytes',
'certify_date',
'certify_enum',
'certify_enum_value',
'certify_int',
'certify_number',
'certify_object',
'certify_text',
'certify_string',
'certify_timestamp',
'certify_time',
'CertifierError',
'CertifierTypeError',
'CertifierValueError',
'CertifierParamError',
'AND',
'ALL',
'ANY',
'NAND',
'XOR',
'certify_all',
'certify_any',
'certify_none',
'certify_only_one',
'make_certifier',
'certify_required',
'is_enabled',
'enable',
'disable',
'enable_from_env',
]
| 33.358209 | 97 | 0.587472 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
# ____ ____ ____ ______ ______ ____ ______ ____ __ ____
# /\ _`\ /\ _`\ /\ _`\ /\__ _/\__ _\/\ _`\ /\ _ \/\ _`\ /\ \ /\ _`\
# \ \ \/\_\ \ \L\_\ \ \L\ \/_/\ \\/_/\ \/\ \ \L\_\ \ \L\ \ \ \L\ \ \ \ \ \ \L\_\
# \ \ \/_/\ \ _\L\ \ , / \ \ \ \ \ \ \ \ _\/\ \ __ \ \ _ <\ \ \ _\ \ _\L
# \ \ \L\ \ \ \L\ \ \ \\ \ \ \ \ \_\ \_\ \ \/ \ \ \/\ \ \ \L\ \ \ \L\ \ \ \L\ \
# \ \____/\ \____/\ \_\ \_\ \ \_\ /\_____\ \_\ \ \_\ \_\ \____/\ \____/\ \____/
# \/___/ \/___/ \/_/\/ / \/_/ \/_____/\/_/ \/_/\/_/\/___/ \/___/ \/___/
#
#
"""Top-level package for certifiable."""
from .__version__ import __author__, __email__, __keywords__, __short_description__, __version__
from .complex import certify_dict, certify_dict_schema, certify_email, certify_iterable_schema, \
certify_list, certify_set, certify_tuple
from .core import certify_bool, certify_bytes, certify_date, certify_enum, certify_enum_value, \
certify_int, certify_number, certify_object, certify_string, certify_text, certify_time, \
certify_timestamp
from .errors import CertifierError, CertifierParamError, CertifierTypeError, CertifierValueError
from .operators import (
ALL, AND, ANY, NAND, XOR, certify_all, certify_any, certify_none, certify_only_one,
)
from .utils import certify_required, disable, enable, enable_from_env, is_enabled, make_certifier
import certifiable.cli
__all__ = [
'certify_dict',
'certify_dict_schema',
'certify_iterable_schema',
'certify_list',
'certify_set',
'certify_tuple',
'certify_bool',
'certify_bytes',
'certify_date',
'certify_enum',
'certify_enum_value',
'certify_int',
'certify_number',
'certify_object',
'certify_text',
'certify_string',
'certify_timestamp',
'certify_time',
'CertifierError',
'CertifierTypeError',
'CertifierValueError',
'CertifierParamError',
'AND',
'ALL',
'ANY',
'NAND',
'XOR',
'certify_all',
'certify_any',
'certify_none',
'certify_only_one',
'make_certifier',
'certify_required',
'is_enabled',
'enable',
'disable',
'enable_from_env',
]
| 0 | 0 | 0 |
07bfae00eeb43316afb302e4aa036a13d9396e3c | 1,345 | py | Python | example/compat.py | mattkatz/dynaconf | 5f79f18ce031fcef7631b66b9eb0c311c6424d01 | [
"MIT"
] | null | null | null | example/compat.py | mattkatz/dynaconf | 5f79f18ce031fcef7631b66b9eb0c311c6424d01 | [
"MIT"
] | null | null | null | example/compat.py | mattkatz/dynaconf | 5f79f18ce031fcef7631b66b9eb0c311c6424d01 | [
"MIT"
] | null | null | null | from dynaconf import LazySettings
settings = LazySettings(
DYNACONF_NAMESPACE='FOO',
DYNACONF_SETTINGS_MODULE='foo.py',
PROJECT_ROOT='/tmp',
DYNACONF_SILENT_ERRORS=True,
DYNACONF_ALWAYS_FRESH_VARS=['BAR']
)
assert settings.ENV_FOR_DYNACONF == 'FOO'
assert settings.SETTINGS_MODULE_FOR_DYNACONF == 'foo.py'
assert settings.PROJECT_ROOT_FOR_DYNACONF == '/tmp'
assert settings.SILENT_ERRORS_FOR_DYNACONF is True
assert settings.FRESH_VARS_FOR_DYNACONF == ['BAR']
print(settings.ENV_FOR_DYNACONF)
print(settings.SETTINGS_MODULE_FOR_DYNACONF)
print(settings.PROJECT_ROOT_FOR_DYNACONF)
print(settings.SILENT_ERRORS_FOR_DYNACONF)
print(settings.FRESH_VARS_FOR_DYNACONF)
settings = LazySettings(
NAMESPACE_FOR_DYNACONF='FOO',
DYNACONF_SETTINGS_MODULE='foo.py',
PROJECT_ROOT='/tmp',
DYNACONF_SILENT_ERRORS=True,
DYNACONF_ALWAYS_FRESH_VARS=['BAR']
)
assert settings.ENV_FOR_DYNACONF == 'FOO'
assert settings.SETTINGS_MODULE_FOR_DYNACONF == 'foo.py'
assert settings.PROJECT_ROOT_FOR_DYNACONF == '/tmp'
assert settings.SILENT_ERRORS_FOR_DYNACONF is True
assert settings.FRESH_VARS_FOR_DYNACONF == ['BAR']
print(settings.ENV_FOR_DYNACONF)
print(settings.SETTINGS_MODULE_FOR_DYNACONF)
print(settings.PROJECT_ROOT_FOR_DYNACONF)
print(settings.SILENT_ERRORS_FOR_DYNACONF)
print(settings.FRESH_VARS_FOR_DYNACONF)
| 29.888889 | 56 | 0.81487 | from dynaconf import LazySettings
settings = LazySettings(
DYNACONF_NAMESPACE='FOO',
DYNACONF_SETTINGS_MODULE='foo.py',
PROJECT_ROOT='/tmp',
DYNACONF_SILENT_ERRORS=True,
DYNACONF_ALWAYS_FRESH_VARS=['BAR']
)
assert settings.ENV_FOR_DYNACONF == 'FOO'
assert settings.SETTINGS_MODULE_FOR_DYNACONF == 'foo.py'
assert settings.PROJECT_ROOT_FOR_DYNACONF == '/tmp'
assert settings.SILENT_ERRORS_FOR_DYNACONF is True
assert settings.FRESH_VARS_FOR_DYNACONF == ['BAR']
print(settings.ENV_FOR_DYNACONF)
print(settings.SETTINGS_MODULE_FOR_DYNACONF)
print(settings.PROJECT_ROOT_FOR_DYNACONF)
print(settings.SILENT_ERRORS_FOR_DYNACONF)
print(settings.FRESH_VARS_FOR_DYNACONF)
settings = LazySettings(
NAMESPACE_FOR_DYNACONF='FOO',
DYNACONF_SETTINGS_MODULE='foo.py',
PROJECT_ROOT='/tmp',
DYNACONF_SILENT_ERRORS=True,
DYNACONF_ALWAYS_FRESH_VARS=['BAR']
)
assert settings.ENV_FOR_DYNACONF == 'FOO'
assert settings.SETTINGS_MODULE_FOR_DYNACONF == 'foo.py'
assert settings.PROJECT_ROOT_FOR_DYNACONF == '/tmp'
assert settings.SILENT_ERRORS_FOR_DYNACONF is True
assert settings.FRESH_VARS_FOR_DYNACONF == ['BAR']
print(settings.ENV_FOR_DYNACONF)
print(settings.SETTINGS_MODULE_FOR_DYNACONF)
print(settings.PROJECT_ROOT_FOR_DYNACONF)
print(settings.SILENT_ERRORS_FOR_DYNACONF)
print(settings.FRESH_VARS_FOR_DYNACONF)
| 0 | 0 | 0 |
8688294bfc8d813dfe57824b275879de0265537b | 4,475 | py | Python | scripts/make_iso_input_file.py | marwahaha/MIST_codes | 116fc4f85f985c869e225eaae1e023251629dee8 | [
"MIT"
] | 17 | 2016-05-19T01:49:11.000Z | 2022-03-31T14:56:21.000Z | scripts/make_iso_input_file.py | arjanbik/MIST_codes | 116fc4f85f985c869e225eaae1e023251629dee8 | [
"MIT"
] | 3 | 2016-05-25T15:44:38.000Z | 2021-08-11T13:30:56.000Z | scripts/make_iso_input_file.py | arjanbik/MIST_codes | 116fc4f85f985c869e225eaae1e023251629dee8 | [
"MIT"
] | 25 | 2016-11-18T19:30:00.000Z | 2021-09-09T15:14:22.000Z | """
Generates the input file to the MIST isochrone code.
Use in either the eep or iso mode.
Args:
runname: the name of the grid
mode: determine if the the file is to make isochrones or eeps
Returns:
None
Example:
>>> make_iso_input_file('MIST_v0.1', 'eeps')
"""
import glob
import os
import numpy as np
| 40.681818 | 137 | 0.607151 | """
Generates the input file to the MIST isochrone code.
Use in either the eep or iso mode.
Args:
runname: the name of the grid
mode: determine if the the file is to make isochrones or eeps
Returns:
None
Example:
>>> make_iso_input_file('MIST_v0.1', 'eeps')
"""
import glob
import os
import numpy as np
def make_iso_input_file(runname, mode, basic, incomplete=None):
    """Generate the input file consumed by the MIST isochrone code.

    Args:
        runname: grid name of the form ``MIST_vXX/feh_XXX_afe_XXX_vvcritXXX``.
        mode: one of 'eeps', 'iso', or 'interp_eeps'; selects how the track
            list is assembled.
        basic: if true, use the basic history-columns list and ``*_basic.iso``
            output name; otherwise the full variants.
        incomplete: in 'interp_eeps' mode, eep files to exclude because the
            underlying track failed (default: none).

    Returns:
        (min_good_mass, max_good_mass) in 'interp_eeps' mode, otherwise None.
    """
    # BUGFIX: 'incomplete' used to default to a mutable list ([]) shared
    # across calls; normalize a None default instead.
    if incomplete is None:
        incomplete = []

    # Convert MIST_vXX/feh_XXX_afe_XXX to MIST_vXX_feh_XXX_afe_XXX
    runname_format = '_'.join(runname.split('/'))

    # Name of the input file
    inputfilename = "input." + runname_format

    # Check if this input file exists already, and if so, remove it.
    if os.path.isfile(os.path.join(os.environ['ISO_DIR'], inputfilename)):
        print("REMOVE OLD ISO INPUT FILE....." + inputfilename)
        os.system("rm " + os.path.join(os.environ['ISO_DIR'], inputfilename))

    # Define some paths
    tracks_dir = os.path.join(os.path.join(os.environ['MIST_GRID_DIR'], runname), "tracks")
    eeps_dir = os.path.join(os.path.join(os.environ['MIST_GRID_DIR'], runname), "eeps")
    iso_dir = os.path.join(os.path.join(os.environ['MIST_GRID_DIR'], runname), "isochrones")

    # Get the list of tracks
    if mode == 'eeps':
        tracks_list = sorted(glob.glob(tracks_dir + "/*.track"))

    # Generate a list of final track names (i.e., as if low masses have been blended)
    if mode == 'iso':
        initial_tracks_list = glob.glob(eeps_dir + "/*M.track.eep")
        tracks_list = sorted([x.split('.eep')[0] for x in initial_tracks_list])

    # Generate a list of track names that are complete only
    if mode == 'interp_eeps':
        initial_tracks_list = glob.glob(eeps_dir + "/*M.track.eep")
        for failed_eep in incomplete:
            failed_eep_ind = np.where(np.array(initial_tracks_list) == failed_eep)[0][0]
            initial_tracks_list.pop(failed_eep_ind)
        tracks_list = sorted([x.split('.eep')[0] for x in initial_tracks_list])
        # Filenames encode the mass as e.g. "00100M" -> 1.00 Msun.
        max_good_mass = float(tracks_list[-1].split('/')[-1].split('M')[0]) / 100.0
        min_good_mass = float(tracks_list[0].split('/')[-1].split('M')[0]) / 100.0

    # Header and footer in the file
    if basic == True:
        mhc_file = "my_history_columns_basic.list"
        iso_file = runname_format + "_basic.iso\n"
    else:
        mhc_file = "my_history_columns_full.list"
        iso_file = runname_format + "_full.iso\n"

    # Parse [Fe/H], [a/Fe] and v/vcrit out of the directory name,
    # e.g. feh_m0.50_afe_p0.0_vvcrit0.4 ('m'/'p' encode the sign).
    dirname = runname.split('/')[-1]
    dirname_split = dirname.split('_')
    feh = dirname_split[1]
    afe = dirname_split[3]
    vvcrit = dirname_split[4]

    fehval = float(feh[1:]) * 1.0
    if 'm' in feh:
        fehval *= -1.0
    afeval = float(afe[1:]) * 1.0
    if 'm' in afe:
        afeval *= -1.0

    # input_XYZ holds X, Y, Z on three lines; X is read (to advance the file)
    # but only Y and Z are used below.
    with open(os.path.join(os.path.join(os.environ['MIST_GRID_DIR'], runname), 'input_XYZ')) as f:
        Xval = float(f.readline())
        Yval = float(f.readline())
        Zval = float(f.readline())
    fmt_abun_info = "{:>10.4f}".format(Yval) + "{:>12.5e}".format(Zval) + "{:>7.2f}".format(fehval) + \
        "{:>12.2f}".format(afeval) + "{:>9}".format(vvcrit.split('vvcrit')[-1]) + "\n"

    header = ["#version string, max 8 characters\n", "1.0\n", "#initial Y, initial Z, [Fe/H], [alpha/Fe], v/vcrit\n",
              fmt_abun_info, "#data directories: 1) history files, 2) eeps, 3) isochrones\n", tracks_dir + "\n", eeps_dir + "\n", iso_dir + "\n",
              "# read history_columns\n", os.path.join(os.environ['ISO_DIR'], mhc_file) + "\n", "# specify tracks\n", str(len(tracks_list)) + "\n"]
    footer = ["#specify isochrones\n", iso_file, "min_max\n", "log10\n", "107\n", "5.0\n", "10.3\n", "single\n"]

    # Write the file
    print("**************************************************************************")
    print("WRITE NEW ISO INPUT FILE..... " + os.environ['ISO_DIR'] + "/" + inputfilename)
    print("**************************************************************************")
    with open(inputfilename, "w") as newinputfile:
        for headerline in header:
            newinputfile.write(headerline)
        for full_trackname in tracks_list:
            trackname = full_trackname.split("/")[-1]
            newinputfile.write(trackname + "\n")
        for footerline in footer:
            newinputfile.write(footerline)
    os.system("mv " + inputfilename + " " + os.environ['ISO_DIR'])

    # Used to check which masses can/can't be interpolated in mesa2fsps.py
    if mode == 'interp_eeps':
        return min_good_mass, max_good_mass
| 4,115 | 0 | 23 |
df05cf12de434b7a4cb11b97a3c9f0048e5c8171 | 2,752 | py | Python | iotawallet/gui/send.py | scottbelden/iotawallet | 4b495b4a94ea7ad2ca593515e9dda10ffe8bb476 | [
"MIT"
] | null | null | null | iotawallet/gui/send.py | scottbelden/iotawallet | 4b495b4a94ea7ad2ca593515e9dda10ffe8bb476 | [
"MIT"
] | null | null | null | iotawallet/gui/send.py | scottbelden/iotawallet | 4b495b4a94ea7ad2ca593515e9dda10ffe8bb476 | [
"MIT"
] | null | null | null | import wx
from ..wallet import Wallet
SPACER = (0, 0)
| 34.835443 | 80 | 0.590843 | import wx
from ..wallet import Wallet
SPACER = (0, 0)
class SendTab(wx.Panel):  # type: ignore
    """Notebook tab with a form for sending iota to a receiver address."""

    def __init__(self,
                 parent: wx.Window,
                 wallet: Wallet) -> None:
        """Build the tab UI and keep a handle on the wallet used for sends."""
        super().__init__(parent)
        self.wallet = wallet
        self.create_send_tab()

    def create_send_tab(self) -> None:
        """Lay out the address/amount inputs and the Send button."""
        receive_text = wx.StaticText(self, label='Receiver Address: ')
        self.receive_address_input = wx.TextCtrl(self)
        amount_text = wx.StaticText(self, label='Iota: ')
        self.amount_input = wx.TextCtrl(self)
        send_button = wx.Button(self, label='Send')
        # Inner column: label/input pairs stacked vertically.
        input_vbox = wx.BoxSizer(wx.VERTICAL)
        input_vbox.Add(receive_text, flag=wx.CENTER)
        input_vbox.Add(self.receive_address_input, flag=wx.EXPAND)
        input_vbox.Add(amount_text, flag=wx.CENTER)
        input_vbox.Add(self.amount_input, flag=wx.CENTER)
        # SPACER entries pad the layout so the form floats centered.
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(SPACER, proportion=1)
        vbox.Add(input_vbox, proportion=1, flag=(wx.EXPAND | wx.ALL), border=10)
        vbox.Add(send_button, proportion=1, flag=wx.EXPAND)
        vbox.Add(SPACER, proportion=1)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add(SPACER, proportion=1)
        hbox.Add(vbox, proportion=1, flag=(wx.CENTER | wx.ALL))
        hbox.Add(SPACER, proportion=1)
        self.SetSizer(hbox)
        self.Bind(wx.EVT_BUTTON, self.send_button_clicked, send_button)

    def send_button_clicked(self,
                            event: wx.CommandEvent) -> None:
        """Validate the entered amount against the balance, then confirm or refuse."""
        address = self.receive_address_input.GetLineText(0)
        # NOTE(review): int() raises ValueError on non-numeric input -- presumably
        # validated elsewhere; confirm before relying on this handler.
        iota_amount = int(self.amount_input.GetLineText(0))
        if iota_amount > self.wallet.balance:
            self.show_insufficient_balance_dialog(iota_amount)
        else:
            self.show_confirm_send_dialog(address, iota_amount)

    def show_confirm_send_dialog(self,
                                 address: str,
                                 iota_amount: int) -> None:
        """Ask the user to confirm; only send on an explicit Yes."""
        message = (f'You are about to send\n\n' +
                   f'{iota_amount} iota\n\n' +
                   f'to {address}')
        dialog = wx.MessageDialog(
            self,
            message=message,
            caption='Confirm Send',
            style=(wx.YES_NO | wx.NO_DEFAULT),
        )
        response = dialog.ShowModal()
        if response == wx.ID_YES:
            self.wallet.send(address, iota_amount)

    def show_insufficient_balance_dialog(self,
                                         iota_amount: int) -> None:
        """Inform the user the requested amount exceeds the wallet balance."""
        dialog = wx.MessageDialog(
            self,
            message=f'You do not have enough funds to send {iota_amount} iota',
            caption='Insufficient Balance',
        )
        dialog.ShowModal()
| 2,519 | 19 | 157 |
7a52120f4b6f320604b136fce855e1da180aabaa | 1,997 | py | Python | testsuite/tests/apicast/policy/test_default_credentials_policy.py | dlaso99/3scale-tests | b31a3b3596af6d632b393e383c0417ea56bd95ca | [
"Apache-2.0"
] | 5 | 2021-11-04T14:09:24.000Z | 2021-12-23T13:48:36.000Z | testsuite/tests/apicast/policy/test_default_credentials_policy.py | dlaso99/3scale-tests | b31a3b3596af6d632b393e383c0417ea56bd95ca | [
"Apache-2.0"
] | 41 | 2021-11-03T14:27:21.000Z | 2022-03-29T14:46:16.000Z | testsuite/tests/apicast/policy/test_default_credentials_policy.py | dlaso99/3scale-tests | b31a3b3596af6d632b393e383c0417ea56bd95ca | [
"Apache-2.0"
] | 12 | 2021-11-03T17:28:31.000Z | 2021-11-30T12:28:25.000Z | "Rewrite spec/functional_specs/policies/default_credentials_spec.rb"
import pytest
from threescale_api.resources import Service
from testsuite import rawobj
from testsuite.utils import blame
@pytest.fixture(scope="module")
def application(application, service):
    "Application with default_credentials policy configured to use user_key"
    # Insert at position 0 so the policy runs before the built-in apicast
    # policy and can supply this application's user_key for requests that
    # arrive without credentials.
    service.proxy.list().policies.insert(0, rawobj.PolicyConfig("default_credentials", {
        "auth_type": "user_key",
        "user_key": application["user_key"]}))
    return application
@pytest.fixture(scope="module")
def service_app_id_key(custom_service, service_proxy_settings, request):
    "Another service using app_id/key auth configuration"
    # blame() makes the service name unique per test run to avoid collisions.
    settings = {"name": blame(request, "CustSvc"), "backend_version": Service.AUTH_APP_ID_KEY}
    return custom_service(settings, service_proxy_settings)
@pytest.fixture(scope="module")
def application_app_id_key(custom_application, custom_app_plan, service_app_id_key, request):
    "Application with default_credentials policy configured to use app_id/key"
    plan = custom_app_plan(rawobj.ApplicationPlan(blame(request, "CustAPlan")), service_app_id_key)
    app = custom_application(rawobj.Application(blame(request, "CustApp"), plan))
    # As with the user_key fixture: prepend the policy so the gateway injects
    # this application's app_id + first app_key for unauthenticated requests.
    service_app_id_key.proxy.list().policies.insert(0, rawobj.PolicyConfig("default_credentials", {
        "auth_type": "app_id_and_app_key",
        "app_id": app["application_id"],
        "app_key": app.keys.list()["keys"][0]["key"]["value"]}))
    return app
@pytest.fixture(params=["application", "application_app_id_key"], ids=["user_key", "app_id_key"])
def a_client(request, api_client):
    "Helper to provide other fixtures as parameters"
    # Parametrized over both auth modes; resolves the named fixture lazily.
    app = request.getfixturevalue(request.param)
    client = api_client(app)
    # now disable builtin auth credentials
    # (requests must go out bare so default_credentials has to fill them in)
    client.auth = None
    return client
def test_default_credentials(a_client):
"test default_credentials behavior"
assert a_client.get("/get").status_code == 200
| 35.035088 | 99 | 0.751627 | "Rewrite spec/functional_specs/policies/default_credentials_spec.rb"
import pytest
from threescale_api.resources import Service
from testsuite import rawobj
from testsuite.utils import blame
@pytest.fixture(scope="module")
def application(application, service):
"Application with default_credentials policy configured to use user_key"
service.proxy.list().policies.insert(0, rawobj.PolicyConfig("default_credentials", {
"auth_type": "user_key",
"user_key": application["user_key"]}))
return application
@pytest.fixture(scope="module")
def service_app_id_key(custom_service, service_proxy_settings, request):
"Another service using app_id/key auth configuration"
settings = {"name": blame(request, "CustSvc"), "backend_version": Service.AUTH_APP_ID_KEY}
return custom_service(settings, service_proxy_settings)
@pytest.fixture(scope="module")
def application_app_id_key(custom_application, custom_app_plan, service_app_id_key, request):
"Application with default_credentials policy configured to use app_id/key"
plan = custom_app_plan(rawobj.ApplicationPlan(blame(request, "CustAPlan")), service_app_id_key)
app = custom_application(rawobj.Application(blame(request, "CustApp"), plan))
service_app_id_key.proxy.list().policies.insert(0, rawobj.PolicyConfig("default_credentials", {
"auth_type": "app_id_and_app_key",
"app_id": app["application_id"],
"app_key": app.keys.list()["keys"][0]["key"]["value"]}))
return app
@pytest.fixture(params=["application", "application_app_id_key"], ids=["user_key", "app_id_key"])
def a_client(request, api_client):
"Helper to provide other fixtures as parameters"
app = request.getfixturevalue(request.param)
client = api_client(app)
# now disable builtin auth credentials
client.auth = None
return client
def test_default_credentials(a_client):
"test default_credentials behavior"
assert a_client.get("/get").status_code == 200
| 0 | 0 | 0 |
b242ac360de6a7a7086d7b7218d722b4c0236654 | 1,798 | py | Python | packages/tractography/mristudio_to_swc.py | justi/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 12 | 2015-03-11T22:07:17.000Z | 2016-01-29T21:24:29.000Z | packages/tractography/mristudio_to_swc.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 213 | 2015-01-30T16:02:57.000Z | 2016-01-29T21:45:02.000Z | packages/tractography/mristudio_to_swc.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
] | 5 | 2015-02-04T13:58:12.000Z | 2016-01-29T21:24:46.000Z | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# mristudio_to_swc.py
# Based on camino_to_mristudio.py (By Disa Mhembere)
# Created by Greg Kiar on 2015-06-29.
# Email: gkiar@jhu.edu
# Copyright (c) 2015. All rights reserved.
from struct import pack
class SWC(object):
    """
    This class represents an SWC format file.
    It writes out data in the format given a path which is
    defined by the SWC skeleton
    """

    def __init__(self, filename):
        """
        @param filename: the output fn
        """
        # BUGFIX: the file used to be opened in binary mode ("wb") while all
        # writes pass str objects, which raises TypeError on Python 3.
        # Text mode behaves identically for this ASCII output on Python 2.
        self.fhandle = open(filename, "w")
        self.point = 0  # running SWC node id (1-based once written)

    def write_path(self, path):
        """
        Write the path to disk
        @param path we want to write
        """
        for i in range(0, len(path)):
            self.point += 1
            # SWC row: id, type(7), x, y, z, radius(1), parent id.
            node = str(self.point) + " 7 " + str(path[i][0]) + " " + str(path[i][1]) + " " + str(path[i][2]) + " 1 "
            if i == 0:
                # First node of a path has no parent.
                node += "-1\n"
            else:
                node += str(self.point - 1) + "\n"
            self.fhandle.write(node)
| 29 | 107 | 0.67297 | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# mristudio_to_swc.py
# Based on camino_to_mristudio.py (By Disa Mhembere)
# Created by Greg Kiar on 2015-06-29.
# Email: gkiar@jhu.edu
# Copyright (c) 2015. All rights reserved.
from struct import pack
class SWC(object):
    """
    This class represents an SWC format file.
    It writes out data in the format given a path which is
    defined by the SWC skeleton
    """

    def __init__(self, filename):
        """
        @param filename: the output fn
        """
        # BUGFIX: the file used to be opened in binary mode ("wb") while all
        # writes pass str objects, which raises TypeError on Python 3.
        # Text mode behaves identically for this ASCII output on Python 2.
        self.fhandle = open(filename, "w")
        self.point = 0  # running SWC node id (1-based once written)

    def write_header(self, head, orig):
        """
        Write the SWC comment header.

        @param head: sequence whose first two items are the fiber-file tag
            and the number of fibers
        @param orig: name/path of the original fiber file
        """
        fhead = "# Original Fiber File: "+orig+"\n# FiberFileTag: " + head[0] + "\n# Number of Fibers: " + \
            str(head[1]) + "\n# Pipeline version: m2gv1.1.1\n"
        self.fhandle.write(fhead)

    def write_path(self, path):
        """
        Write the path to disk
        @param path we want to write
        """
        for i in range(0, len(path)):
            self.point += 1
            # SWC row: id, type(7), x, y, z, radius(1), parent id.
            node = str(self.point) + " 7 " + str(path[i][0]) + " " + str(path[i][1]) + " " + str(path[i][2]) + " 1 "
            if i == 0:
                # First node of a path has no parent.
                node += "-1\n"
            else:
                node += str(self.point - 1) + "\n"
            self.fhandle.write(node)

    def __del__(self,):
        # Guard against __init__ having failed before fhandle was assigned.
        fhandle = getattr(self, "fhandle", None)
        if fhandle is not None:
            fhandle.close()
| 222 | 0 | 48 |
88172972b739ccfbabb46955126f913724d499b3 | 251 | py | Python | lab/lab00.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | lab/lab00.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | lab/lab00.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | import sys
import numpy as np
sys.path.append("../")
from utils import *
print(find_closest([1, 24, 12, 13, 14], 10)) | 13.944444 | 44 | 0.649402 | import sys
import numpy as np
sys.path.append("../")
from utils import *
def create_cartesian_product(vec1, vec2):
    """Return the cartesian product of two 1-D vectors as an (n*m, 2) array.

    Each row is one (a, b) pair with ``a`` drawn from ``vec1`` and ``b``
    from ``vec2``.  Previously this function was an unimplemented stub
    (``pass``) that returned None.
    """
    vec1 = np.asarray(vec1)
    vec2 = np.asarray(vec2)
    # tile cycles vec1 for every element of vec2; repeat holds each element
    # of vec2 for a full cycle of vec1 -- together they enumerate all pairs.
    return np.transpose([np.tile(vec1, len(vec2)), np.repeat(vec2, len(vec1))])
def find_closest(a, n):
    """Return the smallest absolute difference between ``n`` and any element of ``a``.

    Note: this returns the *distance* to the closest element, not the
    element itself.
    """
    deltas = np.abs(np.asarray(a) - n)
    return deltas.min()
print(find_closest([1, 24, 12, 13, 14], 10)) | 82 | 0 | 46 |
1044eaf84965eeda15c2a91692f1484268ad0443 | 1,078 | py | Python | src/ggrc_basic_permissions/migrations/versions/20150317135354_27432edbe6d4_add_unique_constraint_for_user_roles.py | Smotko/ggrc-core | b3abb58b24e7559960d71a94ba79c75539e7fe29 | [
"Apache-2.0"
] | null | null | null | src/ggrc_basic_permissions/migrations/versions/20150317135354_27432edbe6d4_add_unique_constraint_for_user_roles.py | Smotko/ggrc-core | b3abb58b24e7559960d71a94ba79c75539e7fe29 | [
"Apache-2.0"
] | 12 | 2015-01-08T14:50:19.000Z | 2017-11-29T19:37:53.000Z | src/ggrc_basic_permissions/migrations/versions/20150317135354_27432edbe6d4_add_unique_constraint_for_user_roles.py | mikecb/ggrc-core | 1cda560cb0920021416e07740c6cca1acba56268 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2015-01-08T13:25:09.000Z | 2015-01-08T13:25:09.000Z | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""add unique constraint for user roles
Revision ID: 27432edbe6d4
Revises: 3bb32fb65d47
Create Date: 2015-03-17 13:53:54.238347
"""
# revision identifiers, used by Alembic.
revision = '27432edbe6d4'
down_revision = '3bb32fb65d47'
from alembic import op
import sqlalchemy as sa
| 29.135135 | 218 | 0.746753 | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""add unique constraint for user roles
Revision ID: 27432edbe6d4
Revises: 3bb32fb65d47
Create Date: 2015-03-17 13:53:54.238347
"""
# revision identifiers, used by Alembic.
revision = '27432edbe6d4'
down_revision = '3bb32fb65d47'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Dedupe user_roles, then enforce uniqueness of (role, context, person)."""
    conn = op.get_bind()
    # Each execution deletes the highest-id duplicate per
    # (role_id, context_id, person_id) group; the inner "tmp" subquery wrapper
    # is presumably needed because MySQL cannot delete from a table it is
    # simultaneously selecting from -- confirm against the target DB.
    query = 'delete from user_roles where id in (select id from (select count(id) as count, max(id) as id, person_id, role_id, context_id from user_roles group by role_id, context_id, person_id having count > 1) as tmp)'
    res = conn.execute(query)
    # Repeat until no rows are deleted: a group with N duplicates needs N-1
    # passes since each pass removes only one row per group.
    while res.rowcount > 0:
        res = conn.execute(query)
    op.create_unique_constraint('unique_role_context_person', 'user_roles', ['role_id', 'context_id', 'person_id'])
def downgrade():
    """Drop the uniqueness constraint added by upgrade() (deleted rows are not restored)."""
    op.drop_constraint('unique_role_context_person', 'user_roles', 'unique')
| 508 | 0 | 46 |
bedbe0574b32d7f8277a0a2417b43d49adbec00b | 4,947 | py | Python | tests/test.py | emielv-stu/python-postcodes-io | 012412825afbb462f5e1e144bf93200794558576 | [
"MIT"
] | 16 | 2019-01-07T21:17:46.000Z | 2022-03-25T18:37:35.000Z | tests/test.py | thewamz/python-postcodes-io | a4e91c88b7dbf6ed63eceba2746227d1873c7c54 | [
"MIT"
] | 1 | 2019-11-12T08:45:59.000Z | 2019-11-12T08:45:59.000Z | tests/test.py | thewamz/python-postcodes-io | a4e91c88b7dbf6ed63eceba2746227d1873c7c54 | [
"MIT"
] | 4 | 2019-11-20T21:58:43.000Z | 2021-05-17T12:40:02.000Z | import unittest
import postcodes_io_api
import random
| 52.62766 | 120 | 0.681423 | import unittest
import postcodes_io_api
import random
class PostcodeIOTest(unittest.TestCase):
def setUp(self):
self.api_client = postcodes_io_api.Api()
self.latitude = 51.466324
self.longitude = -0.173606
self.VALID_OUTCODES = ['KT19', 'HA9', 'SW19', 'IV2', 'PH16']
self.VALID_POSTCODES = ['L40TH', 'SW151JF', 'SW195AG', 'KT185LQ', 'NW87JY', 'RG11LZ']
self.INVALID_POSTCODES = ['HA9997QP', 'SW19OSZ', 'HA899NX', 'RG101LZ', 'SW12EF', 'L500QD']
self.TERMINATED_POSTCODES = ['SW112ZW', 'HA89NX', 'IV23EH', 'BA12QT', 'OX13LA', 'SE109DB']
self.reverse_geocode_data = {
"geolocations": [{
"longitude": 0.629834723775309,
"latitude": 51.7923246977375
}, {
"longitude": -2.49690382054704,
"latitude": 53.5351312861402,
"radius": 1000,
"limit": 5
}]
}
def test_is_postcode_valid(self):
self.assertTrue(self.api_client.is_postcode_valid(random.choice(self.VALID_POSTCODES)))
self.assertFalse(self.api_client.is_postcode_valid(random.choice(self.INVALID_POSTCODES)))
self.assertFalse(self.api_client.is_postcode_valid(random.choice(self.TERMINATED_POSTCODES)))
def test_is_postcode_terminated(self):
self.assertFalse(self.api_client.is_postcode_terminated(random.choice(self.VALID_POSTCODES)))
self.assertFalse(self.api_client.is_postcode_terminated(random.choice(self.INVALID_POSTCODES)))
self.assertTrue(self.api_client.is_postcode_terminated(random.choice(self.TERMINATED_POSTCODES)))
def test_get_postcode(self):
valid_data = self.api_client.get_postcode(random.choice(self.VALID_POSTCODES))
self.assertEqual(200, valid_data.get('status', None), "Checking for valid postcode")
invalid_data = self.api_client.get_postcode(random.choice(self.INVALID_POSTCODES))
self.assertEqual(404, invalid_data.get('status', None), "Checking for invalid postcode")
terminated_data = self.api_client.get_postcode(random.choice(self.TERMINATED_POSTCODES))
self.assertEqual(404, terminated_data.get('status', None), "Checking for terminated postcode")
def test_get_nearest_postcodes_for_postcode(self):
data = self.api_client.get_nearest_postcodes_for_postcode(postcode=random.choice(self.VALID_POSTCODES), limit=2)
self.assertEqual(200, data.get('status', None), "Checking for valid postcode")
def test_get_nearest_postcodes_for_coordinates(self):
data = self.api_client.get_nearest_postcodes_for_coordinates(latitude=self.latitude, longitude=self.longitude,
limit=3)
self.assertEqual(200, data.get('status', None))
def test_get_bulk_postcodes(self):
bulk_postcodes = [random.choice(self.VALID_POSTCODES), random.choice(self.VALID_POSTCODES)]
data = self.api_client.get_bulk_postcodes(bulk_postcodes)
self.assertEqual(200, data.get('status', None), "Checking for valid postcode")
self.assertEqual(2, len(data.get('result', [])), "Checking for result count")
def test_get_bulk_reverse_geocode(self):
data = self.api_client.get_bulk_reverse_geocode(self.reverse_geocode_data)
self.assertEqual(200, data.get('status', None))
self.assertEqual(2, len(data.get('result', [])), "Checking for result count")
def test_get_random_postcode(self):
data = self.api_client.get_random_postcode()
self.assertEqual(200, data.get('status', None))
def test_get_autocomplete_postcode(self):
data = self.api_client.get_autocomplete_postcode(postcode=random.choice(self.VALID_OUTCODES), limit=3)
self.assertEqual(200, data.get('status', None))
self.assertEqual(3, len(data.get('result', [])), "Checking for result count")
def test_get_outcode(self):
data = self.api_client.get_outcode(random.choice(self.VALID_OUTCODES))
self.assertEqual(200, data.get('status', None), "Checking for valid outcode")
def test_get_nearest_outcodes_for_outcode(self):
data = self.api_client.get_nearest_outcodes_for_outcode(outcode=random.choice(self.VALID_OUTCODES))
self.assertEqual(200, data.get('status', None), "Checking for valid outcode")
def test_get_nearest_outcodes_for_coordinates(self):
data = self.api_client.get_nearest_outcodes_for_coordinates(latitude=self.latitude, longitude=self.longitude,
limit=3)
self.assertEqual(200, data.get('status', None))
self.assertEqual(3, len(data.get('result', [])), "Checking for result count")
    def tearDown(self):
        """Close the api_client session to avoid
        'ResourceWarning: unclosed <socket.socket'.
        """
        # Reaches into the private _session attribute; presumably the client
        # exposes no public close() method -- TODO confirm against the client API.
        self.api_client._session.close()
| 4,318 | 550 | 23 |
2b7bc498ae4838f9d7d50e516a343f3a09283220 | 873 | py | Python | zedenv/__init__.py | benyanke/zedenv | ca91d05fe74357b786b1a013f18351658e2a4a51 | [
"BSD-3-Clause"
] | null | null | null | zedenv/__init__.py | benyanke/zedenv | ca91d05fe74357b786b1a013f18351658e2a4a51 | [
"BSD-3-Clause"
] | null | null | null | zedenv/__init__.py | benyanke/zedenv | ca91d05fe74357b786b1a013f18351658e2a4a51 | [
"BSD-3-Clause"
] | null | null | null | # __init__.py
import subprocess
__version__ = '0.2.0'
def vcs_release(version: str):
    """Return *version* with the current git commit appended.

    If run inside a git repository, append ``+git.<short-hash>`` to the
    version.  If the working directory is not a repository, or the git
    binary is not installed, return the version unchanged.
    """
    try:
        # .strip() fixes a bug: check_output returns the hash with a trailing
        # newline, which previously ended up embedded in the version string.
        git_hash = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'],
            universal_newlines=True, stderr=subprocess.PIPE).strip()
    except (subprocess.CalledProcessError, OSError):
        # CalledProcessError: not a git repo; OSError/FileNotFoundError:
        # git itself is unavailable.  Either way, fall back gracefully.
        return version
    return version + '+git.' + git_hash
def get_release_version(version: str):
    """Return the release version string.

    If HEAD carries a tag starting with ``v``, the plain tagged version is
    used; otherwise the git hash is appended via :func:`vcs_release`.
    """
    has_tags = None
    try:
        has_tags = subprocess.check_output(
            ['git', 'tag', '-l', '--points-at', 'HEAD'],
            universal_newlines=True, stderr=subprocess.PIPE)
    except (subprocess.CalledProcessError, OSError):
        # Not a repository, or git is not installed: fall through to
        # vcs_release, which handles the same failure modes gracefully.
        pass
    return version if has_tags and has_tags.startswith("v") else vcs_release(version)
| 24.942857 | 85 | 0.627721 | # __init__.py
import subprocess
__version__ = '0.2.0'
def vcs_release(version: str):
    """
    If in git repo, get git version
    """
    # NOTE(review): check_output returns the hash with a trailing newline, so
    # the returned version string ends in '\n'; and a missing git binary
    # raises an uncaught FileNotFoundError instead of falling back -- confirm
    # whether this is intended.
    try:
        git_hash = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD'],
            universal_newlines=True, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        # Not inside a git repository: return the bare version.
        return version
    return version + '+git.' + git_hash
def get_release_version(version: str):
    """
    If on a tagged release, use tagged version, else append git hash
    """
    # NOTE(review): a missing git binary raises an uncaught FileNotFoundError
    # here (only CalledProcessError is handled) -- confirm intended.
    has_tags = None
    try:
        has_tags = subprocess.check_output(
            ['git', 'tag', '-l', '--points-at', 'HEAD'],
            universal_newlines=True, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError:
        pass
    # Only the first tag in the output is inspected (startswith on the raw,
    # possibly multi-line, output).
    return version if has_tags and has_tags.startswith("v") else vcs_release(version)
| 0 | 0 | 0 |
d821c28e1486dd2deb72a873e7ce437b4a192669 | 3,533 | py | Python | flavio/physics/bdecays/bvll/amplitudes.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 61 | 2016-03-09T16:19:39.000Z | 2022-03-30T00:55:51.000Z | flavio/physics/bdecays/bvll/amplitudes.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 167 | 2016-03-15T15:25:57.000Z | 2022-02-27T22:19:22.000Z | flavio/physics/bdecays/bvll/amplitudes.py | Felicia56/flavio | ea735bd8febbb961d249eddf338a4960c1fbee69 | [
"MIT"
] | 57 | 2016-03-15T14:24:23.000Z | 2022-01-14T01:00:03.000Z | """Functions for constructing the helicity amplitudes"""
from flavio.physics.bdecays.common import lambda_K, beta_l
from math import sqrt, pi
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict, get_wceff
from flavio.physics.running import running
from flavio.config import config
from flavio.physics.bdecays.common import lambda_K, beta_l, meson_quark, meson_ff
from flavio.physics.common import conjugate_par, conjugate_wc, add_dict
from flavio.physics.bdecays import matrixelements, angular
from flavio.physics import ckm
from flavio.physics.bdecays.bvll import qcdf
from flavio.classes import AuxiliaryQuantity
import warnings
# get spectator scattering contribution
# get subleading hadronic contribution at low q2
| 41.081395 | 121 | 0.680442 | """Functions for constructing the helicity amplitudes"""
from flavio.physics.bdecays.common import lambda_K, beta_l
from math import sqrt, pi
from flavio.physics.bdecays.wilsoncoefficients import wctot_dict, get_wceff
from flavio.physics.running import running
from flavio.config import config
from flavio.physics.bdecays.common import lambda_K, beta_l, meson_quark, meson_ff
from flavio.physics.common import conjugate_par, conjugate_wc, add_dict
from flavio.physics.bdecays import matrixelements, angular
from flavio.physics import ckm
from flavio.physics.bdecays.bvll import qcdf
from flavio.classes import AuxiliaryQuantity
import warnings
def prefactor(q2, par, B, V):
    """Overall normalisation of the B->V ll helicity amplitudes.

    Returns 4 G_F/sqrt(2) * xi_t * alpha_e/(4 pi), where xi_t is the CKM
    factor for the b -> d(s) transition of the (B, V) pair and alpha_e is
    evaluated at the 'bvll' renormalization scale.  ``q2`` is currently
    unused but kept for a uniform call signature.
    """
    GF = par['GF']
    scale = config['renormalization scale']['bvll']
    alphaem = running.get_alpha(par, scale)['alpha_e']
    di_dj = meson_quark[(B,V)]
    xi_t = ckm.xi('t',di_dj)(par)
    return 4*GF/sqrt(2)*xi_t*alphaem/(4*pi)
def get_ff(q2, par, B, V):
    """Return the B->V form factors at ``q2`` via the AuxiliaryQuantity registry."""
    ff_name = meson_ff[(B,V)] + ' form factor'
    return AuxiliaryQuantity[ff_name].prediction(par_dict=par, wc_obj=None, q2=q2)
def helicity_amps_ff(q2, ff, wc_obj, par_dict, B, V, lep, cp_conjugate):
    """Form-factor contribution to the B->V ll helicity amplitudes.

    For the CP-conjugate mode both the parameter dictionary and the Wilson
    coefficients are conjugated before the effective coefficients are built.
    """
    par = par_dict.copy()
    if cp_conjugate:
        par = conjugate_par(par)
    scale = config['renormalization scale']['bvll']
    label = meson_quark[(B,V)] + lep + lep # e.g. bsmumu, bdtautau
    wc = wctot_dict(wc_obj, label, scale, par)
    if cp_conjugate:
        wc = conjugate_wc(wc)
    wc_eff = get_wceff(q2, wc, par, B, V, lep, scale)
    ml = par['m_'+lep]
    mB = par['m_'+B]
    mV = par['m_'+V]
    mb = running.get_mb(par, scale)
    N = prefactor(q2, par, B, V)
    # Both leptons carry the same mass ml; the 5th argument (spectator) is 0.
    h = angular.helicity_amps_v(q2, mB, mV, mb, 0, ml, ml, ff, wc_eff, N)
    return h
# get spectator scattering contribution
def get_ss(q2, wc_obj, par_dict, B, V, cp_conjugate):
    """Spectator-scattering contribution to the helicity amplitudes.

    Returns zeros for q2 >= 8.9, where the low-q2 QCDF treatment
    does not apply.
    """
    # this only needs to be done for low q2 - which doesn't exist for taus!
    if q2 >= 8.9:
        return {('0' ,'V'): 0, ('pl' ,'V'): 0, ('mi' ,'V'): 0, }
    ss_name = B+'->'+V+'ll spectator scattering'
    return AuxiliaryQuantity[ss_name].prediction(par_dict=par_dict, wc_obj=wc_obj, q2=q2, cp_conjugate=cp_conjugate)
# get subleading hadronic contribution at low q2
def get_subleading(q2, wc_obj, par_dict, B, V, cp_conjugate):
    """Subleading hadronic contributions to the helicity amplitudes.

    Uses the low-q2 parametrization for q2 <= 9 and the high-q2 one for
    q2 > 14; in the intermediate (charmonium) window an empty dict is
    returned, i.e. no correction is applied.
    """
    if q2 <= 9:
        sub_name = B+'->'+V+ 'll subleading effects at low q2'
        return AuxiliaryQuantity[sub_name].prediction(par_dict=par_dict, wc_obj=wc_obj, q2=q2, cp_conjugate=cp_conjugate)
    elif q2 > 14:
        sub_name = B+'->'+V+ 'll subleading effects at high q2'
        return AuxiliaryQuantity[sub_name].prediction(par_dict=par_dict, wc_obj=wc_obj, q2=q2, cp_conjugate=cp_conjugate)
    else:
        return {}
def helicity_amps(q2, ff, wc_obj, par, B, V, lep):
    """Total B->V ll helicity amplitudes.

    Sum of the form factor, spectator scattering and subleading hadronic
    contributions.  Warns inside the narrow-charmonium window
    8.7 <= q2 < 14, where the prediction is not meaningful.
    """
    if q2 >= 8.7 and q2 < 14:
        warnings.warn("The predictions in the region of narrow charmonium resonances are not meaningful")
    return add_dict((
        helicity_amps_ff(q2, ff, wc_obj, par, B, V, lep, cp_conjugate=False),
        get_ss(q2, wc_obj, par, B, V, cp_conjugate=False),
        get_subleading(q2, wc_obj, par, B, V, cp_conjugate=False)
    ))
def helicity_amps_bar(q2, ff, wc_obj, par, B, V, lep):
    """CP-conjugate of :func:`helicity_amps`.

    Identical structure, but every contribution is evaluated with
    ``cp_conjugate=True``.
    """
    if q2 >= 8.7 and q2 < 14:
        warnings.warn("The predictions in the region of narrow charmonium resonances are not meaningful")
    return add_dict((
        helicity_amps_ff(q2, ff, wc_obj, par, B, V, lep, cp_conjugate=True),
        get_ss(q2, wc_obj, par, B, V, cp_conjugate=True),
        get_subleading(q2, wc_obj, par, B, V, cp_conjugate=True)
    ))
| 2,634 | 0 | 159 |
606b47acd58fd4dd17c55fc84eba614a90158647 | 84 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/unit_test_part/__init__.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/unit_test_part/__init__.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/unit_test_part/__init__.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | """
Test code used for unit tests lives here.
Do not put production code here.
"""
| 14 | 41 | 0.702381 | """
Test code used for unit tests lives here.
Do not put production code here.
"""
| 0 | 0 | 0 |
e7d7276e87a2a43529f72b73e6e9c977003f554f | 19,554 | py | Python | mingle/utilities/phoenix_utils.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
] | 1 | 2018-09-04T19:06:44.000Z | 2018-09-04T19:06:44.000Z | mingle/utilities/phoenix_utils.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
] | 85 | 2017-03-25T22:37:02.000Z | 2022-03-01T16:49:14.000Z | mingle/utilities/phoenix_utils.py | jason-neal/companion_simulations | b5773e5539011d492b7128d0dd2778041ce50d52 | [
"MIT"
] | 1 | 2017-08-18T10:56:39.000Z | 2017-08-18T10:56:39.000Z | """Phoenix Utilities.
Some functions to deal with phoenix models
i.e. searching for models with certain parameters
Jason Neal, January 2017
"""
import glob
import itertools
import logging
import os
import warnings
import Starfish
import numpy as np
from Starfish.grid_tools import HDF5Interface
from astropy.io import fits
from logutils import BraceMessage as __
from spectrum_overload import Spectrum
# from typing import List
import simulators
from mingle.utilities.norm import spec_local_norm
from mingle.utilities.param_file import parse_paramfile
from mingle.utilities.simulation_utilities import check_inputs
from typing import Optional, List, Union
from numpy import (float64, int64)
def load_starfish_spectrum(params, limits=None, hdr=False, normalize=False,
                           area_scale=False, flux_rescale=False, wav_scale=True):
    """Load spectrum from hdf5 grid file.

    Parameters
    ----------
    params: list
        Model parameters [teff, logg, Z]
    limits= List[float, float] default=None
        Wavelength limits.
    hdr: bool
        Include the model header. Default False.
    normalize: bool
        Locally normalize the spectrum. Default False.
    area_scale: bool
        Multiply by stellar surface area pi*R**2 (towards Earth)
    flux_rescale: bool
        Convert from /cm to /nm by dividing by 1e7
    wav_scale: bool
        Multiply by wavelength to turn into [erg/s/cm^2]

    Returns
    -------
    spec: Spectrum
        The loaded spectrum as Spectrum object.
    """
    # No-arg HDF5Interface: presumably picks up the grid configured for
    # Starfish -- confirm against the Starfish config.
    my_hdf5 = HDF5Interface()
    my_hdf5.wl = my_hdf5.wl / 10  # Turn into Nanometer
    if hdr:
        flux, myhdr = my_hdf5.load_flux_hdr(np.array(params))
        spec = Spectrum(flux=flux, xaxis=my_hdf5.wl, header=myhdr)
    else:
        flux = my_hdf5.load_flux(np.array(params))
        spec = Spectrum(flux=flux, xaxis=my_hdf5.wl)
    # Scalings are applied in a fixed order: flux rescale, area scale,
    # wavelength scale, then normalization; limits are applied last.
    if flux_rescale:
        spec = spec * 1e-7  # convert flux unit from /cm to /nm
    if area_scale:
        if hdr:
            # Area scaling needs PHXREFF from the header, so hdr=True is required.
            emitting_area = phoenix_area(spec.header)
            spec = spec * emitting_area
            spec.header["emit_area"] = (emitting_area, "pi*r^2")
        else:
            raise ValueError("No header provided for stellar area scaling")
    if wav_scale:
        # Convert into photon counts, (constants ignored)
        spec = spec * spec.xaxis
    if normalize:
        spec = spec_local_norm(spec, method="exponential")
    if limits is not None:
        if limits[0] > spec.xaxis[-1] or limits[-1] < spec.xaxis[0]:
            logging.warning("Warning: The wavelength limits do not overlap the spectrum."
                            "There is no spectrum left... Check your wavelength, or limits.")
        spec.wav_select(*limits)
    return spec
def load_btsettl_spectrum(params, limits=None, hdr=False, normalize=False, area_scale=False, flux_rescale=False):
    """Load spectrum from hdf5 grid file.

    BT-Settl variant of ``load_starfish_spectrum``; reads the grid file and
    key name from ``Starfish.grid`` instead of the default interface, and
    has no ``wav_scale`` option.

    Parameters
    ----------
    params: list
        Model parameters [teff, logg, Z]
    limits= List[float, float] default=None
        Wavelength limits.
    hdr: bool
        Include the model header. Default False.
    normalize: bool
        Locally normalize the spectrum. Default False.
    area_scale: bool
        Multiply by stellar surface area pi*R**2 (towards Earth)
    flux_rescale: bool
        Convert from /cm to /nm by dividing by 1e7

    Returns
    -------
    spec: Spectrum
        The loaded spectrum as Spectrum object.
    """
    # Starfish.grid["btsettl_hdf5_path"], instrument, ranges=Starfish.grid["parrange"]
    my_hdf5 = HDF5Interface(filename=Starfish.grid["btsettl_hdf5_path"], key_name=Starfish.grid["key_name"])
    my_hdf5.wl = my_hdf5.wl / 10  # Turn into Nanometer
    if hdr:
        flux, myhdr = my_hdf5.load_flux_hdr(np.array(params))
        spec = Spectrum(flux=flux, xaxis=my_hdf5.wl, header=myhdr)
    else:
        flux = my_hdf5.load_flux(np.array(params))
        spec = Spectrum(flux=flux, xaxis=my_hdf5.wl)
    if flux_rescale:
        spec = spec * 1e-7  # convert flux unit from /cm to /nm
    if area_scale:
        if hdr:
            # Area scaling needs PHXREFF from the header, so hdr=True is required.
            emitting_area = phoenix_area(spec.header)
            spec = spec * emitting_area
            spec.header["emit_area"] = (emitting_area, "pi*r^2")
        else:
            raise ValueError("No header provided for stellar area scaling")
    if normalize:
        spec = spec_local_norm(spec, method="exponential")
    if limits is not None:
        if limits[0] > spec.xaxis[-1] or limits[-1] < spec.xaxis[0]:
            logging.warning("Warning: The wavelength limits do not overlap the spectrum."
                            "There is no spectrum left... Check your wavelength, or limits.")
        spec.wav_select(*limits)
    return spec
def phoenix_area(header):
    """Effective emitting disc area of the model star, in Gm**2.

    Reads the effective stellar radius (``PHXREFF``, given in cm) from the
    model FITS header and returns pi * R**2.

    Returns
    -------
    surface_area: float
        Stellar effective surface area. in Gm**2
    """
    if header is None:
        raise ValueError("Header should not be None.")
    # PHXREFF is in cm; 1 Gm = 1e11 cm.
    radius_gm = header["PHXREFF"] * 1e-11
    return np.pi * radius_gm ** 2
def closest_model_params(teff: int, logg: float, feh: float, alpha: Optional[float] = None) -> List[Union[int64, float64]]:
    """Find the closest PHOENIX-ACES grid point to the given stellar parameters.

    Parameters
    ----------
    teff: float
    logg: float
    feh: float
    alpha: float (optional)

    Returns
    -------
    params: list of floats
        Parameters for the closest matching model.
    """
    grid_teffs = np.concatenate((np.arange(2300, 7000, 100),
                                 np.arange(7000, 12100, 200)))
    grid_loggs = np.arange(0, 6.1, 0.5)
    grid_fehs = np.concatenate((np.arange(-4, -2, 1), np.arange(-2, 1.1, 0.5)))
    grid_alphas = np.arange(-0.2, 0.3, 0.2)  # use only these alpha values if necessary

    def _nearest(grid, value):
        # Grid point with the smallest absolute distance (ties -> first).
        return grid[np.argmin(np.abs(grid - value))]

    params = [_nearest(grid_teffs, teff), _nearest(grid_loggs, logg), _nearest(grid_fehs, feh)]
    if alpha is None:
        return params
    if abs(alpha) > 0.2:
        logging.warning("Alpha is outside acceptable range -0.2->0.2")
    params.append(_nearest(grid_alphas, alpha))
    return params
# find_closest_phoenix_name # Should change to this
def find_closest_phoenix_name(data_dir, teff, logg, feh, alpha=None, Z=True):
    """Find the closest PHOENIX-ACES model to the stellar parameters given.

    alpha parameter is not implemented yet.

    Parameters
    ----------
    data_dir: str
        Path to the Phoenix-aces folders Z+-.../
    teff: float
    logg: float
    feh: float
    alpha: float (optional)
    Z: bool
        Whether models live inside Z+x.x/ metallicity subfolders.

    Returns
    -------
    phoenix_model: list of str
        Path(s)/filename(s) of the closest matching model(s) found by glob.
    """
    if alpha is not None:
        closest_teff, closest_logg, closest_feh, closest_alpha = closest_model_params(teff, logg, feh, alpha=alpha)
    else:
        closest_teff, closest_logg, closest_feh = closest_model_params(teff, logg, feh, alpha=None)

    if alpha is not None:
        if abs(alpha) > 0.2:
            logging.warning("Alpha is outside acceptable range -0.2->0.2")
        phoenix_glob = ("Z{2:+4.1f}.Alpha={3:+5.2f}/*{0:05d}-{1:4.2f}"
                        "{2:+4.1f}.Alpha={3:+5.2f}.PHOENIX*.fits"
                        "").format(closest_teff, closest_logg, closest_feh,
                                   closest_alpha)
    else:
        if Z:
            phoenix_glob = ("Z{2:+4.1f}/*{0:05d}-{1:4.2f}{2:+4.1f}.PHOENIX*.fits"
                            "").format(closest_teff, closest_logg, closest_feh)
        else:
            phoenix_glob = ("*{0:05d}-{1:4.2f}{2:+4.1f}.PHOENIX*.fits"
                            "").format(closest_teff, closest_logg, closest_feh)
    # PHOENIX filenames use "-0.0" for solar metallicity, never "+0.0".
    phoenix_glob = phoenix_glob.replace("+0.0", "-0.0")  # Replace positive 0 metallicity with negative 0
    logging.debug(__("New Phoenix_glob {0}", phoenix_glob))
    joint_glob = os.path.join(data_dir, phoenix_glob)
    logging.debug(__("Data dir = {0}", data_dir))
    logging.debug(__("Glob path/file {0}", os.path.join(data_dir, phoenix_glob)))
    logging.debug(__("joint Glob path/file {0}", joint_glob))
    files = glob.glob(os.path.join(data_dir, phoenix_glob))
    if len(files) > 1:
        logging.warning("More than one file returned")
    return files
def phoenix_name_from_params(data_dir, paramfile):
    """Return closest phoenix model given a stellar parameter file.

    Obtain temp, metallicity, and logg from parameter file.

    Parameters
    ----------
    data_dir: str
        Directory to phoenix models.
    paramfile: str or dict, or list
        Parameter filename if a string is given.
        Dictionary of parameters if dict is provided, or
        list of parameters in the order [temp, logg, fe_h, (alpha)].

    Returns
    -------
    phoenix_model: str
        Filename of phoenix model closest to given parameters.
    """
    logging.debug(__("phoenix_from_params Data dir = {0}", data_dir))
    if isinstance(paramfile, str):
        params = parse_paramfile(paramfile)
    else:
        params = paramfile

    if isinstance(params, dict):
        if "alpha" not in params.keys():
            params["alpha"] = None
        # Normalize the dict into positional [temp, logg, fe_h, alpha] order.
        params = [params["temp"], params["logg"], params["fe_h"], params["alpha"]]
    elif isinstance(params, list):
        if len(params) == 3:
            params = params + [None]  # for alpha
        elif len(params) != 4:
            raise ValueError("Length of parameter list given is not valid, {}".format(len(params)))

    return find_closest_phoenix_name(data_dir, params[0], params[1], params[2], alpha=params[3])
def generate_close_params(params, small=True, limits="phoenix"):
    """Yield [teff, logg, feh] parameter combinations around ``params``.

    params: list
        Central model parameters [teff, logg, Z].
    small: bool
        Mode selector forwarded to ``gen_new_param_values``, controlling the
        size of the generated neighbourhood.
    limits: str
        Name of the grid limits ("phoenix" or "cifist") resolved via
        ``get_phoenix_limits``.
    """
    temp, logg, metals = params[0], params[1], params[2]
    new_temps, new_loggs, new_metals = gen_new_param_values(temp, logg, metals, small=small)
    phoenix_limits = get_phoenix_limits(limits)
    # Clip the neighbourhood to the physical extent of the model grid.
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals, phoenix_limits)
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        yield [t, l, m]
def generate_close_params_with_simulator(params, target, limits="phoenix"):
    """Yield [teff, logg, feh] combinations around ``params`` for a simulation.

    params: list
        Central model parameters [teff, logg, Z].
    target: str
        Either "host" or "companion"; selects which simulation-grid config
        keys (teff_1/logg_1/feh_1 vs teff_2/logg_2/feh_2) are read.
    limits: str
        Name of the grid limits resolved via ``get_phoenix_limits``.
    """
    if target not in ["host", "companion"]:
        raise ValueError("Target must be 'host' or 'companion', not '{}'".format(target))
    temp, logg, metals = params[0], params[1], params[2]

    # This is the backup if not specified in config file.
    # NOTE(review): gen_new_param_values receives the *target string* as its
    # 'small' argument here -- confirm this is intended.
    bk_temps, bk_loggs, bk_metals = gen_new_param_values(temp, logg, metals, small=target)

    teff_key = "teff_1" if target == "host" else "teff_2"
    logg_key = "logg_1" if target == "host" else "logg_2"
    feh_key = "feh_1" if target == "host" else "feh_2"
    teff_values = simulators.sim_grid.get(teff_key)
    logg_values = simulators.sim_grid.get(logg_key)
    feh_values = simulators.sim_grid.get(feh_key)

    # Configured [start, stop, step] ranges win; the backups fill in otherwise.
    new_temps = make_grid_parameter(temp, teff_values, bk_temps)
    new_loggs = make_grid_parameter(logg, logg_values, bk_loggs)
    new_metals = make_grid_parameter(metals, feh_values, bk_metals)

    # First clip to the physical grid, then to the user-configured 'parrange'.
    phoenix_limits = get_phoenix_limits(limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals, phoenix_limits)
    dim = len(new_temps) * len(new_loggs) * len(new_metals)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals,
                                                        simulators.starfish_grid["parrange"])
    dim_2 = len(new_temps) * len(new_loggs) * len(new_metals)
    if dim_2 < dim:
        # Warning in-case you do not remember about parrange limits.
        logging.warning("Some models were cut out using the 'parrange' limits.")

    new_temps = check_inputs(new_temps)
    new_loggs = check_inputs(new_loggs)
    new_metals = check_inputs(new_metals)
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        yield [t, l, m]
def set_model_limits(temps, loggs, metals, limits):
    """Restrict parameter arrays to the given inclusive limits.

    Parameters
    ----------
    temps, loggs, metals: np.ndarray
        Candidate model parameter values.
    limits: list
        Nested limits [[temp_lo, temp_hi], [logg_lo, logg_hi], [feh_lo, feh_hi]],
        inclusive at both ends.

    Returns
    -------
    new_temps, new_loggs, new_metals: np.ndarray
        Only the values falling inside the limits; warns if anything was cut.
    """
    new_temps = temps[(temps >= limits[0][0]) & (temps <= limits[0][1])]
    new_loggs = loggs[(loggs >= limits[1][0]) & (loggs <= limits[1][1])]
    new_metals = metals[(metals >= limits[2][0]) & (metals <= limits[2][1])]
    # Bug fix: the original used "len(a) > len(b) | len(c) > ...", which Python
    # parses as a chained comparison over bitwise-or'd lengths -- it did not
    # actually test "did any array shrink".
    if (len(new_temps) < len(temps)) or (len(new_loggs) < len(loggs)) or (len(new_metals) < len(metals)):
        logging.warning("Some models were removed using the 'parrange' limits.")
    return new_temps, new_loggs, new_metals
def make_grid_parameter(param, step_config, backup):
    """Build a grid of parameter values centred on *param*.

    param:
        Value of the parameter to increment from.
    step_config:
        [start, stop, step] offsets handed to ``np.arange``, or None.
    backup:
        Pre-calculated values returned unchanged when step_config is absent.
    """
    if step_config is None or step_config == "None":
        return backup
    offsets = np.arange(*step_config)
    if len(offsets) == 1 and offsets[0] != 0:
        # A single non-zero offset would silently drop the closest model.
        print("The configured parameter range is {}".format(offsets))
        raise ValueError("Invalid parameter configuration. No single model grid with offset !=0 allowed!")
    if 0 not in offsets:
        warnings.warn("The grids do not span the closest parameters. Values={}. Check config".format(offsets))
    return param + offsets
def generate_bhm_config_params(params, limits="phoenix"):
    """Generate teff, logg, Z values given star params and config values.

    Version of "generate_close_params_with_simulator" for bhm: always reads
    the host (*_1) simulation-grid config keys.
    """
    temp, logg, metals = params[0], params[1], params[2]
    # This is the backup if not specified in config file.
    bk_temps, bk_loggs, bk_metals = gen_new_param_values(temp, logg, metals, small=True)

    teff_values = simulators.sim_grid.get("teff_1")
    logg_values = simulators.sim_grid.get("logg_1")
    feh_values = simulators.sim_grid.get("feh_1")

    # Configured [start, stop, step] ranges win; the backups fill in otherwise.
    new_temps = make_grid_parameter(temp, teff_values, bk_temps)
    new_loggs = make_grid_parameter(logg, logg_values, bk_loggs)
    new_metals = make_grid_parameter(metals, feh_values, bk_metals)

    # First clip to the physical grid, then to the user-configured 'parrange'.
    phoenix_limits = get_phoenix_limits(limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals, phoenix_limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals,
                                                        simulators.starfish_grid["parrange"])

    new_temps = check_inputs(new_temps)
    new_loggs = check_inputs(new_loggs)
    new_metals = check_inputs(new_metals)
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        yield [t, l, m]
# def find_phoenix_model_names(base_dir: str, original_model: str) -> List[str]:  # mypy
def find_phoenix_model_names(base_dir, original_model):
    """Find other phoenix models with similar temp and metallicities.

    Parses teff/logg/[Fe/H] out of the PHOENIX filename convention via
    fixed character slices, generates neighbouring parameter combinations
    and keeps only the model files that exist on disk.

    Returns list of model name strings.
    """
    # "lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
    z_dir = "Z" in original_model  # whether paths use Z+x.x/ subfolders
    try:
        model_name = os.path.split(original_model)[-1]
    except Exception:
        model_name = original_model
    logging.debug(__("Original_name = {0}", original_model))
    logging.debug(__("model_name = {0}", model_name))
    # Fixed-position slices of the PHOENIX filename: "lteTTTTT-G.GG-F.F...".
    temp = int(model_name[3:8])
    logg = float(model_name[9:13])
    metals = float(model_name[13:17])

    new_temps, new_loggs, new_metals = gen_new_param_values(temp, logg, metals, small=False)
    close_models = []
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        # phoenix_name presumably renders the parameter triple back into a
        # PHOENIX filename -- it is not defined in this chunk; confirm.
        name = phoenix_name(t, l, m, Z=z_dir)
        name = os.path.join(base_dir, name)
        if os.path.isfile(name):
            close_models.append(name)
    return close_models
| 36.144177 | 123 | 0.644267 | """Phoenix Utilities.
Some functions to deal with phoenix models
i.e. searching for models with certain parameters
Jason Neal, January 2017
"""
import glob
import itertools
import logging
import os
import warnings
import Starfish
import numpy as np
from Starfish.grid_tools import HDF5Interface
from astropy.io import fits
from logutils import BraceMessage as __
from spectrum_overload import Spectrum
# from typing import List
import simulators
from mingle.utilities.norm import spec_local_norm
from mingle.utilities.param_file import parse_paramfile
from mingle.utilities.simulation_utilities import check_inputs
from typing import Optional, List, Union
from numpy import (float64, int64)
def load_phoenix_spectrum(phoenix_name, limits=None, normalize=False):
    """Load a raw PHOENIX-ACES spectrum from a FITS flux file.

    Parameters
    ----------
    phoenix_name: str
        Path to the PHOENIX flux FITS file.
    limits: List[float, float], optional
        Wavelength limits (nm), applied after the fixed K-band cut below.
    normalize: bool
        Locally normalize the spectrum (exponential method). Default False.

    Returns
    -------
    spec: Spectrum
        Spectrum with the wavelength axis in nanometers, restricted to
        2070-2350 nm (K band).
    """
    wav_dir = simulators.starfish_grid["raw_path"]
    wav_model = fits.getdata(os.path.join(wav_dir, "WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"))
    wav_model /= 10  # turn into nanometers
    flux = fits.getdata(phoenix_name)
    spec = Spectrum(flux=flux, xaxis=wav_model)
    # Limit to K band
    spec.wav_select(2070, 2350)
    if normalize:
        spec = spec_local_norm(spec, method="exponential")
    if limits is not None:
        spec.wav_select(*limits)
    return spec
def load_starfish_spectrum(params, limits=None, hdr=False, normalize=False,
area_scale=False, flux_rescale=False, wav_scale=True):
"""Load spectrum from hdf5 grid file.
Parameters
----------
params: list
Model parameters [teff, logg, Z]
limits= List[float, float] default=None
Wavelength limits.
hdr: bool
Include the model header. Default False.
normalize: bool
Locally normalize the spectrum. Default False.
area_scale: bool
Multiply by stellar surface area pi*R**2 (towards Earth)
flux_rescale: bool
Convert from /cm to /nm by dividing by 1e7
wav_scale: bool
Multiply by wavelength to turn into [erg/s/cm^2]
Returns
-------
spec: Spectrum
The loaded spectrum as Spectrum object.
"""
my_hdf5 = HDF5Interface()
my_hdf5.wl = my_hdf5.wl / 10 # Turn into Nanometer
if hdr:
flux, myhdr = my_hdf5.load_flux_hdr(np.array(params))
spec = Spectrum(flux=flux, xaxis=my_hdf5.wl, header=myhdr)
else:
flux = my_hdf5.load_flux(np.array(params))
spec = Spectrum(flux=flux, xaxis=my_hdf5.wl)
if flux_rescale:
spec = spec * 1e-7 # convert flux unit from /cm to /nm
if area_scale:
if hdr:
emitting_area = phoenix_area(spec.header)
spec = spec * emitting_area
spec.header["emit_area"] = (emitting_area, "pi*r^2")
else:
raise ValueError("No header provided for stellar area scaling")
if wav_scale:
# Convert into photon counts, (constants ignored)
spec = spec * spec.xaxis
if normalize:
spec = spec_local_norm(spec, method="exponential")
if limits is not None:
if limits[0] > spec.xaxis[-1] or limits[-1] < spec.xaxis[0]:
logging.warning("Warning: The wavelength limits do not overlap the spectrum."
"There is no spectrum left... Check your wavelength, or limits.")
spec.wav_select(*limits)
return spec
def load_btsettl_spectrum(params, limits=None, hdr=False, normalize=False, area_scale=False, flux_rescale=False):
"""Load spectrum from hdf5 grid file.
Parameters
----------
params: list
Model parameters [teff, logg, Z]
limits= List[float, float] default=None
Wavelength limits.
hdr: bool
Include the model header. Default False.
normalize: bool
Locally normalize the spectrum. Default False.
area_scale: bool
Multiply by stellar surface area pi*R**2 (towards Earth)
flux_rescale: bool
Convert from /cm to /nm by dividing by 1e7
Returns
-------
spec: Spectrum
The loaded spectrum as Spectrum object.
"""
# Starfish.grid["btsettl_hdf5_path"], instrument, ranges=Starfish.grid["parrange"]
my_hdf5 = HDF5Interface(filename=Starfish.grid["btsettl_hdf5_path"], key_name=Starfish.grid["key_name"])
my_hdf5.wl = my_hdf5.wl / 10 # Turn into Nanometer
if hdr:
flux, myhdr = my_hdf5.load_flux_hdr(np.array(params))
spec = Spectrum(flux=flux, xaxis=my_hdf5.wl, header=myhdr)
else:
flux = my_hdf5.load_flux(np.array(params))
spec = Spectrum(flux=flux, xaxis=my_hdf5.wl)
if flux_rescale:
spec = spec * 1e-7 # convert flux unit from /cm to /nm
if area_scale:
if hdr:
emitting_area = phoenix_area(spec.header)
spec = spec * emitting_area
spec.header["emit_area"] = (emitting_area, "pi*r^2")
else:
raise ValueError("No header provided for stellar area scaling")
if normalize:
spec = spec_local_norm(spec, method="exponential")
if limits is not None:
if limits[0] > spec.xaxis[-1] or limits[-1] < spec.xaxis[0]:
logging.warning("Warning: The wavelength limits do not overlap the spectrum."
"There is no spectrum left... Check your wavelength, or limits.")
spec.wav_select(*limits)
return spec
def phoenix_area(header):
    """Effective emitting disc area of the model star, in Gm**2.

    Reads the effective stellar radius (``PHXREFF``, given in cm) from the
    model FITS header and returns pi * R**2.

    Returns
    -------
    surface_area: float
        Stellar effective surface area. in Gm**2
    """
    if header is None:
        raise ValueError("Header should not be None.")
    # PHXREFF is in cm; 1 Gm = 1e11 cm.
    radius_gm = header["PHXREFF"] * 1e-11
    return np.pi * radius_gm ** 2
def closest_model_params(teff: int, logg: float, feh: float, alpha: Optional[float] = None) -> List[Union[int64, float64]]:
    """Find the closest PHOENIX-ACES grid point to the given stellar parameters.

    Parameters
    ----------
    teff: float
    logg: float
    feh: float
    alpha: float (optional)

    Returns
    -------
    params: list of floats
        Parameters for the closest matching model.
    """
    grid_teffs = np.concatenate((np.arange(2300, 7000, 100),
                                 np.arange(7000, 12100, 200)))
    grid_loggs = np.arange(0, 6.1, 0.5)
    grid_fehs = np.concatenate((np.arange(-4, -2, 1), np.arange(-2, 1.1, 0.5)))
    grid_alphas = np.arange(-0.2, 0.3, 0.2)  # use only these alpha values if necessary

    def _nearest(grid, value):
        # Grid point with the smallest absolute distance (ties -> first).
        return grid[np.argmin(np.abs(grid - value))]

    params = [_nearest(grid_teffs, teff), _nearest(grid_loggs, logg), _nearest(grid_fehs, feh)]
    if alpha is None:
        return params
    if abs(alpha) > 0.2:
        logging.warning("Alpha is outside acceptable range -0.2->0.2")
    params.append(_nearest(grid_alphas, alpha))
    return params
def all_aces_params():
    """Return every PHOENIX-ACES grid axis as (teffs, loggs, fehs, alphas)."""
    low_teffs = np.arange(2300, 7000, 100)    # 100 K spacing below 7000 K
    high_teffs = np.arange(7000, 12001, 200)  # 200 K spacing above
    teffs = np.concatenate((low_teffs, high_teffs))
    loggs = np.arange(0, 6.01, 0.5)
    fehs = np.concatenate((np.arange(-4, -2, 1), np.arange(-2, 1.01, 0.5)))
    alphas = np.arange(-0.2, 1.21, 0.2)
    return teffs, loggs, fehs, alphas
def all_btsettl_params(model="cifist2011_2015"):
    """Return the BT-Settl grid axes as (teffs, loggs, fehs, alphas).

    Only the CIFIST 2011/2015 grid is supported; any other ``model`` raises
    NotImplementedError.
    """
    if model != "cifist2011_2015":
        # Bug fix: the exception was previously constructed but never raised,
        # leading to a NameError on the return statement instead.
        raise NotImplementedError("all_btsettl_params not supported for model {0}".format(model))
    teffs = np.arange(1200, 7000, 100)
    loggs = np.arange(2.5, 5.1, 0.5)
    fehs = np.arange(0, 0.1, 1)
    alphas = np.arange(0, 0.1, 0.2)
    return teffs, loggs, fehs, alphas
# find_closest_phoenix_name # Should change to this
def find_closest_phoenix_name(data_dir, teff, logg, feh, alpha=None, Z=True):
"""Find the closest PHOENIX-ACES model to the stellar parameters given.
alpha parameter is not implemented yet.
Parameters
----------
data_dir: str
Path to the Phoenix-aces folders Z+-.../
teff: float
logg: float
feh: float
alpha: float (optional)
Returns
-------
phoenix_model: str
Path/Filename to the closest matching model.
"""
if alpha is not None:
closest_teff, closest_logg, closest_feh, closest_alpha = closest_model_params(teff, logg, feh, alpha=alpha)
else:
closest_teff, closest_logg, closest_feh = closest_model_params(teff, logg, feh, alpha=None)
if alpha is not None:
if abs(alpha) > 0.2:
logging.warning("Alpha is outside acceptable range -0.2->0.2")
phoenix_glob = ("Z{2:+4.1f}.Alpha={3:+5.2f}/*{0:05d}-{1:4.2f}"
"{2:+4.1f}.Alpha={3:+5.2f}.PHOENIX*.fits"
"").format(closest_teff, closest_logg, closest_feh,
closest_alpha)
else:
if Z:
phoenix_glob = ("Z{2:+4.1f}/*{0:05d}-{1:4.2f}{2:+4.1f}.PHOENIX*.fits"
"").format(closest_teff, closest_logg, closest_feh)
else:
phoenix_glob = ("*{0:05d}-{1:4.2f}{2:+4.1f}.PHOENIX*.fits"
"").format(closest_teff, closest_logg, closest_feh)
phoenix_glob = phoenix_glob.replace("+0.0", "-0.0") # Replace positive 0 metallicity with negative 0
logging.debug(__("New Phoenix_glob {0}", phoenix_glob))
joint_glob = os.path.join(data_dir, phoenix_glob)
logging.debug(__("Data dir = {0}", data_dir))
logging.debug(__("Glob path/file {0}", os.path.join(data_dir, phoenix_glob)))
logging.debug(__("joint Glob path/file {0}", joint_glob))
files = glob.glob(os.path.join(data_dir, phoenix_glob))
if len(files) > 1:
logging.warning("More than one file returned")
return files
def phoenix_name_from_params(data_dir, paramfile):
"""Return closest phoenix model given a stellar parameter file.
Obtain temp, metallicity, and logg from parameter file.
Parameters
----------
data_dir: str
Directory to phoenix models.
params: str or dict, or list
Parameter filename if a string is given.
Dictionary of parameters if dict is provided, or
list of parameters in the correct order.
Returns
-------
phoenix_model: str
Filename of phoenix model closest to given parameters.
"""
logging.debug(__("phoenix_from_params Data dir = {0}", data_dir))
if isinstance(paramfile, str):
params = parse_paramfile(paramfile)
else:
params = paramfile
if isinstance(params, dict):
if "alpha" not in params.keys():
params["alpha"] = None
params = [params["temp"], params["logg"], params["fe_h"], params["alpha"]]
elif isinstance(params, list):
if len(params) == 3:
params = params + [None] # for alpha
elif len(params) != 4:
raise ValueError("Length of parameter list given is not valid, {}".format(len(params)))
return find_closest_phoenix_name(data_dir, params[0], params[1], params[2], alpha=params[3])
def generate_close_params(params, small=True, limits="phoenix"):
    """Yield [teff, logg, Z] grid points around the given parameters.

    "small" selects the size of the generated grid (see
    gen_new_param_values); "limits" names the phoenix grid whose coverage
    the candidates are clipped to.
    """
    teff, logg, feh = params[0], params[1], params[2]
    grid_teff, grid_logg, grid_feh = gen_new_param_values(teff, logg, feh, small=small)
    # Drop candidates that fall outside the model library's coverage.
    grid_teff, grid_logg, grid_feh = set_model_limits(
        grid_teff, grid_logg, grid_feh, get_phoenix_limits(limits))
    for combination in itertools.product(grid_teff, grid_logg, grid_feh):
        yield list(combination)
def get_phoenix_limits(limits="phoenix"):
    """Return the [[teff], [logg], [feh]] coverage of a named phoenix grid.

    Raises ValueError for an unknown grid name.
    """
    known_limits = {
        "phoenix": [[2300, 12000], [0, 6], [-4, 1]],
        "cifist": [[1200, 7000], [2.5, 5], [0, 0]],
    }
    try:
        return known_limits[limits]
    except KeyError:
        raise ValueError("Error with phoenix limits. Invalid limits name '{0}'".format(limits))
def generate_close_params_with_simulator(params, target, limits="phoenix"):
    """teff, logg, Z.

    "Target" is required to make sure this is used correctly...

    Builds one grid axis per parameter from the simulators.sim_grid config
    ("teff_1"/"logg_1"/"feh_1" for the host, "*_2" for the companion), falling
    back to gen_new_param_values() when an axis is not configured, then clips
    the axes first to the named phoenix grid limits and then to the starfish
    "parrange" limits before yielding every [teff, logg, feh] combination.
    """
    if target not in ["host", "companion"]:
        raise ValueError("Target must be 'host' or 'companion', not '{}'".format(target))
    temp, logg, metals = params[0], params[1], params[2]
    # This is the backup if not specified in config file.
    # NOTE(review): small=target passes the *string* "host"/"companion";
    # gen_new_param_values only special-cases "host", so "companion" falls
    # into the generic truthy-`small` branch -- confirm that is intended.
    bk_temps, bk_loggs, bk_metals = gen_new_param_values(temp, logg, metals, small=target)
    # print("params", params, target, small, limits)
    teff_key = "teff_1" if target == "host" else "teff_2"
    logg_key = "logg_1" if target == "host" else "logg_2"
    feh_key = "feh_1" if target == "host" else "feh_2"
    teff_values = simulators.sim_grid.get(teff_key)
    logg_values = simulators.sim_grid.get(logg_key)
    feh_values = simulators.sim_grid.get(feh_key)
    new_temps = make_grid_parameter(temp, teff_values, bk_temps)
    new_loggs = make_grid_parameter(logg, logg_values, bk_loggs)
    new_metals = make_grid_parameter(metals, feh_values, bk_metals)
    phoenix_limits = get_phoenix_limits(limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals, phoenix_limits)
    # Grid size before applying the configured parrange, to detect clipping.
    dim = len(new_temps) * len(new_loggs) * len(new_metals)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals,
                                                        simulators.starfish_grid["parrange"])
    dim_2 = len(new_temps) * len(new_loggs) * len(new_metals)
    if dim_2 < dim:
        # Warning in-case you do not remember about parrange limits.
        logging.warning("Some models were cut out using the 'parrange' limits.")
    new_temps = check_inputs(new_temps)
    new_loggs = check_inputs(new_loggs)
    new_metals = check_inputs(new_metals)
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        yield [t, l, m]
def set_model_limits(temps, loggs, metals, limits):
    """Clip candidate model parameter arrays to a grid's limits.

    Parameters
    ----------
    temps, loggs, metals: numpy.ndarray
        Candidate parameter values (numpy arrays; boolean-mask indexing is used).
    limits: list
        Bounds as [[temp_lo, temp_hi], [logg_lo, logg_hi], [feh_lo, feh_hi]].

    Returns
    -------
    tuple of numpy.ndarray
        The input arrays with out-of-bound values removed.
    """
    new_temps = temps[(temps >= limits[0][0]) * (temps <= limits[0][1])]
    new_loggs = loggs[(loggs >= limits[1][0]) * (loggs <= limits[1][1])]
    new_metals = metals[(metals >= limits[2][0]) * (metals <= limits[2][1])]
    # Bug fix: the original used bitwise "|" between the length comparisons;
    # since "|" binds tighter than ">", that parsed as a chained comparison
    # against OR-ed lengths rather than "any of the arrays shrank".
    if len(temps) > len(new_temps) or len(loggs) > len(new_loggs) or len(metals) > len(new_metals):
        logging.warning("Some models were removed using the 'parrange' limits.")
    return new_temps, new_loggs, new_metals
def make_grid_parameter(param, step_config, backup):
    """Build one grid axis of values offset around *param*.

    param:
        Central value the offsets are applied to.
    step_config:
        [start, stop, step] offsets (fed to numpy.arange), or None/"None"
        when the axis is not configured.
    backup:
        Pre-computed axis returned when step_config is not configured.
    """
    if step_config is None or step_config == "None":
        # Axis not configured in config.yaml: use the pre-computed fallback.
        return backup
    offsets = np.arange(*step_config)
    if len(offsets) == 1 and offsets[0] != 0:
        # A single grid point that is not the star itself makes no sense.
        print("The configured parameter range is {}".format(offsets))
        raise ValueError("Invalid parameter configuration. No single model grid with offset !=0 allowed!")
    if 0 not in offsets:
        warnings.warn("The grids do not span the closest parameters. Values={}. Check config".format(offsets))
    return param + offsets
def generate_bhm_config_params(params, limits="phoenix"):
    """Generate teff, logg, Z values given star params and config values.

    Version of "generate_close_params_with_simulator" for bhm.

    Yields [teff, logg, feh] lists for every grid combination, after clipping
    to the named phoenix grid limits and the starfish "parrange" limits.
    """
    temp, logg, metals = params[0], params[1], params[2]
    # This is the backup if not specified in config file.
    bk_temps, bk_loggs, bk_metals = gen_new_param_values(temp, logg, metals, small=True)
    # bhm always uses the host-star ("*_1") axis configuration.
    teff_values = simulators.sim_grid.get("teff_1")
    logg_values = simulators.sim_grid.get("logg_1")
    feh_values = simulators.sim_grid.get("feh_1")
    new_temps = make_grid_parameter(temp, teff_values, bk_temps)
    new_loggs = make_grid_parameter(logg, logg_values, bk_loggs)
    new_metals = make_grid_parameter(metals, feh_values, bk_metals)
    # Clip first to the phoenix library coverage, then to the configured parrange.
    phoenix_limits = get_phoenix_limits(limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals, phoenix_limits)
    new_temps, new_loggs, new_metals = set_model_limits(new_temps, new_loggs, new_metals,
                                                        simulators.starfish_grid["parrange"])
    new_temps = check_inputs(new_temps)
    new_loggs = check_inputs(new_loggs)
    new_metals = check_inputs(new_metals)
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        yield [t, l, m]
def gen_new_param_values(temp, logg, metals, small=True):
    """Generate candidate teff/logg/[Fe/H] offset arrays around a star.

    small: bool or str
        "host"  -> tight grid (error-bound offsets only);
        other truthy value -> medium grid;
        falsy -> widest grid.

    Returns (new_temps, new_loggs, new_metals) as numpy arrays.
    """
    if small == "host":
        # only include error bounds.
        new_temps = np.array([-100, 0, 100]) + temp
        new_metals = np.array([-0.5, 0.0, 0.5]) + metals
        new_loggs = np.array([-0.5, 0.0, 0.5]) + logg
    elif small:
        new_temps = np.arange(-600, 601, 100) + temp
        # NOTE(review): the repeated 0.5 offset (and missing 0.0) below looks
        # like a typo for [-0.5, 0.0, 0.5]; as written it duplicates a grid
        # value and omits the central parameter -- confirm before relying on it.
        new_metals = np.array([-0.5, 0.5, 0.5]) + metals
        new_loggs = np.array([-0.5, 0.5, 0.5]) + logg
    else:
        new_temps = np.arange(-500, 501, 100) + temp
        new_metals = np.arange(-1, 1.1, 0.5) + metals
        new_loggs = np.arange(-1, 1.1, 0.5) + logg
    return new_temps, new_loggs, new_metals
# def find_phoenix_model_names(base_dir: str, original_model: str) -> List[str]: # mypy
def find_phoenix_model_names(base_dir, original_model):
    """Find other phoenix models with similar temp and metallicities.

    Returns list of model name strings (paths under *base_dir* that exist
    on disk).  Parameters are parsed from fixed character positions of the
    model filename, so *original_model* must follow the standard pattern
    shown below.
    """
    # "lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
    z_dir = "Z" in original_model  # models may live in Z+x.x subdirectories
    try:
        model_name = os.path.split(original_model)[-1]
    except Exception:
        model_name = original_model
    logging.debug(__("Original_name = {0}", original_model))
    logging.debug(__("model_name = {0}", model_name))
    # Fixed-position parse of "lteTTTTT-L.LL[+-]F.F..." style names.
    temp = int(model_name[3:8])
    logg = float(model_name[9:13])
    metals = float(model_name[13:17])
    new_temps, new_loggs, new_metals = gen_new_param_values(temp, logg, metals, small=False)
    close_models = []
    for t, l, m in itertools.product(new_temps, new_loggs, new_metals):
        name = phoenix_name(t, l, m, Z=z_dir)
        name = os.path.join(base_dir, name)
        if os.path.isfile(name):  # keep only models actually present on disk
            close_models.append(name)
    return close_models
def phoenix_name(teff, logg, feh, alpha=None, Z=False):
    """Build the PHOENIX-ACES filename for the given stellar parameters.

    teff: int effective temperature; logg: float surface gravity;
    feh: float metallicity; Z=True prefixes the metallicity sub-directory.
    Raises NotImplementedError when alpha is given (not supported yet).
    """
    if alpha is not None:
        raise NotImplementedError("Need to add alpha to phoenix name.")
    # Bug fix: feh was formatted with "{:+1.10}" (general float format, 10
    # significant digits), which renders 0.0 as "+0" and 1.0 as "+1" instead
    # of the "+0.0"/"+1.0" the PHOENIX naming scheme uses, so the positive-zero
    # replacement below never matched.  Use one fixed decimal place instead.
    name = ("lte{0:05d}-{1:1.02f}{2:+4.1f}."
            "PHOENIX-ACES-AGSS-COND-2011-HiRes.fits").format(teff, logg, feh)
    if Z:
        name = os.path.join("Z{0:+4.1f}".format(feh), name)
    if "+0.0" in name:  # Positive zero is not allowed in naming
        name = name.replace("+0.0", "-0.0")
    return name
def phoenix_regex(teff, logg, feh, alpha=None, Z=False):
    """Build a glob pattern matching the PHOENIX-ACES model file for the parameters.

    Z=True prefixes the metallicity sub-directory component.
    Raises NotImplementedError when alpha is given (not supported yet).
    """
    if alpha is not None:
        raise NotImplementedError("Need to add alpha to phoenix name.")
    regex = ("*{0:05d}-{1:4.2f}{2:+4.1f}.PHOENIX*.fits"
             "").format(teff, logg, feh)
    if Z:
        # Bug fix: the Z sub-directory used "{:+1.10}" (general format), which
        # renders 0.0 as "+0", so "Z+0" escaped the "+0.0" -> "-0.0" fixup
        # below.  Format with one fixed decimal, matching the file pattern.
        regex = os.path.join("Z{0:+4.1f}".format(feh), regex)
    if "+0.0" in regex:  # Positive zero is not allowed in naming
        regex = regex.replace("+0.0", "-0.0")
    return regex
| 3,082 | 0 | 161 |
5d71a6fd59057f44b5149d32337e46b4adc3d7e9 | 8,062 | py | Python | ahmia/solr_grouping_backend.py | keyboardcowboy42/ahmia | a474a30e77f002ddf2be771dbe5ee7371a3c0ec8 | [
"BSD-3-Clause"
] | 176 | 2015-01-12T10:27:28.000Z | 2022-02-01T16:43:12.000Z | ahmia/solr_grouping_backend.py | keyboardcowboy42/ahmia | a474a30e77f002ddf2be771dbe5ee7371a3c0ec8 | [
"BSD-3-Clause"
] | 17 | 2015-01-05T11:58:51.000Z | 2022-03-02T14:54:14.000Z | ahmia/solr_grouping_backend.py | keyboardcowboy42/ahmia | a474a30e77f002ddf2be771dbe5ee7371a3c0ec8 | [
"BSD-3-Clause"
] | 80 | 2015-01-07T14:43:36.000Z | 2020-11-09T01:03:51.000Z | # encoding: utf-8
"""Experimental Solr Grouping / Field Collapsing backend for Haystack 2.0"""
# NOTE: You must be running the latest Pysolr master - no PyPI release yet!
# See https://gist.github.com/3750774 for the current version of this code
# See http://wiki.apache.org/solr/FieldCollapsing for the Solr feature documentation
from __future__ import absolute_import
import logging
from django.db.models.loading import get_model
from haystack.backends import EmptyResults
from haystack.backends.solr_backend import (SolrEngine, SolrSearchBackend,
SolrSearchQuery)
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
from haystack.models import SearchResult
from haystack.query import SearchQuerySet
# Since there's no chance of this being portable (yet!) we'll import explicitly
# rather than using the generic imports:
| 39.326829 | 110 | 0.632721 | # encoding: utf-8
"""Experimental Solr Grouping / Field Collapsing backend for Haystack 2.0"""
# NOTE: You must be running the latest Pysolr master - no PyPI release yet!
# See https://gist.github.com/3750774 for the current version of this code
# See http://wiki.apache.org/solr/FieldCollapsing for the Solr feature documentation
from __future__ import absolute_import
import logging
from django.db.models.loading import get_model
from haystack.backends import EmptyResults
from haystack.backends.solr_backend import (SolrEngine, SolrSearchBackend,
SolrSearchQuery)
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
from haystack.models import SearchResult
from haystack.query import SearchQuerySet
# Since there's no chance of this being portable (yet!) we'll import explicitly
# rather than using the generic imports:
class GroupedSearchQuery(SolrSearchQuery):
    """SolrSearchQuery extended with field-collapsing (result grouping) support."""

    def __init__(self, *args, **kwargs):
        super(GroupedSearchQuery, self).__init__(*args, **kwargs)
        # Field the results are grouped on (None = no grouping requested).
        self.grouping_field = None
        # Total number of matching documents (not groups); filled lazily.
        self._total_document_count = None

    def _clone(self, **kwargs):
        # Propagate the grouping field when Haystack clones the query.
        clone = super(GroupedSearchQuery, self)._clone(**kwargs)
        clone.grouping_field = self.grouping_field
        return clone

    def add_group_by(self, field_name):
        """Group (collapse) results on the given indexed field."""
        self.grouping_field = field_name

    def post_process_facets(self, results):
        # FIXME: remove this hack once https://github.com/toastdriven/django-haystack/issues/750 lands
        # See matches dance in _process_results below:
        total = 0
        if 'hits' in results:
            total = int(results['hits'])
        elif 'matches' in results:
            total = int(results['matches'])
        self._total_document_count = total
        return super(GroupedSearchQuery, self).post_process_facets(results)

    def get_total_document_count(self):
        """Return the total number of matching documents rather than document groups

        If the query has not been run, this will execute the query and store the results.
        """
        if self._total_document_count is None:
            self.run()
        return self._total_document_count

    def build_params(self, *args, **kwargs):
        """Add the Solr group.* parameters when a grouping field is set."""
        res = super(GroupedSearchQuery, self).build_params(*args, **kwargs)
        if self.grouping_field is not None:
            res.update({'group': 'true',
                        'group.field': self.grouping_field,
                        'group.ngroups': 'true',
                        'group.limit': 2,  # TODO: Don't hard-code this
                        'group.sort': 'django_ct desc, score desc',
                        'group.facet': 'true',
                        'result_class': GroupedSearchResult})
        return res
class GroupedSearchResult(object):
    """One group of collapsed documents from a grouped Solr response."""

    def __init__(self, field_name, group_data, raw_results=None):
        """Wrap one entry of Solr's grouped response.

        field_name: field the grouping was performed on.
        group_data: one element of the Solr "groups" list (holds
            "groupValue" and a "doclist" with "numFound" and "docs").
        raw_results: the full pysolr response, used for highlighting
            (fixed: previously a shared mutable default of {}).
        """
        if raw_results is None:
            raw_results = {}
        self.field_name = field_name
        self.key = group_data['groupValue']  # TODO: convert _to_python
        self.hits = group_data['doclist']['numFound']
        self.documents = list(self.process_documents(group_data['doclist']['docs'],
                                                     raw_results=raw_results))

    def __unicode__(self):
        # Bug fix: the format string referenced "{0.group_key}" but the
        # attribute set in __init__ is "key", so this always raised
        # AttributeError when rendered.
        return 'GroupedSearchResult({0.field_name}={0.key}, hits={0.hits})'.format(self)

    def process_documents(self, doclist, raw_results):
        """Yield a haystack SearchResult for each raw Solr document in the group."""
        # TODO: tame import spaghetti
        from haystack import connections
        engine = connections["default"]
        conn = engine.get_backend().conn
        unified_index = engine.get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        for raw_result in doclist:
            app_label, model_name = raw_result[DJANGO_CT].split('.')
            additional_fields = {}
            model = get_model(app_label, model_name)
            if model and model in indexed_models:
                for key, value in raw_result.items():
                    index = unified_index.get_index(model)
                    string_key = str(key)
                    # Prefer the index field's own conversion when available.
                    if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
                        additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = conn._to_python(value)
            # Strip bookkeeping keys so they are not passed as result fields.
            del(additional_fields[DJANGO_CT])
            del(additional_fields[DJANGO_ID])
            del(additional_fields['score'])
            if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
                additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
            result = SearchResult(app_label, model_name, raw_result[DJANGO_ID],
                                  raw_result['score'], **additional_fields)
            yield result
class GroupedSearchQuerySet(SearchQuerySet):
    """SearchQuerySet that works with grouped (field-collapsed) queries."""

    def __init__(self, *args, **kwargs):
        super(GroupedSearchQuerySet, self).__init__(*args, **kwargs)
        # Grouping support lives on the query object; refuse anything else.
        if not isinstance(self.query, GroupedSearchQuery):
            raise TypeError("GroupedSearchQuerySet must be used with a GroupedSearchQuery query")

    def group_by(self, field_name):
        """Have Solr group results based on the provided field name"""
        clone = self._clone()
        clone.query.add_group_by(field_name)
        return clone

    def post_process_results(self, results):
        # Override the default model-specific processing
        return results

    def total_document_count(self):
        """Returns the count for the total number of matching documents rather than groups

        A GroupedSearchQuerySet normally returns the number of document groups; this allows
        you to indicate the total number of matching documents - quite handy for making facet counts match the
        displayed numbers
        """
        if self.query.has_run():
            return self.query.get_total_document_count()
        else:
            clone = self._clone()
            return clone.query.get_total_document_count()
class GroupedSolrSearchBackend(SolrSearchBackend):
    """SolrSearchBackend that forwards group.* kwargs and parses grouped responses."""

    def build_search_kwargs(self, *args, **kwargs):
        """Strip group.* kwargs before calling the parent, then re-apply them.

        The stock backend does not know about Solr's grouping parameters, so
        they are separated out and merged back into the final kwargs.
        """
        # Bug fix: iterate over a snapshot of the keys -- popping from kwargs
        # while iterating kwargs.keys() raises RuntimeError on Python 3.
        group_kwargs = [(i, kwargs.pop(i)) for i in list(kwargs) if i.startswith("group")]
        res = super(GroupedSolrSearchBackend, self).build_search_kwargs(*args, **kwargs)
        res.update(group_kwargs)
        if group_kwargs and 'sort' not in kwargs:
            # Grouped queries get an explicit default sort.
            res['sort'] = 'score desc'
        return res

    def _process_results(self, raw_results, result_class=None, **kwargs):
        """Turn a grouped pysolr response into GroupedSearchResult objects.

        Falls back to the parent behaviour for non-grouped result classes.
        """
        res = super(GroupedSolrSearchBackend, self)._process_results(raw_results,
                                                                     result_class=result_class,
                                                                     **kwargs)
        if result_class and not issubclass(result_class, GroupedSearchResult):
            return res
        if len(raw_results.docs):
            raise RuntimeError("Grouped Solr searches should return grouped elements, not docs!")
        assert not res['results']
        assert not res['hits']
        if isinstance(raw_results, EmptyResults):
            return res
        assert len(raw_results.grouped) == 1, "Grouping on more than one field is not supported"
        res['results'] = results = []
        for field_name, field_group in raw_results.grouped.items():
            res['hits'] = field_group['ngroups']
            res['matches'] = field_group['matches']
            for group in field_group['groups']:
                if group['groupValue'] is None:
                    logging.warning("Unexpected NULL grouping", extra={'data': raw_results})
                    res['hits'] -= 1  # Avoid confusing Haystack with excluded bogon results
                    continue
                results.append(result_class(field_name, group, raw_results=raw_results))
        return res
class GroupedSolrEngine(SolrEngine):
    """Solr engine wiring the grouped backend and grouped query classes together."""
    backend = GroupedSolrSearchBackend
    query = GroupedSearchQuery
| 5,459 | 1,472 | 250 |
cbdab3e6e4d30e8dd46dc3d9430542aa328f9c5e | 743 | py | Python | digits-knn.py | WuzipengYL/2-scikit-learn-intro | afd1011d2fad77e85724f9d12591bc3574d3c51e | [
"Apache-2.0"
] | null | null | null | digits-knn.py | WuzipengYL/2-scikit-learn-intro | afd1011d2fad77e85724f9d12591bc3574d3c51e | [
"Apache-2.0"
] | null | null | null | digits-knn.py | WuzipengYL/2-scikit-learn-intro | afd1011d2fad77e85724f9d12591bc3574d3c51e | [
"Apache-2.0"
] | null | null | null |
## Use a K-nearest-neighbours classifier to recognise which digit each
## scanned handwritten-digit image represents.
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
## Load the data
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
## Split into training (90%) and test (10%) sets.
## Slice indices must be integers, so the 0.9 * n_samples boundary is wrapped
## in int().  Fix: the training slices previously hard-coded the dataset size
## (1797) while the test slices used n_samples; use n_samples consistently so
## the split boundary cannot drift if the dataset size changes.
split = int(.9 * n_samples)
X_train = X_digits[:split]
y_train = y_digits[:split]
X_test = X_digits[split:]
y_test = y_digits[split:]
model = KNeighborsClassifier()
## Train the model
model.fit(X_train, y_train)
## Predict and score on the held-out test set
prediction = model.predict(X_test)
score = model.score(X_test, y_test)
print(score)
| 20.081081 | 50 | 0.732167 |
## Use a K-nearest-neighbours classifier to recognise which digit each
## scanned handwritten-digit image represents.
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
## Load the data
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
## Split into training (90%) and test (10%) sets
#X_train = X_digits[:.9 * n_samples]
#y_train = y_digits[:.9 * n_samples]
#X_test = X_digits[.9 * n_samples:]
#y_test = y_digits[.9 * n_samples:]
# The code above is wrong -- slice indices must be integers -- the corrected version follows.
# NOTE(review): 1797 hard-codes the digits dataset size; the other slices use
# n_samples -- presumably this line should too.
X_train = X_digits[:int(.9*1797)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
model = KNeighborsClassifier()
## Train the model
model.fit(X_train, y_train)
## Predict and score on the held-out test set
prediction = model.predict(X_test)
score = model.score(X_test, y_test)
print(score)
| 0 | 0 | 0 |
d7ac50a8eb587719728dbff277531c620447d436 | 6,381 | py | Python | DB/Database.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | [
"MIT"
] | null | null | null | DB/Database.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | [
"MIT"
] | null | null | null | DB/Database.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | [
"MIT"
] | null | null | null | import os
import sqlite3
import unicodedata
import tarfile
import json
import shutil
from .. import Environ as ENV
from abc import ABCMeta, abstractmethod
from itertools import chain
from pprint import pprint
################################################################################
################################################################################
checkTableCmd = "SELECT * FROM sqlite_master"
indexCmd = "CREATE INDEX idx_{} ON {} ({})"
updateCmd = "UPDATE {0[table]} SET {0[colCmd]} WHERE {0[pk]} = ?"
################################################################################
################################################################################
class DatabaseManager(metaclass=ABCMeta):
"""
INPUTS ARE NOT VALIDATED HERE
"""
_schema = None
_dbPath = ENV.dbPath
@abstractmethod
@abstractmethod
################################################################################
################################################################################
| 31.126829 | 166 | 0.542548 | import os
import sqlite3
import unicodedata
import tarfile
import json
import shutil
from .. import Environ as ENV
from abc import ABCMeta, abstractmethod
from itertools import chain
from pprint import pprint
################################################################################
################################################################################
def yId(yahooId):
    """Return the trailing segment of a dotted Yahoo id string.

    Falls back to "-1" when the argument does not support split()
    (e.g. None instead of a string).
    """
    try:
        *_, tail = yahooId.split(".")
    except AttributeError:
        tail = "-1"
    return tail
def normal(name):
    """Strip accents/diacritics from *name* via NFD decomposition."""
    decomposed = unicodedata.normalize("NFD", name)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return "".join(kept)
checkTableCmd = "SELECT * FROM sqlite_master"
indexCmd = "CREATE INDEX idx_{} ON {} ({})"
updateCmd = "UPDATE {0[table]} SET {0[colCmd]} WHERE {0[pk]} = ?"
################################################################################
################################################################################
class DatabaseManager(metaclass=ABCMeta):
    """
    INPUTS ARE NOT VALIDATED HERE

    Abstract base for sqlite-backed sport databases.  Subclasses must supply
    a ``_schema`` (iterable of SQL statements) and implement :meth:`seed`
    and :meth:`insertGame`.  The player/boxscore import helpers additionally
    expect ``_abrv`` and ``_season`` attributes and an ``insertPlayer``
    method on the subclass.
    """
    _schema = None
    _dbPath = ENV.dbPath

    def __init__(self, dbPath):
        """If no database exists at *dbPath*, create, seed and close one."""
        self.conn = None
        self.curs = None
        self.dbPath = dbPath
        if not os.path.exists(dbPath):
            # New database: make the parent directory, build the schema,
            # seed reference data, then close the connection again.
            os.makedirs("/".join(self.dbPath.split("/")[:-1]))
            self.openDB()
            self.create()
            self.seed()
            self.closeDB()

    def create(self):
        """Execute every statement of the subclass schema and commit."""
        for line in self._schema:
            self.curs.execute(line)
        self.conn.commit()

    def openDB(self):
        """Open the sqlite connection and cache a cursor as self.curs."""
        self.conn = sqlite3.connect(self.dbPath)
        self.curs = self.conn.cursor()

    @abstractmethod
    def seed(self):
        """Populate static/reference tables; called once at creation."""
        pass

    @abstractmethod
    def insertGame(self, gameData):
        """Insert one game's record set into the database."""
        pass

    def checkEntry(self, table, item):
        """Return True when *item* exists as the primary key of *table*.

        NOTE: table and item are interpolated directly into the SQL --
        only pass trusted values.
        """
        pk = self.fetchOne("PRAGMA table_info({})".format(table))[1]
        cmd = "SELECT * FROM {} WHERE {} = {}".format(table, pk, item)
        self.curs.execute(cmd)
        answer = bool(self.curs.fetchone())
        return answer

    def closeDB(self):
        """Close the cursor and the connection.

        Bug fix: this previously closed ``self.cursor``, an attribute that
        never exists (the cursor is stored as ``self.curs``), so every call
        -- including the one from __init__ -- raised AttributeError.
        """
        self.curs.close()
        self.conn.close()

    def executeCmd(self, cmd, values=[]):
        """Execute *cmd* with bound *values* on the cached cursor."""
        self.curs.execute(cmd, values)

    def fetchItem(self, cmd, values=[]):
        """Return the first column of the first row, or None when empty."""
        self.executeCmd(cmd, values)
        item = None
        try:
            item = self.curs.fetchone()[0]
        except TypeError:
            # fetchone() returned None (no rows) -- keep item as None.
            pass
        return item

    def fetchOne(self, cmd, values=[]):
        """Execute *cmd* and return the first row (or None)."""
        self.executeCmd(cmd, values)
        return self.curs.fetchone()

    def fetchAll(self, cmd, values=[]):
        """Execute *cmd* and return all rows."""
        self.executeCmd(cmd, values)
        return self.curs.fetchall()

    def insert(self, table, *, colNames=None, info=None, values=None):
        """INSERT one row into *table*.

        Either pass ``info`` (a mapping covering every column of the table,
        in which case values are taken in table-column order) or explicit
        ``values`` (optionally with ``colNames``).
        """
        insertCmd = "INSERT INTO {0[table]} {0[cols]} VALUES( {0[qMarks]} )"
        if info:
            values = [info[col] for col in self.getTableCols(table)]
        cols = "({})".format(",".join(colNames)) if colNames else ""
        qMarks = ",".join(["?" for x in values])
        self.executeCmd(insertCmd.format({"qMarks": qMarks, "table": table, "cols": cols}), values)

    def nextKey(self, table):
        """Return max(primary key) + 1 for *table*, or 1 when empty."""
        pk = self.fetchOne("PRAGMA table_info({})".format(table))[1]
        keyCmd = "SELECT MAX({}) FROM {}".format(pk, table)
        try:
            key = int(self.fetchOne(keyCmd)[0]) + 1
        except TypeError:
            # MAX() is NULL on an empty table -- start numbering at 1.
            key = 1
        return key

    def getTableCols(self, table):
        """Return the column names of *table* in declaration order."""
        return [x[1] for x in self.fetchAll("PRAGMA table_info({})".format(table))]

    def getKey(self, table, **kwargs):
        """Return the pk of the row matching **kwargs, inserting it if missing.

        NOTE: kwargs values are interpolated directly into the WHERE clause --
        only pass trusted values.
        """
        pk = self.fetchOne("PRAGMA table_info({})".format(table))[1]
        whereCmd = " AND ".join(["{}={}".format(key, value) for key, value in kwargs.items()])
        keyCmd = "SELECT {} FROM {} WHERE {}".format(pk, table, whereCmd)
        try:
            key = self.fetchOne(keyCmd)[0]
        except TypeError:
            # No matching row: allocate the next key and insert a new one.
            key = self.nextKey(table)
            kwargs[pk] = key
            self.insert(table, info=kwargs)
        return key

    def update(self, table, pk, itemId, **kwargs):
        """UPDATE the columns given in kwargs for the row whose *pk* equals *itemId*."""
        colCmd = ", ".join(["{} = {}".format(key, value) for key, value in kwargs.items()])
        cmd = updateCmd.format({"table": table, "colCmd": colCmd, "pk": pk})
        self.executeCmd(cmd, (itemId,))

    def insertPlayers(self):
        """Unpack the archived player json files and insert each one.

        Relies on the subclass providing ``_abrv`` and an ``insertPlayer``
        method (singular) for the per-player insert.
        """
        print("insertPlayers function")
        tar = tarfile.open("/home/ededub/FEFelson/{}/players.tar.xz".format(self._abrv))
        tar.extractall("/home/ededub/FEFelson/{}/temp".format(self._abrv))
        tar.close()
        for player in ["/home/ededub/FEFelson/{}/temp/".format(self._abrv) + x for x in os.listdir("/home/ededub/FEFelson/{}/temp".format(self._abrv))]:
            print(player)
            with open(player) as fileIn:
                info = json.load(fileIn)
                self.insertPlayer(info)
        # Clean up the extracted temp files.
        [os.remove("/home/ededub/FEFelson/{}/temp/".format(self._abrv) + x) for x in os.listdir("/home/ededub/FEFelson/{}/temp".format(self._abrv))]

    def insertBoxScores(self):
        """Unpack archived boxscores plus the current season's files and insert each game.

        Skips scoreboard/schedule/matchup ("M"-prefixed) files and files with
        invalid json; games are processed in ascending numeric filename order.
        """
        tar = tarfile.open("/home/ededub/FEFelson/{}/boxscores.tar.xz".format(self._abrv))
        tar.extractall("/home/ededub/FEFelson/{}/temp".format(self._abrv))
        tar.close()
        fileList = []
        for filePath, _, fileNames in os.walk("/home/ededub/FEFelson/{}/temp".format(self._abrv)):
            [fileList.append(filePath + "/" + fileName) for fileName in fileNames if fileName != "scoreboard.json" and fileName[0] != "M"]
        for filePath, _, fileNames in os.walk("/home/ededub/FEFelson/{}/{}".format(self._abrv, self._season)):
            [fileList.append(filePath + "/" + fileName) for fileName in fileNames if fileName != "scoreboard.json" and fileName != "schedule.json" and fileName[0] != "M"]
        for fileName in sorted(fileList, key=lambda x: int(x.split("/")[-1].split(".")[0])):
            print(fileName)
            with open(fileName) as fileIn:
                try:
                    self.insertGame(json.load(fileIn))
                except json.decoder.JSONDecodeError:
                    # Best-effort import: skip unreadable/corrupt game files.
                    pass
        [shutil.rmtree("/home/ededub/FEFelson/{}/temp/".format(self._abrv) + x) for x in os.listdir("/home/ededub/FEFelson/{}/temp".format(self._abrv))]
################################################################################
################################################################################
| 4,783 | 0 | 530 |
afa2e8424b7013cdb4acfc58337a742581335e8a | 667 | py | Python | modulo 1/d004/tipoprimitivo.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | modulo 1/d004/tipoprimitivo.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | modulo 1/d004/tipoprimitivo.py | rafa-evangelista/PYTHON | 761ec7e01f1617263bc023a6b82b599a936275ee | [
"MIT"
] | null | null | null | p = (input('Digite algo: '))
# Report, for the value read above, which str classification predicates hold.
print('A variável ', p, ' é alfabética? ', (p.isalpha()))
print('A variável ', p, ' é numérica? ', (p.isnumeric()))
print('A variável ', p, ' é alfanumérica? ', (p.isalnum()))
print('A variável ', p, ' é dígito? ', (p.isdigit()))
print('A variável ', p, ' é minúscula? ', (p.islower()))
print('A variável ', p, ' é maíuscula? ', (p.isupper()))
print('A variável ', p, ' é um espaço? ', (p.isspace()))
print('A variável ', p, ' é ASCII? ', (p.isascii()))
print('A variável ', p, ' é decimal? ', (p.isdecimal()))
print('A variável ', p, ' é identificador? ', (p.isidentifier()))
print('A variável ', p, ' é imprimível? ', (p.isprintable())) | 55.583333 | 65 | 0.584708 | p = (input('Digite algo: '))
# Report, for the value read above, which str classification predicates hold.
# (label, str method name) pairs, printed in the original order.
for rotulo, metodo in [('alfabética', 'isalpha'),
                       ('numérica', 'isnumeric'),
                       ('alfanumérica', 'isalnum'),
                       ('dígito', 'isdigit'),
                       ('minúscula', 'islower'),
                       ('maíuscula', 'isupper'),
                       ('um espaço', 'isspace'),
                       ('ASCII', 'isascii'),
                       ('decimal', 'isdecimal'),
                       ('identificador', 'isidentifier')]:
    print('A variável ', p, ' é ' + rotulo + '? ', getattr(p, metodo)())
print('A variável ', p, ' é imprimível? ', (p.isprintable())) | 0 | 0 | 0 |
552e612284da911df93384aaa96e4ceeb40333db | 274 | py | Python | 06/quad_eq.py | Siketyan/Programming-I | 0749c1ae045d53cd8a67f0de7ab13c26030ddd74 | [
"Apache-2.0"
] | null | null | null | 06/quad_eq.py | Siketyan/Programming-I | 0749c1ae045d53cd8a67f0de7ab13c26030ddd74 | [
"Apache-2.0"
] | null | null | null | 06/quad_eq.py | Siketyan/Programming-I | 0749c1ae045d53cd8a67f0de7ab13c26030ddd74 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Solve a quadratic equation a*x^2 + b*x + c = 0 with the quadratic formula.
import math
print("2次方程式の係数を入れてください")
a = float(input("a = "))
b = float(input("b = "))
c = float(input("c = "))
# Square root of the discriminant; math.sqrt raises ValueError when the
# roots are complex (b^2 < 4ac).
d = math.sqrt((b * b) - (4 * a * c))
x1 = (- b + d) / (2 * a)
x2 = (- b - d) / (2 * a)
print("解は %6.2f と %6.2f です。" %(x1, x2))
| 17.125 | 39 | 0.489051 | # coding: utf-8
# Solve the quadratic a*x^2 + b*x + c = 0 for its two real roots.
import math
print("2次方程式の係数を入れてください")
# Read the three coefficients in order.
a, b, c = (float(input(nome + " = ")) for nome in ("a", "b", "c"))
# Square root of the discriminant (ValueError when roots are complex).
d = math.sqrt(b * b - 4 * a * c)
x1, x2 = (- b + d) / (2 * a), (- b - d) / (2 * a)
print("解は %6.2f と %6.2f です。" %(x1, x2))
| 0 | 0 | 0 |
0ad14b8593a4e18a694b203872fa03d40ab663c8 | 1,415 | py | Python | armory/baseline_models/keras/resnet50.py | KyleBittner/armory | 7e403d6aa9f8f60f34365de06cd990697a1c5b1c | [
"MIT"
] | 1 | 2021-06-17T23:05:58.000Z | 2021-06-17T23:05:58.000Z | armory/baseline_models/keras/resnet50.py | KyleBittner/armory | 7e403d6aa9f8f60f34365de06cd990697a1c5b1c | [
"MIT"
] | null | null | null | armory/baseline_models/keras/resnet50.py | KyleBittner/armory | 7e403d6aa9f8f60f34365de06cd990697a1c5b1c | [
"MIT"
] | null | null | null | """
ResNet50 CNN model for 244x244x3 image classification
"""
from typing import Optional
import tensorflow as tf
from art.classifiers import KerasClassifier
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model
IMAGENET_MEANS = [103.939, 116.779, 123.68]
def get_art_model(
    model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None
) -> KerasClassifier:
    """
    ResNet50 pretrained on Imagenet. The initial layers transform the input from canonical form
    ([0, 1] floats) to the expected input format for the ResNet50 (scaled to [0, 255],
    channels reversed, zero-centered on the ImageNet channel means).

    model_kwargs: forwarded to the ResNet50 constructor.
    wrapper_kwargs: forwarded to the ART KerasClassifier wrapper.
    weights_path: optional path of pretrained weights to load into the model.
    """
    input = tf.keras.Input(shape=(224, 224, 3))
    # Preprocessing layers
    img_scaled_to_255 = Lambda(lambda image: image * 255)(input)
    # Reorder image channels i.e. img = img[..., ::-1]
    img_channel_reorder = Lambda(lambda image: tf.reverse(image, axis=[-1]))(
        img_scaled_to_255
    )
    # Model was trained with inputs zero-centered on ImageNet mean
    img_normalized = Lambda(lambda image: image - IMAGENET_MEANS)(img_channel_reorder)
    resnet50 = ResNet50(weights=None, input_tensor=img_normalized, **model_kwargs)
    model = Model(inputs=input, outputs=resnet50.output)
    if weights_path:
        model.load_weights(weights_path)
    # clip_values tells ART the valid input range for attack generation.
    wrapped_model = KerasClassifier(model, clip_values=(0.0, 1.0), **wrapper_kwargs,)
    return wrapped_model
| 33.690476 | 115 | 0.737102 | """
ResNet50 CNN model for 244x244x3 image classification
"""
from typing import Optional
import tensorflow as tf
from art.classifiers import KerasClassifier
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model
IMAGENET_MEANS = [103.939, 116.779, 123.68]
def get_art_model(
    model_kwargs: dict, wrapper_kwargs: dict, weights_path: Optional[str] = None
) -> KerasClassifier:
    """
    Build an ART-wrapped ResNet50 for 224x224x3 inputs in [0, 1].

    The three Lambda layers convert canonical [0, 1] input into the
    representation the network was trained on: scaled to [0, 255], channels
    reversed (img[..., ::-1]), then zero-centered on the ImageNet channel
    means.  Extra model_kwargs go to ResNet50, wrapper_kwargs to the ART
    KerasClassifier; weights_path optionally loads pretrained weights.
    """
    canonical_input = tf.keras.Input(shape=(224, 224, 3))
    # Preprocessing: scale, reorder channels, subtract per-channel means.
    scaled = Lambda(lambda image: image * 255)(canonical_input)
    reordered = Lambda(lambda image: tf.reverse(image, axis=[-1]))(scaled)
    centered = Lambda(lambda image: image - IMAGENET_MEANS)(reordered)
    backbone = ResNet50(weights=None, input_tensor=centered, **model_kwargs)
    model = Model(inputs=canonical_input, outputs=backbone.output)
    if weights_path:
        model.load_weights(weights_path)
    # clip_values declares the valid input range to ART.
    return KerasClassifier(model, clip_values=(0.0, 1.0), **wrapper_kwargs)
| 0 | 0 | 0 |
4b59942824827c6641bf0499d42429470c8035b4 | 93,027 | py | Python | src/python/attic/evenblocks.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 10 | 2017-09-30T14:31:42.000Z | 2021-12-12T07:52:05.000Z | src/python/attic/evenblocks.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 56 | 2017-04-11T15:47:15.000Z | 2019-10-11T15:50:35.000Z | src/python/attic/evenblocks.py | BBN-Q/pyqgl2 | 7acc8b244ee7799c21df03ecff8325e15cdb94d3 | [
"Apache-2.0"
] | 3 | 2019-06-20T07:08:39.000Z | 2020-10-24T19:29:05.000Z | # Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
# Note: This code is QGL not QGL2
# This file contains code to replace Barrier instructions with appropriate Id()
# pulses to make channels line up without using a Wait where possible.
# Where not possible, it replaces the Barrier with Sync then WaitSome.
# See replaceBarriers().
'''
Assumptions
* A given function invocation / program has an even # (possibly 0) of Barriers on
each channel
* Given barriers at both start/end of with concur or with inlined,
and no manual additions, this is guaranteed. If the statements in a
function are not in a with-concur then they are treated as
sequential, and barriers will be inserted between sequential statements.
* QGL2 programmers may not manually insert Goto, Call, Return,
BlockLabel, Repeat, LoadRepeat, or Barrier
* Open question: We could relax this if other things hold true, but why? Perhaps you
can write a QGL1 function because QGL2 is not expressive enough?
* Open question: Perhaps you can add Barriers in a sort of export mode?
* LoadRepeat is immediately followed by the BlockLabel we'll repeat to
* That is, this is what QGL2 will do
* We could relax this, but why?
* LoadRepeat value will be integer of at least 2
* That is, this is what QGL2 will do
* The statement block to be repeated (between BlockLabel target of
Repeat & Repeat) does not include a Goto without also including the
BlockLabel target of the Goto
* Note that there may be unused BlockLabels.
* Block to be repeated (between BlockLabel & Repeat) does not include a
Call without including the target BlockLabel and the matching Return
* Call and Goto and Repeat target BlockLabels exist on each relevant
channel's program & are unique (per program)
* Code 'between' Call and Return includes an even # (possibly 0) of
Barriers
* where 'between' follows execution order not order in the sequence
* Note there may be some number of Goto and CMP statements in the middle.
* Code 'between' 2 'paired' Barriers does not include a Call without [its
BlockLabel target, obviously, and] Return
* where 'paired' refers to the indentation level in the source and is
not immediately apparent once 'compiled'; e.g. matching
* there may be other nested barriers in the middle
* A valid QGL2 program calls init() (as currently defined) on all channels that will be used in the program concurrently
* because it includes a global Wait that requires a Sync from all channels before the program can proceed
* Call and Repeat blocks may be nested
* If a Wait or WaitSome waits on a channel, then all those channels will have a matching Wait.
* You cannot wait for a sync from a channel that does not itself Wait
* A Wait or WaitSome must be preceded by a Sync
Some things you cannot assume:
* The BlockLabel target of a Goto is often numerically BEFORE the
Goto; make no assumption about its relative placement
* The BlockLabel target of a Call may be numerically before or after
the Call; make no assumption about its relative placement
* The Return is numerically before or after the BlockLabel target of a
Call; make no assumption about its relative placement
* The Repeat is numerically before or after the LoadRepeat /
BlockLabel target; make no assumption about its relative placement
'''
# Other points
# * There may be diff # of Barriers on diff channels
# * Each barrier has a globally unique Id and list of channels that include this barrier,
# meaning that all those barriers wait on this barrier
# * Wait is like a barrier on all channels.
# * When all channels start with a Barrier, make it a Wait
from QGL.ControlFlow import Goto, Call, Return, LoadRepeat, Repeat, Wait, LoadCmp, Sync, ComparisonInstruction, ControlInstruction
from qgl2.qgl1control import Barrier, WaitSome
from QGL.PulseSequencer import Pulse, CompositePulse, PulseBlock
from QGL.BlockLabel import BlockLabel
from QGL.PulsePrimitives import Id
from QGL.ChannelLibraries import QubitFactory
from copy import copy
import logging
logger = logging.getLogger('QGL.Compiler.qgl2')
# Convenience functions to identify pulse/control elements
def pulseLengths(pulses):
    '''QGL1 function to get the length of a pulse, pulseblock,
    compositepulse, or list or tuple of such things.

    Raises an Exception for None or string input. Unknown element
    types are assumed to have length 0 (with a warning).
    '''
    # Fast path: anything pulse-like (Pulse, CompositePulse, PulseBlock,
    # ControlInstruction, BlockLabel - all the usual things) carries a
    # length attribute. Only catch AttributeError here: a bare except
    # would hide real errors raised while computing the length.
    try:
        logger.debug("Pulse %s has length %s", pulses, pulses.length)
        return pulses.length
    except AttributeError:
        # Not a single pulse-like object; fall through to the
        # container / error handling below.
        pass
    # Errors: these inputs are never valid
    if pulses is None:
        raise Exception("pulses was None")
    if isinstance(pulses, str):
        raise Exception("pulseLengths got string pulses: %r" % pulses)
    # A list or tuple of pulses: total length is the sum of the
    # (recursively computed) element lengths. May be 0 for empty lists.
    if isinstance(pulses, (list, tuple)):
        logger.debug("pulses was list: %r", pulses)
        if not pulses:
            logger.debug("pulses was list of length 0")
            return 0
        return sum(pulseLengths(pulse) for pulse in pulses)
    # Not a pulse or list of pulses that we know how to handle:
    # warn and treat it as taking no time.
    logger.warning("Unknown sequence element %s of type %s assumed to have length 0", pulses, type(pulses))
    return 0
# FIXME: This gets called a bunch. Removing log statements doesn't
# speed it up. What would help?
def markBarrierLengthCalculated(barrierCtr, seqIdx, addLen=float('nan')):
    '''Update the barrier object in our 3 data structures
    for the given counter, sequence to add the given length
    to lengthSince; adding in the length of the Id pulse.
    Note that if it becomes a wait, addLen is NAN, and adding that
    gives NAN as desired.

    barrierCtr: barrier id (counter) to mark
    seqIdx: index of the sequence this update applies to
    addLen: length to add to the barrier's lengthSince (NaN marks
        the segment as indeterminate)
    '''
    # To be called for each sequence that this barrier is on
    global barriersByCtr, barriersBySeqByPos, barriersBySeqByCtr
    logger.debug("markBarrierLength (seq %d) adding to barrier '%s' length: %s", seqIdx, barrierCtr, addLen)
    barrier = barriersByCtr.get(barrierCtr, None)
    # Structure 1 of 3: the global by-counter record. If barrierCtr is
    # unknown, barrier is None and the subscript below raises, landing
    # in the except clause.
    try:
        if barrier['lengthCalculated']:
            # Already calculated - nothing to change; log only when debug is
            # enabled because formatting these messages is expensive.
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersByCtr %s already calculated", seqIdx, barrierCtr)
                logger.debug("Old dict: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        elif barrier['seqIndex'] != seqIdx:
            # We call this once per sequence so we'll catch this next time through
            # logger.debug("markBarrier (seq %d) on '%s' found wrong sequence in barriersByCtr (%d) - skip", seqIdx, barrierCtr, barrier['seqIndex'])
            pass
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrierLength updating barriersByCtr object: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
            # Record the added length and flag the barrier as done.
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersByCtr[barrierCtr] = barrier
            logger.debug(" ... updated length: %s", barriersByCtr[barrierCtr]['lengthSince'])
    except:
        logger.warning("Barrier '%s' not in barriersByCtr", barrierCtr)
    # Structure 2 of 3: the per-sequence by-counter record. Also remember
    # the barrier's position (seqPos) for the by-position update below.
    # Update barriersBySeqByCtr
    pos = None
    try:
        barrier = barriersBySeqByCtr[seqIdx][barrierCtr]
        pos = barrier['seqPos']
        if barrier['lengthCalculated']:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersBySeqByCtr '%s' already calculated: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", seqIdx, barrierCtr, barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        else:
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersBySeqByCtr[seqIdx][barrierCtr] = barrier
    except:
        logger.debug("markBarrier didn't find %s on seq %d", barrierCtr, seqIdx)
    # Structure 3 of 3: the per-sequence by-position record.
    # barriersBySeqByPos
    barrier = None
    try:
        barrier = barriersBySeqByPos[seqIdx][pos]
    except:
        # pos unknown (the by-counter lookup above failed); fall back to
        # a linear scan of this sequence's positions for the counter.
        try:
            for pos in barriersBySeqByPos[seqIdx]:
                if barrierCtr == barriersBySeqByPos[seqIdx][pos]['counter']:
                    barrier = barriersBySeqByPos[seqIdx][pos]
                    break
        except:
            logger.warning("Failed to find barrier %d or sequence %d in barriersBySeqByPos", pos, seqIdx)
    try:
        if barrier['lengthCalculated']:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersBySeqByPos '%s' already calculated: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'seqPos': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", seqIdx, barrierCtr, barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['seqPos'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        else:
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersBySeqByPos[seqIdx][pos] = barrier
    except:
        # no barrier found
        pass
def getBarrierForSeqCtr(seqInd, currCtr):
    '''Get the barrier object for the currCtr Barrier
    ID for the sequence seqInd, or -1 if not found.

    seqInd: index of the sequence to look in
    currCtr: barrier counter (id) to look up; may be None
    '''
    global barriersBySeqByCtr
    try:
        return barriersBySeqByCtr[seqInd].get(currCtr, -1)
    except (KeyError, TypeError, AttributeError):
        # The lookup failed: seqInd is missing from the map or the
        # entry is malformed. (Previously a bare except, which also
        # swallowed unrelated errors.)
        if currCtr is None:
            logger.debug("getBarrierForSeq got None currCtr")
        if seqInd not in barriersBySeqByCtr:
            # FIXME: Error?
            logger.warning("getBarrierForSeqCtr got seqInd %s not in barriersBySeqByCtr", seqInd)
        # For a wait, one could also look it up by index via
        # barriersBySeqByPos, but waits are kept on the ByCtr list too.
        # Always return the documented "not found" sentinel; the old
        # code could fall through and implicitly return None here.
        return -1
def getLengthBetweenBarriers(seqInd, currCtr, prevCtr='-1', iterCnt=0):
    '''For the given sequence, find the length between the given 2 barriers.
    Return float('NaN') if indeterminate.
    Recurses up the list of barriers adding up the lengths we previously
    calculated for each pair of Barriers.
    So if the length between any 2 barriers within that chain are indeterminate,
    the whole thing is indeterminate.

    seqInd: index of the sequence to measure on
    currCtr: id of the later barrier ('-1' means start;
        'wait-chans-%s-ctr-%d' style ids mean a Wait)
    prevCtr: id of the earlier barrier to measure back to
    iterCnt: recursion depth; used only to indent log messages
    '''
    import math
    # Length from a barrier to itself is 0 by definition.
    if currCtr == prevCtr:
        logger.debug("%sgetLengthBetweenBarriers asked for length to self '%s' (0)", " "*iterCnt, currCtr)
        return 0
    # find the barrier lengths for this channel
    # follow the previous pointers, adding lengths
    currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
    if currBarrier == -1:
        # Unknown barrier or the start pseudo-barrier: nothing to add.
        logger.debug("%sgetLengthBetweenBarriers from current -1 (start or error), use length 0", " "*iterCnt)
        # from start
        return 0
    logger.debug("%sgetLengthBetweenBarriers: currBarrier: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", " "*iterCnt, currBarrier['counter'], currBarrier['type'], currBarrier['seqIndex'], currBarrier['lengthSince'], currBarrier['prevBarrierCtr'], currBarrier['lengthCalculated'])
    # Basic case: the previous barrier is the one we're looking for
    prevBarrierCtr = currBarrier['prevBarrierCtr']
    prevLen = currBarrier['lengthSince']
    # FIXME: Guard against barrier not having these fields?
    if prevBarrierCtr == prevCtr:
        logger.debug("%sDesired previous barrier '%s' is actual previous from current '%s', so use stored length: %s", " "*iterCnt, prevCtr, currCtr, prevLen)
        return prevLen
    if not currBarrier['lengthCalculated'] and iterCnt>0:
        # logger.warn is deprecated; use logger.warning
        logger.warning("%slength from '%s' to '%s' is not reliable cause not calculated", " "*iterCnt, currCtr, prevBarrierCtr)
    # Old code stored firstPrev and prevPrev to handle repeat with barrier inside.
    # But any barrier inside a repeat (if allowed at all) must be treated as
    # indeterminate / a Wait, so there is no special repeat handling here.
    # If the length so far is indeterminate, no use in recursing -
    # the whole thing will be indeterminate
    if math.isnan(prevLen):
        logger.debug("%sLength to previous from current '%s' was NaN, return that", " "*iterCnt, currCtr)
        return prevLen
    logger.debug("%sLength from curr '%s' to prev '%s' will start with length from curr to next '%s': %s", " "*iterCnt, currCtr, prevCtr, prevBarrierCtr, prevLen)
    # If this barrier doesn't store the desired length, then recurse
    return prevLen + getLengthBetweenBarriers(seqInd, prevBarrierCtr, prevCtr, iterCnt+1)
def isReplaceableBarrier(barrier, seqs):
    '''Is the given barrier object replacable on its sequence?
    Start, Wait, WaitSome, and barriers that are no longer in
    their sequence are not replacable. So only a Barrier() of the correct id (counter).
    HOWEVER: We now pretend Wait/WaitSome are replacable so that later we calculate the real length, though
    we don't actually do the replacement.

    barrier: barrier dict ('seqPos', 'counter', 'type', 'seqIndex')
    seqs: list of sequences to validate the barrier against; if falsy,
        nothing can be validated and we return False
    '''
    # Is the given barrier something we can replace?
    # Not a Wait or WaitSome, and still in its sequence
    # return boolean
    ind = barrier['seqPos']
    nextCtr = barrier['counter']
    nextType = barrier['type']
    seqInd = barrier['seqIndex']
    logger.debug("Checking if barrier '%s' is replaceable: %s", nextCtr, barrier)
    if ind < 0:
        # Negative position is the start pseudo-barrier.
        logger.debug("Barrier '%s' is start, not replaceable", nextCtr)
        return False
    # 7/8: Don't bail here; so we can calculate the length later
    # if nextType in ('wait', 'waitsome'):
    #     logger.debug("Barrier %s is a wait, not replaceable", nextCtr)
    #     return False
    if seqs:
        # Sanity check the barrier's claimed location before trusting it.
        # (logger.warn is deprecated; use logger.warning)
        if not (seqInd >= 0 and seqInd < len(seqs)):
            logger.warning("Barrier '%s' claims to be on sequence %d which doesn't exist (can't replace)", nextCtr, seqInd)
            return False
        if len(seqs[seqInd]) <= ind:
            logger.warning("Barrier '%s' claims to be at position %d on sequence %d; the sequence has only %d items (can't replace)", nextCtr, ind, seqInd, len(seqs[seqInd]))
            return False
        if hasattr(seqs[seqInd][ind], 'value') and seqs[seqInd][ind].value == nextCtr:
            # This is a barrier with the desired counter on the proper sequence
            return True
        if isID(seqs[seqInd][ind]):
            # Expected when we've already done a replacement
            logger.debug("Barrier '%s' actual element is (now) %s on sequence %d (don't replace)", nextCtr, seqs[seqInd][ind], seqInd)
            return False
        # 7/8: We want to let it go through if it's a Wait or WaitSome for now
        if isWait(seqs[seqInd][ind]) or isWaitSome(seqs[seqInd][ind]):
            logger.debug("Barrier '%s' on sequence %d is a Wait or WaitSome - pretend it's replaceable so we calculate the length: %s", nextCtr, seqInd, seqs[seqInd][ind])
            return True
        if not isBarrier(seqs[seqInd][ind]):
            # We don't think we've replaced any barriers with waits, so this is unexpected
            logger.debug("Barrier '%s' claims type %s but actual element is (now) %s on sequence %d (not replaceable)", nextCtr, nextType, seqs[seqInd][ind], seqInd)
            return False
        else:
            # It's a barrier but the wrong barrier ID?
            logger.warning("Barrier '%s' should be at %d on sequence %d, but instead found %s (can't replace)", nextCtr, ind, seqInd, seqs[seqInd][ind])
            return False
    # No sequences to check against: cannot confirm the barrier is present.
    return False
def getNextBarrierCtr(seqs, seqInd, currCtr, positions):
    ''' Find the id (counter) of the next Barrier after currCtr on the given sequence
    that we could (still) replace. So skip barriers no longer in the sequence.
    positions is sorted indices in sequence seqInd in barriersBySeqByPos.
    Return '-1' if there is none.

    seqs: list of sequences (used to check barriers are still present)
    seqInd: index of the sequence to walk
    currCtr: barrier id to start after; '-1' means "find the first"
    '''
    # Walk to the next barrier past currCtr on sequence seqInd and return the counter of that barrier
    # Return '-1' if no more
    # This is just iterating over barriers on this channel
    # This is for following execution path of a sequence to find
    # all the barriers and swap them all
    # seqInd is the sequence index
    global barriersBySeqByPos, barriersBySeqByCtr
    logger.debug("Looking for next barrier to replace on sequence %d after '%s'", seqInd, currCtr)
    # Handle case where there's no current - we're looking for the first
    if str(currCtr) == '-1':
        logger.debug("Looking for 1st barrier on sequence %d", seqInd)
        # Scan positions in order; return the first still-replaceable,
        # not-yet-calculated barrier.
        for i in positions:
            barrier = barriersBySeqByPos[seqInd][i]
            # Make sure that barrier is actually still in the sequence it claims to be in;
            # we might have already removed it
            if isReplaceableBarrier(barrier, seqs):
                # return this ctr
                if str(barrier) == '-1':
                    return '-1'
                elif barrier['lengthCalculated']:
                    logger.debug("... but this barrier length already calculated, continue")
                    continue
                else:
                    logger.debug("First replaceable barrier on sequence %d: %s\n", seqInd, barrier['counter'])
                    return barrier['counter']
            else:
                # logger.debug("%s not (no longer) replaceable", barrier)
                # keep looping
                continue
        # If we get here, there are no replaceable barriers
        logger.debug("No (more) replaceable barriers on sequence %d\n", seqInd)
        return '-1'
    # Find this barrier object in barriersBySeqByCtr or -1
    currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
    # 'found' flips to True once we pass currCtr in position order;
    # the first acceptable barrier after that point is the answer.
    found = False
    try:
        # If currBarrier is -1 (not found), the subscript below raises
        # and we fall out to the final "no next" return.
        currPos = currBarrier['seqPos']
        for pos in positions:
            # Start looking at things after curBarrier
            if pos < currPos:
                continue
            barrier = barriersBySeqByPos[seqInd][pos]
            nextCtr = barrier['counter']
            # logger.debug("getNextBarrier after '%s' on seq %d: looking at barrier '%s'", currCtr, seqInd, barrier['counter'])
            # Could use barriersEqual but we know both use same seqInd and curr uses currCtr
            if not found and nextCtr == currCtr:
                # If we hadn't yet found the desired barrier but did now, say so
                # logger.debug("found current")
                if pos != currPos:
                    logger.warning("Huh? Barrier ctrs are same (%s) but positions in sequence are diff (curr %d != pos %d)", nextCtr, currPos, pos)
                found = True
                continue
            if found:
                logger.debug("'%s' is barrier after '%s' on sequence %d", nextCtr, currCtr, seqInd)
                # But if we had found it, then the next one we found is next
                # NOW....
                # Before blindly returning this barrier, see if it is actually still in the sequence
                # Make sure that barrier is actually still in the sequence it claims to be in;
                # we might have already removed it
                if isReplaceableBarrier(barrier, seqs):
                    # return this ctr
                    if str(barrier) == '-1':
                        logger.debug("... returning it as next\n")
                        return '-1'
                    elif barrier['lengthCalculated']:
                        logger.debug("... but this barrier length already calculated, continue")
                        continue
                    else:
                        logger.debug("... returning it as next\n")
                        return nextCtr
                else:
                    # logger.debug("... but not (any longer) replaceable")
                    # keep looping
                    continue
    except:
        # EG currBarrier was -1
        pass
    # We didn't find the desired barrier, or else didn't find a next
    logger.debug("getNextBarrierCtr failed to find a next (none left?) for sequence %d, after barrier '%s'\n", seqInd, currCtr)
    return '-1'
# End getNextBarrierCtr
def barriersEqual(thisB, thatB):
    '''Return True iff 2 barrier dictionaries are effectively equal,
    i.e. same sequence and same counter (compared via their
    precomputed 'hash' entries when available).'''
    try:
        return thisB['hash'] == thatB['hash']
    except (KeyError, TypeError):
        # No precomputed hash entry, or not a dict at all (e.g. the -1
        # sentinel): fall back to plain equality. (Was a bare except.)
        return thisB == thatB
def barrierHash(barrier):
    '''Compute a cheap comparison hash for a barrier dict, derived
    from its counter (id) and the index of the sequence it lives on.'''
    counter = barrier['counter']
    seqIndex = barrier['seqIndex']
    return hash(str(counter) + str(seqIndex))
def getBarrierChannels(barrierCtr):
    '''Return a list of Channel objects whose sequences have this barrier,
    and which this Barrier claims it blocks.
    For a Wait this will be allChannels.
    On error this will be an empty list.
    '''
    global barriersByCtr, allChannels
    # The start pseudo-barrier ('-1') involves every channel.
    # (A 'wait-...' ctr could arguably be handled the same way.)
    if str(barrierCtr) == '-1':
        logger.debug("%s waits on all channels", barrierCtr)
        return allChannels
    # A known barrier records its own channel list.
    if barrierCtr in barriersByCtr:
        return barriersByCtr[barrierCtr]['channels']
    # Unknown barrier id: warn and report no channels.
    # FIXME: Could extract the channel that is the seq for the seqInd of this barrier and assume that
    logger.warning("Barrier %s unknown; assume it waits on no channels", barrierCtr)
    return []
def getBarrierIdx(seqInd, barrierCtr):
    '''Return the int position index of barrier barrierCtr within
    sequence seqInd, or -1 when the sequence or barrier is unknown.'''
    global barriersBySeqByCtr
    seqBarriers = barriersBySeqByCtr.get(seqInd)
    if seqBarriers is None:
        # Error - unknown channel / sequence index
        logger.warning("getBarrierIDx: Unknown sequence %d", seqInd)
        return -1
    entry = seqBarriers.get(barrierCtr)
    if entry is None:
        # This channel's sequence doesn't contain the barrier.
        logger.info("Sequence %d doesn't have barrier %s", seqInd, barrierCtr)
        return -1
    return entry['seqPos']
    # Note one could also loop thru entries in barriersBySeqByPos and return
    # the relevant key when/if we find this barrierCtr
def getLastSharedBarrierCtr(channels, barrierCtr):
    '''Find the last Barrier id (counter) before barrierCtr with at least the same set
    of channels as barrierCtr. Return '-1' if there is none.

    channels: channels the caller wants covered; if falsy, taken from
        the barrier's own 'channels' entry
    barrierCtr: barrier id to search back from
    '''
    # Find the last (before given barrier) barrier shared by the channels on this barrier
    # Return its ID (ctr)
    # Return '-1' if none (start)
    # each barrier has a prevBarrier
    # I'm looking for a barrier whose 'channels' is a superset of the given 'channels'
    global barriersBySeqByCtr, barriersByCtr
    if str(barrierCtr) == '-1' or barrierCtr is None:
        # This is the start - all channels have this and there's nothing prior
        return '-1'
    if not barrierCtr in barriersByCtr:
        logger.warning("Barrier '%s' unknown", barrierCtr)
        return '-1'
    barrier = barriersByCtr[barrierCtr]
    # Try to set channels if not given
    if not channels:
        if not barrier or barrier == -1 or not "channels" in barrier:
            logger.warning("Couldn't find channels on barrier '%s'", barrierCtr)
            return '-1'
        channels = barrier["channels"]
    if not channels:
        logger.debug("getLastSharedBarrier couldn't find channels for Barrier '%s'", barrierCtr)
        return '-1'
    startBarrier = barrier
    if not startBarrier:
        raise Exception("Couldn't find Barrier '%s' in getLastSharedBarrierCtr" % barrierCtr)
    # Pick one of the sequences that has this Barrier, basically arbitrarily
    seqInd = startBarrier.get('seqIndex', -1)
    if seqInd == -1:
        # The barrier record didn't say; scan the per-sequence tables.
        for seqI in barriersBySeqByCtr.keys():
            if barrierCtr in barriersBySeqByCtr[seqI]:
                seqInd = seqI
                break
    if seqInd == -1:
        raise Exception("Couldn't find Barrier '%s' in list by sequence" % barrierCtr)
    logger.debug("Using sequence %d to find the last shared barrier", seqInd)
    seqBs = barriersBySeqByCtr[seqInd]
    channelsSet = set(channels)
    prevChannelSet = set()
    currBarrier = startBarrier
    prevBarrierCtr = barrierCtr
    prevBarrier = startBarrier
    # Loop up the barrier's previous pointers, looking to see if its channel set contains all the channels
    # for this Barrier. We're looking for the first previous barrier that is a superset of the channels
    # for this Barrier.
    # Unfortunately, Qubits don't seem to have a nice .equals so set comparison (channelsSet <= prevChannelSet) fails.
    # Here we rely on the string rep of Qubits being sufficient
    while not (all(str(chan) in str(prevChannelSet) for chan in channelsSet)):
        #logger.debug("curr %s, prev %s", str(channelsSet), str(prevChannelSet))
        currBarrier = prevBarrier
        prevBarrierCtr = currBarrier['prevBarrierCtr']
        #logger.debug("From barrier %s, prevBarrierCtr: %s", currBarrier['counter'], prevBarrierCtr)
        prevBarrier = seqBs.get(prevBarrierCtr, None)
        if prevBarrier is None:
            logger.warning("Failed to find prev Barrier '%s' on sequence %d in getLastSharedBarrierCtr", prevBarrierCtr, seqInd)
            # This would happen if the last shared barrier is the start
            return '-1'
        if prevBarrierCtr == '-1':
            logger.debug("Previous possibly shared barrier is the start")
            return prevBarrierCtr
        if not prevBarrier or prevBarrier == -1 or barriersEqual(prevBarrier, currBarrier):
            logger.warning("Failed to find previous Barrier '%s' on sequence %d in getLastSharedBarrierCtr", prevBarrierCtr, seqInd)
            # This would happen if the last shared barrier is the start
            return '-1'
        prevChannelSet = set(prevBarrier.get('channels', []))
        # Look for error where a barrier claims more channels than sequences it is found on
        # Typically this is a Wait() that isn't on all channels, or WaitSome
        # not on the channels it listed
        psc = 0
        psIs = []
        for sI in barriersBySeqByCtr:
            if prevBarrierCtr in barriersBySeqByCtr[sI]:
                psc += 1
                psIs.append(sI)
        if psc != len(prevChannelSet):
            # This is an error if we insist all channels share a Wait/WaitSome that waits on those channels
            # Our current naming convention for Waits and way of finding matching waits assumes this
            logger.error("Candidate prevBarrier '%s' claims %d channels but found on only %d sequences (channels %s but sequences %s)", prevBarrierCtr, len(prevChannelSet), psc, prevChannelSet, psIs)
            logger.debug(" currChannelSet: %s; prev %s ChannelSet: %s", channelsSet, prevBarrierCtr, prevChannelSet)
    # End of while looking for a prevBarrier with a superset of channels
    if all(str(chan) in str(prevChannelSet) for chan in channelsSet):
        logger.debug("Found previous barrier '%s' whose channels %s include at least the channels on Barrier '%s': %s (but not using set comparison)", prevBarrierCtr, prevChannelSet, barrierCtr, channelsSet)
        # FIXME: Error check that this barrier is in fact on all the right channels?
        return prevBarrierCtr
    logger.info("Failed to find a common previous barrier to barrier '%s' on channels %s. Use start.", barrierCtr, channels)
    return '-1'
def replaceBarrier(seqs, currCtr, prevForLengthCtr, channelIdxs, chanBySeq):
    '''Replace Barrier currCtr on sequences with indices channelIdxs into seqs
    with the proper Id pulse, or mark this barrier as indeterminate and leave it.
    The Id pulse length is the time needed so all channels in channelIdxs take the same
    time to get from Barrier prevForLengthCtr to currCtr.
    Note the Id() pulse may have 0 length. Later compile_to_hardware drops such empty pulses.
    Return the edited sequences.
    chanBySeq is a dictionary by sequence ID to the channel object
    '''
    # Replace the barrier with ID currCtr across all channels
    # Note that this function modifies seqs in place
    # Note it takes a dict by sequence index to the channel object
    # It also takes a list of the sequence indices that are involved in this barrier
    import math
    # Calculate the length of this segment on each sequence
    # Use helper to do the actual calculation
    lengths = dict()
    logger.debug("replaceBarrier: calculate run length at '%s' since '%s'", currCtr, prevForLengthCtr)
    for seqInd in channelIdxs:
        seq = seqs[seqInd]
        lengths[seqInd] = getLengthBetweenBarriers(seqInd, currCtr, prevForLengthCtr)
        logger.debug("... Sequence %d length from curr '%s' to prev '%s': %s", seqInd, currCtr, prevForLengthCtr, lengths[seqInd])
    # Find the max (at least 0), or NaN if any are NaN
    numlist = list(lengths.values()) + [0]
    maxBlockLen = max(numlist)
    if any([math.isnan(x) for x in numlist]):
        # NaN anywhere poisons the whole segment: its length is unknown.
        maxBlockLen = float('nan')
    # If the block is of indeterminate length then later code
    # will replace these Barriers,
    # but mark the Barrier objects as having this indeterminate length
    # so we don't try to recalculate this length.
    indet = False
    if math.isnan(maxBlockLen):
        # This block is indeterminate
        indet = True
        logger.info("Wait at Barrier '%s' is indeterminate - later make it a Sync/Wait", currCtr)
        # Record the NaN on every involved sequence and leave the
        # Barrier in place for the Sync/WaitSome pass.
        for seqInd in channelIdxs:
            markBarrierLengthCalculated(currCtr, seqInd, maxBlockLen)
        return seqs
    logger.debug("From '%s' to '%s': maxBlockLen: %s\n", currCtr, prevForLengthCtr, maxBlockLen)
    # For each channel that has this Barrier
    # replace the Barrier in the sequence with an Id pulse
    # on the proper channel of the length (max-localLength).
    # Then record on the Barrier object that we used an Id pulse
    # of that length. That way later code to figure out the length
    # for an enclosing pair of barriers gets the right answer.
    # The Id pulse may be of 0 length.
    for seqInd in channelIdxs:
        seq = seqs[seqInd]
        ind = getBarrierIdx(seqInd, currCtr)
        if ind < 0:
            raise Exception("Sequence %d doesn't appear to have Barrier '%s'!" % (seqInd, currCtr))
        channel = chanBySeq[seqInd]
        idlen = maxBlockLen - lengths[seqInd] # Length of Id pulse to pause till last channel done
        # 7/8: If this barrier is a wait or waitsome, then don't do the replace, just update the length
        barrier = getBarrierForSeqCtr(seqInd, currCtr)
        if barrier != -1 and barrier.get('type', 'barrier') not in ('wait', 'waitsome'):
            if idlen == 0:
                # Instead of creating a new pulse Id(q, length=0) that
                # uses memory and just has to get removed later, put
                # in a constant - 0, which we remove later. We put in
                # something here to keep the indices of barriers from changing.
                logger.info("Sequence %d: Removing %s (is Id(length=0))", seqInd, seq[ind])
                seq[ind] = 0
            else:
                logger.info("Sequence %d: Replacing %s with Id(%s, length=%s)\n", seqInd, seq[ind],
                            channel, idlen)
                seq[ind] = Id(channel, idlen)
        else:
            logger.debug("Sequence %d: NOT replacing %s with Id, but marking it as length=%s\n", seqInd, seq[ind], idlen)
        # Record the Id pulse length on the barrier so enclosing
        # barrier-pair length calculations account for it.
        markBarrierLengthCalculated(currCtr, seqInd, idlen)
    return seqs
def getPreviousUndoneBarrierCtr(currCtr, prevCtr, seqIdx, iterCnt = 0):
    '''Find the previous barrier from currCtr on sequence seqIdx
    which is not marked as lengthCalculated.
    Return None if none found.

    currCtr: barrier id to walk back from
    prevCtr: earliest barrier id to consider (its own state is checked first)
    seqIdx: index of the sequence whose barrier chain is walked
    iterCnt: recursion depth; used only to indent log messages
    '''
    # For the given channel, loop up previous barriers,
    # if lengthCalculated==False, return it
    # Nominally prevCtr should have lengthCalculated=True,
    # But if it didn't, we'd want to do it
    global barriersBySeqByCtr
    # logger.debug(" %sgetPrevUndoneBarrier asked for last undone from curr '%s' to prev '%s'", " "*iterCnt, currCtr, prevCtr)
    if currCtr not in barriersBySeqByCtr[seqIdx]:
        raise Exception("Looking for prevUndoneBarrier: Sequence %d didn't have expected barrier '%s'" % (seqIdx, currCtr))
    prevBarrier = barriersBySeqByCtr[seqIdx].get(prevCtr, None)
    if prevBarrier is None:
        raise Exception("getPrevUndone failed to find prev '%s' in sequence %d" % (prevCtr, seqIdx))
    if prevBarrier != -1 and not prevBarrier['lengthCalculated']:
        # The boundary barrier itself still needs its length: it is the answer.
        logger.debug(" %sgetPrevUndone: prev was not done - so it is last: %s", " "*iterCnt, prevBarrier)
        return prevCtr
    # prevCtr is done or -1....
    barrier = barriersBySeqByCtr[seqIdx][currCtr]
    if barrier is None or barrier == -1:
        logger.debug(" %sgetPrevUndone: curr was None/-1: %s", " "*iterCnt, currCtr)
        return None
    if barrier['lengthCalculated']:
        # Here we assume that if a barrier is done, then all barriers before it are done
        logger.debug(" %sgetPrevUndone: curr is done, so return None. Current: %s", " "*iterCnt, barrier)
        return None
    # barrier / currCtr is not done, and prev is done or -1
    # if currCtr == prevCtr - cannot happen
    curPrevCtr = barrier['prevBarrierCtr']
    if curPrevCtr == prevCtr:
        # Already know that prevCtr is done and currCtr is not
        # so if the one before curr is prev, return curr
        logger.debug(" %sgetPrevUndone: prev from '%s' is prevCtr '%s', return curr", " "*iterCnt, currCtr, prevCtr)
        return currCtr
    # If we get here, there's something between curr and prev
    curPrevBarrier = barriersBySeqByCtr[seqIdx].get(curPrevCtr, None)
    if not curPrevBarrier:
        raise Exception("getPrevUndoneBarrier failed to find the barrier that current '%s' says comes before it: '%s'" % (currCtr, curPrevCtr))
    if curPrevBarrier != -1 and curPrevBarrier['lengthCalculated']:
        # The one before curr is done but curr is not done, so curr is last
        logger.debug(" %sgetPrevUndone: prev from '%s' is '%s' which is done, so return curr", " "*iterCnt, currCtr, curPrevCtr)
        return currCtr
    # the 1 before cur is also not done
    # Haven't reached the end, need to iterate
    logger.debug(" %sgetPrevUndone: curr's prev '%s' is undone so iterate...", " "*iterCnt, curPrevCtr)
    return getPreviousUndoneBarrierCtr(curPrevCtr, prevCtr, seqIdx, iterCnt+1)
def getLastUnMeasuredBarrierCtr(currCtr, prevCtr, seqIdxes):
    '''Return the counter/id of the last Barrier on the list of sequences
    not already marked as measured (will be WaitSome or know
    the Id pulse length).
    Return None if all are measured.
    '''
    # Work back from currCtr towards prevCtr on each sequence in turn,
    # returning the first unmeasured barrier encountered. NOTE: this is
    # a depth-first search, so it returns the first such barrier found,
    # not necessarily the latest or earliest across sequences.
    for seqIdx in seqIdxes:
        logger.debug("Looking for last unmeasured barrier on sequence %d from barrier '%s' to '%s'", seqIdx, currCtr, prevCtr)
        candidate = getPreviousUndoneBarrierCtr(currCtr, prevCtr, seqIdx)
        # Only a genuine intermediate barrier counts: skip None, the
        # endpoints themselves, and the start pseudo-barrier.
        if candidate not in (None, currCtr, prevCtr, '-1'):
            logger.debug(" ... found '%s'", candidate)
            return candidate
        logger.debug(" ... found none ('%s')", candidate)
    return None
def replaceOneBarrier(currCtr, seqIdxToChannelMap, seqs, seqInd = None):
    '''Replace the barrier with id currCtr on all sequences.

    Use the version of the barrier on the given sequence seqInd if given.
    Recursively find intervening Barriers on any related channel that is not
    marked as 'measured' (turned into an Id or will be a WaitSome),
    and replace those first, so that we can correctly calculate
    the length for this Barrier.
    Then use the helper replaceBarrier to do the actual replacement.

    Args:
        currCtr: counter/id (string) of the Barrier to replace.
        seqIdxToChannelMap: dict mapping sequence index -> Channel object.
        seqs: list of instruction sequences; modified and returned.
        seqInd: optional index of the sequence whose copy of the barrier
            to use; derived from the barrier itself when None.
    Returns:
        The updated seqs list.
    Raises:
        Exception: if the barrier (or the last shared barrier before it)
            is missing from a sequence it claims to be on.
    '''
    # Set seqInd and get the Barrier object for the right sequence
    if seqInd is None:
        barrier = barriersByCtr[currCtr]
        # A barrier of -1 is the notional 'start' barrier
        if barrier != -1:
            seqInd = barrier['seqIndex']
        else:
            seqInd = -1
    else:
        barrier = getBarrierForSeqCtr(seqInd, currCtr)
    # Get the set of channels this barrier is on
    waitChans = getBarrierChannels(currCtr)
    if waitChans == []:
        logger.warn("Barrier on no channels? Pretend '%s' is on current sequence %d (channel %s) where we found it", currCtr, seqInd, seqIdxToChannelMap[seqInd].label)
        # FIX: this previously read `waitChans = [channel]`, but `channel` is
        # undefined in this scope (NameError). Use the channel of the current
        # sequence, which is what the log message above says we pretend.
        waitChans = [seqIdxToChannelMap[seqInd]]
    # Turn those Channel objects into sequence indices
    waitSeqIdxes = [ind for ind in seqIdxToChannelMap for chan in waitChans if seqIdxToChannelMap[ind] == chan]
    logger.debug("Replacing Barrier '%s' on channels %s, sequences %s", currCtr, waitChans, waitSeqIdxes)
    # Note: barriers that are Wait or WaitSome instances are no longer
    # skipped here (pre-7/8 behavior); the core replaceBarrier helper
    # now does the right thing with them.
    prevForLengthCtr = getLastSharedBarrierCtr(waitChans, currCtr)
    # Sanity check: both this barrier and the last shared barrier must be
    # present on every involved sequence. (Loop variable renamed so it does
    # not shadow the seqInd parameter.)
    for wIdx in waitSeqIdxes:
        if prevForLengthCtr not in barriersBySeqByCtr[wIdx]:
            raise Exception("When replacing barrier '%s' (%s), found last shared barrier was ID '%s', but it is not on sequence %d (channel %s) that the original barrier is on" % (currCtr, seqs[wIdx][barrier['seqPos']], prevForLengthCtr, wIdx, seqIdxToChannelMap[wIdx]))
        if currCtr not in barriersBySeqByCtr[wIdx]:
            raise Exception("When replacing barrier '%s' (%s), found it is not on sequence %d (channel %s) when the barrier claimed channels %s" % (currCtr, seqs[barrier['seqIndex']][barrier['seqPos']], wIdx, seqIdxToChannelMap[wIdx], waitChans))
    logger.debug("Using length since barrier '%s'", prevForLengthCtr)
    logger.debug("Check for any intervening barriers not yet replaced / marked measured....")
    # If there are any intervening Barriers not marked as measured on any
    # channel (not turned into an Id or identified as indeterminate)
    # then replace them, by recursively calling this function.
    # This will recurse to get to the first such barrier when considered
    # sequentially, and then pop out until they're all handled.
    # Looping here handles the case where there are multiple channels involved.
    # We have to replace those earlier barriers, so that we can add things up
    # to get the length for this Barrier.
    undoneBarrierCtr = getLastUnMeasuredBarrierCtr(currCtr, prevForLengthCtr, waitSeqIdxes)
    while undoneBarrierCtr and undoneBarrierCtr != currCtr:
        logger.debug("Found undone barrier '%s' to replace first!\n", undoneBarrierCtr)
        seqs = replaceOneBarrier(undoneBarrierCtr, seqIdxToChannelMap, seqs)
        undoneBarrierCtr = getLastUnMeasuredBarrierCtr(currCtr, prevForLengthCtr, waitSeqIdxes)
    logger.debug("Done replacing any intervening barriers\n")
    # Now want all the lengths between curr and prev, and figure out min,
    # replace as appropriate
    seqs = replaceBarrier(seqs, currCtr, prevForLengthCtr,
                          waitSeqIdxes, seqIdxToChannelMap)
    logger.debug("Done replacing barrier '%s'\n", currCtr)
    return seqs
# End of replaceOneBarrier
# TODO
# * Check for logic gaps
# * Improve documentation
# * Raise exceptions don't just log for bad things
# * How do we check that all channels start with sync/wait? Should we?
# * Or related, do we do anything special if all channels start with a shared barrier?
# * What if we discover multiple sequential Waits or WaitSomes on same channels?
# * Can we or should we remove duplicates?
# * Consider passing around data structures instead of making them globals
# * Testing, including
# * 3+ qubits
# * explicit WaitSomes and Waits
# * Barrier that becomes WaitSome inside a call or a repeat or both
# * call or goto that goes backwards or forwards (4 cases)
# * Nested Calls
# * Nested Repeats
# * Nested Barriers
# * Sequences that don't start with a barrier
def replaceBarriers(seqs, seqIdxToChannelMap):
    '''
    Replace all Barrier() instructions with Sync() and WaitSome() or Id() pulses.
    Use WaitSome() if there's some intervening indeterminate length operation,
    like a CMP() or LoadCmp().
    Otherwise pause using Id on the less busy channels.
    This modifies the sequences and returns the updated sequences.
    Assumes Barriers list the channels they are on,
    and have an ID.
    Each Barrier is used exactly once per channel during operation
    (or else has guaranteed same length since prior Barrier,
    effectively meaning it is a WaitSome).
    '''
    # Approach:
    # Walk through each sequence building up barrier objects
    # that record each barrier including length in execution time
    # since the last barrier.
    # Then walk through barriers replacing them with Id pulses
    # where possible.
    # Then replace the remaining barriers with WaitSomes
    # Barrier objects are kept in 3 dictionaries: by sequence
    # by position in the sequence (where each sequence has different
    # instance of the object), by sequence by counter (id), and
    # independent of sequence by counter (in which case this is
    # just one instance of this barrier)
    # Each barrier has its sequence, position, channels,
    # ID, previous barrier, length since previous barrier
    # (float, may be 'nan' meaning it becomes a WaitSome)
    # A barrier object that is -1 means the start
    # A barrier ID of '-1' means the start
    # A wait has a barrier ID of 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
    # * where the channels are sorted, and the waitCount is the count of Waits encountered at runtime with the same channels
    # * Note the assumption that if a Wait specifies channels, then all channels have that Wait
    # barrier position of -1 is the start
    global barriersBySeqByPos, barriersBySeqByCtr, barriersByCtr, allChannels
    logger.debug("In replaceBarriers...")
    barriersBySeqByPos = dict() # by sequence Index in seqs, by Pos index of element in sequence
    barriersBySeqByCtr = dict() # by sequence Index in seqs, by Counter ID of barrier
    barriersByCtr = dict() # by Counter ID of barrier
    allChannels = sorted([ch for ch in seqIdxToChannelMap.values()], key=lambda chan: repr(chan)) # actual Channel objects sorted alphabetically
    startBarrier = dict()
    startBarrier['type'] = 'start'
    startBarrier['counter'] = '-1' # notional 'start' barrier has counter '-1', pos -1
    startBarrier['seqPos'] = -1 # index in sequence
    # Have we determined the length of the Id pulse or if this is a WaitSome?
    startBarrier['lengthCalculated'] = False
    # Walking thru running of this sequence, the length since the last Barrier on this sequence,
    # including this element.
    # Note that due to other elements on other sequences, this is not the same as the length
    # of the resulting Id pulse
    startBarrier['lengthSince'] = 0
    startBarrier['prevBarrier'] = None
    startBarrier['prevBarrierPos'] = -1
    startBarrier['prevBarrierCtr'] = '-1'
    # NOTE(review): 'lengthCalculated' was already set to False above; this
    # re-set to True is the effective value for the start barrier.
    startBarrier['lengthCalculated'] = True
    barriersByCtr['-1'] = startBarrier
    # Loop over all sequences
    # Walk through each sequence in execution order
    # (following Call/Goto/Repeat/Return)
    # As we encounter barriers, add them to our data structures
    # We'll use those data structures later to replace the Barriers in the sequences.
    # We track BlockLabels as we go, and of course Call and LoadRepeat
    for seqInd, seq in enumerate(seqs):
        logger.debug("Looking for barriers on Sequence %d", seqInd)
        #logger.debug(seq)
        barriersBySeqByPos[seqInd] = dict()
        barriersBySeqByCtr[seqInd] = dict()
        # Put a startBarrier in the front for this channel
        # NOTE(review): the SAME startBarrier dict object is stored for every
        # sequence (no copy), so its 'seqIndex'/'hash' end up reflecting the
        # last sequence processed — confirm this is intended.
        startBarrier['seqIndex'] = seqInd
        startBarrier['hash'] = barrierHash(startBarrier)
        barriersBySeqByPos[seqInd][-1] = startBarrier
        barriersBySeqByCtr[seqInd]['-1'] = startBarrier
        # Dict of BlockLabel's label (string) to index in sequence
        # Used to find the target of Call/Goto/Repeat calls
        # This is filled in lazily, as we find the labels.
        blockLabels = dict()
        # Times the repeat block if any is to be repeated (LIFO stack)
        rptCount = []
        # Length of sub-block before starting current repeat, if any
        rptStartLen = []
        # Index into sequence where repeat block starts
        rptStartInd = []
        # LIFO stack of index where Return (from Call) returns to (point to next thing to run)
        retInd = []
        # The previous barrier that this barrier will point to
        prevBarrierPos = -1
        prevBarrier = startBarrier
        prevBarrierCtr = '-1'
        # The total length through this element, which we'll
        # reset at each barrier
        curLen = 0
        # Is this block between 2 barriers of indeterminate length
        nonDet = False
        # counter of waits in sequence by channels it waits on
        waitsOnChannels = dict()
        # index into the sequence of the current element
        seqPos = 0
        # Now loop over elements in the sequence
        # Note that some code blocks will reset seqPos to other points
        # to follow Call/Return/Repeat/Goto commands
        while seqPos < len(seq):
            elem = seq[seqPos]
            logger.debug("Examining element at %d: %s", seqPos, elem)
            # if the element is a barrier, we save the length since the last barrier and a pointer to that previous barrier
            # If it is a CMP, then this block is indeterminate length. Next barrier must say so
            # Handle all kinds of barriers by putting them in our data structures
            if isBarrier(elem) or isWaitSome(elem) or isWait(elem):
                # The current barrier
                curBarrier = dict()
                curBarrier['type'] = 'barrier'
                curBarrier['prevBarrierPos'] = prevBarrierPos
                curBarrier['prevBarrierCtr'] = prevBarrierCtr
                curBarrier['seqIndex'] = seqInd
                curBarrier['seqPos'] = seqPos
                if isBarrier(elem):
                    logger.debug("Next barrier on seq %s is at %d: %s", seqInd, seqPos, elem)
                    curBarrier['type'] = 'barrier'
                    chans = list()
                    for chan in elem.chanlist:
                        if isinstance(chan, str):
                            logger.warning("Channel %s on %s was a string", chan, elem)
                            if chan.startswith('QBIT_'):
                                chan = chan[5:]
                            chan = QubitFactory(chan)
                        chans.append(chan)
                    # Store the channels sorted alphabetically for later comparison
                    # Only sort if we have more than 1 channel.
                    if len(chans) > 1:
                        curBarrier['channels'] = sorted(chans, key=lambda chan: repr(chan))
                    else:
                        curBarrier['channels'] = [chans[0]]
                    curBarrier['chanKey'] = frozenset(curBarrier['channels'])
                    curBarrier['counter'] = elem.value
                elif isWaitSome(elem):
                    # This shouldn't really happen I think, but maybe?
                    # But if previous is a Sync then treat this as a Barrier on its listed channels?
                    logger.info("Got %s at pos %d?!", elem, seqPos)
                    curBarrier['type'] = 'waitsome'
                    chans = list()
                    for chan in elem.chanlist:
                        if isinstance(chan, str):
                            logger.warning("Channel %s on %s was a string", chan, elem)
                            if chan.startswith('QBIT_'):
                                chan = chan[5:]
                            chan = QubitFactory(chan)
                        chans.append(chan)
                    # Store the channels sorted alphabetically for later comparison
                    curBarrier['channels'] = sorted(chans, key=lambda chan: repr(chan))
                    # Make that a frozenset to use as key in dict
                    curBarrier['chanKey'] = frozenset(curBarrier['channels'])
                    if curBarrier['chanKey'] not in waitsOnChannels:
                        waitsOnChannels[curBarrier['chanKey']] = 0
                    # Keep counter of # times seen a wait for same channels
                    # as the 2nd wait on same channels should match a waitsome on
                    # the other sequences on the same channels
                    waitsOnChannels[curBarrier['chanKey']] += 1
                    curBarrier['waitCount'] = waitsOnChannels[curBarrier['chanKey']]
                    curBarrier['counter'] = 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
                    if not isSync(seq[seqPos-1]):
                        logger.warning("Previous element was not a Sync, but %s", seq[seqPos-1])
                elif isWait(elem):
                    logger.info("Got %s at %d", elem, seqPos)
                    curBarrier['type'] = 'wait'
                    curBarrier['channels'] = allChannels
                    curBarrier['chanKey'] = frozenset(allChannels)
                    if curBarrier['chanKey'] not in waitsOnChannels:
                        waitsOnChannels[curBarrier['chanKey']] = 0
                    waitsOnChannels[curBarrier['chanKey']] += 1
                    curBarrier['waitCount'] = waitsOnChannels[curBarrier['chanKey']]
                    curBarrier['counter'] = 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
                    if not isSync(seq[seqPos-1]):
                        if isBarrier(seq[seqPos-1]) and isSync(seq[seqPos-2]):
                            logger.debug("9/2016: Core inserts a barrier between the sync and wait to ensure sequential")
                        else:
                            logger.warning("Previous element was not a Sync, but %s", seq[seqPos-1])
                if nonDet:
                    logger.debug("Marking this barrier as indeterminate length")
                    curBarrier['lengthSince'] = float('nan')
                    # For these barriers, we consider we know the length
                    # So later it's safe to skip over this barrier on other channels;
                    # getLengthBetweenBarriers will get the nan lengthSince and that's accurate
                    curBarrier['lengthCalculated'] = True
                    # Note that for nested blocks the outer barrier will become
                    # nonDet for the whole block because this one is,
                    # but the range to the next barrier may not be
                    nonDet = False # Reset for next block
                elif rptCount:
                    # We're in a Repeat block. The only way that's legal is to treat this as a Wait of some kind
                    logger.debug("%s is inside a Repeat block; treat as a Wait", elem)
                    curBarrier['lengthSince'] = float('nan')
                    # For these barriers, we consider we know the length
                    # So later it's safe to skip over this barrier on other channels;
                    # getLengthBetweenBarriers will get the nan lengthSince and that's accurate
                    curBarrier['lengthCalculated'] = True
                else:
                    curBarrier['lengthSince'] = curLen + pulseLengths(elem)
                    curBarrier['lengthCalculated'] = False
                curBarrier['hash'] = barrierHash(curBarrier)
                # logger.debug("This barrier has Channels: %s, length: %s, counter: '%s', prevBarrier: '%s' at index %d", curBarrier['channels'], curBarrier['lengthSince'], curBarrier['counter'], curBarrier['prevBarrierCtr'], curBarrier['prevBarrierPos'])
                logger.debug("Barrier dict: %s", curBarrier)
                # Store this barrier
                barriersByCtr[curBarrier['counter']] = curBarrier
                # We used to do a deepcopy here, but a regular shallow copy passes the unit tests.
                # If this causes problems later, use pyqgl2.quickcopy.quickcopy
                barriersBySeqByPos[seqInd][seqPos] = copy(curBarrier)
                barriersBySeqByCtr[seqInd][curBarrier['counter']] = copy(curBarrier)
                # Reset vars for next barrier block
                prevBarrier = curBarrier
                prevBarrierCtr = curBarrier['counter']
                prevBarrierPos = seqPos
                # the length counter starts at 0 for the next block
                curLen = 0
                # Move to the next element in the sequence
                seqPos += 1
                continue
            # CMP
            # Note this is a nonDeterminate block
            if isCMP(elem) or isLoadCmp(elem):
                logger.info("Indeterminate length block on sequence %d; has %s at %d", seqInd, elem, seqPos)
                nonDet = True
                seqPos += 1
                curLen += pulseLengths(elem)
                continue
            # LoadRepeat
            # Goes with a later Repeat(<label>) call
            # That <label> could nominally be anywhere, but QGL2 puts it on the line after the LoadRepeat
            # Note that you can nominally nest Repeat blocks, so we keep a LIFO stack of rptCounts.
            # Inside the block of code to be repeated, you can't in general have a Barrier; the elements
            # to that barrier are different between the first and later times through the loop, so the Barrier
            # is 2 different Id blocks so it isn't a repeat.
            # Exceptions: If the element right before the block is a barrier, and the last element in the block
            # is a Barrier, or you otherwise construct things carefully, then the block is the same length.
            # Or else if the Barrier is a WaitSome (on both 1st and later times through the loop), then it is an identical
            # Pulse.
            # Put another way: you can only use a given Barrier in a single way in any channel.
            # However there's another issue with a Barrier in a Repeat block: each channel for that Barrier must use the barrier
            # the same # of times, and in the same way, such that it makes sense to line up the barrier.
            if isLoadRepeat(elem):
                if elem.value < 1:
                    logger.warning("Sequence %d at %d got %s with value %d: Treat as 1", seqInd, seqPos, elem, elem.value)
                    elem.value = 1
                logger.debug("Found %s at index %d. Length so far: %f", elem, seqPos, curLen)
                rptCount.append(elem.value)
                # Guess that the repeat will want to go to line after LoadRepeat - if not, we'll start looking there
                # for the proper destination
                rptStartInd.append(seqPos+1)
                curLen += pulseLengths(elem)
                # Store the length of this block up through this element.
                # That way when we hit the Repeat block, we can potentially calculate the length of the block being repeated,
                # and just add it, without having to re-walk
                # Here we assume that the repeat block in fact starts at the next element after this LoadRepeat
                rptStartLen.append(curLen)
                seqPos += 1
                continue
            # See note above on Repeat blocks.
            # This code allows target of repeat to be anywhere.
            # It guesses that the Repeat goes to the line after LoadRepeat (as guessed above).
            # It does nothing special about intervening Barriers; elsewhere we ensure they are Waits not Ids
            # When we get here, we've already added to curlen the result of doing this repeat block once
            if isRepeat(elem):
                curLen += pulseLengths(elem)
                if not rptCount:
                    # FIXME: Ignore instead? Use NodeError?
                    raise Exception("Sequence %d got %s at %d without a LoadRepeat" % (seqInd, elem, seqPos))
                # Get the # of times left to repeat
                rc = rptCount[-1] - 1
                logger.debug("Found %s at index %d. Remaining repeats: %d", elem, seqPos, rc)
                # If there are no more repeats, move on
                if rc <= 0:
                    # Just finished last time through the loop
                    # Clear all the repeat variables
                    rptCount.pop()
                    while len(rptStartInd) > len(rptCount):
                        rptStartInd.pop()
                    while len(rptStartLen) > len(rptCount):
                        rptStartLen.pop()
                    # Move on to the next element
                    seqPos += 1
                    continue
                # If we get here, we need to repeat that block at least once
                # Update the repeats remaining counter
                rptCount[-1] = rc
                # Do blockLabels comparison by label
                target = elem.target
                if isBlockLabel(target):
                    target = target.label
                # Find proper start index
                realRptStartInd = -1
                if target in blockLabels:
                    realRptStartInd = blockLabels[target]
                    logger.debug("Found Repeat target in cache at %d", realRptStartInd)
                else:
                    # Loop thru rest of seq to find the target. Then loop thru start of seq to here
                    found = False
                    for posNext in range(seqPos, len(seq)):
                        if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                            blockLabels[target] = posNext
                            realRptStartInd = posNext
                            found = True
                            logger.debug("Found Repeat target in rest of sequence at %d", realRptStartInd)
                            break
                    if not found:
                        for posNext in range(0, seqPos):
                            if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                                blockLabels[target] = posNext
                                realRptStartInd = posNext
                                found = True
                                logger.debug("Found target in first part of sequence at %d", realRptStartInd)
                                break
                    if not found:
                        raise Exception("Sequence %d at %d: Failed to find %s target '%s'" % (seqInd, seqPos, elem, elem.target))
                # If the start of the repeat block is same as that in rptStartInd,
                # then use curlen-rptStartLen as length of block to repeat.
                # Multiply that by rc and add to curlen
                # Then clear the rpt LIFOs and move on
                if rptStartInd[-1] == realRptStartInd:
                    # We guessed correctly where to start repeat from
                    rs = rptStartLen.pop()
                    rptAdd = (curLen - rs) * rc
                    logger.debug("Stashed startElemInd matches target. Finish by adding (curlen %f - startLen %f) * repeatsToGo %d = %f", curLen, rs, rc, rptAdd)
                    curLen += rptAdd
                    # Just finished last time through the loop
                    # Clear all the repeat variables
                    rptCount.pop()
                    while len(rptStartInd) > len(rptCount):
                        rptStartInd.pop()
                    while len(rptStartLen) > len(rptCount):
                        rptStartLen.pop()
                    # Move on to the next element
                    seqPos += 1
                    continue
                else:
                    # If the 2 are different, then reset rptStartInd to what we found, reset repeatStartLen to curlen, set seqPos to rptStartInd
                    logger.debug("Repeat started at %d for target %s, not guessed %d; going back", realRptStartInd, elem.target, rptStartInd[-1])
                    # We already reset the repeat counter properly
                    # Reset the startInd to be the proper one
                    # And reset the length for before the repeat to the length to this point
                    rptStartLen[-1] = curLen
                    rptStartInd[-1] = realRptStartInd
                    # Then continue from that starting point
                    seqPos = realRptStartInd
                    continue
            # End of handling Repeat
            # Handle Return
            # This jumps back to the last Call
            # Note that Call/Return blocks could be nested,
            # and due to a Goto, the 'Return' may go to something later in the sequence.
            # Note that if there is a Barrier inside a Call block, what does it mean to hit that Barrier
            # twice? As in the comment above for Repeat, unless things are identical across channels,
            # how do you line up the Barriers?
            # So in general, a Barrier, including one inside a Call block, should only be called once
            if isReturn(elem):
                # Should have seen a previous call
                # NOTE: If there was a barrier inside that block, we better only have called this block once
                curLen += pulseLengths(elem)
                if not retInd:
                    raise Exception("Sequence %d at %d: Have no saved index to go back to for %s" % (seqInd, seqPos, elem))
                ri = retInd.pop()
                logger.debug("Got %s: Returning to saved index %d", elem, ri)
                seqPos = ri
                continue
            # Handle Call() or Goto()
            # Both take a BlockLabel to jump to.
            # Call() requires stashing the index after it to return to.
            if isCall(elem) or isGoto(elem):
                logger.debug("Got %s at %d - will jump to %s", elem, seqPos, elem.target)
                # Call is slightly special
                if isCall(elem):
                    if seqPos+1 == len(seq):
                        # The return would try to go to something off the end. That won't work.
                        raise Exception("Call() is last element in sequence %d: %s" % (seqInd, elem))
                    logger.debug("Next Return will go to %d", seqPos+1)
                    # stash away seqPos+1 as the next place to return
                    retInd.append(seqPos+1)
                # Do BlockLabels comparison by label
                target = elem.target
                if isBlockLabel(target):
                    target = target.label
                curLen += pulseLengths(elem)
                if target in blockLabels:
                    seqPos = blockLabels[target]
                    logger.debug("Found target in cache at %d", seqPos)
                    continue
                # Didn't find the target yet. Look.
                # Loop thru rest of seq. Then loop thru start of seq to here
                found = False
                for posNext in range(seqPos, len(seq)):
                    if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                        blockLabels[target] = posNext
                        seqPos = posNext
                        found = True
                        logger.debug("Found target in rest of sequence at %d", seqPos)
                        break
                if found:
                    continue
                for posNext in range(0, seqPos):
                    if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                        blockLabels[target] = posNext
                        seqPos = posNext
                        found = True
                        logger.debug("Found target in first part of sequence at %d", seqPos)
                        break
                if found:
                    continue
                else:
                    raise Exception("Sequence %d at %d: Failed to find %s target '%s'" % (seqInd, seqPos, elem, elem.target))
            # BlockLabel
            # Save where this was in case it's a target for a Call/Goto/Repeat,
            # otherwise it's a normal element.
            if isBlockLabel(elem):
                curLen += pulseLengths(elem)
                # Stash away that label in case some Call/Goto/Repeat needs it
                blockLabels[elem.label] = seqPos
                seqPos += 1
                continue
            # Default
            elen = pulseLengths(elem)
            logger.debug("'%s' is a normal element - add its length (%s) and move on", elem, elen)
            curLen += elen
            seqPos += 1
            continue
        # Done looking at elements in this sequence
        logger.debug("Done looking for Barriers on sequence %d\n", seqInd)
        # Now we'll move to the next channel
    # End of loop over channels
    logger.debug("Done looking for Barriers on all sequences\n")
    # At this point we've looked at every element in every sequence, adding up the lengths.
    # This data is saved away in barrier objects for use to use next.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("barriersBySeqByCtr: ")
        for seqInd in sorted(barriersBySeqByCtr):
            logger.debug(" Seq %d:", seqInd)
            for ctr in barriersBySeqByCtr[seqInd]:
                logger.debug("   %s: %s", ctr, barriersBySeqByCtr[seqInd][ctr])
        logger.debug("barriersBySeqByPos: ")
        for seqInd in sorted(barriersBySeqByPos):
            logger.debug(" Seq %d:", seqInd)
            for pos in sorted(barriersBySeqByPos[seqInd]):
                logger.debug("   %s: %s", pos, barriersBySeqByPos[seqInd][pos])
        logger.debug("barriersByCtr: ")
        for ctr in barriersByCtr:
            logger.debug(" %s: %s", ctr, barriersByCtr[ctr])
        logger.debug("")
    # And here is the main code to use the above functions and replace all barriers.
    # First we replace Barriers that turn into Id pulses
    # - because that's a 1 for 1 replacement (doesn't screw up saved indices)
    # For each sequence, start at the 'start' barrier and go to 'next'
    # where the next is a Barrier that is still in the sequence
    # and not marked as already calculated (if still there but marked
    # calculated it is turning into a WaitSome)
    # When we have such a barrier, replace it.
    # Note however that replacing a Barrier potentially requires
    # first replacing some other barriers on other channels.
    # EG if for q1 B3 links up with q3 and the last common is B0, but B1 and B2 are only on
    # the q3 program, then for q3 to get the length up to B3,
    # it needs to do B1 and B2 first.
    # so replaceOneBarrier hides that recursion.
    for seqInd in seqIdxToChannelMap.keys():
        currCtr = '-1'
        logger.debug("Handling Barriers on sequence %d", seqInd)
        positions = []
        try:
            positions = sorted(barriersBySeqByPos[seqInd])
        except Exception as e:
            logger.warning("Failed to get sorted list of indices: Got %s", e)
        currCtr = getNextBarrierCtr(seqs, seqInd, currCtr, positions)
        while (currCtr != '-1'): # While there's another barrier
            logger.info("Replacing Barrier '%s' found on sequence %d", currCtr, seqInd)
            # replace that barrier, plus any other barriers (on other channels)
            # necessary to calculate the length of the Id pulse here
            seqs = replaceOneBarrier(currCtr, seqIdxToChannelMap, seqs, seqInd)
            # Move on to the next barrier
            currCtr = getNextBarrierCtr(seqs, seqInd, currCtr, positions)
    # When we get here, we ran out of barriers that turn into Id pulses to replace
    logger.debug("Done swapping non Wait Barriers\n")
    # Now change any remaining barriers into Sync/WaitSome pairs
    for seqInd in barriersBySeqByPos:
        if seqInd < 0 or seqInd >= len(seqs):
            logger.warn("No such channel %d?", seqInd)
            continue
        logger.debug("Swapping remaining Barriers on sequence %d with Sync/WaitSome", seqInd)
        seq = seqs[seqInd]
        # Count how many of these swaps we've done
        # Because that's the # of extra elements we're inserting, so it is the
        # amount that our saved indices are off
        swapCnt = 0
        # loop over our previous stash of barriers on this sequence
        positions = sorted(barriersBySeqByPos[seqInd])
        for barrierInd in positions:
            if barrierInd < 0:
                # skip the fake start
                continue
            # The new actual index of this element in the sequence
            bInd = barrierInd + swapCnt
            barrier = barriersBySeqByPos[seqInd][barrierInd]
            bType = barrier.get('type', 'barrier')
            bChannels = getBarrierChannels(barrier['counter'])
            if bInd >= len(seq):
                logger.debug("swapCnt: %d, seqLen: %d", swapCnt, len(seq))
                raise Exception("Calculated index of barrier %s (%d, was originally %d) is past end of sequence %d" % (barrier['counter'], bInd, barrierInd, seqInd))
            # Make sure it's a barrier still
            if isBarrier(seq[bInd]):
                swapCnt += 1
                # NOTE(review): 'seq' is not rebound after the assignments
                # below; if more than one barrier remains on this sequence,
                # later iterations slice the stale list with a shifted
                # index, discarding earlier insertions — verify.
                if bType == 'wait' or bChannels == allChannels:
                    logger.info("Replacing sequence %d index %d (%s) with Sync();Wait()", seqInd, bInd, seq[bInd])
                    # Replace
                    seqs[seqInd] = seq[:bInd] + [Sync(), Wait()] + seq[bInd+1:]
                else:
                    logger.info("Replacing sequence %d index %d (%s) with Sync(); WaitSome(%s)", seqInd, bInd, seq[bInd], bChannels)
                    seqs[seqInd] = seq[:bInd] + [Sync(), WaitSome(bChannels)] + seq[bInd+1:]
            else:
                # This is common / expected
                logger.debug("Spot %d (was %d) in sequence %d (channel %s) not (no longer) a barrier, but: %s", bInd, barrierInd, seqInd, seqIdxToChannelMap[seqInd], seq[bInd])
                continue
        logger.debug("Swapped %d barriers on sequence %d\n", swapCnt, seqInd)
        # Now to next sequence
    # Done swapping remaining barriers for Sync/Waits
    # Now all Barriers should be gone.
    # Now we can also remove the stub "0" entries for Id(q, length=0) pulses
    for sidx, seq in enumerate(seqs):
        newSeq = []
        for idx in range(len(seq)):
            if isBarrier(seq[idx]):
                logger.warn("Sequence %d still has %s at %d - remove!", sidx,
                            seq[idx], idx)
            elif seq[idx] == 0:
                logger.debug("Removing placeholder 0 for Id(0) in sequence %d at %d", sidx, idx)
            else:
                newSeq.append(seq[idx])
        seqs[sidx] = newSeq
    # Debug: Print final sequences
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(" ")
        logger.debug("Final QGL1 sequences:")
        for sidx, seq in enumerate(seqs):
            logger.debug("Sequence %d", sidx)
            for idx in range(len(seq)):
                logger.debug(" %d: %s", idx, seq[idx])
            logger.debug(" ")
    logger.debug("Done replacing Barriers\n")
    return seqs
# End of replaceBarriers
#####
# Test code below - for unit testing code in this file.
if __name__ == '__main__':
    # Ad-hoc manual test driver for this module's barrier replacement.
    from QGL.Compiler import find_unique_channels
    from QGL.Channels import Qubit as qgl1Qubit
    import logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    # NOTE(review): repeatBarriers is defined but never invoked below;
    # kept as a sample sequence builder for manual experimentation.
    def repeatBarriers():
        # This no longer works as we currently don't allow barriers inside repeat blocks
        '''
        for i in 1,2
        with concur
        for q in q1,q2
        X(q)
        LoadRepeat 2
        loopstart
        BARRIER - remove?
        X(q)
        BARRIER - becomes Id
        Repeat(loopstart)
        '''
        from QGL.ChannelLibrary import QubitFactory
        from QGL.BlockLabel import BlockLabel
        from qgl2.qgl1control import Barrier
        from QGL.ControlFlow import Sync
        from QGL.ControlFlow import Wait
        from QGL.PulsePrimitives import Id
        from QGL.PulsePrimitives import MEAS
        from QGL.PulsePrimitives import X
        from QGL.PulsePrimitives import Y
        q1 = QubitFactory('q1')
        QBIT_q1 = q1
        q2 = QubitFactory('q2')
        QBIT_q2 = q2
        q3 = QubitFactory('q3')
        QBIT_q3 = q3
        seqs = list()
        seq = [
            LoadRepeat(2),
            BlockLabel('loopstart1'),
            Barrier(),
            X(q1, length=0.1),
            Barrier(),
            Repeat(BlockLabel('loopstart1')),
            Barrier() # Including this causes error cause we see the Repeat without LoadRepeat
            ]
        seqs += [seq]
        seq = [
            LoadRepeat(2),
            BlockLabel('loopstart2'),
            Barrier(),
            X(q2, length=0.2),
            Barrier(),
            Repeat(BlockLabel('loopstart2')),
            Barrier()
            ]
        seqs += [seq]
        return seqs
    # Basic 3 qubits not all doing same stuff / diff # barriers
    # seqs = testFunc()
    # 2 qubits with a repeat inside, doing same stuff
    # seqs = testFunc2()
    # 2 qubits doing same thing
    # Call inside a barrier
    # which has barriers inside, and does a call itself
    # seqs = testCall()
    # test Repeats including nested repeats
    # seqs = testRepeat()
    # test explicit waits
    # seqs = testWait()
    # test explicit WaitSomes
    # seqs = testWaitSome()
    # test WaitSomes and Cmp/LoadCmp
    seqs = testCMP()
    logger.info("Seqs: \n%s", printSeqs(seqs))
    # Build the sequence-index -> Channel map used by replaceBarriers
    seqIdxToChannelMap = dict()
    for idx, seq in enumerate(seqs):
        chs = find_unique_channels(seq)
        for ch in chs:
            # FIXME: Or just exclude Measurement channels?
            if isinstance(ch, qgl1Qubit):
                seqIdxToChannelMap[idx] = ch
                logger.debug("Sequence %d is channel %s", idx, ch)
                break
    # Hack: skip the empty sequence(s) now before doing anything else
    useseqs = list()
    decr = 0 # How much to decrement the index
    toDecr = dict() # Map of old index to amount to decrement
    for idx, seq in enumerate(seqs):
        if idx not in seqIdxToChannelMap:
            # Indicates an error - that empty sequence
            logger.debug("Sequence %d has no channel - skip", idx)
            decr = decr+1
            continue
        if decr:
            toDecr[idx] = decr
            logger.debug("Will shift index of sequence %d by %d", idx, decr)
        useseqs.append(seq)
    seqs = useseqs
    if decr:
        newmap = dict()
        for ind in seqIdxToChannelMap:
            if ind in toDecr:
                newmap[ind-decr] = seqIdxToChannelMap[ind]
                logger.debug("Sequence %d (channel %s) is now sequence %d", ind, seqIdxToChannelMap[ind], ind-decr)
            elif ind in seqIdxToChannelMap:
                logger.debug("Sequence %d keeping map to %s", ind, seqIdxToChannelMap[ind])
                newmap[ind] = seqIdxToChannelMap[ind]
            else:
                logger.debug("Dropping (empty) sequence %d", ind)
    # NOTE(review): this assignment sits outside the 'if decr:' block but
    # 'newmap' is only defined inside it — NameError when decr == 0; verify
    # whether it should be indented into the 'if decr:' body.
    seqIdxToChannelMap = newmap
    logger.info("Seqs just before replace:\n%s", printSeqs(seqs))
    seqs = replaceBarriers(seqs, seqIdxToChannelMap)
    logger.info("Seqs after replace: \n%s", printSeqs(seqs))
| 46.374377 | 411 | 0.602331 | # Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
# Note: This code is QGL not QGL2
# This file contains code to replace Barrier instructions with appropriate Id()
# pulses to make channels line up without using a Wait where possible.
# Where not possible, it replaces the Barrier with Sync then WaitSome.
# See replaceBarriers().
'''
Assumptions
* A given function invocation / program has an even # (possibly 0) of Barriers on
each channel
* Given barriers at both start/end of with concur or with inlined,
and no manual additions, this is guaranteed. If the statements in a
function are not in a with-concur then they are treated as
sequential, and barriers will be inserted between sequential statements.
* QGL2 programmers may not manually insert Goto, Call, Return,
BlockLabel, Repeat, LoadRepeat, or Barrier
* Open question: We could relax this if other things hold true, but why? Perhaps you
can write a QGL1 function because QGL2 is not expressive enough?
* Open question: Perhaps you can add Barriers in a sort of export mode?
* LoadRepeat is immediately followed by the BlockLabel we'll repeat to
* That is, this is what QGL2 will do
* We could relax this, but why?
* LoadRepeat value will be integer of at least 2
* That is, this is what QGL2 will do
* The statement block to be repeated (between BlockLabel target of
Repeat & Repeat) does not include a Goto without also including the
BlockLabel target of the Goto
* Note that there may be unused BlockLabels.
* Block to be repeated (between BlockLabel & Repeat) does not include a
Call without including the target BlockLabel and the matching Return
* Call and Goto and Repeat target BlockLabels exist on each relevant
channel's program & are unique (per program)
* Code 'between' Call and Return includes an even # (possibly 0) of
Barriers
* where 'between' follows execution order not order in the sequence
* Note there may be some number of Goto and CMP statements in the middle.
* Code 'between' 2 'paired' Barriers does not include a Call without [its
BlockLabel target, obviously, and] Return
* where 'paired' refers to the indentation level in the source and is
not immediately apparent once 'compiled'; e.g. matching
* there may be other nested barriers in the middle
* A valid QGL2 program calls init() (as currently defined) on all channels that will be used in the program concurrently
* because it includes a global Wait that requires a Sync from all channels before the program can proceed
* Call and Repeat blocks may be nested
* If a Wait or WaitSome waits on a channel, then all those channels will have a matching Wait.
* You cannot wait for a sync from a channel that does not itself Wait
* A Wait or WaitSome must be preceded by a Sync
Some things you cannot assume:
* The BlockLabel target of a Goto is often numerically BEFORE the
Goto; make no assumption about its relative placement
* The BlockLabel target of a Call may be numerically before or after
the Call; make no assumption about its relative placement
* The Return is numerically before or after the BlockLabel target of a
Call; make no assumption about its relative placement
* The Repeat is numerically before or after the LoadRepeat /
BlockLabel target; make no assumption about its relative placement
'''
# Other points
# * There may be diff # of Barriers on diff channels
# * Each barrier has a globally unique Id and list of channels that include this barrier,
# meaning that all those barriers wait on this barrier
# * Wait is like a barrier on all channels.
# * When all channels start with a Barrier, make it a Wait
from QGL.ControlFlow import Goto, Call, Return, LoadRepeat, Repeat, Wait, LoadCmp, Sync, ComparisonInstruction, ControlInstruction
from qgl2.qgl1control import Barrier, WaitSome
from QGL.PulseSequencer import Pulse, CompositePulse, PulseBlock
from QGL.BlockLabel import BlockLabel
from QGL.PulsePrimitives import Id
from QGL.ChannelLibraries import QubitFactory
from copy import copy
import logging
logger = logging.getLogger('QGL.Compiler.qgl2')
# Convenience functions to identify pulse/control elements
# Each predicate returns True iff the given sequence element is an instance
# of the named QGL instruction/pulse type (isID additionally requires the
# Pulse's label to be "Id"). Used throughout the barrier-replacement code.
def isWait(pulse): return isinstance(pulse, Wait)
def isWaitSome(pulse): return isinstance(pulse, WaitSome)
def isBarrier(pulse): return isinstance(pulse, Barrier)
def isSync(pulse): return isinstance(pulse, Sync)
def isCMP(pulse): return isinstance(pulse, ComparisonInstruction)
def isLoadCmp(pulse): return isinstance(pulse, LoadCmp)
def isID(pulse): return (isinstance(pulse, Pulse) and pulse.label == "Id")
def isReturn(pulse): return isinstance(pulse, Return)
def isLoadRepeat(pulse): return isinstance(pulse, LoadRepeat)
def isRepeat(pulse): return isinstance(pulse, Repeat)
def isGoto(pulse): return isinstance(pulse, Goto)
def isCall(pulse): return isinstance(pulse, Call)
def isBlockLabel(pulse): return isinstance(pulse, BlockLabel)
def pulseLengths(pulses):
    '''QGL1 function to get the total length of a pulse, pulseblock,
    compositepulse, or (possibly nested) list/tuple of such things.

    Returns the object's `length` attribute when present; otherwise sums
    the lengths of the elements of a list/tuple, recursing as needed.
    An empty list has length 0. Unknown elements with no length are
    assumed to have length 0 (with a warning).

    Raises:
        Exception: if pulses is None or a string.
    '''
    # First just try to get/return the length attribute. This covers Pulse,
    # CompositePulse, PulseBlock, ControlInstruction, BlockLabel - all the
    # usual things. Only AttributeError means "no length" - anything else
    # would be a real bug we should not swallow.
    try:
        logger.debug("Pulse %s has length %s", pulses, pulses.length)
        return pulses.length
    except AttributeError:
        pass
    # If we get here, it has no length attribute
    # Errors
    if pulses is None:
        raise Exception("pulses was None")
    if isinstance(pulses, str):
        raise Exception("pulseLengths got string pulses: %r" % pulses)
    if isinstance(pulses, (list, tuple)):
        logger.debug("pulses was list: %r", pulses)
        # Recursively sum element lengths; sum() of an empty list is 0
        return sum(pulseLengths(pulse) for pulse in pulses)
    # Not a pulse or list of pulses that we know how to handle
    # FIXME! Raise some kind of error?
    # Or are there pulse like things in there that we should ignore?
    logger.warning("Unknown sequence element %s of type %s assumed to have length 0", pulses, type(pulses))
    return 0
# FIXME: This gets called a bunch. Removing log statements doesn't
# speed it up. What would help?
def markBarrierLengthCalculated(barrierCtr, seqIdx, addLen=float('nan')):
    '''Update the barrier object in our 3 data structures
    for the given counter, sequence to add the given length
    to lengthSince; adding in the length of the Id pulse.
    Note that if it becomes a wait, addLen is NAN, and adding that
    gives NAN as desired.

    barrierCtr: id (counter) of the barrier to mark
    seqIdx: index of the sequence this call is on behalf of
    addLen: Id pulse length to add to the barrier's lengthSince
    '''
    # To be called for each sequence that this barrier is on
    global barriersByCtr, barriersBySeqByPos, barriersBySeqByCtr
    logger.debug("markBarrierLength (seq %d) adding to barrier '%s' length: %s", seqIdx, barrierCtr, addLen)
    # 1) Update the global by-counter map.
    # NOTE(review): barrier may be None here (unknown counter); the bare
    # except below relies on the resulting TypeError to reach the warning.
    barrier = barriersByCtr.get(barrierCtr, None)
    try:
        if barrier['lengthCalculated']:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersByCtr %s already calculated", seqIdx, barrierCtr)
                logger.debug("Old dict: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        elif barrier['seqIndex'] != seqIdx:
            # We call this once per sequence so we'll catch this next time through
            # logger.debug("markBarrier (seq %d) on '%s' found wrong sequence in barriersByCtr (%d) - skip", seqIdx, barrierCtr, barrier['seqIndex'])
            pass
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrierLength updating barriersByCtr object: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersByCtr[barrierCtr] = barrier
            logger.debug(" ... updated length: %s", barriersByCtr[barrierCtr]['lengthSince'])
    except:
        logger.warning("Barrier '%s' not in barriersByCtr", barrierCtr)
    # 2) Update barriersBySeqByCtr (and capture the barrier's position for
    # step 3 below).
    pos = None
    try:
        barrier = barriersBySeqByCtr[seqIdx][barrierCtr]
        pos = barrier['seqPos']
        if barrier['lengthCalculated']:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersBySeqByCtr '%s' already calculated: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", seqIdx, barrierCtr, barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        else:
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersBySeqByCtr[seqIdx][barrierCtr] = barrier
    except:
        logger.debug("markBarrier didn't find %s on seq %d", barrierCtr, seqIdx)
    # 3) Update barriersBySeqByPos - first by the position found above,
    # falling back to a linear scan for the matching counter.
    barrier = None
    try:
        barrier = barriersBySeqByPos[seqIdx][pos]
    except:
        try:
            for pos in barriersBySeqByPos[seqIdx]:
                if barrierCtr == barriersBySeqByPos[seqIdx][pos]['counter']:
                    barrier = barriersBySeqByPos[seqIdx][pos]
                    break
        except:
            # NOTE(review): pos may still be None here, which would break the
            # %d format when this warning is emitted - confirm.
            logger.warning("Failed to find barrier %d or sequence %d in barriersBySeqByPos", pos, seqIdx)
    try:
        if barrier['lengthCalculated']:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("markBarrier (seq %d) found in barriersBySeqByPos '%s' already calculated: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'seqPos': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", seqIdx, barrierCtr, barrier['counter'], barrier['type'], barrier['seqIndex'], barrier['seqPos'], barrier['lengthSince'], barrier['prevBarrierCtr'], barrier['lengthCalculated'])
        else:
            barrier['lengthSince'] += addLen
            barrier['lengthCalculated'] = True
            barriersBySeqByPos[seqIdx][pos] = barrier
    except:
        # no barrier found
        pass
def getBarrierForSeqCtr(seqInd, currCtr):
    '''Get the barrier object with id (counter) currCtr on sequence seqInd,
    or -1 if not found.

    seqInd: index of the sequence to look in
    currCtr: barrier counter to look up (may be None or '-1' for start)
    Returns: the barrier dict, or -1 when the sequence or counter is unknown.
    '''
    global barriersBySeqByCtr
    # Explicit membership check instead of the old bare except, so real
    # errors are no longer silently swallowed.
    if seqInd not in barriersBySeqByCtr:
        if currCtr is None:
            logger.debug("getBarrierForSeq got None currCtr")
        # FIXME: Error?
        logger.warning("getBarrierForSeqCtr got seqInd %s not in barriersBySeqByCtr", seqInd)
        return -1
    # Note: a Wait's counter is also stored in this by-counter map, so no
    # special lookup by position is needed here.
    return barriersBySeqByCtr[seqInd].get(currCtr, -1)
def getLengthBetweenBarriers(seqInd, currCtr, prevCtr='-1', iterCnt=0):
    '''For the given sequence, find the length between the given 2 barriers.
    Return float('NaN') if indeterminate.

    Recurses up the list of barriers adding up the lengths we previously
    calculated for each pair of Barriers. So if the length between any 2
    barriers within that chain is indeterminate, the whole thing is
    indeterminate.

    seqInd: index of the sequence
    currCtr: id of the later barrier ('-1' means start;
        'wait-chans-%s-ctr-%d' style ids mean a Wait of some kind)
    prevCtr: id of the earlier barrier (default '-1', the start)
    iterCnt: recursion depth, used only to indent log messages
    '''
    import math
    if currCtr == prevCtr:
        logger.debug("%sgetLengthBetweenBarriers asked for length to self '%s' (0)", " "*iterCnt, currCtr)
        return 0
    # find the barrier lengths for this channel
    # follow the previous pointers, adding lengths
    currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
    if currBarrier == -1:
        logger.debug("%sgetLengthBetweenBarriers from current -1 (start or error), use length 0", " "*iterCnt)
        # from start
        return 0
    logger.debug("%sgetLengthBetweenBarriers: currBarrier: {'counter': '%s', 'type': '%s', 'seqIndex': %s, 'lengthSince': %s, 'prevBarrierCtr': '%s', 'lengthCalculated': %s}", " "*iterCnt, currBarrier['counter'], currBarrier['type'], currBarrier['seqIndex'], currBarrier['lengthSince'], currBarrier['prevBarrierCtr'], currBarrier['lengthCalculated'])
    # Basic case: the previous barrier is the one we're looking for
    prevBarrierCtr = currBarrier['prevBarrierCtr']
    prevLen = currBarrier['lengthSince']
    # FIXME: Guard against barrier not having these fields?
    if prevBarrierCtr == prevCtr:
        logger.debug("%sDesired previous barrier '%s' is actual previous from current '%s', so use stored length: %s", " "*iterCnt, prevCtr, currCtr, prevLen)
        return prevLen
    if not currBarrier['lengthCalculated'] and iterCnt>0:
        # logger.warn is deprecated (removed in Python 3.13) - use warning
        logger.warning("%slength from '%s' to '%s' is not reliable cause not calculated", " "*iterCnt, currCtr, prevBarrierCtr)
    # Old code stored firstPrev and prevPrev to handle repeat with barrier inside
    # But now realize that doesn't make sense; any barrier inside a repeat (if allowed at all)
    # must be treated as indeterminate / a Wait
    # If the length so far is indeterminate, no use in recursing -
    # the whole thing will be indeterminate
    if math.isnan(prevLen):
        logger.debug("%sLength to previous from current '%s' was NaN, return that", " "*iterCnt, currCtr)
        return prevLen
    logger.debug("%sLength from curr '%s' to prev '%s' will start with length from curr to next '%s': %s", " "*iterCnt, currCtr, prevCtr, prevBarrierCtr, prevLen)
    # If this barrier doesn't store the desired length, then recurse
    return prevLen + getLengthBetweenBarriers(seqInd, prevBarrierCtr, prevCtr, iterCnt+1)
def isReplaceableBarrier(barrier, seqs):
    '''Is the given barrier object replaceable on its sequence?

    Start, and barriers that are no longer in their sequence, are not
    replaceable. So only a Barrier() of the correct id (counter).
    HOWEVER: We now pretend Wait/WaitSome are replaceable so that later we
    calculate the real length, though we don't actually do the replacement.

    barrier: barrier dict (uses 'seqPos', 'counter', 'type', 'seqIndex')
    seqs: the list of sequences; if falsy, the in-sequence checks are
        skipped and the answer is False
    Returns: boolean
    '''
    ind = barrier['seqPos']
    nextCtr = barrier['counter']
    nextType = barrier['type']
    seqInd = barrier['seqIndex']
    logger.debug("Checking if barrier '%s' is replaceable: %s", nextCtr, barrier)
    if ind < 0:
        # Negative position marks the pseudo 'start' barrier
        logger.debug("Barrier '%s' is start, not replaceable", nextCtr)
        return False
    # 7/8: Don't bail here; so we can calculate the length later
    # if nextType in ('wait', 'waitsome'):
    #     logger.debug("Barrier %s is a wait, not replaceable", nextCtr)
    #     return False
    if seqs:
        # logger.warn is deprecated (removed in Python 3.13); use warning
        if not (seqInd >= 0 and seqInd < len(seqs)):
            logger.warning("Barrier '%s' claims to be on sequence %d which doesn't exist (can't replace)", nextCtr, seqInd)
            return False
        if len(seqs[seqInd]) <= ind:
            logger.warning("Barrier '%s' claims to be at position %d on sequence %d; the sequence has only %d items (can't replace)", nextCtr, ind, seqInd, len(seqs[seqInd]))
            return False
        if hasattr(seqs[seqInd][ind], 'value') and seqs[seqInd][ind].value == nextCtr:
            # This is a barrier with the desired counter on the proper sequence
            return True
        if isID(seqs[seqInd][ind]):
            # Expected when we've already done a replacement
            logger.debug("Barrier '%s' actual element is (now) %s on sequence %d (don't replace)", nextCtr, seqs[seqInd][ind], seqInd)
            return False
        # 7/8: We want to let it go through if it's a Wait or WaitSome for now
        if isWait(seqs[seqInd][ind]) or isWaitSome(seqs[seqInd][ind]):
            logger.debug("Barrier '%s' on sequence %d is a Wait or WaitSome - pretend it's replaceable so we calculate the length: %s", nextCtr, seqInd, seqs[seqInd][ind])
            return True
        if not isBarrier(seqs[seqInd][ind]):
            # We don't think we've replaced any barriers with waits, so this is unexpected
            logger.debug("Barrier '%s' claims type %s but actual element is (now) %s on sequence %d (not replaceable)", nextCtr, nextType, seqs[seqInd][ind], seqInd)
            return False
        else:
            # It's a barrier but the wrong barrier ID?
            logger.warning("Barrier '%s' should be at %d on sequence %d, but instead found %s (can't replace)", nextCtr, ind, seqInd, seqs[seqInd][ind])
            return False
    # No sequences to check against: be conservative
    return False
def getNextBarrierCtr(seqs, seqInd, currCtr, positions):
    ''' Find the id (counter) of the next Barrier after currCtr on the given sequence
    that we could (still) replace. So skip barriers no longer in the sequence.

    seqs: the list of sequences
    seqInd: index of the sequence to walk
    currCtr: counter of the current barrier ('-1' means find the first)
    positions: sorted indices in sequence seqInd in barriersBySeqByPos
    Return '-1' if there is none.
    '''
    # Walk to the next barrier past currCtr on sequence seqInd and return the counter of that barrier
    # Return '-1' if no more
    # This is just iterating over barriers on this channel
    # This is for following execution path of a sequence to find
    # all the barriers and swap them all
    # seqInd is the sequence index
    global barriersBySeqByPos, barriersBySeqByCtr
    logger.debug("Looking for next barrier to replace on sequence %d after '%s'", seqInd, currCtr)
    # Handle case where there's no current - we're looking for the first
    if str(currCtr) == '-1':
        logger.debug("Looking for 1st barrier on sequence %d", seqInd)
        for i in positions:
            barrier = barriersBySeqByPos[seqInd][i]
            # Make sure that barrier is actually still in the sequence it claims to be in;
            # we might have already removed it
            if isReplaceableBarrier(barrier, seqs):
                # return this ctr
                # NOTE(review): barrier here is a dict, so str(barrier) == '-1'
                # looks unreachable unless a bare -1 is stored - confirm.
                if str(barrier) == '-1':
                    return '-1'
                elif barrier['lengthCalculated']:
                    logger.debug("... but this barrier length already calculated, continue")
                    continue
                else:
                    logger.debug("First replaceable barrier on sequence %d: %s\n", seqInd, barrier['counter'])
                    return barrier['counter']
            else:
                # logger.debug("%s not (no longer) replaceable", barrier)
                # keep looping
                continue
        # If we get here, there are no replaceable barriers
        logger.debug("No (more) replaceable barriers on sequence %d\n", seqInd)
        return '-1'
    # Find this barrier object in barriersBySeqByCtr or -1
    currBarrier = getBarrierForSeqCtr(seqInd, currCtr)
    found = False
    try:
        # Phase 2: scan positions in order; once we pass currCtr ('found'),
        # the next replaceable, not-yet-calculated barrier wins.
        currPos = currBarrier['seqPos']
        for pos in positions:
            # Start looking at things after curBarrier
            if pos < currPos:
                continue
            barrier = barriersBySeqByPos[seqInd][pos]
            nextCtr = barrier['counter']
            # logger.debug("getNextBarrier after '%s' on seq %d: looking at barrier '%s'", currCtr, seqInd, barrier['counter'])
            # Could use barriersEqual but we know both use same seqInd and curr uses currCtr
            if not found and nextCtr == currCtr:
                # If we hadn't yet found the desired barrier but did now, say so
                # logger.debug("found current")
                if pos != currPos:
                    logger.warning("Huh? Barrier ctrs are same (%s) but positions in sequence are diff (curr %d != pos %d)", nextCtr, currPos, pos)
                found = True
                continue
            if found:
                logger.debug("'%s' is barrier after '%s' on sequence %d", nextCtr, currCtr, seqInd)
                # But if we had found it, then the next one we found is next
                # NOW....
                # Before blindly returning this barrier, see if it is actually still in the sequence
                # Make sure that barrier is actually still in the sequence it claims to be in;
                # we might have already removed it
                if isReplaceableBarrier(barrier, seqs):
                    # return this ctr
                    if str(barrier) == '-1':
                        logger.debug("... returning it as next\n")
                        return '-1'
                    elif barrier['lengthCalculated']:
                        logger.debug("... but this barrier length already calculated, continue")
                        continue
                    else:
                        logger.debug("... returning it as next\n")
                        return nextCtr
                else:
                    # logger.debug("... but not (any longer) replaceable")
                    # keep looping
                    continue
    except:
        # EG currBarrier was -1
        pass
    # We didn't find the desired barrier, or else didn't find a next
    logger.debug("getNextBarrierCtr failed to find a next (none left?) for sequence %d, after barrier '%s'\n", seqInd, currCtr)
    return '-1'
# End getNextBarrierCtr
def barriersEqual(thisB, thatB):
    '''Return True iff 2 barrier dictionaries are effectively equal,
    i.e. same sequence and same counter (compared via the precomputed
    'hash' key when available).

    Falls back to plain equality when either argument is not a dict with
    a 'hash' key (e.g. the -1 "start" marker or None).
    '''
    try:
        return thisB['hash'] == thatB['hash']
    except (TypeError, KeyError):
        # Not subscriptable (e.g. -1/None) or missing 'hash': compare directly
        return thisB == thatB
def barrierHash(barrier):
    '''Hash a barrier's (counter, seqIndex) pair for quick equality tests.'''
    key = "{0}{1}".format(barrier['counter'], barrier['seqIndex'])
    return hash(key)
def getBarrierChannels(barrierCtr):
    '''Return the list of Channel objects whose sequences have this barrier,
    and which this Barrier claims it blocks.

    For the start pseudo-barrier ('-1', like a Wait) this is allChannels.
    For an unknown barrier id this is an empty list.
    '''
    global barriersByCtr, allChannels
    # The start barrier blocks everything
    if str(barrierCtr) == '-1':
        logger.debug("%s waits on all channels", barrierCtr)
        return allChannels
    entry = barriersByCtr.get(barrierCtr)
    if entry is None:
        logger.warning("Barrier %s unknown; assume it waits on no channels", barrierCtr)
        # FIXME: Could extract the channel that is the seq for the seqInd of this barrier and assume that
        return []
    return entry['channels']
def getBarrierIdx(seqInd, barrierCtr):
    '''Return the int position index of barrier barrierCtr on sequence
    seqInd, or -1 if the sequence or the barrier is unknown.'''
    global barriersBySeqByCtr
    seqBarriers = barriersBySeqByCtr.get(seqInd)
    if seqBarriers is None:
        # Error - unknown channel/sequence
        logger.warning("getBarrierIDx: Unknown sequence %d", seqInd)
        return -1
    if barrierCtr not in seqBarriers:
        # Error - this channel's sequence doesn't have this barrier
        logger.info("Sequence %d doesn't have barrier %s", seqInd, barrierCtr)
        return -1
    return seqBarriers[barrierCtr]['seqPos']
    # Alternative: scan barriersBySeqByPos for this barrierCtr and return
    # the matching position key.
def getLastSharedBarrierCtr(channels, barrierCtr):
    '''Find the last Barrier id (counter) before barrierCtr with at least the same set
    of channels as barrierCtr. Return '-1' if there is none.

    channels: channels the barrier must (at least) cover; if falsy, taken
        from the barrier itself
    barrierCtr: id of the barrier whose shared predecessor we want
    '''
    # Find the last (before given barrier) barrier shared by the channels on this barrier
    # Return its ID (ctr)
    # Return '-1' if none (start)
    # each barrier has a prevBarrier
    # I'm looking for a barrier whose 'channels' is a superset of the given 'channels'
    global barriersBySeqByCtr, barriersByCtr
    if str(barrierCtr) == '-1' or barrierCtr is None:
        # This is the start - all channels have this and there's nothing prior
        return '-1'
    if not barrierCtr in barriersByCtr:
        logger.warning("Barrier '%s' unknown", barrierCtr)
        return '-1'
    barrier = barriersByCtr[barrierCtr]
    # Try to set channels if not given
    if not channels:
        if not barrier or barrier == -1 or not "channels" in barrier:
            logger.warning("Couldn't find channels on barrier '%s'", barrierCtr)
            return '-1'
        channels = barrier["channels"]
    if not channels:
        logger.debug("getLastSharedBarrier couldn't find channels for Barrier '%s'", barrierCtr)
        return '-1'
    startBarrier = barrier
    if not startBarrier:
        raise Exception("Couldn't find Barrier '%s' in getLastSharedBarrierCtr" % barrierCtr)
    # Pick one of the sequences that has this Barrier, basically arbitrarily
    seqInd = startBarrier.get('seqIndex', -1)
    if seqInd == -1:
        for seqI in barriersBySeqByCtr.keys():
            if barrierCtr in barriersBySeqByCtr[seqI]:
                seqInd = seqI
                break
    if seqInd == -1:
        raise Exception("Couldn't find Barrier '%s' in list by sequence" % barrierCtr)
    logger.debug("Using sequence %d to find the last shared barrier", seqInd)
    seqBs = barriersBySeqByCtr[seqInd]
    channelsSet = set(channels)
    prevChannelSet = set()
    currBarrier = startBarrier
    prevBarrierCtr = barrierCtr
    prevBarrier = startBarrier
    # Loop up the barrier's previous pointers, looking to see if its channel set contains all the channels
    # for this Barrier. We're looking for the first previous barrier that is a superset of the channels
    # for this Barrier.
    # Unfortunately, Qubits don't seem to have a nice .equals so set comparison (channelsSet <= prevChannelSet) fails.
    # Here we rely on the string rep of Qubits being sufficient
    while not (all(str(chan) in str(prevChannelSet) for chan in channelsSet)):
        #logger.debug("curr %s, prev %s", str(channelsSet), str(prevChannelSet))
        currBarrier = prevBarrier
        prevBarrierCtr = currBarrier['prevBarrierCtr']
        #logger.debug("From barrier %s, prevBarrierCtr: %s", currBarrier['counter'], prevBarrierCtr)
        prevBarrier = seqBs.get(prevBarrierCtr, None)
        if prevBarrier is None:
            logger.warning("Failed to find prev Barrier '%s' on sequence %d in getLastSharedBarrierCtr", prevBarrierCtr, seqInd)
            # This would happen if the last shared barrier is the start
            return '-1'
        if prevBarrierCtr == '-1':
            logger.debug("Previous possibly shared barrier is the start")
            return prevBarrierCtr
        if not prevBarrier or prevBarrier == -1 or barriersEqual(prevBarrier, currBarrier):
            logger.warning("Failed to find previous Barrier '%s' on sequence %d in getLastSharedBarrierCtr", prevBarrierCtr, seqInd)
            # This would happen if the last shared barrier is the start
            return '-1'
        prevChannelSet = set(prevBarrier.get('channels', []))
        # Look for error where a barrier claims more channels than sequences it is found on
        # Typically this is a Wait() that isn't on all channels, or WaitSome
        # not on the channels it listed
        psc = 0
        psIs = []
        for sI in barriersBySeqByCtr:
            if prevBarrierCtr in barriersBySeqByCtr[sI]:
                psc += 1
                psIs.append(sI)
        if psc != len(prevChannelSet):
            # This is an error if we insist all channels share a Wait/WaitSome that waits on those channels
            # Our current naming convention for Waits and way of finding matching waits assumes this
            logger.error("Candidate prevBarrier '%s' claims %d channels but found on only %d sequences (channels %s but sequences %s)", prevBarrierCtr, len(prevChannelSet), psc, prevChannelSet, psIs)
            logger.debug(" currChannelSet: %s; prev %s ChannelSet: %s", channelsSet, prevBarrierCtr, prevChannelSet)
    # End of while looking for a prevBarrier with a superset of channels
    if all(str(chan) in str(prevChannelSet) for chan in channelsSet):
        logger.debug("Found previous barrier '%s' whose channels %s include at least the channels on Barrier '%s': %s (but not using set comparison)", prevBarrierCtr, prevChannelSet, barrierCtr, channelsSet)
        # FIXME: Error check that this barrier is in fact on all the right channels?
        return prevBarrierCtr
    logger.info("Failed to find a common previous barrier to barrier '%s' on channels %s. Use start.", barrierCtr, channels)
    return '-1'
def replaceBarrier(seqs, currCtr, prevForLengthCtr, channelIdxs, chanBySeq):
    '''Replace Barrier currCtr on sequences with indices channelIdxs into seqs
    with the proper Id pulse, or mark this barrier as indeterminate and leave it.
    The Id pulse length is the time needed so all channels in channelIdxs take the same
    time to get from Barrier prevForLengthCtr to currCtr.
    Note the Id() pulse may have 0 length. Later compile_to_hardware drops such empty pulses.
    Return the edited sequences.
    chanBySeq is a dictionary by sequence ID to the channel object
    '''
    # Replace the barrier with ID currCtr across all channels
    # Note that this function modifies seqs in place
    # Note it takes a dict by sequence index to the channel object
    # It also takes a list of the sequence indices that are involved in this barrier
    import math
    # Calculate the length of this segment on each sequence
    # Use helper to do the actual calculation
    lengths = dict()
    logger.debug("replaceBarrier: calculate run length at '%s' since '%s'", currCtr, prevForLengthCtr)
    for seqInd in channelIdxs:
        seq = seqs[seqInd]
        lengths[seqInd] = getLengthBetweenBarriers(seqInd, currCtr, prevForLengthCtr)
        logger.debug("... Sequence %d length from curr '%s' to prev '%s': %s", seqInd, currCtr, prevForLengthCtr, lengths[seqInd])
    # Find the max (at least 0), or NaN if any are NaN
    numlist = list(lengths.values()) + [0]
    maxBlockLen = max(numlist)
    if any([math.isnan(x) for x in numlist]):
        maxBlockLen = float('nan')
    # If the block is of indeterminate length then later code
    # will replace these Barriers,
    # but mark the Barrier objects as having this indeterminate length
    # so we don't try to recalculate this length.
    indet = False
    if math.isnan(maxBlockLen):
        # This block is indeterminate
        indet = True
        logger.info("Wait at Barrier '%s' is indeterminate - later make it a Sync/Wait", currCtr)
        for seqInd in channelIdxs:
            markBarrierLengthCalculated(currCtr, seqInd, maxBlockLen)
        return seqs
    logger.debug("From '%s' to '%s': maxBlockLen: %s\n", currCtr, prevForLengthCtr, maxBlockLen)
    # For each channel that has this Barrier
    # replace the Barrier in the sequence with an Id pulse
    # on the proper channel of the length (max-localLength).
    # Then record on the Barrier object that we used an Id pulse
    # of that length. That way later code to figure out the length
    # for an enclosing pair of barriers gets the right answer.
    # The Id pulse may be of 0 length.
    for seqInd in channelIdxs:
        seq = seqs[seqInd]
        ind = getBarrierIdx(seqInd, currCtr)
        if ind < 0:
            raise Exception("Sequence %d doesn't appear to have Barrier '%s'!" % (seqInd, currCtr))
        channel = chanBySeq[seqInd]
        idlen = maxBlockLen - lengths[seqInd] # Length of Id pulse to pause till last channel done
        # 7/8: If this barrier is a wait or waitsome, then don't do the replace, just update the length
        barrier = getBarrierForSeqCtr(seqInd, currCtr)
        if barrier != -1 and barrier.get('type', 'barrier') not in ('wait', 'waitsome'):
            if idlen == 0:
                # Instead of creating a new pulse Id(q, length=0) that
                # uses memory and just has to get removed later, put
                # in a constant - 0, which we remove later. We put in
                # something here to keep the indices of barriers from changing.
                logger.info("Sequence %d: Removing %s (is Id(length=0))", seqInd, seq[ind])
                seq[ind] = 0
            else:
                logger.info("Sequence %d: Replacing %s with Id(%s, length=%s)\n", seqInd, seq[ind],
                            channel, idlen)
                seq[ind] = Id(channel, idlen)
        else:
            logger.debug("Sequence %d: NOT replacing %s with Id, but marking it as length=%s\n", seqInd, seq[ind], idlen)
        # Record the added length in all 3 barrier data structures
        markBarrierLengthCalculated(currCtr, seqInd, idlen)
    return seqs
def getPreviousUndoneBarrierCtr(currCtr, prevCtr, seqIdx, iterCnt = 0):
    '''Find the previous barrier from currCtr on sequence seqIdx
    which is not marked as lengthCalculated.
    Return None if none found.

    currCtr: barrier to start from (exclusive of prevCtr)
    prevCtr: barrier to stop at (nominally already calculated)
    seqIdx: index of the sequence whose barrier chain to walk
    iterCnt: recursion depth, used only for log indentation

    Raises Exception if currCtr/prevCtr are not on sequence seqIdx.
    '''
    # For the given channel, loop up previous barriers,
    # if lengthCalculated==False, return it
    # Nominally prevCtr should have lengthCalculated=True,
    # But if it didn't, we'd want to do it
    global barriersBySeqByCtr
    # logger.debug(" %sgetPrevUndoneBarrier asked for last undone from curr '%s' to prev '%s'", " "*iterCnt, currCtr, prevCtr)
    if currCtr not in barriersBySeqByCtr[seqIdx]:
        raise Exception("Looking for prevUndoneBarrier: Sequence %d didn't have expected barrier '%s'" % (seqIdx, currCtr))
    prevBarrier = barriersBySeqByCtr[seqIdx].get(prevCtr, None)
    if prevBarrier is None:
        raise Exception("getPrevUndone failed to find prev '%s' in sequence %d" % (prevCtr, seqIdx))
    if prevBarrier != -1 and not prevBarrier['lengthCalculated']:
        logger.debug(" %sgetPrevUndone: prev was not done - so it is last: %s", " "*iterCnt, prevBarrier)
        return prevCtr
    # prevCtr is done or -1....
    barrier = barriersBySeqByCtr[seqIdx][currCtr]
    if barrier is None or barrier == -1:
        logger.debug(" %sgetPrevUndone: curr was None/-1: %s", " "*iterCnt, currCtr)
        return None
    if barrier['lengthCalculated']:
        # Here we assume that if a barrier is done, then all barriers before it are done
        logger.debug(" %sgetPrevUndone: curr is done, so return None. Current: %s", " "*iterCnt, barrier)
        return None
    # barrier / currCtr is not done, and prev is done or -1
    # if currCtr == prevCtr - cannot happen
    curPrevCtr = barrier['prevBarrierCtr']
    if curPrevCtr == prevCtr:
        # Already know that prevCtr is done and currCtr is not
        # so if the one before curr is prev, return curr
        logger.debug(" %sgetPrevUndone: prev from '%s' is prevCtr '%s', return curr", " "*iterCnt, currCtr, prevCtr)
        return currCtr
    # If we get here, there's something between curr and prev
    curPrevBarrier = barriersBySeqByCtr[seqIdx].get(curPrevCtr, None)
    if not curPrevBarrier:
        raise Exception("getPrevUndoneBarrier failed to find the barrier that current '%s' says comes before it: '%s'" % (currCtr, curPrevCtr))
    if curPrevBarrier != -1 and curPrevBarrier['lengthCalculated']:
        # The one before curr is done but curr is not done, so curr is last
        logger.debug(" %sgetPrevUndone: prev from '%s' is '%s' which is done, so return curr", " "*iterCnt, currCtr, curPrevCtr)
        return currCtr
    # the 1 before cur is also not done
    # Haven't reached the end, need to iterate
    logger.debug(" %sgetPrevUndone: curr's prev '%s' is undone so iterate...", " "*iterCnt, curPrevCtr)
    return getPreviousUndoneBarrierCtr(curPrevCtr, prevCtr, seqIdx, iterCnt+1)
def getLastUnMeasuredBarrierCtr(currCtr, prevCtr, seqIdxes):
    '''Return the counter/id of the last Barrier on the listed sequences
    not already marked as measured (i.e. will be a WaitSome, or the Id
    pulse length is known). Return None if all are measured.

    Works back from currCtr towards prevCtr on each sequence in turn.
    FIXME: This is a depth first search, so it returns the first such
    barrier encountered, not the latest or earliest - is that OK?
    '''
    for seqIdx in seqIdxes:
        logger.debug("Looking for last unmeasured barrier on sequence %d from barrier '%s' to '%s'", seqIdx, currCtr, prevCtr)
        candidate = getPreviousUndoneBarrierCtr(currCtr, prevCtr, seqIdx)
        # Only a real intermediate barrier counts - not the endpoints,
        # the start marker, or "nothing found".
        if candidate in (None, currCtr, prevCtr, '-1'):
            logger.debug(" ... found none ('%s')", candidate)
            continue
        logger.debug(" ... found '%s'", candidate)
        return candidate
    return None
def replaceOneBarrier(currCtr, seqIdxToChannelMap, seqs, seqInd = None):
    '''Replace the barrier with id currCtr on all sequences.

    Use the version of the barrier on the given sequence seqInd if given.
    Recursively find intervening Barriers on any related channel that are not
    marked as 'measured' (turned into an Id or will be a WaitSome),
    and replace those first, so that we can correctly calculate
    the length for this Barrier.
    Then use the helper replaceBarrier to do the actual replacement.

    Args:
        currCtr: counter/id (string) of the Barrier to replace.
        seqIdxToChannelMap: dict mapping sequence index (in seqs) to the
            Channel object that sequence runs on.
        seqs: list of sequences (lists of pulses / control-flow elements).
        seqInd: optional index of the sequence whose copy of the barrier
            to use; looked up from the barrier itself when None.

    Returns:
        The updated list of sequences.

    Raises:
        Exception: if the barrier (or its last shared predecessor) is
            missing from a sequence it claims to span.
    '''
    # Set seqInd and get the Barrier object for the right sequence
    if seqInd is None:
        barrier = barriersByCtr[currCtr]
        if barrier != -1:
            seqInd = barrier['seqIndex']
        else:
            # -1 is the notional 'start' barrier; it has no real sequence
            seqInd = -1
    else:
        barrier = getBarrierForSeqCtr(seqInd, currCtr)
    # Get the set of channels this barrier is on
    waitChans = getBarrierChannels(currCtr)
    if waitChans == []:
        logger.warning("Barrier on no channels? Pretend '%s' is on current sequence %d (channel %s) where we found it", currCtr, seqInd, seqIdxToChannelMap[seqInd].label)
        # FIXED: was "waitChans = [channel]" where 'channel' is an undefined
        # name (NameError on this path). Use the channel of the sequence
        # where we found the barrier, matching the log message above.
        waitChans = [seqIdxToChannelMap[seqInd]]
    # Turn those Channel objects into sequence indices
    waitSeqIdxes = [ind for ind in seqIdxToChannelMap for chan in waitChans if seqIdxToChannelMap[ind] == chan]
    logger.debug("Replacing Barrier '%s' on channels %s, sequences %s", currCtr, waitChans, waitSeqIdxes)
    # 7/8: Barriers that are Wait or WaitSome instances are no longer skipped
    # here; the core replace method handles them correctly.
    prevForLengthCtr = getLastSharedBarrierCtr(waitChans, currCtr)
    # Sanity check: both the shared previous barrier and this barrier must
    # appear on every sequence this barrier claims to span.
    # (Loop variable renamed from 'seqInd' to avoid shadowing the parameter.)
    for idx in waitSeqIdxes:
        if prevForLengthCtr not in barriersBySeqByCtr[idx]:
            raise Exception("When replacing barrier '%s' (%s), found last shared barrier was ID '%s', but it is not on sequence %d (channel %s) that the original barrier is on" % (currCtr, seqs[idx][barrier['seqPos']], prevForLengthCtr, idx, seqIdxToChannelMap[idx]))
        if currCtr not in barriersBySeqByCtr[idx]:
            raise Exception("When replacing barrier '%s' (%s), found it is not on sequence %d (channel %s) when the barrier claimed channels %s" % (currCtr, seqs[barrier['seqIndex']][barrier['seqPos']], idx, seqIdxToChannelMap[idx], waitChans))
    logger.debug("Using length since barrier '%s'", prevForLengthCtr)
    logger.debug("Check for any intervening barriers not yet replaced / marked measured....")
    # If there are any intervening Barriers not marked as measured on any channel
    # (not turned into an Id or identified as indeterminate)
    # then replace them, by recursively calling this function.
    # This will recurse to get to the first such barrier when considered
    # sequentially, and then pop out until they're all handled.
    # Looping here handles the case where there are multiple channels involved.
    # We have to replace those earlier barriers, so that we can add things up
    # to get the length for this Barrier.
    undoneBarrierCtr = getLastUnMeasuredBarrierCtr(currCtr, prevForLengthCtr, waitSeqIdxes)
    while undoneBarrierCtr and undoneBarrierCtr != currCtr:
        logger.debug("Found undone barrier '%s' to replace first!\n", undoneBarrierCtr)
        seqs = replaceOneBarrier(undoneBarrierCtr, seqIdxToChannelMap, seqs)
        undoneBarrierCtr = getLastUnMeasuredBarrierCtr(currCtr, prevForLengthCtr, waitSeqIdxes)
    logger.debug("Done replacing any intervening barriers\n")
    # Now want all the lengths between curr and prev, and figure out min, replace as appropriate
    seqs = replaceBarrier(seqs, currCtr, prevForLengthCtr,
                          waitSeqIdxes, seqIdxToChannelMap)
    logger.debug("Done replacing barrier '%s'\n", currCtr)
    return seqs
# End of replaceOneBarrier
# TODO
# * Check for logic gaps
# * Improve documentation
# * Raise exceptions don't just log for bad things
# * How do we check that all channels start with sync/wait? Should we?
# * Or related, do we do anything special if all channels start with a shared barrier?
# * What if we discover multiple sequential Waits or WaitSomes on same channels?
# * Can we or should we remove duplicates?
# * Consider passing around data structures instead of making them globals
# * Testing, including
# * 3+ qubits
# * explicit WaitSomes and Waits
# * Barrier that becomes WaitSome inside a call or a repeat or both
# * call or goto that goes backwards or forwards (4 cases)
# * Nested Calls
# * Nested Repeats
# * Nested Barriers
# * Sequences that don't start with a barrier
def replaceBarriers(seqs, seqIdxToChannelMap):
    '''
    Replace all Barrier() instructions with Sync() and WaitSome() or Id() pulses.
    Use WaitSome() if there's some intervening indeterminate length operation,
    like a CMP() or LoadCmp().
    Otherwise pause using Id on the less busy channels.
    This modifies the sequences and returns the updated sequences.
    Assumes Barriers list the channels they are on,
    and have an ID.
    Each Barrier is used exactly once per channel during operation
    (or else has guaranteed same length since prior Barrier,
    effectively meaning it is a WaitSome).

    Args:
        seqs: list of sequences (lists of pulses / control-flow elements).
        seqIdxToChannelMap: dict mapping sequence index (in seqs) to the
            Channel object that sequence runs on.

    Returns:
        The updated list of sequences, with all Barriers removed.
    '''
    # Approach:
    # Walk through each sequence building up barrier objects
    # that record each barrier including length in execution time
    # since the last barrier.
    # Then walk through barriers replacing them with Id pulses
    # where possible.
    # Then replace the remaining barriers with WaitSomes
    # Barrier objects are kept in 3 dictionaries: by sequence
    # by position in the sequence (where each sequence has different
    # instance of the object), by sequence by counter (id), and
    # independent of sequence by counter (in which case this is
    # just one instance of this barrier)
    # Each barrier has its sequence, position, channels,
    # ID, previous barrier, length since previous barrier
    # (float, may be 'nan' meaning it becomes a WaitSome)
    # A barrier object that is -1 means the start
    # A barrier ID of '-1' means the start
    # A wait has a barrier ID of 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
    # * where the channels are sorted, and the waitCount is the count of Waits encountered at runtime with the same channels
    # * Note the assumption that if a Wait specifies channels, then all channels have that Wait
    # barrier position of -1 is the start
    global barriersBySeqByPos, barriersBySeqByCtr, barriersByCtr, allChannels
    logger.debug("In replaceBarriers...")
    barriersBySeqByPos = dict() # by sequence Index in seqs, by Pos index of element in sequence
    barriersBySeqByCtr = dict() # by sequence Index in seqs, by Counter ID of barrier
    barriersByCtr = dict() # by Counter ID of barrier
    allChannels = sorted([ch for ch in seqIdxToChannelMap.values()], key=lambda chan: repr(chan)) # actual Channel objects sorted alphabetically
    startBarrier = dict()
    startBarrier['type'] = 'start'
    startBarrier['counter'] = '-1' # notional 'start' barrier has counter '-1', pos -1
    startBarrier['seqPos'] = -1 # index in sequence
    # Have we determined the length of the Id pulse or if this is a WaitSome?
    startBarrier['lengthCalculated'] = False
    # Walking thru running of this sequence, the length since the last Barrier on this sequence,
    # including this element.
    # Note that due to other elements on other sequences, this is not the same as the length
    # of the resulting Id pulse
    startBarrier['lengthSince'] = 0
    startBarrier['prevBarrier'] = None
    startBarrier['prevBarrierPos'] = -1
    startBarrier['prevBarrierCtr'] = '-1'
    # Overrides the False above: the notional start needs no calculation
    startBarrier['lengthCalculated'] = True
    barriersByCtr['-1'] = startBarrier
    # Loop over all sequences
    # Walk through each sequence in execution order
    # (following Call/Goto/Repeat/Return)
    # As we encounter barriers, add them to our data structures
    # We'll uses those data structures later to replace the Barriers in the sequences.
    # We track BlockLabels as we go, and of course Call and LoadRepeat
    for seqInd, seq in enumerate(seqs):
        logger.debug("Looking for barriers on Sequence %d", seqInd)
        #logger.debug(seq)
        barriersBySeqByPos[seqInd] = dict()
        barriersBySeqByCtr[seqInd] = dict()
        # Put a startBarrier in the front for this channel
        startBarrier['seqIndex'] = seqInd
        startBarrier['hash'] = barrierHash(startBarrier)
        barriersBySeqByPos[seqInd][-1] = startBarrier
        barriersBySeqByCtr[seqInd]['-1'] = startBarrier
        # Dict of BlockLabel's label (string) to index in sequence
        # Used to find the target of Call/Goto/Repeat calls
        # This is filled in lazily, as we find the labels.
        blockLabels = dict()
        # Times the repeat block if any is to be repeated (LIFO stack)
        rptCount = []
        # Length of sub-block before starting current repeat, if any
        rptStartLen = []
        # Index into sequence where repeat block starts
        rptStartInd = []
        # LIFO stack of index where Return (from Call) returns to (point to next thing to run)
        retInd = []
        # The previous barrier that this barrier will point to
        prevBarrierPos = -1
        prevBarrier = startBarrier
        prevBarrierCtr = '-1'
        # The total length through this element, which we'll
        # reset at each barrier
        curLen = 0
        # Is this block between 2 barriers of indeterminate length
        nonDet = False
        # counter of waits in sequence by channels it waits on
        waitsOnChannels = dict()
        # index into the sequence of the current element
        seqPos = 0
        # Now loop over elements in the sequence
        # Note that some code blocks will reset seqPos to other points
        # to follow Call/Return/Repeat/Goto commands
        while seqPos < len(seq):
            elem = seq[seqPos]
            logger.debug("Examining element at %d: %s", seqPos, elem)
            # if the element is a barrier, we save the length since the last barrier and a pointer to that previous barrier
            # If it is a CMP, then this block is indeterminate length. Next barrier must say so
            # Handle all kinds of barriers by putting them in our data structures
            if isBarrier(elem) or isWaitSome(elem) or isWait(elem):
                # The current barrier
                curBarrier = dict()
                curBarrier['type'] = 'barrier'
                curBarrier['prevBarrierPos'] = prevBarrierPos
                curBarrier['prevBarrierCtr'] = prevBarrierCtr
                curBarrier['seqIndex'] = seqInd
                curBarrier['seqPos'] = seqPos
                if isBarrier(elem):
                    logger.debug("Next barrier on seq %s is at %d: %s", seqInd, seqPos, elem)
                    curBarrier['type'] = 'barrier'
                    chans = list()
                    for chan in elem.chanlist:
                        if isinstance(chan, str):
                            # Tolerate string channel names; convert to Qubit objects
                            logger.warning("Channel %s on %s was a string", chan, elem)
                            if chan.startswith('QBIT_'):
                                chan = chan[5:]
                            chan = QubitFactory(chan)
                        chans.append(chan)
                    # Store the channels sorted alphabetically for later comparison
                    # Only sort if we have more than 1 channel.
                    if len(chans) > 1:
                        curBarrier['channels'] = sorted(chans, key=lambda chan: repr(chan))
                    else:
                        curBarrier['channels'] = [chans[0]]
                    curBarrier['chanKey'] = frozenset(curBarrier['channels'])
                    curBarrier['counter'] = elem.value
                elif isWaitSome(elem):
                    # This shouldn't really happen I think, but maybe?
                    # But if previous is a Sync then treat this as a Barrier on its listed channels?
                    logger.info("Got %s at pos %d?!", elem, seqPos)
                    curBarrier['type'] = 'waitsome'
                    chans = list()
                    for chan in elem.chanlist:
                        if isinstance(chan, str):
                            # Tolerate string channel names; convert to Qubit objects
                            logger.warning("Channel %s on %s was a string", chan, elem)
                            if chan.startswith('QBIT_'):
                                chan = chan[5:]
                            chan = QubitFactory(chan)
                        chans.append(chan)
                    # Store the channels sorted alphabetically for later comparison
                    curBarrier['channels'] = sorted(chans, key=lambda chan: repr(chan))
                    # Make that a frozenset to use as key in dict
                    curBarrier['chanKey'] = frozenset(curBarrier['channels'])
                    if curBarrier['chanKey'] not in waitsOnChannels:
                        waitsOnChannels[curBarrier['chanKey']] = 0
                    # Keep counter of # times seen a wait for same channels
                    # as the 2nd wait on same channels should match a waitsome on
                    # the other sequences on the same channels
                    waitsOnChannels[curBarrier['chanKey']] += 1
                    curBarrier['waitCount'] = waitsOnChannels[curBarrier['chanKey']]
                    curBarrier['counter'] = 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
                    if not isSync(seq[seqPos-1]):
                        logger.warning("Previous element was not a Sync, but %s", seq[seqPos-1])
                elif isWait(elem):
                    # A bare Wait waits on all channels
                    logger.info("Got %s at %d", elem, seqPos)
                    curBarrier['type'] = 'wait'
                    curBarrier['channels'] = allChannels
                    curBarrier['chanKey'] = frozenset(allChannels)
                    if curBarrier['chanKey'] not in waitsOnChannels:
                        waitsOnChannels[curBarrier['chanKey']] = 0
                    waitsOnChannels[curBarrier['chanKey']] += 1
                    curBarrier['waitCount'] = waitsOnChannels[curBarrier['chanKey']]
                    curBarrier['counter'] = 'wait-chans-%s-ctr-%d' % (curBarrier['channels'], curBarrier['waitCount'])
                    if not isSync(seq[seqPos-1]):
                        if isBarrier(seq[seqPos-1]) and isSync(seq[seqPos-2]):
                            logger.debug("9/2016: Core inserts a barrier between the sync and wait to ensure sequential")
                        else:
                            logger.warning("Previous element was not a Sync, but %s", seq[seqPos-1])
                if nonDet:
                    logger.debug("Marking this barrier as indeterminate length")
                    curBarrier['lengthSince'] = float('nan')
                    # For these barriers, we consider we know the length
                    # So later it's safe to skip over this barrier on other channels;
                    # getLengthBetweenBarriers will get the nan lengthSince and that's accurate
                    curBarrier['lengthCalculated'] = True
                    # Note that for nested blocks the outer barrier will become
                    # nonDet for the whole block because this one is,
                    # but the range to the next barrier may not be
                    nonDet = False # Reset for next block
                elif rptCount:
                    # We're in a Repeat block. The only way that's legal is to treat this as a Wait of some kind
                    logger.debug("%s is inside a Repeat block; treat as a Wait", elem)
                    curBarrier['lengthSince'] = float('nan')
                    # For these barriers, we consider we know the length
                    # So later it's safe to skip over this barrier on other channels;
                    # getLengthBetweenBarriers will get the nan lengthSince and that's accurate
                    curBarrier['lengthCalculated'] = True
                else:
                    # Determinate length: accumulate and mark as still to be calculated
                    curBarrier['lengthSince'] = curLen + pulseLengths(elem)
                    curBarrier['lengthCalculated'] = False
                curBarrier['hash'] = barrierHash(curBarrier)
                # logger.debug("This barrier has Channels: %s, length: %s, counter: '%s', prevBarrier: '%s' at index %d", curBarrier['channels'], curBarrier['lengthSince'], curBarrier['counter'], curBarrier['prevBarrierCtr'], curBarrier['prevBarrierPos'])
                logger.debug("Barrier dict: %s", curBarrier)
                # Store this barrier
                barriersByCtr[curBarrier['counter']] = curBarrier
                # We used to do a deepcopy here, but a regular shallow copy passes the unit tests.
                # If this causes problems later, use pyqgl2.quickcopy.quickcopy
                barriersBySeqByPos[seqInd][seqPos] = copy(curBarrier)
                barriersBySeqByCtr[seqInd][curBarrier['counter']] = copy(curBarrier)
                # Reset vars for next barrier block
                prevBarrier = curBarrier
                prevBarrierCtr = curBarrier['counter']
                prevBarrierPos = seqPos
                # the length counter starts at 0 for the next block
                curLen = 0
                # Move to the next element in the sequence
                seqPos += 1
                continue
            # CMP
            # Note this is a nonDeterminate block
            if isCMP(elem) or isLoadCmp(elem):
                logger.info("Indeterminate length block on sequence %d; has %s at %d", seqInd, elem, seqPos)
                nonDet = True
                seqPos += 1
                curLen += pulseLengths(elem)
                continue
            # LoadRepeat
            # Goes with a later Repeat(<label>) call
            # That <label> could nominally be anywhere, but QGL2 puts it on the line after the LoadRepeat
            # Note that you can nominally nest Repeat blocks, so we keep a LIFO stack of rptCounts.
            # Inside the block of code to be repeated, you can't in general have a Barrier; the elements
            # to that barrier are different between the first and later times through the loop, so the Barrier
            # is 2 different Id blocks so it isn't a repeat.
            # Exceptions: If the element right before the block is a barrier, and the last element in the block
            # is a Barrier, or you otherwise construct things carefully, then the block is the same length.
            # Or else if the Barrier is a WaitSome (on both 1st and later times through the loop), then it is an identical
            # Pulse.
            # Put another way: you can only use a given Barrier in a single way in any channel.
            # However there's another issue with a Barrier in a Repeat block: each channel for that Barrier must use the barrier
            # the same # of times, and in the same way, such that it makes sense to line up the barrier.
            if isLoadRepeat(elem):
                if elem.value < 1:
                    logger.warning("Sequence %d at %d got %s with value %d: Treat as 1", seqInd, seqPos, elem, elem.value)
                    elem.value = 1
                logger.debug("Found %s at index %d. Length so far: %f", elem, seqPos, curLen)
                rptCount.append(elem.value)
                # Guess that the repeat will want to go to line after LoadRepeat - if not, we'll start looking there
                # for the proper destination
                rptStartInd.append(seqPos+1)
                curLen += pulseLengths(elem)
                # Store the length of this block up through this element.
                # That way when we hit the Repeat block, we can potentially calculate the length of the block being repeated,
                # and just add it, without having to re-walk
                # Here we assume that the repeat block in fact starts at the next element after this LoadRepeat
                rptStartLen.append(curLen)
                seqPos += 1
                continue
            # See note above on Repeat blocks.
            # This code allows target of repeat to be anywhere.
            # It guesses that the Repeat goes to the line after LoadRepeat (as guessed above).
            # It does nothing special about intervening Barriers; elsewhere we ensure they are Waits not Ids
            # When we get here, we've already added to curlen the result of doing this repeat block once
            if isRepeat(elem):
                curLen += pulseLengths(elem)
                if not rptCount:
                    # FIXME: Ignore instead? Use NodeError?
                    raise Exception("Sequence %d got %s at %d without a LoadRepeat" % (seqInd, elem, seqPos))
                # Get the # of times left to repeat
                rc = rptCount[-1] - 1
                logger.debug("Found %s at index %d. Remaining repeats: %d", elem, seqPos, rc)
                # If there are no more repeats, move on
                if rc <= 0:
                    # Just finished last time through the loop
                    # Clear all the repeat variables
                    rptCount.pop()
                    while len(rptStartInd) > len(rptCount):
                        rptStartInd.pop()
                    while len(rptStartLen) > len(rptCount):
                        rptStartLen.pop()
                    # Move on to the next element
                    seqPos += 1
                    continue
                # If we get here, we need to repeat that block at least once
                # Update the repeats remaining counter
                rptCount[-1] = rc
                # Do blockLabels comparison by label
                target = elem.target
                if isBlockLabel(target):
                    target = target.label
                # Find proper start index
                realRptStartInd = -1
                if target in blockLabels:
                    realRptStartInd = blockLabels[target]
                    logger.debug("Found Repeat target in cache at %d", realRptStartInd)
                else:
                    # Loop thru rest of seq to find the target. Then loop thru start of seq to here
                    found = False
                    for posNext in range(seqPos, len(seq)):
                        if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                            blockLabels[target] = posNext
                            realRptStartInd = posNext
                            found = True
                            logger.debug("Found Repeat target in rest of sequence at %d", realRptStartInd)
                            break
                    if not found:
                        for posNext in range(0, seqPos):
                            if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                                blockLabels[target] = posNext
                                realRptStartInd = posNext
                                found = True
                                logger.debug("Found target in first part of sequence at %d", realRptStartInd)
                                break
                    if not found:
                        raise Exception("Sequence %d at %d: Failed to find %s target '%s'" % (seqInd, seqPos, elem, elem.target))
                # If the start of the repeat block is same as that in rptStartInd,
                # then use curlen-rptStartLen as length of block to repeat.
                # Mutiply that by rc and add to curlen
                # Then clear the rpt LIFOs and move on
                if rptStartInd[-1] == realRptStartInd:
                    # We guessed correctly where to start repeat from
                    rs = rptStartLen.pop()
                    rptAdd = (curLen - rs) * rc
                    logger.debug("Stashed startElemInd matches target. Finish by adding (curlen %f - startLen %f) * repeatsToGo %d = %f", curLen, rs, rc, rptAdd)
                    curLen += rptAdd
                    # Just finished last time through the loop
                    # Clear all the repeat variables
                    rptCount.pop()
                    while len(rptStartInd) > len(rptCount):
                        rptStartInd.pop()
                    while len(rptStartLen) > len(rptCount):
                        rptStartLen.pop()
                    # Move on to the next element
                    seqPos += 1
                    continue
                else:
                    # If the 2 are different, then reset rptStartInd to what we found, reset repeatStartLen to curlen, set seqPos to rptStartInd
                    logger.debug("Repeat started at %d for target %s, not guessed %d; going back", realRptStartInd, elem.target, rptStartInd[-1])
                    # We already reset the repeat counter properly
                    # Reset the startInd to be the proper one
                    # And reset the length for before the repeat to the length to this point
                    rptStartLen[-1] = curLen
                    rptStartInd[-1] = realRptStartInd
                    # Then continue from that starting point
                    seqPos = realRptStartInd
                    continue
            # End of handling Repeat
            # Handle Return
            # This jumps back to the last Call
            # Note that Call/Return blocks could be nested,
            # and due to a Goto, the 'Return' may go to something later in the sequence.
            # Note that if there is a Barrier inside a Call block, what does it mean to hit that Barrier
            # twice? As in the comment above for Repeat, unless things are identical across channels,
            # how do you line up the Barriers?
            # So in general, a Barrier, including one inside a Call block, should only be called once
            if isReturn(elem):
                # Should have seen a previous call
                # NOTE: If there was a barrier inside that block, we better only have called this block once
                curLen += pulseLengths(elem)
                if not retInd:
                    raise Exception("Sequence %d at %d: Have no saved index to go back to for %s" % (seqInd, seqPos, elem))
                ri = retInd.pop()
                logger.debug("Got %s: Returning to saved index %d", elem, ri)
                seqPos = ri
                continue
            # Handle Call() or Goto()
            # Both take a BlockLabel to jump to.
            # Call() requires stashing the index after it to return to.
            if isCall(elem) or isGoto(elem):
                logger.debug("Got %s at %d - will jump to %s", elem, seqPos, elem.target)
                # Call is slightly special
                if isCall(elem):
                    if seqPos+1 == len(seq):
                        # The return would try to go to something off the end. That won't work.
                        raise Exception("Call() is last element in sequence %d: %s" % (seqInd, elem))
                    logger.debug("Next Return will go to %d", seqPos+1)
                    # stash away seqPos+1 as the next place to return
                    retInd.append(seqPos+1)
                # Do BlockLabels comparison by label
                target = elem.target
                if isBlockLabel(target):
                    target = target.label
                curLen += pulseLengths(elem)
                if target in blockLabels:
                    seqPos = blockLabels[target]
                    logger.debug("Found target in cache at %d", seqPos)
                    continue
                # Didn't find the target yet. Look.
                # Loop thru rest of seq. Then loop thru start of seq to here
                found = False
                for posNext in range(seqPos, len(seq)):
                    if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                        blockLabels[target] = posNext
                        seqPos = posNext
                        found = True
                        logger.debug("Found target in rest of sequence at %d", seqPos)
                        break
                if found:
                    continue
                for posNext in range(0, seqPos):
                    if isBlockLabel(seq[posNext]) and seq[posNext].label == target:
                        blockLabels[target] = posNext
                        seqPos = posNext
                        found = True
                        logger.debug("Found target in first part of sequence at %d", seqPos)
                        break
                if found:
                    continue
                else:
                    raise Exception("Sequence %d at %d: Failed to find %s target '%s'" % (seqInd, seqPos, elem, elem.target))
            # BlockLabel
            # Save where this was in case it's a target for a Call/Goto/Repeat,
            # otherwise it's a normal element.
            if isBlockLabel(elem):
                curLen += pulseLengths(elem)
                # Stash away that label in case some Call/Goto/Repeat needs it
                blockLabels[elem.label] = seqPos
                seqPos += 1
                continue
            # Default
            elen = pulseLengths(elem)
            logger.debug("'%s' is a normal element - add its length (%s) and move on", elem, elen)
            curLen += elen
            seqPos += 1
            continue
        # Done looking at elements in this sequence
        logger.debug("Done looking for Barriers on sequence %d\n", seqInd)
        # Now we'll move to the next channel
    # End of loop over channels
    logger.debug("Done looking for Barriers on all sequences\n")
    # At this point we've looked at every element in every sequence, adding up the lengths.
    # This data is saved away in barrier objects for use to use next.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("barriersBySeqByCtr: ")
        for seqInd in sorted(barriersBySeqByCtr):
            logger.debug(" Seq %d:", seqInd)
            for ctr in barriersBySeqByCtr[seqInd]:
                logger.debug(" %s: %s", ctr, barriersBySeqByCtr[seqInd][ctr])
        logger.debug("barriersBySeqByPos: ")
        for seqInd in sorted(barriersBySeqByPos):
            logger.debug(" Seq %d:", seqInd)
            for pos in sorted(barriersBySeqByPos[seqInd]):
                logger.debug(" %s: %s", pos, barriersBySeqByPos[seqInd][pos])
        logger.debug("barriersByCtr: ")
        for ctr in barriersByCtr:
            logger.debug(" %s: %s", ctr, barriersByCtr[ctr])
        logger.debug("")
    # And here is the main code to use the above functions and replace all barriers.
    # First we replace Barriers that turn into Id pulses
    # - because thats a 1 for 1 replacement (doesn't screw up saved indices)
    # For each sequence, start at the 'start' barrier and go to 'next'
    # where the next is a Barrier that is still in the sequence
    # and not marked as already calculated (if still there but marked
    # calculated it is turning into a WaitSome)
    # When we have such a barrier, replace it.
    # Note however that replacing a Barrier potentially requires
    # first replacing some other barriers on other channels.
    # EG if for q1 B3 links up with q3 and the last common is B0, but B1 and B2 are only on
    # the q3 program, then for q3 to get the length up to B3,
    # it needs to do B1 and B2 first.
    # so replaceOneBarrier hides that recursion.
    for seqInd in seqIdxToChannelMap.keys():
        currCtr = '-1'
        logger.debug("Handling Barriers on sequence %d", seqInd)
        positions = []
        try:
            positions = sorted(barriersBySeqByPos[seqInd])
        except Exception as e:
            logger.warning("Failed to get sorted list of indices: Got %s", e)
        currCtr = getNextBarrierCtr(seqs, seqInd, currCtr, positions)
        while (currCtr != '-1'): # While there's another barrier
            logger.info("Replacing Barrier '%s' found on sequence %d", currCtr, seqInd)
            # replace that barrier, plus any other barriers (on other channels)
            # necessary to calculate the length of the Id pulse here
            seqs = replaceOneBarrier(currCtr, seqIdxToChannelMap, seqs, seqInd)
            # Move on to the next barrier
            currCtr = getNextBarrierCtr(seqs, seqInd, currCtr, positions)
        # When we get here, we ran out of barriers that turn into Id pulses to replace
    logger.debug("Done swapping non Wait Barriers\n")
    # Now change any remaining barriers into Sync/WaitSome pairs
    for seqInd in barriersBySeqByPos:
        if seqInd < 0 or seqInd >= len(seqs):
            logger.warn("No such channel %d?", seqInd)
            continue
        logger.debug("Swapping remaining Barriers on sequence %d with Sync/WaitSome", seqInd)
        seq = seqs[seqInd]
        # Count how many of these swaps we've done
        # Because that's the # of extra elements we're inserting, so it is the
        # amount that our saved indices are off
        swapCnt = 0
        # loop over our previous stash of barriers on this sequence
        positions = sorted(barriersBySeqByPos[seqInd])
        for barrierInd in positions:
            if barrierInd < 0:
                # skip the fake start
                continue
            # The new actual index of this element in the sequence
            bInd = barrierInd + swapCnt
            barrier = barriersBySeqByPos[seqInd][barrierInd]
            bType = barrier.get('type', 'barrier')
            bChannels = getBarrierChannels(barrier['counter'])
            if bInd >= len(seq):
                logger.debug("swapCnt: %d, seqLen: %d", swapCnt, len(seq))
                raise Exception("Calculated index of barrier %s (%d, was originally %d) is past end of sequence %d" % (barrier['counter'], bInd, barrierInd, seqInd))
            # Make sure it's a barrier still
            # NOTE(review): after the splice below, 'seq' still refers to the
            # pre-splice list while bInd is computed for the post-splice list
            # (seqs[seqInd]); if more than one barrier remains on the same
            # sequence the later seq[bInd] lookups read stale/offset entries.
            # Confirm whether multiple remaining barriers per sequence can
            # occur; if so, 'seq' should be refreshed from seqs[seqInd].
            if isBarrier(seq[bInd]):
                swapCnt += 1
                if bType == 'wait' or bChannels == allChannels:
                    logger.info("Replacing sequence %d index %d (%s) with Sync();Wait()", seqInd, bInd, seq[bInd])
                    # Replace
                    seqs[seqInd] = seq[:bInd] + [Sync(), Wait()] + seq[bInd+1:]
                else:
                    logger.info("Replacing sequence %d index %d (%s) with Sync(); WaitSome(%s)", seqInd, bInd, seq[bInd], bChannels)
                    seqs[seqInd] = seq[:bInd] + [Sync(), WaitSome(bChannels)] + seq[bInd+1:]
            else:
                # This is common / expected
                logger.debug("Spot %d (was %d) in sequence %d (channel %s) not (no longer) a barrier, but: %s", bInd, barrierInd, seqInd, seqIdxToChannelMap[seqInd], seq[bInd])
                continue
        logger.debug("Swapped %d barriers on sequence %d\n", swapCnt, seqInd)
        # Now to next sequence
    # Done swapping remaining barriers for Sync/Waits
    # Now all Barriers should be gone.
    # Now we can also remove the stub "0" entries for Id(q, length=0) pulses
    for sidx, seq in enumerate(seqs):
        newSeq = []
        for idx in range(len(seq)):
            if isBarrier(seq[idx]):
                logger.warn("Sequence %d still has %s at %d - remove!", sidx,
                            seq[idx], idx)
            elif seq[idx] == 0:
                logger.debug("Removing placeholder 0 for Id(0) in sequence %d at %d", sidx, idx)
            else:
                newSeq.append(seq[idx])
        seqs[sidx] = newSeq
    # Debug: Print final sequences
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(" ")
        logger.debug("Final QGL1 sequences:")
        for sidx, seq in enumerate(seqs):
            logger.debug("Sequence %d", sidx)
            for idx in range(len(seq)):
                logger.debug(" %d: %s", idx, seq[idx])
        logger.debug(" ")
    logger.debug("Done replacing Barriers\n")
    return seqs
# End of replaceBarriers
#####
# Test code below - for unit testing code in this file.
if __name__ == '__main__':
from QGL.Compiler import find_unique_channels
from QGL.Channels import Qubit as qgl1Qubit
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def testCall():
# Do a call/return
# with barriers inside the call block
# nested calls
# with concur:
# for q in q1, q2:
# X(q)
# Call(myFunc)
# Y(q)
# myFunc(q):
# Barrier
# X90(q)
# Call myFunc2
# Y90
# Barrier
# myFunc2:
# Barrier
# MEAS(q)
# Barrier
from QGL.ChannelLibrary import QubitFactory
from QGL.BlockLabel import BlockLabel
from qgl2.qgl1control import Barrier
from QGL.ControlFlow import Sync
from QGL.ControlFlow import Wait, Call, Goto, Return
from QGL.PulsePrimitives import Id
from QGL.PulsePrimitives import MEAS
from QGL.PulsePrimitives import X, X90
from QGL.PulsePrimitives import Y, Y90
q1 = QubitFactory('q1')
QBIT_q1 = q1
q2 = QubitFactory('q2')
QBIT_q2 = q2
q3 = QubitFactory('q3')
QBIT_q3 = q3
seqs = list()
# q1
seq = [
Barrier('0', [q1, q2]), # Id 0
X(q1, length=.1),
Call(BlockLabel('myfunc')),
Y(q1, length=.2),
Goto(BlockLabel('end')),
BlockLabel('myfunc2'),
Barrier('3', [q1]), # Id 0
MEAS(q1),
Barrier('4', [q1]), # Id 0
Return(),
BlockLabel('myfunc'),
Barrier('1', [q1, q2]), # Id 0
X90(q1, length=.3),
Call(BlockLabel('myfunc2')),
Y90(q1, length=.4),
Barrier('2', [q1, q2]), # Id 0
Return(),
BlockLabel('end'),
Barrier('5', [q1, q2]) # Id 0
]
seqs += [seq]
'''
B0 -- Id0
X - .1
Call
B1 -- Id .1
X90 - .3
Call
B3 <missing>
MEAS
B4 <missing>
Ret
Y90 - .4
B2 - last common is B1, should be .7, but I'm getting .4: It didn't carry over across the call block?
Ret
Y - .2
Goto
B5 - Id .2
'''
# q2
seq = [
Barrier('0', [q1, q2]), # Id 0
X(q2),
Call(BlockLabel('myfunc')),
Y(q2),
Goto(BlockLabel('end')),
BlockLabel('myfunc'),
Barrier('1', [q1, q2]), #Id .1
X90(q2),
Call(BlockLabel('myfunc2')),
Y90(q2),
Barrier('2', [q1, q2]), # Id .7
Return(),
BlockLabel('myfunc2'),
MEAS(q2),
Return(),
BlockLabel('end'),
Barrier('5', [q1, q2]) #Id .2
]
seqs += [seq]
return seqs
def testFunc2():
#with concur
# for i in 1,2
# for q in q1, q2
# X(q)
# Becomes:
#BARRIER - becomes a wait
#LoadRepeat 2
#loopstart
#X(q)
#Repeat(loopstart)
#BARRIER - becomes ID
from QGL.ChannelLibrary import QubitFactory
from QGL.BlockLabel import BlockLabel
from qgl2.qgl1control import Barrier
from QGL.ControlFlow import Sync
from QGL.ControlFlow import Wait
from QGL.PulsePrimitives import Id
from QGL.PulsePrimitives import MEAS
from QGL.PulsePrimitives import X
from QGL.PulsePrimitives import Y
q1 = QubitFactory('q1')
QBIT_q1 = q1
q2 = QubitFactory('q2')
QBIT_q2 = q2
q3 = QubitFactory('q3')
QBIT_q3 = q3
seqs = list()
# q1
seq = [
Barrier('0', [q1, q2]), # Id 0
LoadRepeat(2),
BlockLabel('loopstart'),
X(q1, length=0.5),
Repeat(BlockLabel('loopstart')),
Barrier('1', [q1, q2]) # Id 0
]
seqs += [seq]
# q2
seq = [
Barrier('0', [q1, q2]), # Id 0
LoadRepeat(2),
BlockLabel('loopstart2'),
X(q2),
Repeat(BlockLabel('loopstart2')),
Barrier('1', [q1, q2]) # Id 1
]
seqs += [seq]
return seqs
def testWait():
    """Fixture: two matched sequences with an explicit Sync/Wait at the start
    and a bare Wait mid-sequence; the inline comments record the Id pulse each
    Barrier is expected to be replaced with."""
    from QGL.ChannelLibrary import QubitFactory
    from QGL.BlockLabel import BlockLabel
    from qgl2.qgl1control import Barrier
    from QGL.ControlFlow import Sync, Repeat, LoadRepeat
    from QGL.ControlFlow import Wait
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X, X90, X90m
    from QGL.PulsePrimitives import Y, Y90

    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3

    # q1
    q1_seq = [
        Barrier('1', [q1, q2]), # Id 0
        Sync(),
        Wait(),
        X(q1, length=0.1),
        Barrier('2', [q1, q2]), # Id 0
        Y(q1, length=0.2),
        # Barrier('3', [q1, q2]),
        Wait(),
        X(q1, length=0.3),
        Barrier('4', [q1, q2]) # Id 0
    ]
    # q2
    q2_seq = [
        Barrier('1', [q1, q2]), # Id 0
        Sync(),
        Wait(),
        X(q2),
        Barrier('2', [q1, q2]), # Id .1
        Y(q2),
        # Barrier('3', [q1, q2]),
        Wait(),
        X(q2),
        Barrier('4', [q1, q2]) # Id .3 (since the wait)
    ]
    return [q1_seq, q2_seq]
def testWaitSome():
    """Fixture: WaitSome on subsets of {q1, q2, q3}; the inline comments record
    whether each Barrier should become an Id (and of what length) or whether
    the WaitSome should be left alone."""
    from QGL.ChannelLibrary import QubitFactory
    from QGL.BlockLabel import BlockLabel
    from qgl2.qgl1control import Barrier
    from QGL.ControlFlow import Sync, Repeat, LoadRepeat
    from QGL.ControlFlow import Wait
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X, X90, X90m
    from QGL.PulsePrimitives import Y, Y90
    # NOTE(review): WaitSome (used below) is not imported locally, unlike every
    # other QGL name in this fixture — presumably provided at module level;
    # verify, or add it to the QGL.ControlFlow import above.
    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3
    seqs = list()
    # q1
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        WaitSome([q1, q2]), # Note this isn't on q3 and matches line 3 of q2. Should be left alone.
        X(q1, length=0.1),
        Barrier('2', [q1, q2]), # Prev is the WaitSome; should become Id 0
        Y(q1, length=0.2),
        # Barrier('3', [q1, q2]),
        # Wait(),
        X(q1, length=0.3),
        Barrier('4', [q1, q2, q3]) # Id .8 without q2 and waitsomes; lSince .6; Make this Id 0.9
    ]
    seqs += [seq]
    # q2
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        WaitSome([q1, q2]), # Note this isn't on q3 and matches line 3 of q2. # Should be left alone.
        X(q2),
        Barrier('2', [q1, q2]), # Prev is the WaitSome; should become Id 0.1
        Y(q2),
        # Barrier('3', [q1, q2]),
        WaitSome([q2, q3]), # Not on q1; prev is B1; lSince 0; computed could be 0.1; could become Id 0.8 (.9-.1) but leave alone
        X(q2),
        Barrier('4', [q1, q2, q3]) # Id .5 if no q3; Prev B1; lSince 0 but computed 0.9; Make this Id 0.6
    ]
    seqs += [seq]
    # q3
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        # Wait(),
        X(q3, length=0.4),
        # Barrier('2', [q1, q2]),
        Y(q3, length=0.5),
        # Barrier('3', [q1, q2]),
        WaitSome([q2, q3]), # Not on q1; prev is B1; lsince 0.9; could become Id 0 but leave alone
        X(q3, length=0.6),
        Barrier('4', [q1, q2, q3]) # Prev B1; lSince 1.5; Make this Id 0
    ]
    seqs += [seq]
    return seqs
def testCMP():
    """Fixture: like testWaitSome, but q3 inserts LoadCmp/CmpEq, which makes
    sequence length after that point indeterminate (the 'NaN' in the inline
    expectation comments)."""
    from QGL.ChannelLibrary import QubitFactory
    from QGL.BlockLabel import BlockLabel
    from qgl2.qgl1control import Barrier
    from QGL.ControlFlow import Sync, Repeat, LoadRepeat
    from QGL.ControlFlow import Wait
    from QGL.ControlFlow import LoadCmp, CmpEq
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X, X90, X90m
    from QGL.PulsePrimitives import Y, Y90
    # NOTE(review): WaitSome (used below) is not imported locally, unlike every
    # other QGL name in this fixture — presumably provided at module level;
    # verify, or add it to the QGL.ControlFlow import above.
    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3
    seqs = list()
    # q1
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        WaitSome([q1, q2]), # Note this isn't on q3 and matches line 3 of q2. Should be left alone.
        X(q1, length=0.1),
        Barrier('2', [q1, q2]), # Prev is the WaitSome; should become Id 0
        Y(q1, length=0.2),
        # Barrier('3', [q1, q2]),
        # Wait(),
        X(q1, length=0.3),
        Barrier('4', [q1, q2, q3]) # Id .8 without q2 and waitsomes; lSince .6; Due to q3 becomes NaN: Sync/Wait
    ]
    seqs += [seq]
    # q2
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        WaitSome([q1, q2]), # Note this isn't on q3 and matches line 3 of q2. # Should be left alone.
        X(q2),
        Barrier('2', [q1, q2]), # Prev is the WaitSome; should become Id 0.1
        Y(q2),
        # Barrier('3', [q1, q2]),
        WaitSome([q2, q3]), # Not on q1; prev is B1; lSince 0; computed: NaN due to q3
        X(q2),
        Barrier('4', [q1, q2, q3]) # Id .5 if no q3; Prev B1; lSince 0; must be Sync/Wait to match q1, else Sync then Id 0.6?
    ]
    seqs += [seq]
    # q3
    seq = [
        Barrier('1', [q1, q2, q3]), # Id 0
        Sync(),
        # Wait(),
        X(q3, length=0.4),
        # Barrier('2', [q1, q2]),
        LoadCmp(),
        CmpEq(1),
        Y(q3, length=0.5),
        # Barrier('3', [q1, q2]),
        WaitSome([q2, q3]), # Not on q1; prev is B1; lsince 0.9+NaN=NaN; computed NaN
        X(q3, length=0.6),
        Barrier('4', [q1, q2, q3]) # Prev B1; lSince NaN (0.6 since WaitSome); Make this Sync/Wait to match q1, else could be Sync;Id 0
    ]
    seqs += [seq]
    return seqs
def testRepeat():
    """Fixture: nested LoadRepeat/Repeat loops (2x outer around 2x inner) on
    both qubits, with barriers only outside the loops; the inline comments
    record the Id pulse each Barrier is expected to become."""
    from QGL.ChannelLibrary import QubitFactory
    from QGL.BlockLabel import BlockLabel
    from qgl2.qgl1control import Barrier
    from QGL.ControlFlow import Sync, Repeat, LoadRepeat
    from QGL.ControlFlow import Wait
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X, X90, X90m
    from QGL.PulsePrimitives import Y, Y90
    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3
    seqs = list()
    # q1
    seq = [
        Barrier('1', [q1, q2]), # Id 0
        X(q1, length=0.1),
        Barrier('2', [q1, q2]), # Id 0
        LoadRepeat(2),
        BlockLabel('startRep1'),
        Y(q1, length=0.2),
        LoadRepeat(2),
        BlockLabel('startRep2'),
        X90m(q1, length=0.3),
        Repeat(BlockLabel('startRep2')),
        X90(q1, length=0.5),
        Repeat(BlockLabel('startRep1')),
        Barrier('3', [q1, q2]), # Id 0
        Y90(q1, length=0.7),
        Barrier('4', [q1, q2]) # Id 0
    ]
    seqs += [seq]
    # q2 - same loop structure as q1 but with default-length pulses
    seq = [
        Barrier('1', [q1, q2]), # Id 0
        X(q2),
        Barrier('2', [q1, q2]), # Id .1
        LoadRepeat(2),
        BlockLabel('startRep1'),
        Y(q2),
        LoadRepeat(2),
        BlockLabel('startRep2'),
        X90m(q2),
        Repeat(BlockLabel('startRep2')),
        X90(q2),
        Repeat(BlockLabel('startRep1')),
        Barrier('3', [q1, q2]), # Id 2.6
        Y90(q2),
        Barrier('4', [q1, q2]) # Id .7
    ]
    seqs += [seq]
    return seqs
def repeatBarriers():
    # This no longer works as we currently don't allow barriers inside repeat blocks
    '''
    for i in 1,2
      with concur
        for q in q1,q2
           X(q)

    LoadRepeat 2
    loopstart
    BARRIER - remove?
    X(q)
    BARRIER - becomes Id
    Repeat(loopstart)
    '''
    from QGL.ChannelLibrary import QubitFactory
    from QGL.BlockLabel import BlockLabel
    from qgl2.qgl1control import Barrier
    # Import Repeat/LoadRepeat locally as well: both are used below, and the
    # other fixtures (e.g. testWait) import them here rather than relying on
    # module-level imports.
    from QGL.ControlFlow import Sync, Repeat, LoadRepeat
    from QGL.ControlFlow import Wait
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X
    from QGL.PulsePrimitives import Y
    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3
    seqs = list()
    seq = [
        LoadRepeat(2),
        BlockLabel('loopstart1'),
        Barrier(),
        X(q1, length=0.1),
        Barrier(),
        Repeat(BlockLabel('loopstart1')),
        Barrier() # Including this causes error cause we see the Repeat without LoadRepeat
    ]
    seqs += [seq]
    seq = [
        LoadRepeat(2),
        BlockLabel('loopstart2'),
        Barrier(),
        X(q2, length=0.2),
        Barrier(),
        Repeat(BlockLabel('loopstart2')),
        Barrier()
    ]
    seqs += [seq]
    return seqs
def testFunc():
    """Fixture: three qubits with differing barrier sets — q1/q2 share
    barriers 0-1 (Id; X; MEAS), q1/q3 share barriers 2-3 (Y). Sequences are
    returned in order q3, q2, q1; inline comments give expected Ids."""
    from QGL.ChannelLibrary import QubitFactory
    from qgl2.qgl1control import Barrier
    from QGL.ControlFlow import Sync
    from QGL.ControlFlow import Wait
    from QGL.PulsePrimitives import Id
    from QGL.PulsePrimitives import MEAS
    from QGL.PulsePrimitives import X
    from QGL.PulsePrimitives import Y
    q1 = QubitFactory('q1')
    QBIT_q1 = q1
    q2 = QubitFactory('q2')
    QBIT_q2 = q2
    q3 = QubitFactory('q3')
    QBIT_q3 = q3
    # Roughly equivalent to multi/anotherMulti2 without the init
    # with concur:
    #     for q in [q1, q2]:
    #         Id(q)
    #         X(q)
    #         MEAS(q)
    # with concur:
    #     for q in [q1, q3]:
    #         Y(q)
    seqs = list()
    # Q3
    seq = [
        Barrier('2', [q1, q3]), # becomes ID 0.8
        Y(QBIT_q3, length=0.6),
        Barrier('3', [q1, q3]) # Becomes Id 0
    ]
    seqs += [seq]
    # Q2
    seq = [
        Barrier('0', [q1, q2]), # Becomes ID 0
        Id(QBIT_q2, length=0.5),
        X(QBIT_q2, length=0.3),
        MEAS(QBIT_q2),
        Barrier('1', [q1, q2]) # Becomes Id 0
    ]
    seqs += [seq]
    # Q1
    seq = [
        Barrier('0', [q1, q2]), # Becomes Id 0
        Id(QBIT_q1, length=0.2),
        X(QBIT_q1, length=0.4),
        MEAS(QBIT_q1),
        Barrier('1', [q1, q2]), # Becomes Id 0.2
        Barrier('2', [q1, q3]), # Becomes Id 0
        Y(QBIT_q1, length=0.1),
        Barrier('3', [q1, q3]) # Becomes Id 0.5
    ]
    seqs += [seq]
    return seqs
def printSeqs(seqs):
    """Render a list of pulse sequences as a readable multi-line string,
    annotating Id pulses and non-zero-length pulses with their length."""
    from QGL.PulseSequencer import Pulse

    pieces = ["["]
    for sidx, seq in enumerate(seqs):
        if sidx:
            pieces.append(",")
        pieces.append("\n")
        pieces.append("%d: [" % sidx)
        for eidx, elem in enumerate(seq):
            if eidx:
                pieces.append(",")
            pieces.append(" %s" % str(elem))
            if isinstance(elem, Pulse) and (elem.label == 'Id' or elem.length != 0):
                pieces.append("(len: %f)" % elem.length)
        pieces.append(" ]")
    pieces.append("\n]\n")
    return "".join(pieces)
# Basic 3 qubits not all doing same stuff / diff # barriers
# seqs = testFunc()
# 2 qubits with a repeat inside, doing same stuff
# seqs = testFunc2()
# 2 qubits doing same thing
# Call inside a barrier
# which has barriers inside, and does a call itself
# seqs = testCall()
# test Repeats including nested repeats
# seqs = testRepeat()
# test explicit waits
# seqs = testWait()
# test explicit WaitSomes
# seqs = testWaitSome()
# test WaitSomes and Cmp/LoadCmp
seqs = testCMP()
logger.info("Seqs: \n%s", printSeqs(seqs))

# Map each sequence index to the (first) Qubit channel it uses.
seqIdxToChannelMap = dict()
for idx, seq in enumerate(seqs):
    chs = find_unique_channels(seq)
    for ch in chs:
        # FIXME: Or just exclude Measurement channels?
        if isinstance(ch, qgl1Qubit):
            seqIdxToChannelMap[idx] = ch
            logger.debug("Sequence %d is channel %s", idx, ch)
            break

# Hack: skip the empty sequence(s) now before doing anything else
useseqs = list()
decr = 0 # How much to decrement the index
toDecr = dict() # Map of old index to amount to decrement
for idx, seq in enumerate(seqs):
    if idx not in seqIdxToChannelMap:
        # Indicates an error - that empty sequence
        logger.debug("Sequence %d has no channel - skip", idx)
        decr = decr+1
        continue
    if decr:
        toDecr[idx] = decr
        logger.debug("Will shift index of sequence %d by %d", idx, decr)
    useseqs.append(seq)
seqs = useseqs
if decr:
    newmap = dict()
    for ind in seqIdxToChannelMap:
        # Shift each index by the number of empty sequences seen *before* it
        # (the per-index amount recorded in toDecr above), not by the final
        # total `decr` — they differ when empty sequences interleave non-empty
        # ones. (The old code used `ind - decr` for every entry.)
        shift = toDecr.get(ind, 0)
        if shift:
            newmap[ind - shift] = seqIdxToChannelMap[ind]
            logger.debug("Sequence %d (channel %s) is now sequence %d", ind, seqIdxToChannelMap[ind], ind - shift)
        else:
            logger.debug("Sequence %d keeping map to %s", ind, seqIdxToChannelMap[ind])
            newmap[ind] = seqIdxToChannelMap[ind]
    seqIdxToChannelMap = newmap
logger.info("Seqs just before replace:\n%s", printSeqs(seqs))
seqs = replaceBarriers(seqs, seqIdxToChannelMap)
logger.info("Seqs after replace: \n%s", printSeqs(seqs))
| 14,937 | 0 | 503 |
4193f67ee2419bbcb7536b678b831e08fb18ef71 | 6,459 | py | Python | rlpyt/models/curiosity/disagreement.py | williamd4112/curiosity_baselines | 45939f3f24c53cfff5153ef012486a6a058660be | [
"MIT"
] | null | null | null | rlpyt/models/curiosity/disagreement.py | williamd4112/curiosity_baselines | 45939f3f24c53cfff5153ef012486a6a058660be | [
"MIT"
] | 3 | 2021-09-08T03:31:49.000Z | 2022-03-12T00:59:45.000Z | rlpyt/models/curiosity/disagreement.py | williamd4112/curiosity_baselines | 45939f3f24c53cfff5153ef012486a6a058660be | [
"MIT"
] | null | null | null |
import torch
from torch import nn
import numpy as np
import cv2
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims, valid_mean
from rlpyt.models.curiosity.encoders import *
from rlpyt.models.curiosity.forward_models import *
class Disagreement(nn.Module):
"""Curiosity model for intrinsically motivated agents: similar to ICM
except there is an ensemble of forward models that each make predictions.
The intrinsic reward is defined as the variance between these predictions.
"""
| 45.808511 | 155 | 0.650875 |
import torch
from torch import nn
import numpy as np
import cv2
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims, valid_mean
from rlpyt.models.curiosity.encoders import *
from rlpyt.models.curiosity.forward_models import *
class Disagreement(nn.Module):
    """Curiosity model for intrinsically motivated agents: similar to ICM
    except there is an ensemble of forward models that each make predictions.
    The intrinsic reward is defined as the variance between these predictions.
    """

    def __init__(
            self,
            image_shape,
            action_size,
            ensemble_size=5,
            feature_encoding='idf',
            batch_norm=False,
            prediction_beta=1.0,
            obs_stats=None,
            device="cpu",
            forward_loss_wt=0.2,
            forward_model='res'
            ):
        """
        :param image_shape: shape of one observation frame (3 trailing dims).
        :param action_size: size of the action input to each forward model.
        :param ensemble_size: stored on the instance, but NOTE(review): the
            ensemble below is hard-coded to exactly 5 members regardless of
            this value (unchanged from the original) — confirm before passing
            anything other than 5.
        :param feature_encoding: 'idf' | 'idf_burda' | 'idf_maze' | 'none';
            selects the observation encoder and its feature size.
            NOTE(review): with 'none' no self.feature_size is set, so the
            forward-model construction below would fail — same as before.
        :param prediction_beta: scale applied to the intrinsic reward.
        :param obs_stats: optional (mean, std) for observation normalization.
        :param forward_loss_wt: weight of the forward loss; -1.0 means 1.0.
        :param forward_model: 'res' (ResForward) or 'og' (OgForward).
        """
        super(Disagreement, self).__init__()
        self.ensemble_size = ensemble_size
        self.beta = prediction_beta
        self.feature_encoding = feature_encoding
        self.obs_stats = obs_stats
        self.device = torch.device("cuda:0" if device == "gpu" else "cpu")
        if self.obs_stats is not None:
            self.obs_mean, self.obs_std = self.obs_stats
        # -1.0 is a sentinel for "unweighted".
        if forward_loss_wt == -1.0:
            self.forward_loss_wt = 1.0
        else:
            self.forward_loss_wt = forward_loss_wt

        if self.feature_encoding != 'none':
            if self.feature_encoding == 'idf':
                self.feature_size = 288
                self.encoder = UniverseHead(image_shape=image_shape, batch_norm=batch_norm)
            elif self.feature_encoding == 'idf_burda':
                self.feature_size = 512
                self.encoder = BurdaHead(image_shape=image_shape, output_size=self.feature_size, batch_norm=batch_norm)
            elif self.feature_encoding == 'idf_maze':
                self.feature_size = 256
                self.encoder = MazeHead(image_shape=image_shape, output_size=self.feature_size, batch_norm=batch_norm)

        if forward_model == 'res':
            fmodel_class = ResForward
        elif forward_model == 'og':
            fmodel_class = OgForward

        # Keep the five numbered attributes so state_dict keys are unchanged,
        # but also collect them in a plain list so the other methods can loop
        # instead of repeating the same statement five times.
        self.forward_model_1 = fmodel_class(feature_size=self.feature_size, action_size=action_size).to(self.device)
        self.forward_model_2 = fmodel_class(feature_size=self.feature_size, action_size=action_size).to(self.device)
        self.forward_model_3 = fmodel_class(feature_size=self.feature_size, action_size=action_size).to(self.device)
        self.forward_model_4 = fmodel_class(feature_size=self.feature_size, action_size=action_size).to(self.device)
        self.forward_model_5 = fmodel_class(feature_size=self.feature_size, action_size=action_size).to(self.device)
        self.forward_models = [self.forward_model_1, self.forward_model_2,
                               self.forward_model_3, self.forward_model_4,
                               self.forward_model_5]

    def forward(self, obs1, obs2, action):
        """Encode both observations and predict phi2 with every ensemble member.

        :return: (phi2, list of per-member predictions, stacked predictions).
        """
        if self.obs_stats is not None:
            img1 = (obs1 - self.obs_mean) / (self.obs_std+1e-10)
            img2 = (obs2 - self.obs_mean) / (self.obs_std+1e-10)
        # NOTE(review): the normalized img1/img2 above are unconditionally
        # overwritten by the raw casts below, so obs_stats normalization is
        # dead code — behavior preserved from the original; confirm intent.
        img1 = obs1.type(torch.float)
        img2 = obs2.type(torch.float) # Expect torch.uint8 inputs

        # Infer (presence of) leading dimensions: [T,B], [B], or [].
        # lead_dim is just number of leading dimensions: e.g. [T, B] = 2 or [] = 0.
        lead_dim, T, B, img_shape = infer_leading_dims(obs1, 3)

        phi1 = img1
        phi2 = img2
        if self.feature_encoding != 'none':
            phi1 = self.encoder(img1.view(T * B, *img_shape))
            phi2 = self.encoder(img2.view(T * B, *img_shape))
            phi1 = phi1.view(T, B, -1) # make sure you're not mixing data up here
            phi2 = phi2.view(T, B, -1)

        # Detach so the forward models do not backprop into the encoder.
        phi1_in = phi1.detach()
        action_in = action.view(T, B, -1).detach()
        predicted_phi2 = [fm(phi1_in, action_in) for fm in self.forward_models]
        predicted_phi2_stacked = torch.stack(predicted_phi2)

        return phi2, predicted_phi2, predicted_phi2_stacked

    def compute_bonus(self, observations, next_observations, actions):
        """Intrinsic reward: per-step variance of the ensemble's predictions
        (averaged over the feature dimension), scaled by prediction_beta."""
        _, _, predicted_phi2_stacked = self.forward(observations, next_observations, actions)
        feature_var = torch.var(predicted_phi2_stacked, dim=0) # feature variance across forward models
        reward = torch.mean(feature_var, axis=-1) # mean over feature
        return self.beta * reward

    def compute_loss(self, observations, next_observations, actions, valid):
        """Forward-model training loss: sum over ensemble members of the
        dropout-masked, feature-normalized MSE to the (detached) target phi2,
        masked by `valid` and scaled by forward_loss_wt."""
        #------------------------------------------------------------#
        # hacky dimension add for when you have only one environment (debugging)
        if actions.dim() == 2:
            actions = actions.unsqueeze(1)
        #------------------------------------------------------------#
        phi2, predicted_phi2, _ = self.forward(observations, next_observations, actions)
        phi2_target = phi2.detach()
        forward_loss = torch.tensor(0.0, device=self.device)
        # One identical term per ensemble member (previously five copy-pasted
        # statements); the dropout mask keeps the members de-correlated.
        for predicted in predicted_phi2:
            member_loss = nn.functional.dropout(
                nn.functional.mse_loss(predicted, phi2_target, reduction='none'),
                p=0.2).sum(-1) / self.feature_size
            forward_loss = forward_loss + valid_mean(member_loss, valid)
        return self.forward_loss_wt * forward_loss
| 5,826 | 0 | 108 |
8f33c883632dd88e193a16cb271b82e81a1d018a | 1,778 | py | Python | ppjson.py | nightjuggler/peaks | 9ef7596295648e070eb5b62c88564423916d84fa | [
"MIT"
] | 1 | 2021-01-16T17:54:03.000Z | 2021-01-16T17:54:03.000Z | ppjson.py | nightjuggler/peaks | 9ef7596295648e070eb5b62c88564423916d84fa | [
"MIT"
] | null | null | null | ppjson.py | nightjuggler/peaks | 9ef7596295648e070eb5b62c88564423916d84fa | [
"MIT"
] | null | null | null | #
# ppjson.py - Pretty Print JSON
#
import sys
level = 0
write = sys.stdout.write
if __name__ == '__main__':
main()
| 14.224 | 81 | 0.602362 | #
# ppjson.py - Pretty Print JSON
#
import sys
def log(message, *formatArgs):
	"""Format *message* with *formatArgs* (str.format style) and print it to stderr."""
	text = message.format(*formatArgs)
	print(text, file=sys.stderr)
level = 0
write = sys.stdout.write
def writeTab():
	"""Write the current indentation: one tab per nesting level."""
	tabs = '\t' * level
	write(tabs)
def ppDict(o):
	"""Pretty-print dict *o* as a JSON object to the module-level writer.

	Empty and single-entry dicts stay on one line; larger dicts are written
	one key per line, keys sorted, at the current indentation level.
	Keys are serialized with json.dumps so quotes, backslashes and control
	characters in key strings produce valid JSON (the old
	'"{}": '.format(key) wrote them raw and unescaped).
	"""
	import json

	global level, write

	n = len(o)
	if n == 0:
		write('{}')
		return
	if n == 1:
		# Single entry: keep it inline (no indentation change).
		write('{')
		for key, value in o.items():
			write('{}: '.format(json.dumps(key, ensure_ascii=False)))
			prettyPrint(value)
		write('}')
		return
	write('{\n')
	level += 1
	first = True
	for key in sorted(o.keys()):
		if first:
			first = False
		else:
			write(',\n')
		writeTab()
		write('{}: '.format(json.dumps(key, ensure_ascii=False)))
		prettyPrint(o[key])
	level -= 1
	write('\n')
	writeTab()
	write('}')
def ppList(o):
	"""Pretty-print list *o* as a JSON array via the module-level writer.

	Empty and single-element lists stay on one line; a two-number list is
	written compactly as [a,b] (coordinate-pair shortcut); anything else is
	written one element per line at the current indentation level.
	"""
	global level, write

	count = len(o)
	if count == 0:
		write('[]')
		return
	if count == 1:
		write('[')
		prettyPrint(o[0])
		write(']')
		return
	if count == 2 and isinstance(o[0], (float, int)) and isinstance(o[1], (float, int)):
		write('[{},{}]'.format(o[0], o[1]))
		return
	write('[\n')
	level += 1
	for idx, item in enumerate(o):
		if idx:
			write(',\n')
		writeTab()
		prettyPrint(item)
	level -= 1
	write('\n')
	writeTab()
	write(']')
def prettyPrint(o):
	"""Write *o* to the module-level writer as pretty-printed JSON.

	Dispatches on the exact type of *o*; dicts and lists recurse via
	ppDict/ppList.  Fixes over the original: strings are escaped via
	json.dumps (quotes/backslashes/control chars were written raw), booleans
	are written as JSON 'true'/'false' (str(bool) gave 'True'/'False'), and
	the trailing newline is emitted only by the outermost call (the old
	`level == 0` test also fired for elements of single-entry containers,
	which never increment `level`, injecting stray newlines).
	"""
	import json

	global level, write

	# Recursion depth, tracked on the function object itself.
	prettyPrint._depth = getattr(prettyPrint, '_depth', 0) + 1
	try:
		objType = type(o)
		if objType is str:
			write(json.dumps(o, ensure_ascii=False))
		elif objType is bool:
			write('true' if o else 'false')
		elif objType in (float, int):
			write(str(o))
		elif objType is dict:
			ppDict(o)
		elif objType is list:
			ppList(o)
		elif o is None:
			write('null')
		else:
			log('Unrecognized object type: {}', objType)
	finally:
		prettyPrint._depth -= 1
	if prettyPrint._depth == 0:
		write('\n')
def main():
	"""CLI entry point: read the JSON file named on the command line and
	pretty-print it to stdout."""
	import argparse
	import json

	parser = argparse.ArgumentParser()
	parser.add_argument('fileName')
	args = parser.parse_args()

	# 'with' guarantees the file is closed even if json.load raises
	# (the old open/read/close leaked the handle on a parse error).
	with open(args.fileName) as jsonFile:
		jsonData = json.load(jsonFile)
	prettyPrint(jsonData)
if __name__ == '__main__':
main()
| 1,521 | 0 | 138 |
7d3920167f2ff85fc145bda37fe2ad2ff4f17034 | 4,915 | py | Python | src/cnrg/src/part_info.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 5 | 2020-03-13T02:54:03.000Z | 2022-03-18T02:33:12.000Z | src/cnrg/src/part_info.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 2 | 2021-11-10T19:47:00.000Z | 2022-02-10T01:24:59.000Z | src/cnrg/src/part_info.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 1 | 2021-05-24T21:54:44.000Z | 2021-05-24T21:54:44.000Z | """
Partial info extraction and generation
Partial boundary information containing node level info on boundary degree
"""
import random
import networkx as nx
import numpy as np
from src.cnrg.src.LightMultiGraph import LightMultiGraph
from src.cnrg.src.globals import find_boundary_edges
def set_boundary_degrees_old(g, sg):
"""
Find the nunber of boundary edges that each node participate in.
This is stored as a node level attribute - 'b_deg' in nodes in g that are part of nbunch
:param g: whole graph
:param sg: the subgraph
:return: nothing
"""
boundary_degree = {}
for u in sg.nodes():
boundary_degree[u] = 0
for v in g.neighbors(u):
if not sg.has_node(v):
boundary_degree[u] += g.number_of_edges(u, v) # for a multi-graph
nx.set_node_attributes(sg, values=boundary_degree, name='b_deg')
def generate_graph(rule_dict, rule_list):
"""
Create a new graph from the VRG at random
:param rule_dict: List of unique VRG rules
:return: newly generated graph
"""
node_counter = 1
non_terminals = set()
# new_g = nx.MultiGraph()
new_g = LightMultiGraph()
new_g.add_node(0, label=0)
non_terminals.add(0)
rule_ordering = [] # list of rule ids in the order they were fired
while len(non_terminals) > 0: # continue until no more non-terminal nodes
# choose a non terminal node at random
node_sample = random.sample(non_terminals, 1)[0]
lhs = new_g.nodes[node_sample]['label']
rhs_candidates = list(filter(lambda rule: rule.is_active, rule_dict[lhs]))
# consider only active rules
if len(rhs_candidates) == 1:
rhs = rhs_candidates[0]
else:
weights = np.array([rule.frequency for rule in rhs_candidates])
weights = weights / np.sum(weights) # normalize into probabilities
idx = int(np.random.choice(range(len(rhs_candidates)), size=1, p=weights)) # pick based on probability
rhs = rhs_candidates[idx]
# print(f'firing rule {rule_list.index(rhs)}')
# rule_ordering.append(rule_list.index(rhs))
# print('Selected node {} with label {}'.format(node_sample, lhs))
broken_edges = find_boundary_edges(new_g, [node_sample])
# print('broken edges: ', broken_edges)
assert len(broken_edges) == lhs
new_g.remove_node(node_sample)
non_terminals.remove(node_sample)
nodes = {}
for n, d in rhs.graph.nodes(data=True): # all the nodes are internal
new_node = node_counter
nodes[n] = new_node
new_g.add_node(new_node, attr_dict=d)
if 'label' in d: # if it's a new non-terminal add it to the set of non-terminals
non_terminals.add(new_node)
node_counter += 1
# randomly assign broken edges to boundary edges
random.shuffle(broken_edges)
# randomly joining the new boundary edges from the RHS to the rest of the graph - uniformly at random
for n, d in rhs.graph.nodes(data=True):
num_boundary_edges = d['b_deg']
if num_boundary_edges == 0: # there are no boundary edges incident to that node
continue
assert len(broken_edges) >= num_boundary_edges
edge_candidates = broken_edges[: num_boundary_edges] # picking the first num_broken edges
broken_edges = broken_edges[num_boundary_edges:] # removing them from future consideration
for u, v in edge_candidates: # each edge is either (node_sample, v) or (u, node_sample)
if u == node_sample:
u = nodes[n]
else:
v = nodes[n]
# print('adding broken edge ({}, {})'.format(u, v))
new_g.add_edge(u, v)
# adding the rhs to the new graph
for u, v in rhs.graph.edges():
# print('adding RHS internal edge ({}, {})'.format(nodes[u], nodes[v]))
edge_multiplicity = rhs.graph[u][v]['weight'] #
for _ in range(edge_multiplicity):
new_g.add_edge(nodes[u], nodes[v])
return new_g, rule_ordering
if __name__ == '__main__':
g = LightMultiGraph()
g.add_edges_from([(1, 2), (1, 2), (1, 3), (2, 3), (3, 4)])
sg = g.subgraph([2, 3]).copy()
print(g.edges(data=True))
set_boundary_degrees(g, sg)
print(sg.nodes(data=True))
| 34.612676 | 115 | 0.62238 | """
Partial info extraction and generation
Partial boundary information containing node level info on boundary degree
"""
import random
import networkx as nx
import numpy as np
from src.cnrg.src.LightMultiGraph import LightMultiGraph
from src.cnrg.src.globals import find_boundary_edges
def set_boundary_degrees(g, sg):
    """Annotate each node of *sg* with 'b_deg': the number of edges (counting
    multi-edges) connecting it to nodes of *g* outside *sg*.

    :param g: the whole (multi)graph
    :param sg: the subgraph to annotate
    """
    # TODO: test this!!
    b_deg = dict.fromkeys(sg.nodes(), 0)  # every boundary degree defaults to 0
    for u, v in nx.edge_boundary(g, sg.nodes()):
        inside = u if sg.has_node(u) else v
        b_deg[inside] += g.number_of_edges(u, v)
    nx.set_node_attributes(sg, values=b_deg, name='b_deg')
def set_boundary_degrees_old(g, sg):
    """
    Find the number of boundary edges that each node participates in.
    This is stored as a node level attribute - 'b_deg' on the nodes of sg.
    :param g: whole graph
    :param sg: the subgraph
    :return: nothing
    """
    b_deg = {
        node: sum(g.number_of_edges(node, nbr)
                  for nbr in g.neighbors(node)
                  if not sg.has_node(nbr))  # count only edges leaving sg
        for node in sg.nodes()
    }
    nx.set_node_attributes(sg, values=b_deg, name='b_deg')
def generate_graph(rule_dict, rule_list):
    """
    Create a new graph from the VRG at random.

    :param rule_dict: mapping from LHS symbol -> list of candidate rules;
        each rule exposes .is_active, .frequency and a .graph RHS whose nodes
        carry a 'b_deg' (boundary degree) attribute
    :param rule_list: full rule list; only referenced in commented-out
        bookkeeping below
    :return: tuple (new_g, rule_ordering); rule_ordering is always returned
        empty because the append recording fired rules is commented out
    """
    node_counter = 1
    non_terminals = set()
    # new_g = nx.MultiGraph()
    new_g = LightMultiGraph()
    new_g.add_node(0, label=0)
    non_terminals.add(0)
    rule_ordering = [] # list of rule ids in the order they were fired
    while len(non_terminals) > 0: # continue until no more non-terminal nodes
        # choose a non terminal node at random
        node_sample = random.sample(non_terminals, 1)[0]
        lhs = new_g.nodes[node_sample]['label']
        rhs_candidates = list(filter(lambda rule: rule.is_active, rule_dict[lhs]))
        # consider only active rules
        if len(rhs_candidates) == 1:
            rhs = rhs_candidates[0]
        else:
            # weighted choice: rules fire proportionally to their frequency
            weights = np.array([rule.frequency for rule in rhs_candidates])
            weights = weights / np.sum(weights) # normalize into probabilities
            idx = int(np.random.choice(range(len(rhs_candidates)), size=1, p=weights)) # pick based on probability
            rhs = rhs_candidates[idx]
        # print(f'firing rule {rule_list.index(rhs)}')
        # rule_ordering.append(rule_list.index(rhs))
        # print('Selected node {} with label {}'.format(node_sample, lhs))
        broken_edges = find_boundary_edges(new_g, [node_sample])
        # print('broken edges: ', broken_edges)
        # the LHS symbol must equal the non-terminal's boundary degree
        assert len(broken_edges) == lhs
        new_g.remove_node(node_sample)
        non_terminals.remove(node_sample)
        nodes = {}
        for n, d in rhs.graph.nodes(data=True): # all the nodes are internal
            new_node = node_counter
            nodes[n] = new_node
            new_g.add_node(new_node, attr_dict=d)
            if 'label' in d: # if it's a new non-terminal add it to the set of non-terminals
                non_terminals.add(new_node)
            node_counter += 1
        # randomly assign broken edges to boundary edges
        random.shuffle(broken_edges)
        # randomly joining the new boundary edges from the RHS to the rest of the graph - uniformly at random
        for n, d in rhs.graph.nodes(data=True):
            num_boundary_edges = d['b_deg']
            if num_boundary_edges == 0: # there are no boundary edges incident to that node
                continue
            assert len(broken_edges) >= num_boundary_edges
            edge_candidates = broken_edges[: num_boundary_edges] # picking the first num_broken edges
            broken_edges = broken_edges[num_boundary_edges:] # removing them from future consideration
            for u, v in edge_candidates: # each edge is either (node_sample, v) or (u, node_sample)
                # rewire the endpoint that was the removed non-terminal
                if u == node_sample:
                    u = nodes[n]
                else:
                    v = nodes[n]
                # print('adding broken edge ({}, {})'.format(u, v))
                new_g.add_edge(u, v)
        # adding the rhs to the new graph
        for u, v in rhs.graph.edges():
            # print('adding RHS internal edge ({}, {})'.format(nodes[u], nodes[v]))
            edge_multiplicity = rhs.graph[u][v]['weight'] #
            for _ in range(edge_multiplicity):
                new_g.add_edge(nodes[u], nodes[v])
    return new_g, rule_ordering
if __name__ == '__main__':
    # Smoke test: tiny multigraph (note the doubled 1-2 edge); print the
    # boundary degrees computed for the subgraph {2, 3}.
    g = LightMultiGraph()
    g.add_edges_from([(1, 2), (1, 2), (1, 3), (2, 3), (3, 4)])
    sg = g.subgraph([2, 3]).copy()
    print(g.edges(data=True))
    set_boundary_degrees(g, sg)
    print(sg.nodes(data=True))
| 401 | 0 | 23 |
3f4dea2d62030d0f427afb8a404d1182031975f7 | 11,515 | py | Python | crowd_anki/config/config_ui.py | lukas-mertens/CrowdAnki | 2961f9c073bc799f192235414408907632fc8211 | [
"MIT"
] | null | null | null | crowd_anki/config/config_ui.py | lukas-mertens/CrowdAnki | 2961f9c073bc799f192235414408907632fc8211 | [
"MIT"
] | null | null | null | crowd_anki/config/config_ui.py | lukas-mertens/CrowdAnki | 2961f9c073bc799f192235414408907632fc8211 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files/config.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 94.385246 | 635 | 0.730873 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files/config.ui'
#
# Created by: PyQt5 UI code generator 5.13.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(825, 726)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.group_snapshot = QtWidgets.QGroupBox(Dialog)
self.group_snapshot.setObjectName("group_snapshot")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.group_snapshot)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lbl_snapshot_path = QtWidgets.QLabel(self.group_snapshot)
self.lbl_snapshot_path.setObjectName("lbl_snapshot_path")
self.horizontalLayout_2.addWidget(self.lbl_snapshot_path)
self.textedit_snapshot_path = QtWidgets.QLineEdit(self.group_snapshot)
self.textedit_snapshot_path.setObjectName("textedit_snapshot_path")
self.horizontalLayout_2.addWidget(self.textedit_snapshot_path)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.cb_automated_snapshot = QtWidgets.QCheckBox(self.group_snapshot)
self.cb_automated_snapshot.setObjectName("cb_automated_snapshot")
self.verticalLayout_3.addWidget(self.cb_automated_snapshot)
self.lbl_snapshot = QtWidgets.QLabel(self.group_snapshot)
self.lbl_snapshot.setObjectName("lbl_snapshot")
self.verticalLayout_3.addWidget(self.lbl_snapshot)
self.textedit_snapshot_root_decks = QtWidgets.QPlainTextEdit(self.group_snapshot)
self.textedit_snapshot_root_decks.setObjectName("textedit_snapshot_root_decks")
self.verticalLayout_3.addWidget(self.textedit_snapshot_root_decks)
self.verticalLayout_2.addWidget(self.group_snapshot)
self.group_deck_import = QtWidgets.QGroupBox(Dialog)
self.group_deck_import.setObjectName("group_deck_import")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.group_deck_import)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.cb_ignore_move_cards = QtWidgets.QCheckBox(self.group_deck_import)
self.cb_ignore_move_cards.setObjectName("cb_ignore_move_cards")
self.verticalLayout_5.addWidget(self.cb_ignore_move_cards)
self.verticalLayout_2.addWidget(self.group_deck_import)
self.group_deck_export = QtWidgets.QGroupBox(Dialog)
self.group_deck_export.setObjectName("group_deck_export")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.group_deck_export)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.lbl_deck_sort = QtWidgets.QLabel(self.group_deck_export)
self.lbl_deck_sort.setObjectName("lbl_deck_sort")
self.verticalLayout_4.addWidget(self.lbl_deck_sort)
self.textedit_deck_sort_methods = QtWidgets.QPlainTextEdit(self.group_deck_export)
self.textedit_deck_sort_methods.setObjectName("textedit_deck_sort_methods")
self.verticalLayout_4.addWidget(self.textedit_deck_sort_methods)
self.cb_reverse_sort = QtWidgets.QCheckBox(self.group_deck_export)
self.cb_reverse_sort.setObjectName("cb_reverse_sort")
self.verticalLayout_4.addWidget(self.cb_reverse_sort)
self.verticalLayout_2.addWidget(self.group_deck_export)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.tb_instructions = QtWidgets.QTextBrowser(Dialog)
self.tb_instructions.setLineWidth(1)
self.tb_instructions.setObjectName("tb_instructions")
self.horizontalLayout.addWidget(self.tb_instructions)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Apply all user-visible strings to the dialog's widgets.

        Auto-generated (pyuic-style) translation pass: every literal is routed
        through QCoreApplication.translate so Qt's i18n tooling can substitute
        localized text.  Called from setupUi; may be re-invoked to re-apply
        strings after a runtime language change.
        """
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "CrowdAnki Configuration"))
        # Snapshot settings group.
        self.group_snapshot.setTitle(_translate("Dialog", "Snapshot"))
        self.lbl_snapshot_path.setText(_translate("Dialog", "Snapshot Path:"))
        self.cb_automated_snapshot.setText(_translate("Dialog", "Automated Snapshot"))
        self.lbl_snapshot.setText(_translate("Dialog", "Snapshot Root Decks (separated by comma)"))
        # Deck import group.
        self.group_deck_import.setTitle(_translate("Dialog", "Import"))
        self.cb_ignore_move_cards.setText(_translate("Dialog", "Do Not Move Existing Cards"))
        # Deck export group.
        self.group_deck_export.setTitle(_translate("Dialog", "Export"))
        self.lbl_deck_sort.setText(_translate("Dialog", "Deck Sort Method(s) (separated by comma)"))
        self.cb_reverse_sort.setText(_translate("Dialog", "Reverse Sort Order"))
        # Static rich-text help shown in the right-hand QTextBrowser (HTML
        # exported by Qt Designer; kept verbatim).
        self.tb_instructions.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Noto Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">Snapshot</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Snapshot Path</span>: The path for base directory where the CrowdAnki snapshot would be written to.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">Default</span>: `user_files` subdirectory of the extension directory.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Automated Snapshot</span>: Whether to perform the snapshot automatically on opening/closing the application and on switching the profile.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">Default</span>: `false` - this is an experimental feature and it\'s disabled by default.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Snapshot Root Decks</span>: A list of names of the decks that should be considered `root`. When CrowdAnki creates a snapshot it\'ll create a separate git repository for each `root` deck.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-style:italic;\">Default</span>: Each deck with no children is considered `root`.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" text-decoration: underline;\">Example</span>: Let\'s assume that you have the following decks in your collection: `a` (with sub-decks `b` and `c`), and `d`. By default CrowdAnki is going to create 3 separate repositories - `a::b`, `a::c` and `d`. If you are to add `a` to `snapshot_root_decks` then CrowdAnki would create 2 repositories instead - `a` and `d`. The information for sub-decks `b` and `c` would be stored within repository `a` in this case.</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">Import</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Do Not Move Existing Cards</span>: By default on import of a CrowdAnki deck file, when a note already exists in Anki itself, all notes will be updated and placed in the deck set in the deck file. Tick this box if you wish only to have the notes updated, but left in their current deck. See <a href=\"https://github.com/Stvad/CrowdAnki/issues/23\"><span style=\" text-decoration: underline; color:#2980b9;\">this Issue</span></a> on the CrowdAnki Github Repo for more info.</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:16pt;\">Export</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Deck Sort Methods</span>: Methods with which the deck will be sorted. If multiple sorting methods are provided then each sorting method will be applied in order.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" text-decoration: underline;\">Options</span>: <span style=\" font-style:italic;\">none, guid, flag, tag, note_model_name, note_model_id, field1, </span>and<span style=\" font-style:italic;\"> field2.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">Reverse Sort Order</span>: Swap the order of the notes, after all sorting.</p></body></html>"))
| 11,187 | 3 | 76 |
e0142fa4ec05646b0e86910d593bf4e001e44504 | 8,615 | py | Python | Utils/visualize_object_survey.py | arafin-lab/model_inversion_experiments | 8e491f22ae847d8d0f2f31caed6ad0078c9b6a49 | [
"Apache-2.0"
] | 101 | 2018-10-26T14:39:19.000Z | 2022-03-28T11:38:45.000Z | Utils/visualize_object_survey.py | zhampel/cyphercat | 4029ae8683b9056013e6424d8931afe79afa618e | [
"Apache-2.0"
] | 32 | 2018-10-10T23:02:04.000Z | 2019-11-11T21:30:57.000Z | Utils/visualize_object_survey.py | zhampel/cyphercat | 4029ae8683b9056013e6424d8931afe79afa618e | [
"Apache-2.0"
] | 49 | 2018-10-05T17:54:49.000Z | 2022-02-10T07:47:25.000Z | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
    """
    Plain (vanilla) saliency map: the positive gradient of the target class
    score with respect to every input pixel, collapsed across channels.
    Args:
        - model: trained PyTorch model under inspection
        - input: image tensor to explain, shaped (C x W x H); a batch
          dimension is added in place
        - label: index of the class whose supporting evidence is highlighted
        - plot: unused, kept for interface compatibility
    return: 2-D numpy int array scaled into the 0-255 range
    """
    # Track gradients on the (batched, in-place unsqueezed) input.
    batched = Variable(input.unsqueeze_(0), requires_grad=True)
    scores = model.forward(batched)
    model.zero_grad()
    # Backpropagate only the score of the requested class.
    scores[0][label].backward()
    # Keep positive gradients, drop the batch axis, move to (H, W, C) layout.
    sal = batched.grad.data.clamp(min=0).squeeze()
    heat = np.amax(sal.permute(1, 2, 0).cpu().numpy(), axis=2)
    # Normalize into the integer 0-255 range.
    heat -= heat.min()
    heat /= heat.max()
    heat *= 255
    return heat.astype(int)
def guided_saliency_map(model, input, label, plot = False):
    """
    guided_saliency_map: implementation to return a guided saliency map, informing
    on the regions of interest that activate a specific label.  Guided backprop
    clamps every module's incoming gradient to be non-negative during the
    backward pass, suppressing the noisy negative contributions of a vanilla
    saliency map.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - plot: unused, kept for interface compatibility
    return: numpy array with heatmap data (2-D ints in 0-255)
    """
    input = Variable(input.unsqueeze_(0), requires_grad=True)
    try:
        h = [0]*len(list(model.modules()))
        # Fix: this hook was referenced but never defined in this copy,
        # which made every call fail with NameError.
        def hookfunc(module, gradInput, gradOutput):
            # Guided backprop: pass only positive gradients; keep None slots.
            return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
        for j, i in enumerate(list(model.modules())):
            h[j] = i.register_backward_hook(hookfunc)
        output = model.forward(input)
        model.zero_grad()
        output[0][label].backward()
        for i in range(len(list(model.modules()))):
            h[i].remove()
    except Exception as e:
        # Best effort: report, then drop any hooks registered before failure.
        print(e)
        for i in range(len(list(model.modules()))):
            h[i].remove()
    # Positive input gradients, collapsed over channels, normalized to 0-255.
    grads = input.grad.data.clamp(min=0)
    grads.squeeze_()
    grads.transpose_(0,1)
    grads.transpose_(1,2)
    grads = np.amax(grads.cpu().numpy(), axis=2)
    grads -= grads.min()
    grads /= grads.max()
    grads *= 255
    grads = grads.astype(int)
    return grads
def gradcam(model, input, label, layer_name, plot=False):
    """
    gradcam: class activation map from the gradient of the class score with
    respect to each filter of the target (last conv) layer.  The gradients are
    spatially averaged into per-filter weights; the weighted sum of the filter
    activations yields a map the size of that layer's feature maps, which is
    then resized back to the input's spatial size.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy array (resized via PIL) with the activation map, or None when
        layer_name is not a direct submodule of the model
    """
    imgs_shape = (input.shape[1], input.shape[2])
    rs = torchvision.transforms.Resize( imgs_shape )
    # Find the requested layer among the model's direct submodules.
    last_conv = None
    for name, item in model._modules.items():
        if name == layer_name:
            last_conv = item
    if last_conv == None:
        print('Cant find target layer')
        return None
    pre_image = input
    # Module-level globals populated by the hooks below.
    global gcdata
    global gcgrads
    # Fix: these hooks were referenced but never defined in this copy,
    # which made every call fail with NameError.
    def bhook(module, gradInputs, gradOutputs):
        # Backward hook: capture the gradient at the target layer's output.
        global gcgrads
        gcgrads = gradOutputs
    def fhook(module, input, output):
        # Forward hook: capture the target layer's activations.
        global gcdata
        gcdata = output
    hb = last_conv.register_backward_hook(bhook)
    hf = last_conv.register_forward_hook(fhook)
    out = model(input.unsqueeze_(0))
    model.zero_grad()
    out[0, label].backward()
    hb.remove()
    hf.remove()
    # Drop batch dim; average gradients over both spatial dims -> one weight
    # per filter.
    gcdata = gcdata[0]
    gcgrads = gcgrads[0].squeeze()
    gcgrads = gcgrads.mean(dim=2, keepdim=True)
    gcgrads = gcgrads.mean(dim=1, keepdim=True)
    # Weighted sum of the activations, ReLU'd and normalized to [0, 1].
    gcdata = gcdata.mul(gcgrads)
    gcdata = gcdata.sum(dim=0, keepdim=True)
    gcdata = gcdata.clamp(min=0)
    gcdata -= gcdata.min()
    gcdata /= gcdata.max()
    # Resize back to the input's spatial size via a PIL round-trip.
    toi = torchvision.transforms.ToPILImage()
    gcdata = np.array(rs(toi(gcdata.data.cpu())))
    input.squeeze()
    return gcdata
def guided_gradcam(model, input, label, layer_name, plot = False):
    """
    guided_gradcam: combine a guided saliency map with a Grad-CAM class
    activation map; Grad-CAM contributes class sensitivity and the guided
    saliency map contributes pixel-level resolution.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy integer array (0-255) with the combined activation map
    """
    # gradcam() adds a batch dim to `input` in place, hence input[0] below.
    gc = gradcam(model, input, label, layer_name, plot=False)
    guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
    # Element-wise product: CAM-weighted guided gradients.
    # (Removed an unused torchvision.transforms.Resize((32,32)) that was
    # constructed here and never applied.)
    gc = gc * guided
    # Renormalize the product back into the 0-255 integer range.
    gc -= gc.min()
    gc = np.divide(gc, gc.max())
    gc *= 255
    gc = gc.astype(int)
    return gc
def smooth_guided_saliency_map(model, input, label, transform,x=10, percent_noise=10, plot = True):
    """
    smooth_guided_saliency_map: guided saliency map with SmoothGrad averaging.
    Small local variations in the derivatives produce apparent noise; averaging
    the guided gradients over several noisy copies of the input smooths it out.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - transform: preprocessing transform re-applied to each noisy sample
        - x: Number of times to sample for the smoothing
        - percent_noise: Percentage of noise to be introduced during sampling
    return: numpy array with heatmap data (2-D ints in 0-255)
    NOTE: requires a CUDA device — tensors are moved with .cuda().
    """
    tensor_input = input
    final_grad = torch.zeros(input.shape).cuda()
    final_grad = final_grad.unsqueeze(0)
    h = [0]*len(list(model.modules()))
    # Fix: this hook was referenced but never defined in this copy,
    # which made every call fail with NameError.
    def hookfunc(module, gradInput, gradOutput):
        # Guided backprop: propagate only the positive part of each gradient.
        return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
    for j, i in enumerate(list(model.modules())):
        h[j] = i.register_backward_hook(hookfunc)
    for i in range(x):
        temp_input = tensor_input
        # Gaussian noise scaled to percent_noise% of the input's dynamic range.
        noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
                                                  (tensor_input.max() - tensor_input.min()),
                                                  size=temp_input.shape)).type(torch.cuda.FloatTensor)
        # Round-trip through PIL so the supplied transform can be re-applied.
        temp_input = (temp_input.cuda() + noise).cpu().numpy()
        temp_input = np.transpose(temp_input, (1,2,0) )
        temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
        temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
        output = model.forward(temp_input)
        model.zero_grad()
        output[0][label].backward()
        final_grad += temp_input.grad.data
    for i in range(len(list(model.modules()))):
        h[i].remove()
    # Average the accumulated guided gradients and normalize to 0-255.
    grads = final_grad/x
    grads = grads.clamp(min=0)
    grads.squeeze_()
    grads.transpose_(0,1)
    grads.transpose_(1,2)
    grads = np.amax(grads.cpu().numpy(), axis=2)
    grads -= grads.min()
    grads /= grads.max()
    grads *= 255
    grads = grads.astype(int)
    return grads
| 32.509434 | 117 | 0.643645 | #!/usr/bin/python3
"""
Set of functions used to call a series of algorithms used to visualize the object localization of a pre-trained
network in PyTorch. The different algorithms are discussed in several papers, while the implementation is based,
roughly, on work in the following repository (https://github.com/sar-gupta/weakly-supervised-localization-survey)
"""
import numpy as np
import PIL
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
def saliency_map_general(model, input, label, plot = False):
    """
    Vanilla saliency map: gradient of the class score with respect to the
    input pixels, clamped to its positive part and reduced over channels.
    Args:
        - model: trained PyTorch model under inspection
        - input: image tensor to explain, shaped (C x W x H); a batch
          dimension is added in place
        - label: index of the class whose supporting evidence is highlighted
        - plot: unused, kept for interface compatibility
    return: 2-D numpy int array scaled into the 0-255 range
    """
    leaf = Variable(input.unsqueeze_(0), requires_grad=True)
    prediction = model.forward(leaf)
    model.zero_grad()
    # Only the requested class score drives the backward pass.
    prediction[0][label].backward()
    # Positive gradients, batch axis dropped, rearranged to (H, W, C).
    positive = leaf.grad.data.clamp(min=0).squeeze()
    heatmap = np.amax(positive.permute(1, 2, 0).cpu().numpy(), axis=2)
    # Stretch the values onto the integer 0-255 scale.
    heatmap -= heatmap.min()
    heatmap /= heatmap.max()
    heatmap *= 255
    return heatmap.astype(int)
def guided_saliency_map(model, input, label, plot = False):
    """
    guided_saliency_map: implementation to return a guided saliency map, informing
    on the regions of interest that activate a specific label.  Guided backprop:
    each module's incoming gradient is clamped to be non-negative via backward
    hooks, suppressing noisy negative contributions.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
    return: numpy array with heatmap data
    """
    # Batch dim added in place; gradients will accumulate on this leaf tensor.
    input = Variable(input.unsqueeze_(0), requires_grad=True)
    try:
        h = [0]*len(list(model.modules()))
        # Guided backprop gate: only positive gradients flow backwards.
        def hookfunc(module, gradInput, gradOutput):
            return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
        for j, i in enumerate(list(model.modules())):
            h[j] = i.register_backward_hook(hookfunc)
        output = model.forward(input)
        model.zero_grad()
        output[0][label].backward()
        for i in range(len(list(model.modules()))):
            h[i].remove()
    except Exception as e:
        # Best effort: report the failure, then drop any registered hooks.
        print(e)
        for i in range(len(list(model.modules()))):
            h[i].remove()
    # Positive input gradients, collapsed over channels, normalized to 0-255.
    grads = input.grad.data.clamp(min=0)
    grads.squeeze_()
    grads.transpose_(0,1)
    grads.transpose_(1,2)
    grads = np.amax(grads.cpu().numpy(), axis=2)
    grads -= grads.min()
    grads /= grads.max()
    grads *= 255
    grads = grads.astype(int)
    return grads
def gradcam(model, input, label, layer_name, plot=False):
    """
    gradcam: implementation to return a class activation map using the gradient of class score with each
    of last conv layer filters. Calculate weighted sum of gradients and filters to finally obtain a map
    of size equal to size of filters.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy array (resized via PIL) with the activation map, or None when
        layer_name is not a direct submodule of the model
    """
    # Spatial size of the input; the CAM is resized back to this at the end.
    imgs_shape = (input.shape[1], input.shape[2])
    rs = torchvision.transforms.Resize( imgs_shape )
    #find the right layer
    last_conv = None
    for name, item in model._modules.items():
        if name == layer_name:
            last_conv = item
    if last_conv == None:
        print('Cant find target layer')
        return None
    pre_image = input
    # Module-level globals filled in by the forward/backward hooks below.
    global gcdata
    global gcgrads
    def bhook(module, gradInputs, gradOutputs):
        # Backward hook: capture the gradient at the target layer's output.
        global gcgrads
        gcgrads = gradOutputs
    def fhook(module, input, output):
        # Forward hook: capture the target layer's activations.
        global gcdata
        gcdata = output
    hb = last_conv.register_backward_hook(bhook)
    hf = last_conv.register_forward_hook(fhook)
    out = model(input.unsqueeze_(0))
    model.zero_grad()
    out[0, label].backward()
    hb.remove()
    hf.remove()
    # Drop batch dim; average gradients over both spatial dims so each filter
    # gets a single scalar weight.
    gcdata = gcdata[0]
    gcgrads = gcgrads[0].squeeze()
    gcgrads = gcgrads.mean(dim=2, keepdim=True)
    gcgrads = gcgrads.mean(dim=1, keepdim=True)
    # Weighted sum of the filter activations, ReLU'd and normalized to [0, 1].
    gcdata = gcdata.mul(gcgrads)
    gcdata = gcdata.sum(dim=0, keepdim=True)
    gcdata = gcdata.clamp(min=0)
    gcdata -= gcdata.min()
    gcdata /= gcdata.max()
    # Resize back to the input's spatial size via a PIL round-trip.
    toi = torchvision.transforms.ToPILImage()
    gcdata = np.array(rs(toi(gcdata.data.cpu())))
    input.squeeze()
    return gcdata
def guided_gradcam(model, input, label, layer_name, plot = False):
    """
    guided_gradcam: combine a guided saliency map with a Grad-CAM class
    activation map; Grad-CAM contributes class sensitivity and the guided
    saliency map contributes pixel-level resolution.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy integer array (0-255) with the combined activation map
    """
    # gradcam() adds a batch dim to `input` in place, hence input[0] below.
    gc = gradcam(model, input, label, layer_name, plot=False)
    guided = guided_saliency_map(model=model, input=input[0], label=label, plot=False)
    # Element-wise product: CAM-weighted guided gradients.
    # (Removed an unused torchvision.transforms.Resize((32,32)) that was
    # constructed here and never applied.)
    gc = gc * guided
    # Renormalize the product back into the 0-255 integer range.
    gc -= gc.min()
    gc = np.divide(gc, gc.max())
    gc *= 255
    gc = gc.astype(int)
    return gc
def smooth_guided_saliency_map(model, input, label, transform,x=10, percent_noise=10, plot = True):
    """
    smooth_guided_saliency_map: Implementation of guided saliency map accounting for the fact
    small, local variations in the local derivatives lead to the apparent noise one sees. This implementation smooths
    these by averaging the guided gradients over several noisy copies of the input.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - transform: preprocessing transform re-applied to each noisy sample
        - x: Number of times to sample for the smoothing
        - percent_noise: Percentage of noise to be introduced during sampling for smoothing
    return: numpy array with heatmap data
    NOTE(review): requires a CUDA device — tensors are moved with .cuda().
    """
    tensor_input = input
    # Accumulator for the guided gradients of every noisy sample.
    final_grad = torch.zeros(input.shape).cuda()
    final_grad = final_grad.unsqueeze(0)
    h = [0]*len(list(model.modules()))
    # Guided backprop gate: only positive gradients flow backwards.
    def hookfunc(module, gradInput, gradOutput):
        return tuple([(None if g is None else g.clamp(min=0)) for g in gradInput])
    for j, i in enumerate(list(model.modules())):
        h[j] = i.register_backward_hook(hookfunc)
    for i in range(x):
        temp_input = tensor_input
        # Gaussian noise scaled to percent_noise% of the input's dynamic range.
        noise = torch.from_numpy(np.random.normal(loc=0, scale=(percent_noise/100) *
                                                  (tensor_input.max() - tensor_input.min()),
                                                  size=temp_input.shape)).type(torch.cuda.FloatTensor)
        # Round-trip through PIL so the supplied transform can be re-applied.
        temp_input = (temp_input.cuda() + noise).cpu().numpy()
        temp_input = np.transpose(temp_input, (1,2,0) )
        temp_input = PIL.Image.fromarray(temp_input.astype(np.uint8))
        temp_input = Variable(transform(temp_input).unsqueeze(0).cuda(), requires_grad=True)
        output = model.forward(temp_input)
        model.zero_grad()
        output[0][label].backward()
        final_grad += temp_input.grad.data
    for i in range(len(list(model.modules()))):
        h[i].remove()
    # Average the accumulated guided gradients and normalize to 0-255.
    grads = final_grad/x
    grads = grads.clamp(min=0)
    grads.squeeze_()
    grads.transpose_(0,1)
    grads.transpose_(1,2)
    grads = np.amax(grads.cpu().numpy(), axis=2)
    grads -= grads.min()
    grads /= grads.max()
    grads *= 255
    grads = grads.astype(int)
    return grads
def smooth_guided_gradcam(model, input, label, transform, layer_name, plot = False ):
    """
    smooth_guided_gradcam: combine a SmoothGrad guided saliency map with a
    Grad-CAM class activation map for a sharper, class-sensitive heatmap.
    Args:
        - model: (PyTorch) Trained model trying to understand
        - input: Image to be classfied and understood, passed as a PyTorch tensor (C x W x H)
        - label: Class to identify the regions of interest
        - transform: preprocessing transform applied to each noisy sample
        - layer_name: Name of the layer to target, should be the last CNN.
    return:
        numpy integer array (0-255) with the combined activation map
    """
    guided = smooth_guided_saliency_map(model, input, label, transform=transform, plot=False)
    gc = gradcam(model, input, label, layer_name=layer_name, plot=False)
    # CAM-weighted smooth guided gradients, renormalized into 0-255.
    # (Removed an unused torchvision.transforms.Resize((32,32)) that was
    # constructed here and never applied.)
    gc = gc * guided
    gc -= gc.min()
    gc = np.divide(gc, gc.max())
    gc *= 255
    gc = gc.astype(int)
    return gc
| 772 | 0 | 135 |
042732899e68080fe08c8f52ede7a73a878819ff | 1,696 | py | Python | tdclient/account_api.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | 2 | 2019-02-22T11:56:17.000Z | 2019-02-25T10:09:46.000Z | tdclient/account_api.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | null | null | null | tdclient/account_api.py | minchuang/td-client-python | 6cf6dfbb60119f400274491d3e942d4f9fbcebd6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import time
| 33.92 | 79 | 0.514151 | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import time
class AccountAPI(object):
    """Mixin implementing the Account endpoints of the Treasure Data v3 REST API.

    Expects the composing class to provide the HTTP/JSON plumbing used here:
    ``get``, ``raise_error``, ``checked_json`` and ``_parsedate``.
    """
    ####
    ## Account API
    ##
    def show_account(self):
        """Fetch the current account via GET /v3/account/show.

        Returns:
            dict: the "account" object with the numeric fields coerced to int
            and "created_at" parsed into a datetime.
        """
        with self.get("/v3/account/show") as res:
            code, body = res.status, res.read()
            if code != 200:
                self.raise_error("Show account failed", res, body)
            js = self.checked_json(body, ["account"])
            a = js["account"]
            # The API returns numbers as strings; normalize them to int.
            a["id"] = int(a["id"])
            a["plan"] = int(a["plan"])
            a["storage_size"] = int(a["storage_size"])
            a["guaranteed_cores"] = int(a["guaranteed_cores"])
            a["maximum_cores"] = int(a["maximum_cores"])
            a["created_at"] = self._parsedate(a["created_at"], "%Y-%m-%d")
            return a
    def account_core_utilization(self, _from, to):
        """Fetch core utilization via GET /v3/account/core_utilization.

        Args:
            _from: start of the window (stringified into the "from" query
                parameter), or None to omit it.
            to: end of the window, or None to omit it.

        Returns:
            list: [from_datetime, to_datetime, interval_int, history].
        """
        params = {}
        if _from is not None:
            params["from"] = str(_from)
        if to is not None:
            params["to"] = str(to)
        with self.get("/v3/account/core_utilization", params) as res:
            code, body = res.status, res.read()
            if code != 200:
                # Fix: the message was copy-pasted from show_account and
                # blamed the wrong endpoint.
                self.raise_error("Account core utilization failed", res, body)
            js = self.checked_json(body, ["from", "to", "interval", "history"])
            _from = self._parsedate(js["from"], "%Y-%m-%d %H:%M:%S %Z")
            to = self._parsedate(js["to"], "%Y-%m-%d %H:%M:%S %Z")
            interval = int(js["interval"])
            history = js["history"]
            return [_from, to, interval, history]
| 0 | 1,559 | 23 |
467a08261626d9203c64f1475b1888f09feb9de9 | 34 | py | Python | old/src/baseline_agents/Kait/__init__.py | horacioMartinez/L2RPN | 11e13a60a568b8ad7f9c6de060ac8c41784ced96 | [
"MIT"
] | 2 | 2021-10-17T08:41:42.000Z | 2021-12-22T18:39:39.000Z | old/src/baseline_agents/Kait/__init__.py | horacioMartinez/L2RPN | 11e13a60a568b8ad7f9c6de060ac8c41784ced96 | [
"MIT"
] | null | null | null | old/src/baseline_agents/Kait/__init__.py | horacioMartinez/L2RPN | 11e13a60a568b8ad7f9c6de060ac8c41784ced96 | [
"MIT"
] | null | null | null | from .submission import make_agent | 34 | 34 | 0.882353 | from .submission import make_agent | 0 | 0 | 0 |
65921663e4f5728ae00968077e25c8c27650608e | 1,507 | py | Python | debuggee/debugvis.py | jroweboy/vscode-lldb | 6d556023ad2e87971d977a66915876950a21207b | [
"MIT"
] | null | null | null | debuggee/debugvis.py | jroweboy/vscode-lldb | 6d556023ad2e87971d977a66915876950a21207b | [
"MIT"
] | null | null | null | debuggee/debugvis.py | jroweboy/vscode-lldb | 6d556023ad2e87971d977a66915876950a21207b | [
"MIT"
] | null | null | null | import io
import lldb
import debugger
import base64
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
| 35.046512 | 125 | 0.680159 | import io
import lldb
import debugger
import base64
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def show():
    """Render the current matplotlib figure in the debugger's HTML view.

    The figure is rasterized to PNG in memory, embedded as a base64 data URI,
    and handed to the debugger as inline content.
    """
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight')
    encoded = base64.b64encode(buf.getvalue()).decode('utf-8')
    document = '<html><img src="data:image/png;base64,%s"></html>' % encoded
    debugger.display_html('/plot', title='Pretty Plot', position=2, content={'/plot': document})
def show2():
    """Render the current matplotlib figure via the debugger's content provider.

    Variant of show() that serves the HTML through
    debugger.register_content_provider instead of passing the content inline.
    """
    image_bytes = io.BytesIO()
    plt.savefig(image_bytes, format='png', bbox_inches='tight')
    # Fix: the data URI was previously "data:data:image/png;..." (duplicated
    # scheme) and interpolated raw bytes (rendering as b'...'), producing a
    # broken <img>; use a single prefix and decode to str, matching show().
    encoded = base64.b64encode(image_bytes.getvalue()).decode('utf-8')
    document = '<html><img src="data:image/png;base64,%s"></html>' % encoded
    debugger.register_content_provider(lambda uri: document)
    debugger.display_html('debugger:/plot', title='Pretty Plot', position=2)
def plot_image(image, xdim, ydim, cmap='nipy_spectral_r'):
    """Plot a debuggee image buffer as a heatmap in the debugger's HTML view.

    Args:
        image: debugger-wrapped lldb value — either a pointer to the pixel
            buffer or the buffer value itself (its address is taken).
        xdim: image width in pixels.
        ydim: image height in pixels.
        cmap: matplotlib colormap name.
    """
    image = debugger.unwrap(image)
    # Resolve the address of the pixel data in the debuggee's memory.
    if image.TypeIsPointerType():
        image_addr = image.GetValueAsUnsigned()
    else:
        image_addr = image.AddressOf().GetValueAsUnsigned()
    # Reads xdim*ydim 4-byte pixels from the debuggee; assumes int32 pixels in
    # row-major (ydim x xdim) layout — TODO confirm against the debuggee type.
    data = lldb.process.ReadMemory(image_addr, int(xdim * ydim) * 4, lldb.SBError())
    data = np.frombuffer(data, dtype=np.int32).reshape((ydim,xdim))
    plt.imshow(data, cmap=cmap, interpolation='nearest')
    show()
def display(x):
    """Echo the debug representation of *x* to stdout."""
    text = repr(x)
    print(text)
def test():
    """Smoke-test the HTML plot pipeline with a damped sine wave."""
    xs = np.linspace(0, 1, 500)
    ys = np.exp(-5 * xs) * np.sin(4 * np.pi * xs)
    figure, axes = plt.subplots()
    axes.fill(xs, ys, zorder=10)
    axes.grid(True, zorder=5)
    show2()
| 1,271 | 0 | 115 |
439c5921c6284cb5fda23c030ae8a9d98952293a | 89 | py | Python | aawards/apps.py | seron-ux/Awards | e6cbe17a890e44b0d1d88c48d5c6c27656b50bfb | [
"MIT"
] | null | null | null | aawards/apps.py | seron-ux/Awards | e6cbe17a890e44b0d1d88c48d5c6c27656b50bfb | [
"MIT"
] | null | null | null | aawards/apps.py | seron-ux/Awards | e6cbe17a890e44b0d1d88c48d5c6c27656b50bfb | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 | from django.apps import AppConfig
class AawardsConfig(AppConfig):
    """Django AppConfig for the 'aawards' application."""
    # App label registered with Django's application registry
    # (referenced from settings.INSTALLED_APPS).
    name = 'aawards'
| 0 | 31 | 23 |
7523dddeccce7c26beff51912adb1dd4fceff950 | 11,210 | py | Python | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | 3 | 2020-02-05T03:52:33.000Z | 2021-10-30T01:41:04.000Z | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | null | null | null | logging_automation.py | aws-samples/aws-centeralized-logging-with-datadog | f1fb778664915b645a0fec5c56a34654ac761233 | [
"MIT-0"
] | 6 | 2018-09-06T05:48:07.000Z | 2021-10-30T01:40:56.000Z | # Copyright 2008-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
import botocore
import time
import sys
import argparse
import json
import os
import base64
encrypted_token = os.environ['DD_KMS_API_KEY']
ddApiKey = boto3.client('kms').decrypt(CiphertextBlob=base64.b64decode(encrypted_token))['Plaintext']
def create_account(account_name, account_email, account_role, access_to_billing, credentials):
    '''
    Create a new AWS account and add it to an organization.

    Args:
        account_name: Friendly name for the new member account.
        account_email: Root e-mail address for the new account.
        account_role: Name of the IAM role Organizations pre-creates in the
            member account.
        access_to_billing: "ALLOW" or "DENY" IAM-user access to billing data.
        credentials: STS credentials dict (AccessKeyId/SecretAccessKey/
            SessionToken) for the organization's management account.

    Returns:
        The new account id as a string.

    Exits the process (sys.exit(1)) if the create call is rejected or the
    creation ends in the FAILED state.
    '''
    client = boto3.client('organizations', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'],)
    try:
        create_account_response = client.create_account(Email=account_email, AccountName=account_name, RoleName=account_role, IamUserAccessToBilling=access_to_billing)
    except botocore.exceptions.ClientError as e:
        print(e)
        sys.exit(1)
    # Account creation is asynchronous: poll the request until it settles.
    time.sleep(10)
    account_status = 'IN_PROGRESS'
    while account_status == 'IN_PROGRESS':
        create_account_status_response = client.describe_create_account_status(CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))
        print("Create account status "+str(create_account_status_response))
        account_status = create_account_status_response.get('CreateAccountStatus').get('State')
        if account_status == 'IN_PROGRESS':
            # Fix: back off between polls instead of hammering the
            # Organizations API in a tight loop.
            time.sleep(10)
    if account_status == 'SUCCEEDED':
        account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')
    elif account_status == 'FAILED':
        print("Account creation failed: " + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))
        sys.exit(1)
    # (Removed an unused ListRoots call whose result was never read.)
    return account_id
def get_template(template_file):
'''
Read a template file and return the contents
'''
print("Reading resources from " + template_file)
f = open(template_file, "r")
cf_template = f.read()
return cf_template | 46.322314 | 268 | 0.632739 | # Copyright 2008-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from __future__ import print_function
import boto3
import botocore
import time
import sys
import argparse
import json
import os
import base64
encrypted_token = os.environ['DD_KMS_API_KEY']
ddApiKey = boto3.client('kms').decrypt(CiphertextBlob=base64.b64decode(encrypted_token))['Plaintext']
def lambda_handler(event, context):
    """Provision (or reuse) a member account and deploy the logging stack.

    Expected keys in `event`: existing_accountid (None to create a new
    account), account_name, account_email, masteraccount_id,
    securityaccount_id, cloudtrail_bucket, datadogcode_bucket.

    Flow: optionally create the account via the management account, update the
    shared security account's bucket policy, then assume into the member
    account (hopping through the management account) and deploy Member.yml.
    """
    access_to_billing = "DENY"
    # Phase 1: create a brand-new member account unless one was supplied.
    if event['existing_accountid'] is None:
        print("Creating new account: " + event['account_name'] + " (" + event['account_email'] + ")")
        print("********************************")
        credentials = assume_role(event['masteraccount_id'], 'ST-S-Automation', None)
        account_id = create_account(event['account_name'], event['account_email'], 'OrganizationAccountAccessRole', access_to_billing, credentials)
        print("********************************")
        # NOTE(review): "acount" typo in this log message — cosmetic only.
        print("Created acount: " + account_id)
        print("********************************")
    else:
        account_id = event['existing_accountid']
    # Phase 2: grant the new account access in the shared security account.
    print("Updating Shared Security account policy...")
    credentials = assume_role(event['securityaccount_id'], 'ST-S-Automation', None)
    update_policy(account_id, event['cloudtrail_bucket'], event['datadogcode_bucket'], credentials)
    print("********************************")
    # Phase 3: deploy the member CloudFormation stack, assuming into the
    # member account via the management account's automation role.
    print("Deploying resources from " + 'Member.yml' + " as " + 'Member' + " in " + 'us-east-1')
    mastercredentials = assume_role(event['masteraccount_id'], 'ST-S-Automation', None)
    credentials = assume_role(account_id, 'OrganizationAccountAccessRole', mastercredentials)
    template = get_template('Member.yml')
    # deploy_resources/update_policy are defined elsewhere in this file.
    stack = deploy_resources(template, 'Member', 'us-east-1', event['cloudtrail_bucket'], event['datadogcode_bucket'], event['securityaccount_id'], ddApiKey, credentials)
    print("********************************")
    print(stack)
    print("********************************")
    print("Resources deployed for account " + account_id)
def assume_role(account_id, account_role, credentials):
    """Assume `account_role` in `account_id`, retrying forever on ClientError.

    `credentials` is either None (use the Lambda's own identity) or a dict of
    temporary STS credentials to chain from. Returns the assumed-role
    credentials dict ('AccessKeyId', 'SecretAccessKey', 'SessionToken').
    """
    if credentials is None:
        sts_client = boto3.client('sts')
    else:
        sts_client = boto3.client(
            'sts',
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
        )
    role_arn = 'arn:aws:iam::' + account_id + ':role/' + account_role
    # Retry until STS lets us in -- a freshly created account's role can take
    # a while to become assumable.
    while True:
        try:
            response = sts_client.assume_role(RoleArn=role_arn, RoleSessionName="NewRole")
            return response['Credentials']
        except botocore.exceptions.ClientError as e:
            print(e)
            time.sleep(10)
def create_account(account_name, account_email, account_role, access_to_billing, credentials):
    '''
    Create a new AWS account and add it to an organization.

    Polls describe_create_account_status until the request leaves IN_PROGRESS.
    Returns the new account id on SUCCEEDED; exits the process on failure.
    Fixes: the status loop previously polled with no delay (hammering the
    Organizations API); an unused list_roots() call was removed.
    '''
    client = boto3.client('organizations', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
    try:
        create_account_response = client.create_account(Email=account_email, AccountName=account_name, RoleName=account_role, IamUserAccessToBilling=access_to_billing)
    except botocore.exceptions.ClientError as e:
        print(e)
        sys.exit(1)
    time.sleep(10)
    account_status = 'IN_PROGRESS'
    while account_status == 'IN_PROGRESS':
        create_account_status_response = client.describe_create_account_status(CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))
        print("Create account status " + str(create_account_status_response))
        account_status = create_account_status_response.get('CreateAccountStatus').get('State')
        if account_status == 'IN_PROGRESS':
            # Back off between polls instead of busy-looping against the API.
            time.sleep(5)
    if account_status == 'SUCCEEDED':
        account_id = create_account_status_response.get('CreateAccountStatus').get('AccountId')
    elif account_status == 'FAILED':
        print("Account creation failed: " + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))
        sys.exit(1)
    return account_id
def update_policy(account_id, cloudtrail_bucket, datadogcode_bucket, credentials):
    '''
    Grant a member account access to the shared-security resources:

    * append the account's AWSLogs/<id>/* prefix to the CloudTrail bucket policy,
    * allow the account root to read the Datadog Lambda code bucket,
    * allow LoggingLambdaRole to assume the account's ST-S-Automation role.

    Policy statements are matched by Sid and mutated in place (the original
    used index loops plus redundant re-assignments of the same objects).
    '''
    s3 = boto3.client('s3', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
    iam = boto3.client('iam', aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
    # --- CloudTrail bucket policy -------------------------------------------
    cloudtrail_arn = "arn:aws:s3:::" + cloudtrail_bucket + "/AWSLogs/" + account_id + "/*"
    cloudtrailpolicy = json.loads(s3.get_bucket_policy(Bucket=cloudtrail_bucket)['Policy'])
    for statement in cloudtrailpolicy['Statement']:
        if statement['Sid'] == 'AWSCloudTrailWrite':
            # NOTE: assumes 'Resource' is already a list -- TODO confirm
            # (a single-resource policy would hold a plain string here).
            statement['Resource'].append(cloudtrail_arn)
    s3.put_bucket_policy(Bucket=cloudtrail_bucket, Policy=json.dumps(cloudtrailpolicy))
    # --- Datadog Lambda code bucket policy ----------------------------------
    newaccount_arn = "arn:aws:iam::" + account_id + ":root"
    datadogcodepolicy = json.loads(s3.get_bucket_policy(Bucket=datadogcode_bucket)['Policy'])
    for statement in datadogcodepolicy['Statement']:
        if statement['Sid'] == 'CodeReadAccess':
            # Same list assumption as above for Principal.AWS.
            statement['Principal']['AWS'].append(newaccount_arn)
    s3.put_bucket_policy(Bucket=datadogcode_bucket, Policy=json.dumps(datadogcodepolicy))
    # --- LoggingLambdaRole inline AssumeRole policy -------------------------
    account_arn = "arn:aws:iam::" + account_id + ":role/ST-S-Automation"
    assumerole_policy = iam.get_role_policy(RoleName='LoggingLambdaRole', PolicyName='AssumeRole')['PolicyDocument']
    for statement in assumerole_policy['Statement']:
        if statement['Sid'] == 'AWSAssumeRole':
            statement['Resource'].append(account_arn)
    iam.put_role_policy(RoleName='LoggingLambdaRole', PolicyName='AssumeRole', PolicyDocument=json.dumps(assumerole_policy))
    print("Policies successfully updated")
def get_template(template_file):
    '''
    Read a template file and return its contents as a string.

    Uses a context manager so the file handle is always closed (the original
    opened the file and never closed it).
    '''
    print("Reading resources from " + template_file)
    with open(template_file, "r") as f:
        return f.read()
def deploy_resources(template, stack_name, stack_region, cloudtrail_bucket, datadogcode_bucket, securityaccount_id, datadog_apikey, credentials):
    '''
    Create a CloudFormation stack of resources within the new account.

    Retries create_stack on ClientError, then polls the stack events until the
    root stack reports CREATE_COMPLETE (returns the describe_stacks result) or
    ROLLBACK_COMPLETE (exits the process).

    Security fix: the Datadog API key was previously print()ed here, leaking
    the secret into the Lambda's CloudWatch logs; that debug print is removed.
    '''
    datestamp = time.strftime("%d/%m/%Y")
    client = boto3.client('cloudformation',
                          aws_access_key_id=credentials['AccessKeyId'],
                          aws_secret_access_key=credentials['SecretAccessKey'],
                          aws_session_token=credentials['SessionToken'],
                          region_name=stack_region)
    print("Creating stack " + stack_name + " in " + stack_region)
    creating_stack = True
    while creating_stack is True:
        try:
            creating_stack = False
            create_stack_response = client.create_stack(
                StackName=stack_name,
                TemplateBody=template,
                Parameters=[
                    {
                        'ParameterKey' : 'cloudtrailbucket',
                        'ParameterValue' : cloudtrail_bucket
                    },
                    {
                        'ParameterKey' : 'securityaccountid',
                        'ParameterValue' : securityaccount_id
                    },
                    {
                        'ParameterKey' : 'Datadogbucket',
                        'ParameterValue' : datadogcode_bucket
                    },
                    {
                        'ParameterKey' : 'DatadogAPIToken',
                        'ParameterValue' : datadog_apikey
                    }
                ],
                NotificationARNs=[],
                Capabilities=[
                    'CAPABILITY_NAMED_IAM',
                ],
                OnFailure='ROLLBACK',
                Tags=[
                    {
                        'Key': 'ManagedResource',
                        'Value': 'True'
                    },
                    {
                        'Key': 'DeployDate',
                        'Value': datestamp
                    }
                ]
            )
        except botocore.exceptions.ClientError as e:
            # Retry: the member account's CloudFormation service may not be
            # usable immediately after account creation.
            creating_stack = True
            print(e)
            time.sleep(10)
    stack_building = True
    print("********************************")
    print("Stack creation in process...")
    print("********************************")
    print(create_stack_response)
    while stack_building is True:
        # Events come newest-first; watch entry [0] for the root stack's
        # terminal states.
        event_list = client.describe_stack_events(StackName=stack_name).get("StackEvents")
        stack_event = event_list[0]
        if (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
                stack_event.get('ResourceStatus') == 'CREATE_COMPLETE'):
            stack_building = False
            print("Stack construction complete.")
        elif (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
                stack_event.get('ResourceStatus') == 'ROLLBACK_COMPLETE'):
            stack_building = False
            print("Stack construction failed.")
            sys.exit(1)
        else:
            print(stack_event)
            print("********************************")
            print("Stack building . . .")
            print("********************************")
            time.sleep(10)
    stack = client.describe_stacks(StackName=stack_name)
    return stack
04ed3e193b41cca7d967ab45758a87d8c8d2d08a | 3,726 | py | Python | examples/main.py | 2020-ICT-insdeout/ICT-insideout | abccd30552f8f67bd7699fa38608d50bf920fff9 | [
"MIT"
] | null | null | null | examples/main.py | 2020-ICT-insdeout/ICT-insideout | abccd30552f8f67bd7699fa38608d50bf920fff9 | [
"MIT"
] | null | null | null | examples/main.py | 2020-ICT-insdeout/ICT-insideout | abccd30552f8f67bd7699fa38608d50bf920fff9 | [
"MIT"
] | 1 | 2022-03-16T05:59:22.000Z | 2022-03-16T05:59:22.000Z | '''
Demonstrates how to use the `CNN` model from
`speechemotionrecognition` package
'''
from keras.utils import np_utils
import pulsectl
import serial
import time
import os
import sys
import collections
import webrtcvad
import signal
import subprocess
import socket as sk
import numpy as np
from common import extract_data
from dnn_test import CNN, LSTM
from utilities_test import get_feature_vector_from_mfcc, get_stream_feature_vector_from_mfcc
from pywebrtcvad.vadfunc import make_chunk, make_stream_chunk, write_wave, frame_generator, stream_vad_collector
pulse = pulsectl.Pulse('my-client-name')
# Script entry point: run the streaming LSTM emotion-recognition demo.
if __name__ == "__main__":
    lstm_example()
| 32.4 | 134 | 0.627751 | '''
Demonstrates how to use the `CNN` model from
`speechemotionrecognition` package
'''
from keras.utils import np_utils
import pulsectl
import serial
import time
import os
import sys
import collections
import webrtcvad
import signal
import subprocess
import socket as sk
import numpy as np
from common import extract_data
from dnn_test import CNN, LSTM
from utilities_test import get_feature_vector_from_mfcc, get_stream_feature_vector_from_mfcc
from pywebrtcvad.vadfunc import make_chunk, make_stream_chunk, write_wave, frame_generator, stream_vad_collector
# Module-level PulseAudio client, used to enumerate audio sources.
pulse = pulsectl.Pulse('my-client-name')
def give_me_the_device_num():
    """Scan PulseAudio sources for a bluez (Bluetooth) input device.

    Returns the parsed index string of the first bluez source found,
    or -1 when none is present.
    """
    for source in pulse.source_list():
        text = str(source)
        if text.find('bluez') == -1:
            continue
        for token in text.split(' '):
            if token.find('index=') != -1:
                # Token presumably looks like "index=NN," -- strip the
                # "index=" prefix and the trailing character.
                return token[6:-1]
    return -1
def recording_blue(rate, device_num):
    """Start a background `parec` capture of mono PCM from a PulseAudio source.

    Returns the Popen handle; raw audio bytes are read from process.stdout.
    The process is started in its own session (setsid) so the whole group
    can be signalled later.
    """
    cmd = "parec -r --rate={0} --device={1} --channels=1".format(rate, device_num)
    return subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            shell=True, preexec_fn=os.setsid)
def lstm_example():
    """Stream microphone audio through WebRTC VAD and classify each voiced
    segment's emotion with a pre-trained LSTM, writing the predicted label
    to a serial device on /dev/ttyUSB0 (ser_to_ino -- presumably an Arduino).
    """
    # initializing dont touch
    timeout = False
    trigger_num = 0
    ser_to_ino = serial.Serial('/dev/ttyUSB0', 9600)
    file_idx = 0
    rate = 16000
    duration_sec = 4
    duration_byte = rate * 2 * duration_sec  # NOTE(review): computed but never used
    to_flatten = False
    # Model expects 198x39 MFCC feature frames, 7 emotion classes.
    in_shape = np.zeros((198,39))
    model = LSTM(input_shape=in_shape.shape, num_classes=7)
    load_path = 'korean_LSTM_best_model.h5'
    model.load_model(load_path)
    model.trained = True
    print('start')
    device_num = give_me_the_device_num()
    print("device_num: ", device_num)
    process = recording_blue(rate, device_num)
    pcm_list = []
    aggressive = 3  # VAD aggressiveness, 0 (least) .. 3 (most)
    triggered = False
    padding_duration_ms = 300
    frame_duration_ms = 30
    # Bytes per 30 ms frame: samples/s * seconds * 2 bytes (16-bit mono).
    n = int(rate * (frame_duration_ms / 1000.0) * 2)
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    vad = webrtcvad.Vad(aggressive)
    voiced_frames = []
    sibal_idx = 0  # NOTE(review): incremented but never read
    while(1):
        pcm_data_line = process.stdout.readline()
        # pcm_data += pcm_data_line
        pcm_list.append(pcm_data_line)
        # target_num = len(pcm_data) // n # number of audio data for 30 milli seconds
        # Accumulate a batch of reads before chunking into VAD frames.
        target_num = len(pcm_list)
        if target_num <= 300:
            continue
        pcm_data = b''.join(pcm_list)
        sibal_idx += 1
        target_num = len(pcm_data) // n
        # Keep only whole frames; carry the remainder into the next batch.
        pcm_data_to_chunk = pcm_data[:n * target_num]
        pcm_list = [pcm_data[n * target_num:]]
        # pcm_data = pcm_data[n * target_num:]
        frames = list(frame_generator(frame_duration_ms, pcm_data_to_chunk, rate))
        for frame in frames:
            triggered, voiced_audio, timeout = stream_vad_collector(rate, vad, frame, triggered, ring_buffer, voiced_frames, timeout)
            if triggered and not timeout:
                trigger_num += 1
                if 150 <= trigger_num: # 150 means 4.5 seconds
                    timeout = True
            if voiced_audio is not None: # talking -> no talking then this if works.
                # A voiced segment just ended: classify it and reset state.
                trigger_num = 0
                voiced_frames = []
                emotion = model.predict_one(get_stream_feature_vector_from_mfcc(voiced_audio, fs=rate, flatten=to_flatten))
                print(emotion)
                ser_to_ino.write(str(emotion).encode('utf-8'))
                file_idx += 1
# Script entry point: run the streaming LSTM emotion-recognition demo.
if __name__ == "__main__":
    lstm_example()
| 2,980 | 0 | 75 |
7ca9789ce165ed243be6718a78e0afd7a24e5147 | 2,868 | py | Python | src/python/pants/backend/native/register.py | billybecker/pants | ee101f3e360b712aceb9dacf7723aaf9b5567f04 | [
"Apache-2.0"
] | 94 | 2015-01-15T21:24:20.000Z | 2022-02-16T16:55:43.000Z | src/python/pants/backend/native/register.py | billybecker/pants | ee101f3e360b712aceb9dacf7723aaf9b5567f04 | [
"Apache-2.0"
] | 5 | 2020-07-18T01:04:43.000Z | 2021-05-10T08:40:56.000Z | src/python/pants/backend/native/register.py | billybecker/pants | ee101f3e360b712aceb9dacf7723aaf9b5567f04 | [
"Apache-2.0"
] | 47 | 2015-02-25T02:20:07.000Z | 2022-03-21T00:59:16.000Z | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.native.config.environment import create_native_environment_rules
from pants.backend.native.subsystems.binaries.binutils import create_binutils_rules
from pants.backend.native.subsystems.binaries.gcc import create_gcc_rules
from pants.backend.native.subsystems.binaries.llvm import create_llvm_rules
from pants.backend.native.subsystems.native_build_settings import NativeBuildSettings
from pants.backend.native.subsystems.native_toolchain import create_native_toolchain_rules
from pants.backend.native.subsystems.xcode_cli_tools import create_xcode_cli_tools_rules
from pants.backend.native.targets.external_native_library import (ConanRequirement,
ExternalNativeLibrary)
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.targets.native_library import CLibrary, CppLibrary
from pants.backend.native.targets.packaged_native_library import PackagedNativeLibrary
from pants.backend.native.tasks.c_compile import CCompile
from pants.backend.native.tasks.conan_fetch import ConanFetch
from pants.backend.native.tasks.conan_prep import ConanPrep
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
| 43.454545 | 97 | 0.791492 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.native.config.environment import create_native_environment_rules
from pants.backend.native.subsystems.binaries.binutils import create_binutils_rules
from pants.backend.native.subsystems.binaries.gcc import create_gcc_rules
from pants.backend.native.subsystems.binaries.llvm import create_llvm_rules
from pants.backend.native.subsystems.native_build_settings import NativeBuildSettings
from pants.backend.native.subsystems.native_toolchain import create_native_toolchain_rules
from pants.backend.native.subsystems.xcode_cli_tools import create_xcode_cli_tools_rules
from pants.backend.native.targets.external_native_library import (ConanRequirement,
ExternalNativeLibrary)
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.targets.native_library import CLibrary, CppLibrary
from pants.backend.native.targets.packaged_native_library import PackagedNativeLibrary
from pants.backend.native.tasks.c_compile import CCompile
from pants.backend.native.tasks.conan_fetch import ConanFetch
from pants.backend.native.tasks.conan_prep import ConanPrep
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
    """Expose the native backend's target types and BUILD-file objects."""
    target_aliases = {
        CLibrary.alias(): CLibrary,
        CppLibrary.alias(): CppLibrary,
        ExternalNativeLibrary.alias(): ExternalNativeLibrary,
        PackagedNativeLibrary.alias(): PackagedNativeLibrary,
    }
    object_aliases = {
        ConanRequirement.alias(): ConanRequirement,
        NativeArtifact.alias(): NativeArtifact,
    }
    return BuildFileAliases(targets=target_aliases, objects=object_aliases)
def global_subsystems():
    """Return the subsystems this backend registers globally."""
    return {NativeBuildSettings}
def register_goals():
    """Install the native compile/link tasks into their goals."""
    # TODO(#5962): register these under the 'compile' goal when we eliminate the product transitive
    # dependency from export -> compile.
    # NOTE(review): the install order below appears deliberate (conan-prep
    # before conan-fetch before the compiles) -- confirm before reordering.
    task(name='conan-prep', action=ConanPrep).install('native-compile')
    task(name='conan-fetch', action=ConanFetch).install('native-compile')
    task(name='c-for-ctypes', action=CCompile).install('native-compile')
    task(name='cpp-for-ctypes', action=CppCompile).install('native-compile')
    task(name='shared-libraries', action=LinkSharedLibraries).install('link')
def rules():
    """Aggregate every engine rule contributed by the native backend."""
    collected = create_native_environment_rules()
    for extra_rules in (
        create_native_toolchain_rules(),
        create_xcode_cli_tools_rules(),
        create_binutils_rules(),
        create_gcc_rules(),
        create_llvm_rules(),
    ):
        # Same left-to-right `+` concatenation as the original expression.
        collected = collected + extra_rules
    return collected
| 1,099 | 0 | 92 |
e9cf7acd97a40ffdbac1b4a42da7f8b511a0e314 | 1,167 | py | Python | problem_118.py | renxiaoyi/project_euler | 2566482209cd225fce39887fe869f839cf88330e | [
"Unlicense"
] | null | null | null | problem_118.py | renxiaoyi/project_euler | 2566482209cd225fce39887fe869f839cf88330e | [
"Unlicense"
] | null | null | null | problem_118.py | renxiaoyi/project_euler | 2566482209cd225fce39887fe869f839cf88330e | [
"Unlicense"
] | null | null | null | import itertools
import petools
def Part(full, segs):
    """Yield every partition of `full` into disjoint segments drawn from `segs`.

    full: sorted digit string; segs: list of sorted digit strings.
    Recursion only considers segs[i+1:], so each partition is generated once.
    """
    # A segment equal to the whole remaining string completes a partition.
    if full in segs:
        yield [full]
    for i in range(len(segs)):
        seg = segs[i]
        if Contains(full, seg):
            # Recurse on the digits left over, using only later segments.
            for p in Part(Diff(full, seg), segs[i+1:]):
                yield [seg] + p
# Project Euler 118: map each digit subset (as a sorted digit-string key) to
# the number of its permutations that are prime, then sum over all partitions
# of the digits 1-9 the product of those per-set counts.
d = {}
digits = range(1, 10)
for l in range(1, len(digits)+1):
    for t in itertools.combinations(digits, l):
        # Key is the subset's digits concatenated in sorted order, e.g. '137'.
        key = str(Combine(sorted(t)))
        val = NumOfPrimesCat(t)
        if val > 0:
            d[key] = val
ans = 0
for p in Part(str(Combine(digits)), d.keys()):
    ans += petools.Product([d[i] for i in p])
print ans
| 18.822581 | 70 | 0.579263 | import itertools
import petools
def Combine(t):
    """Concatenate a sequence of decimal digits into a single integer."""
    value = 0
    for digit in t:
        value = value * 10 + digit
    return value
def NumOfPrimesCat(s):
    """Count permutations of the digit sequence `s` whose concatenation is prime."""
    if not s:
        return 0
    if len(s) == 1:
        return int(s[0] in [2, 3, 5, 7])
    # Digit sum divisible by 3 => every permutation is divisible by 3.
    if sum(s) % 3 == 0:
        return 0
    # All digits even => every permutation ends in an even digit.
    if all(x % 2 == 0 for x in s):
        return 0
    count = 0
    for perm in itertools.permutations(s):
        if petools.IsPrime(Combine(perm)):
            count += 1
    return count
def Contains(full, seg):
    """Return True when every character of `seg` appears in `full`."""
    for ch in seg:
        if ch not in full:
            return False
    return True
def Diff(full, seg):
    """Return `full` with every character that occurs in `seg` removed."""
    kept = (ch for ch in full if ch not in seg)
    return ''.join(kept)
def Part(full, segs):
    """Yield every partition of `full` into disjoint segments drawn from `segs`.

    full: sorted digit string; segs: list of sorted digit strings.
    Only segs[idx+1:] is used in the recursion so each partition appears once.
    """
    if full in segs:
        yield [full]
    for idx, seg in enumerate(segs):
        if Contains(full, seg):
            remainder = Diff(full, seg)
            for tail in Part(remainder, segs[idx + 1:]):
                yield [seg] + tail
# Project Euler 118: map each digit subset (as a sorted digit-string key) to
# the number of its permutations that are prime, then sum over all partitions
# of the digits 1-9 the product of those per-set counts.
d = {}
digits = range(1, 10)
for l in range(1, len(digits)+1):
    for t in itertools.combinations(digits, l):
        # Key is the subset's digits concatenated in sorted order, e.g. '137'.
        key = str(Combine(sorted(t)))
        val = NumOfPrimesCat(t)
        if val > 0:
            d[key] = val
ans = 0
for p in Part(str(Combine(digits)), d.keys()):
    ans += petools.Product([d[i] for i in p])
print ans
| 441 | 0 | 92 |
f279e6211e391d537fb9e731f2aeec719f8d25f0 | 696 | py | Python | vspax/com_worker.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | null | null | null | vspax/com_worker.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | null | null | null | vspax/com_worker.py | thingsroot/power_vsp | fae50959b859a4562ac90167531a7a617025643a | [
"MIT"
] | null | null | null | import logging
import threading
import pythoncom
import win32com.client
import time
from vspax import *
| 22.451613 | 47 | 0.617816 | import logging
import threading
import pythoncom
import win32com.client
import time
from vspax import *
class COM_Worker(threading.Thread):
    """Background thread that owns the virtual serial port's COM handling.

    init, message pumping and cleanup all happen inside run() -- presumably
    because COM apartment threading requires events to be pumped on the
    thread that initialised the object (confirm).
    """
    def __init__(self, handler):
        threading.Thread.__init__(self)
        # handler exposes init_vsport()/close_vsport() (see run()).
        self._handler = handler
        # Stop flag polled by run(); set via stop().
        self._thread_stop = False
    def run(self):
        try:
            self._handler.init_vsport()
            while not self._thread_stop:
                # Dispatch any pending COM messages without blocking, then
                # yield briefly to avoid pegging a CPU core.
                pythoncom.PumpWaitingMessages()
                time.sleep(0.001)
                # pythoncom.PumpMessages()
        except Exception as ex:
            logging.exception(ex)
        # Always close the port, even after an exception.
        self._handler.close_vsport()
    def stop(self):
        """Request the worker to stop and wait up to 2 s for it to exit."""
        self._thread_stop = True
        self.join(2)
| 474 | 14 | 103 |
2e04ea60aafff8e6b9dde9ec44246cf083f3dd7e | 8,957 | py | Python | collectors/networktraffic.py | vijayanant/kunai | 0dfe169731eaceb1bba66e12715b3968d2a3de20 | [
"MIT"
] | 1 | 2020-04-12T21:05:46.000Z | 2020-04-12T21:05:46.000Z | collectors/networktraffic.py | vijayanant/kunai | 0dfe169731eaceb1bba66e12715b3968d2a3de20 | [
"MIT"
] | null | null | null | collectors/networktraffic.py | vijayanant/kunai | 0dfe169731eaceb1bba66e12715b3968d2a3de20 | [
"MIT"
] | null | null | null | import httplib # Used only for handling httplib.HTTPException (case #26701)
import os
import sys
import platform
import re
import urllib
import urllib2
import traceback
import time
from StringIO import StringIO
from kunai.log import logger
from kunai.collector import Collector
| 40.899543 | 142 | 0.515351 | import httplib # Used only for handling httplib.HTTPException (case #26701)
import os
import platform
import re
import subprocess
import sys
import time
import traceback
import urllib
import urllib2

from StringIO import StringIO

from kunai.log import logger
from kunai.collector import Collector
class NetworkTraffic(Collector):
    """Collector that reports per-interface network byte counters and rates.

    launch() returns a dict mapping interface name -> counters, computed as
    deltas against the previous run stored in self.networkTrafficStore
    (presumably initialised by the Collector base class -- confirm).
    Python 2 code (uses `long` and `except X, e` syntax).
    """
    def launch(self):
        """Collect network traffic deltas on Linux or FreeBSD.

        Returns the interfaces dict, or False on unsupported platforms or
        read errors.
        """
        logger.debug('getNetworkTraffic: start')
        if sys.platform == 'linux2':
            logger.debug('getNetworkTraffic: linux2')
            try:
                logger.debug('getNetworkTraffic: attempting open')
                # /proc/net/dev: two header lines, then one line per interface.
                proc = open('/proc/net/dev', 'r')
                lines = proc.readlines()
                proc.close()
            except IOError, e:
                logger.error('getNetworkTraffic: exception = %s', e)
                return False
            logger.debug('getNetworkTraffic: open success, parsing')
            columnLine = lines[1]
            # Header looks like "face |bytes packets...|bytes packets..." --
            # split into receive/transmit groups and prefix the column names.
            _, receiveCols , transmitCols = columnLine.split('|')
            receiveCols = map(lambda a:'recv_' + a, receiveCols.split())
            transmitCols = map(lambda a:'trans_' + a, transmitCols.split())
            cols = receiveCols + transmitCols
            logger.debug('getNetworkTraffic: parsing, looping')
            faces = {}
            for line in lines[2:]:
                if line.find(':') < 0: continue
                face, data = line.split(':')
                faceData = dict(zip(cols, data.split()))
                faces[face] = faceData
            logger.debug('getNetworkTraffic: parsed, looping')
            interfaces = {}
            # Now loop through each interface
            for face in faces:
                key = face.strip()
                # We need to work out the traffic since the last check so first time we store the current value
                # then the next time we can calculate the difference
                try:
                    if key in self.networkTrafficStore:
                        interfaces[key] = {}
                        interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes']) - long(self.networkTrafficStore[key]['recv_bytes'])
                        interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes']) - long(self.networkTrafficStore[key]['trans_bytes'])
                        # Negative delta means the kernel counter wrapped or
                        # reset: fall back to the raw value.
                        if interfaces[key]['recv_bytes'] < 0:
                            interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes'])
                        if interfaces[key]['trans_bytes'] < 0:
                            interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes'])
                        # Get the traffic (the /10 assumes a 10 s collection
                        # interval -- confirm against the scheduler).
                        interfaces[key]['recv_bytes/s'] = interfaces[key]['recv_bytes'] / 10
                        interfaces[key]['trans_bytes/s'] = interfaces[key]['trans_bytes'] / 10
                        interfaces[key]['recv_bytes'] = str(interfaces[key]['recv_bytes'])
                        interfaces[key]['trans_bytes'] = str(interfaces[key]['trans_bytes'])
                        # And update the stored value to subtract next time round
                        self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
                        self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
                    else:
                        self.networkTrafficStore[key] = {}
                        self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
                        self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
                    # Logging
                    logger.debug('getNetworkTraffic: %s = %s', key, self.networkTrafficStore[key]['recv_bytes'])
                    logger.debug('getNetworkTraffic: %s = %s', key, self.networkTrafficStore[key]['trans_bytes'])
                except KeyError, ex:
                    logger.error('getNetworkTraffic: no data for %s', key)
                except ValueError, ex:
                    logger.error('getNetworkTraffic: invalid data for %s', key)
            logger.debug('getNetworkTraffic: completed, returning')
            return interfaces
        elif sys.platform.find('freebsd') != -1:
            logger.debug('getNetworkTraffic: freebsd')
            # NOTE(review): this branch references `pythonVersion`, which is
            # not defined anywhere visible in this module, and `subprocess`,
            # which was missing from the original import list -- as written it
            # would raise NameError; confirm before relying on it.
            try:
                try:
                    logger.debug('getNetworkTraffic: attempting Popen (netstat)')
                    proc = subprocess.Popen(['netstat', '-nbid'], stdout=subprocess.PIPE, close_fds=True)
                    netstat = proc.communicate()[0]
                    if int(pythonVersion[1]) >= 6:
                        try:
                            proc.kill()
                        except Exception, e:
                            logger.debug('Process already terminated')
                except Exception, e:
                    logger.error('getNetworkTraffic: exception = %s', traceback.format_exc())
                    return False
            finally:
                if int(pythonVersion[1]) >= 6:
                    try:
                        proc.kill()
                    except Exception, e:
                        logger.debug('Process already terminated')
            logger.debug('getNetworkTraffic: open success, parsing')
            lines = netstat.split('\n')
            # Loop over available data for each inteface
            faces = {}
            rxKey = None
            txKey = None
            for line in lines:
                logger.debug('getNetworkTraffic: %s', line)
                line = re.split(r'\s+', line)
                # Figure out which index we need
                if rxKey == None and txKey == None:
                    for k, part in enumerate(line):
                        logger.debug('getNetworkTraffic: looping parts (%s)', part)
                        if part == 'Ibytes':
                            rxKey = k
                            logger.debug('getNetworkTraffic: found rxKey = %s', k)
                        elif part == 'Obytes':
                            txKey = k
                            logger.debug('getNetworkTraffic: found txKey = %s', k)
                else:
                    if line[0] not in faces:
                        try:
                            logger.debug('getNetworkTraffic: parsing (rx: %s = %s / tx: %s = %s)', rxKey, line[rxKey], txKey, line[txKey])
                            faceData = {'recv_bytes': line[rxKey], 'trans_bytes': line[txKey]}
                            face = line[0]
                            faces[face] = faceData
                        except IndexError, e:
                            continue
            logger.debug('getNetworkTraffic: parsed, looping')
            interfaces = {}
            # Now loop through each interface
            for face in faces:
                key = face.strip()
                try:
                    # We need to work out the traffic since the last check so first time we store the current value
                    # then the next time we can calculate the difference
                    if key in self.networkTrafficStore:
                        interfaces[key] = {}
                        interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes']) - long(self.networkTrafficStore[key]['recv_bytes'])
                        interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes']) - long(self.networkTrafficStore[key]['trans_bytes'])
                        interfaces[key]['recv_bytes'] = str(interfaces[key]['recv_bytes'])
                        interfaces[key]['trans_bytes'] = str(interfaces[key]['trans_bytes'])
                        # NOTE(review): these wrap-around checks compare the
                        # already-stringified values, unlike the linux branch
                        # which checks before str() -- confirm intended.
                        if interfaces[key]['recv_bytes'] < 0:
                            interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes'])
                        if interfaces[key]['trans_bytes'] < 0:
                            interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes'])
                        # And update the stored value to subtract next time round
                        self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
                        self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
                    else:
                        self.networkTrafficStore[key] = {}
                        self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
                        self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
                except KeyError, ex:
                    logger.error('getNetworkTraffic: no data for %s', key)
                except ValueError, ex:
                    logger.error('getNetworkTraffic: invalid data for %s', key)
            logger.debug('getNetworkTraffic: completed, returning')
            return interfaces
        else:
            logger.debug('getNetworkTraffic: other platform, returning')
            return False
cb14bed8fd4d80b5e2ad2efaf1d46a5dee88e7c8 | 16,824 | py | Python | rmgpy/kinetics/kineticsdataTest.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | rmgpy/kinetics/kineticsdataTest.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | rmgpy/kinetics/kineticsdataTest.py | mbprend/RMG-Py | 29e111d683f2daa0b376417be60e76b32ce8a993 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script contains unit tests of the :mod:`rmgpy.kinetics.arrhenius` module.
"""
import unittest
import numpy as np
import rmgpy.constants as constants
from rmgpy.kinetics.kineticsdata import KineticsData, PDepKineticsData
################################################################################
class TestKineticsData(unittest.TestCase):
    """
    Contains unit tests of the :class:`KineticsData` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        self.Tdata = np.array([300, 400, 500, 600, 700, 800, 900, 1000, 1500, 2000], np.float64)
        self.kdata = np.array(
            [4.73e-19, 3.93e-17, 6.51e-16, 4.60e-15, 2.03e-14, 6.28e-14, 1.58e-13, 3.31e-13, 3.72e-12, 1.49e-11],
            np.float64)
        self.Tmin = 300.
        self.Tmax = 3000.
        self.comment = 'H + CH4 <=> H2 + CH3 (RPMD)'
        self.kinetics = KineticsData(
            Tdata=(self.Tdata, "K"),
            kdata=(self.kdata, "cm^3/(molecule*s)"),
            Tmin=(self.Tmin, "K"),
            Tmax=(self.Tmax, "K"),
            comment=self.comment,
        )

    def test_temperature_data(self):
        """
        Test that the KineticsData Tdata property was properly set.
        """
        self.assertEqual(self.kinetics.Tdata.value_si.shape, self.Tdata.shape)
        for T, T0 in zip(self.kinetics.Tdata.value_si, self.Tdata):
            self.assertAlmostEqual(T, T0, 4)

    def test_kdata(self):
        """
        Test that the KineticsData kdata property was properly set.
        """
        self.assertEqual(self.kinetics.kdata.value_si.shape, self.kdata.shape)
        for k, k0 in zip(self.kinetics.kdata.value_si, self.kdata):
            # Convert cm^3/(molecule*s) to SI m^3/(mol*s).
            k0 *= constants.Na * 1e-6
            self.assertAlmostEqual(k, k0, delta=1e-6 * k0)

    def test_temperature_min(self):
        """
        Test that the KineticsData Tmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)

    def test_temperature_max(self):
        """
        Test that the KineticsData Tmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)

    def test_comment(self):
        """
        Test that the KineticsData comment property was properly set.
        """
        self.assertEqual(self.kinetics.comment, self.comment)

    def test_is_temperature_valid(self):
        """
        Test the KineticsData.is_temperature_valid() method.
        """
        Tdata = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        # Use the builtin bool dtype: the np.bool alias was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        validdata = np.array([False, True, True, True, True, True, True, True, True, True], bool)
        for T, valid in zip(Tdata, validdata):
            valid0 = self.kinetics.is_temperature_valid(T)
            self.assertEqual(valid0, valid)

    def test_get_rate_coefficient(self):
        """
        Test the KineticsData.get_rate_coefficient() method.
        """
        Tlist = np.array([300, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        kexplist = np.array(
            [2.84847e-01, 2.36670e+01, 2.77019e+03, 3.78191e+04, 1.99333e+05, 5.24644e+05, 1.38086e+06, 2.95680e+06,
             5.15086e+06, 8.97299e+06])
        for T, kexp in zip(Tlist, kexplist):
            kact = self.kinetics.get_rate_coefficient(T)
            self.assertAlmostEqual(kexp, kact, delta=1e-4 * kexp)

    def test_pickle(self):
        """
        Test that a KineticsData object can be pickled and unpickled with no
        loss of information.
        """
        import pickle
        kinetics = pickle.loads(pickle.dumps(self.kinetics, -1))
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for k, k0 in zip(self.kinetics.kdata.value, kinetics.kdata.value):
            self.assertAlmostEqual(k, k0, 4)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)

    def test_repr(self):
        """
        Test that a KineticsData object can be reconstructed from its repr()
        output with no loss of information.
        """
        namespace = {}
        exec('kinetics = {0!r}'.format(self.kinetics), globals(), namespace)
        self.assertIn('kinetics', namespace)
        kinetics = namespace['kinetics']
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for k, k0 in zip(self.kinetics.kdata.value, kinetics.kdata.value):
            self.assertAlmostEqual(k, k0, 4)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)
################################################################################
class TestPDepKineticsData(unittest.TestCase):
    """
    Contains unit tests of the :class:`PDepKineticsData` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.

        Builds a PDepKineticsData object with k(T, P) tabulated on a
        10-temperature by 3-pressure grid; after the transpose, kdata is
        indexed as [temperature, pressure].
        """
        self.Tdata = np.array([300, 400, 500, 600, 700, 800, 900, 1000, 1500, 2000], np.float64)
        self.Pdata = np.array([1e-1, 1e0, 1e1], np.float64)
        self.kdata = np.array([
            [4.73e-21, 3.93e-19, 6.51e-18, 4.60e-17, 2.03e-16, 6.28e-16, 1.58e-15, 3.31e-15, 3.72e-14, 1.49e-13],
            [4.73e-20, 3.93e-18, 6.51e-17, 4.60e-16, 2.03e-15, 6.28e-15, 1.58e-14, 3.31e-14, 3.72e-13, 1.49e-12],
            [4.73e-19, 3.93e-17, 6.51e-16, 4.60e-15, 2.03e-14, 6.28e-14, 1.58e-13, 3.31e-13, 3.72e-12, 1.49e-11],
        ], np.float64).T
        self.Tmin = 300.
        self.Tmax = 3000.
        self.Pmin = 1e-1
        self.Pmax = 1e1
        self.comment = ''
        self.kinetics = PDepKineticsData(
            Tdata=(self.Tdata, "K"),
            Pdata=(self.Pdata, "bar"),
            kdata=(self.kdata, "cm^3/(molecule*s)"),
            Tmin=(self.Tmin, "K"),
            Tmax=(self.Tmax, "K"),
            Pmin=(self.Pmin, "bar"),
            Pmax=(self.Pmax, "bar"),
            comment=self.comment,
        )

    def test_temperature_data(self):
        """
        Test that the PDepKineticsData Tdata property was properly set.
        """
        self.assertEqual(self.kinetics.Tdata.value_si.shape, self.Tdata.shape)
        for T, T0 in zip(self.kinetics.Tdata.value_si, self.Tdata):
            self.assertAlmostEqual(T, T0, 4)

    def test_pressure_data(self):
        """
        Test that the PDepKineticsData Pdata property was properly set.
        """
        self.assertEqual(self.kinetics.Pdata.value_si.shape, self.Pdata.shape)
        for P, P0 in zip(self.kinetics.Pdata.value_si, self.Pdata):
            # value_si is in Pa; input Pdata is in bar (1 bar = 1e5 Pa).
            self.assertAlmostEqual(P * 1e-5, P0, 4)

    def test_kdata(self):
        """
        Test that the PDepKineticsData kdata property was properly set.
        """
        self.assertEqual(self.kinetics.kdata.value_si.shape, self.kdata.shape)
        for i in range(self.kdata.shape[0]):
            for j in range(self.kdata.shape[1]):
                # Convert cm^3/(molecule*s) to SI units for comparison.
                k0 = self.kdata[i, j] * constants.Na * 1e-6
                k = self.kinetics.kdata.value_si[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)

    def test_temperature_min(self):
        """
        Test that the PDepKineticsData Tmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)

    def test_temperature_max(self):
        """
        Test that the PDepKineticsData Tmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)

    def test_pressure_min(self):
        """
        Test that the PDepKineticsData Pmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Pmin.value_si * 1e-5, self.Pmin, 6)

    def test_pressure_max(self):
        """
        Test that the PDepKineticsData Pmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Pmax.value_si * 1e-5, self.Pmax, 6)

    def test_comment(self):
        """
        Test that the PDepKineticsData comment property was properly set.
        """
        self.assertEqual(self.kinetics.comment, self.comment)

    def test_is_temperature_valid(self):
        """
        Test the PDepKineticsData.is_temperature_valid() method.
        """
        Tdata = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        # ``bool`` replaces the ``np.bool`` alias, which was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        validdata = np.array([False, True, True, True, True, True, True, True, True, True], bool)
        for T, valid in zip(Tdata, validdata):
            valid0 = self.kinetics.is_temperature_valid(T)
            self.assertEqual(valid0, valid)

    def test_is_pressure_valid(self):
        """
        Test the PDepKineticsData.is_pressure_valid() method.
        """
        Pdata = np.array([1e3, 1e4, 1e5, 1e6, 1e7])
        # ``bool`` replaces the removed ``np.bool`` alias (see above).
        validdata = np.array([False, True, True, True, False], bool)
        for P, valid in zip(Pdata, validdata):
            valid0 = self.kinetics.is_pressure_valid(P)
            self.assertEqual(valid0, valid)

    def test_get_rate_coefficient(self):
        """
        Test the PDepKineticsData.get_rate_coefficient() method.
        """
        Tlist = np.array([300, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        Plist = np.array([1e4, 1e5, 1e6])
        kexplist = np.array([
            [2.84847e-03, 2.36670e-01, 2.77019e+01, 3.78191e+02, 1.99333e+03, 5.24644e+03, 1.38086e+04, 2.95680e+04,
             5.15086e+04, 8.97299e+04],
            [2.84847e-02, 2.36670e+00, 2.77019e+02, 3.78191e+03, 1.99333e+04, 5.24644e+04, 1.38086e+05, 2.95680e+05,
             5.15086e+05, 8.97299e+05],
            [2.84847e-01, 2.36670e+01, 2.77019e+03, 3.78191e+04, 1.99333e+05, 5.24644e+05, 1.38086e+06, 2.95680e+06,
             5.15086e+06, 8.97299e+06],
        ]).T
        for i in range(Tlist.shape[0]):
            for j in range(Plist.shape[0]):
                kexp = kexplist[i, j]
                kact = self.kinetics.get_rate_coefficient(Tlist[i], Plist[j])
                self.assertAlmostEqual(kexp, kact, delta=1e-4 * kexp)

    def test_pickle(self):
        """
        Test that a PDepKineticsData object can be pickled and unpickled with no
        loss of information.
        """
        import pickle
        kinetics = pickle.loads(pickle.dumps(self.kinetics, -1))
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.Pdata.value.shape, kinetics.Pdata.value.shape)
        for P, P0 in zip(self.kinetics.Pdata.value, kinetics.Pdata.value):
            self.assertAlmostEqual(P, P0, 4)
        self.assertEqual(self.kinetics.Pdata.units, kinetics.Pdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for i in range(self.kinetics.kdata.value.shape[0]):
            for j in range(self.kinetics.kdata.value.shape[1]):
                k0 = self.kinetics.kdata.value[i, j]
                k = kinetics.kdata.value[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        # Previously only the temperature limits were checked; also verify that
        # the pressure limits survive the round trip.
        self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
        self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
        self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
        self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)

    def test_repr(self):
        """
        Test that a PDepKineticsData object can be reconstructed from its repr()
        output with no loss of information.
        """
        namespace = {}
        exec('kinetics = {0!r}'.format(self.kinetics), globals(), namespace)
        self.assertIn('kinetics', namespace)
        kinetics = namespace['kinetics']
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.Pdata.value.shape, kinetics.Pdata.value.shape)
        for P, P0 in zip(self.kinetics.Pdata.value, kinetics.Pdata.value):
            self.assertAlmostEqual(P, P0, 4)
        self.assertEqual(self.kinetics.Pdata.units, kinetics.Pdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for i in range(self.kinetics.kdata.value.shape[0]):
            for j in range(self.kinetics.kdata.value.shape[1]):
                k0 = self.kinetics.kdata.value[i, j]
                k = kinetics.kdata.value[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        # Also verify the pressure limits (previously unchecked).
        self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
        self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
        self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
        self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)
| 46.86351 | 116 | 0.599144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script contains unit tests of the :mod:`rmgpy.kinetics.arrhenius` module.
"""
import unittest
import numpy as np
import rmgpy.constants as constants
from rmgpy.kinetics.kineticsdata import KineticsData, PDepKineticsData
################################################################################
class TestKineticsData(unittest.TestCase):
    """
    Contains unit tests of the :class:`KineticsData` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.

        Builds a KineticsData object for H + CH4 <=> H2 + CH3 with k(T)
        tabulated at ten temperatures between 300 and 2000 K.
        """
        self.Tdata = np.array([300, 400, 500, 600, 700, 800, 900, 1000, 1500, 2000], np.float64)
        self.kdata = np.array(
            [4.73e-19, 3.93e-17, 6.51e-16, 4.60e-15, 2.03e-14, 6.28e-14, 1.58e-13, 3.31e-13, 3.72e-12, 1.49e-11],
            np.float64)
        self.Tmin = 300.
        self.Tmax = 3000.
        self.comment = 'H + CH4 <=> H2 + CH3 (RPMD)'
        self.kinetics = KineticsData(
            Tdata=(self.Tdata, "K"),
            kdata=(self.kdata, "cm^3/(molecule*s)"),
            Tmin=(self.Tmin, "K"),
            Tmax=(self.Tmax, "K"),
            comment=self.comment,
        )

    def test_temperature_data(self):
        """
        Test that the KineticsData Tdata property was properly set.
        """
        self.assertEqual(self.kinetics.Tdata.value_si.shape, self.Tdata.shape)
        for T, T0 in zip(self.kinetics.Tdata.value_si, self.Tdata):
            self.assertAlmostEqual(T, T0, 4)

    def test_kdata(self):
        """
        Test that the KineticsData kdata property was properly set.
        """
        self.assertEqual(self.kinetics.kdata.value_si.shape, self.kdata.shape)
        for k, k0 in zip(self.kinetics.kdata.value_si, self.kdata):
            # Convert cm^3/(molecule*s) to SI units for comparison.
            k0 *= constants.Na * 1e-6
            self.assertAlmostEqual(k, k0, delta=1e-6 * k0)

    def test_temperature_min(self):
        """
        Test that the KineticsData Tmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)

    def test_temperature_max(self):
        """
        Test that the KineticsData Tmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)

    def test_comment(self):
        """
        Test that the KineticsData comment property was properly set.
        """
        self.assertEqual(self.kinetics.comment, self.comment)

    def test_is_temperature_valid(self):
        """
        Test the KineticsData.is_temperature_valid() method.
        """
        Tdata = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        # ``bool`` replaces the ``np.bool`` alias, which was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        validdata = np.array([False, True, True, True, True, True, True, True, True, True], bool)
        for T, valid in zip(Tdata, validdata):
            valid0 = self.kinetics.is_temperature_valid(T)
            self.assertEqual(valid0, valid)

    def test_get_rate_coefficient(self):
        """
        Test the KineticsData.get_rate_coefficient() method.
        """
        Tlist = np.array([300, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        kexplist = np.array(
            [2.84847e-01, 2.36670e+01, 2.77019e+03, 3.78191e+04, 1.99333e+05, 5.24644e+05, 1.38086e+06, 2.95680e+06,
             5.15086e+06, 8.97299e+06])
        for T, kexp in zip(Tlist, kexplist):
            kact = self.kinetics.get_rate_coefficient(T)
            self.assertAlmostEqual(kexp, kact, delta=1e-4 * kexp)

    def test_pickle(self):
        """
        Test that a KineticsData object can be pickled and unpickled with no
        loss of information.
        """
        import pickle
        kinetics = pickle.loads(pickle.dumps(self.kinetics, -1))
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for k, k0 in zip(self.kinetics.kdata.value, kinetics.kdata.value):
            self.assertAlmostEqual(k, k0, 4)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)

    def test_repr(self):
        """
        Test that a KineticsData object can be reconstructed from its repr()
        output with no loss of information.
        """
        namespace = {}
        exec('kinetics = {0!r}'.format(self.kinetics), globals(), namespace)
        self.assertIn('kinetics', namespace)
        kinetics = namespace['kinetics']
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for k, k0 in zip(self.kinetics.kdata.value, kinetics.kdata.value):
            self.assertAlmostEqual(k, k0, 4)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)
################################################################################
class TestPDepKineticsData(unittest.TestCase):
    """
    Contains unit tests of the :class:`PDepKineticsData` class.
    """

    def setUp(self):
        """
        A function run before each unit test in this class.

        Builds a PDepKineticsData object with k(T, P) tabulated on a
        10-temperature by 3-pressure grid; after the transpose, kdata is
        indexed as [temperature, pressure].
        """
        self.Tdata = np.array([300, 400, 500, 600, 700, 800, 900, 1000, 1500, 2000], np.float64)
        self.Pdata = np.array([1e-1, 1e0, 1e1], np.float64)
        self.kdata = np.array([
            [4.73e-21, 3.93e-19, 6.51e-18, 4.60e-17, 2.03e-16, 6.28e-16, 1.58e-15, 3.31e-15, 3.72e-14, 1.49e-13],
            [4.73e-20, 3.93e-18, 6.51e-17, 4.60e-16, 2.03e-15, 6.28e-15, 1.58e-14, 3.31e-14, 3.72e-13, 1.49e-12],
            [4.73e-19, 3.93e-17, 6.51e-16, 4.60e-15, 2.03e-14, 6.28e-14, 1.58e-13, 3.31e-13, 3.72e-12, 1.49e-11],
        ], np.float64).T
        self.Tmin = 300.
        self.Tmax = 3000.
        self.Pmin = 1e-1
        self.Pmax = 1e1
        self.comment = ''
        self.kinetics = PDepKineticsData(
            Tdata=(self.Tdata, "K"),
            Pdata=(self.Pdata, "bar"),
            kdata=(self.kdata, "cm^3/(molecule*s)"),
            Tmin=(self.Tmin, "K"),
            Tmax=(self.Tmax, "K"),
            Pmin=(self.Pmin, "bar"),
            Pmax=(self.Pmax, "bar"),
            comment=self.comment,
        )

    def test_temperature_data(self):
        """
        Test that the PDepKineticsData Tdata property was properly set.
        """
        self.assertEqual(self.kinetics.Tdata.value_si.shape, self.Tdata.shape)
        for T, T0 in zip(self.kinetics.Tdata.value_si, self.Tdata):
            self.assertAlmostEqual(T, T0, 4)

    def test_pressure_data(self):
        """
        Test that the PDepKineticsData Pdata property was properly set.
        """
        self.assertEqual(self.kinetics.Pdata.value_si.shape, self.Pdata.shape)
        for P, P0 in zip(self.kinetics.Pdata.value_si, self.Pdata):
            # value_si is in Pa; input Pdata is in bar (1 bar = 1e5 Pa).
            self.assertAlmostEqual(P * 1e-5, P0, 4)

    def test_kdata(self):
        """
        Test that the PDepKineticsData kdata property was properly set.
        """
        self.assertEqual(self.kinetics.kdata.value_si.shape, self.kdata.shape)
        for i in range(self.kdata.shape[0]):
            for j in range(self.kdata.shape[1]):
                # Convert cm^3/(molecule*s) to SI units for comparison.
                k0 = self.kdata[i, j] * constants.Na * 1e-6
                k = self.kinetics.kdata.value_si[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)

    def test_temperature_min(self):
        """
        Test that the PDepKineticsData Tmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmin.value_si, self.Tmin, 6)

    def test_temperature_max(self):
        """
        Test that the PDepKineticsData Tmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Tmax.value_si, self.Tmax, 6)

    def test_pressure_min(self):
        """
        Test that the PDepKineticsData Pmin property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Pmin.value_si * 1e-5, self.Pmin, 6)

    def test_pressure_max(self):
        """
        Test that the PDepKineticsData Pmax property was properly set.
        """
        self.assertAlmostEqual(self.kinetics.Pmax.value_si * 1e-5, self.Pmax, 6)

    def test_comment(self):
        """
        Test that the PDepKineticsData comment property was properly set.
        """
        self.assertEqual(self.kinetics.comment, self.comment)

    def test_is_temperature_valid(self):
        """
        Test the PDepKineticsData.is_temperature_valid() method.
        """
        Tdata = np.array([200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        # ``bool`` replaces the ``np.bool`` alias, which was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24.
        validdata = np.array([False, True, True, True, True, True, True, True, True, True], bool)
        for T, valid in zip(Tdata, validdata):
            valid0 = self.kinetics.is_temperature_valid(T)
            self.assertEqual(valid0, valid)

    def test_is_pressure_valid(self):
        """
        Test the PDepKineticsData.is_pressure_valid() method.
        """
        Pdata = np.array([1e3, 1e4, 1e5, 1e6, 1e7])
        # ``bool`` replaces the removed ``np.bool`` alias (see above).
        validdata = np.array([False, True, True, True, False], bool)
        for P, valid in zip(Pdata, validdata):
            valid0 = self.kinetics.is_pressure_valid(P)
            self.assertEqual(valid0, valid)

    def test_get_rate_coefficient(self):
        """
        Test the PDepKineticsData.get_rate_coefficient() method.
        """
        Tlist = np.array([300, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000])
        Plist = np.array([1e4, 1e5, 1e6])
        kexplist = np.array([
            [2.84847e-03, 2.36670e-01, 2.77019e+01, 3.78191e+02, 1.99333e+03, 5.24644e+03, 1.38086e+04, 2.95680e+04,
             5.15086e+04, 8.97299e+04],
            [2.84847e-02, 2.36670e+00, 2.77019e+02, 3.78191e+03, 1.99333e+04, 5.24644e+04, 1.38086e+05, 2.95680e+05,
             5.15086e+05, 8.97299e+05],
            [2.84847e-01, 2.36670e+01, 2.77019e+03, 3.78191e+04, 1.99333e+05, 5.24644e+05, 1.38086e+06, 2.95680e+06,
             5.15086e+06, 8.97299e+06],
        ]).T
        for i in range(Tlist.shape[0]):
            for j in range(Plist.shape[0]):
                kexp = kexplist[i, j]
                kact = self.kinetics.get_rate_coefficient(Tlist[i], Plist[j])
                self.assertAlmostEqual(kexp, kact, delta=1e-4 * kexp)

    def test_pickle(self):
        """
        Test that a PDepKineticsData object can be pickled and unpickled with no
        loss of information.
        """
        import pickle
        kinetics = pickle.loads(pickle.dumps(self.kinetics, -1))
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.Pdata.value.shape, kinetics.Pdata.value.shape)
        for P, P0 in zip(self.kinetics.Pdata.value, kinetics.Pdata.value):
            self.assertAlmostEqual(P, P0, 4)
        self.assertEqual(self.kinetics.Pdata.units, kinetics.Pdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for i in range(self.kinetics.kdata.value.shape[0]):
            for j in range(self.kinetics.kdata.value.shape[1]):
                k0 = self.kinetics.kdata.value[i, j]
                k = kinetics.kdata.value[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        # Previously only the temperature limits were checked; also verify that
        # the pressure limits survive the round trip.
        self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
        self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
        self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
        self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)

    def test_repr(self):
        """
        Test that a PDepKineticsData object can be reconstructed from its repr()
        output with no loss of information.
        """
        namespace = {}
        exec('kinetics = {0!r}'.format(self.kinetics), globals(), namespace)
        self.assertIn('kinetics', namespace)
        kinetics = namespace['kinetics']
        self.assertEqual(self.kinetics.Tdata.value.shape, kinetics.Tdata.value.shape)
        for T, T0 in zip(self.kinetics.Tdata.value, kinetics.Tdata.value):
            self.assertAlmostEqual(T, T0, 4)
        self.assertEqual(self.kinetics.Tdata.units, kinetics.Tdata.units)
        self.assertEqual(self.kinetics.Pdata.value.shape, kinetics.Pdata.value.shape)
        for P, P0 in zip(self.kinetics.Pdata.value, kinetics.Pdata.value):
            self.assertAlmostEqual(P, P0, 4)
        self.assertEqual(self.kinetics.Pdata.units, kinetics.Pdata.units)
        self.assertEqual(self.kinetics.kdata.value.shape, kinetics.kdata.value.shape)
        for i in range(self.kinetics.kdata.value.shape[0]):
            for j in range(self.kinetics.kdata.value.shape[1]):
                k0 = self.kinetics.kdata.value[i, j]
                k = kinetics.kdata.value[i, j]
                self.assertAlmostEqual(k, k0, delta=1e-6 * k0)
        self.assertEqual(self.kinetics.kdata.units, kinetics.kdata.units)
        self.assertAlmostEqual(self.kinetics.Tmin.value, kinetics.Tmin.value, 4)
        self.assertEqual(self.kinetics.Tmin.units, kinetics.Tmin.units)
        self.assertAlmostEqual(self.kinetics.Tmax.value, kinetics.Tmax.value, 4)
        self.assertEqual(self.kinetics.Tmax.units, kinetics.Tmax.units)
        # Also verify the pressure limits (previously unchecked).
        self.assertAlmostEqual(self.kinetics.Pmin.value, kinetics.Pmin.value, 4)
        self.assertEqual(self.kinetics.Pmin.units, kinetics.Pmin.units)
        self.assertAlmostEqual(self.kinetics.Pmax.value, kinetics.Pmax.value, 4)
        self.assertEqual(self.kinetics.Pmax.units, kinetics.Pmax.units)
        self.assertEqual(self.kinetics.comment, kinetics.comment)
| 0 | 0 | 0 |
fba2b02af14665d0ddace29de0d5019383f51be5 | 350 | py | Python | requests_test/request_test_3.py | waws520waws/waws_spider | c6a5988121f32619a5c5134c09fdfd556c696fe7 | [
"MIT"
] | null | null | null | requests_test/request_test_3.py | waws520waws/waws_spider | c6a5988121f32619a5c5134c09fdfd556c696fe7 | [
"MIT"
] | null | null | null | requests_test/request_test_3.py | waws520waws/waws_spider | c6a5988121f32619a5c5134c09fdfd556c696fe7 | [
"MIT"
] | null | null | null | # encoding:utf-8
"""
requests 的POST方法
注意post方式中传递的参数是data,而get 是params
"""
import requests
data = {"user":"admin","pwd":"admin"}
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
url = "https://httpbin.org/post"
req = requests.post(url,data=data,headers=headers)
print(req.content.decode("utf8","ignore")) | 26.923077 | 92 | 0.702857 | # encoding:utf-8
"""
requests 的POST方法
注意post方式中传递的参数是data,而get 是params
"""
import requests
data = {"user":"admin","pwd":"admin"}
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);"}
url = "https://httpbin.org/post"
req = requests.post(url,data=data,headers=headers)
print(req.content.decode("utf8","ignore")) | 0 | 0 | 0 |
5f8cca5795d2a214e46ae5e039dd2b525827f58f | 1,260 | py | Python | TapeEquilibrium.py | Cynthyah/Exercises | 4c458cb518d8e77f2c51a9dd8d36eb4e4c73364c | [
"MIT"
] | null | null | null | TapeEquilibrium.py | Cynthyah/Exercises | 4c458cb518d8e77f2c51a9dd8d36eb4e4c73364c | [
"MIT"
] | null | null | null | TapeEquilibrium.py | Cynthyah/Exercises | 4c458cb518d8e77f2c51a9dd8d36eb4e4c73364c | [
"MIT"
] | null | null | null | # A non-empty array A consisting of N integers is given. Array A represents numbers on a tape.
# Any integer P, such that 0 < P < N, splits this tape into two non-empty parts:
# A[0], A[1], ..., A[P − 1] and A[P], A[P + 1], ..., A[N − 1].
# The difference between the two parts is the value of: |(A[0] + A[1] + ... + A[P − 1]) − (A[P] + A[P + 1] + ... + A[N − 1])|
# In other words, it is the absolute difference between the sum of the first part and the sum of the second part.
# A = [3,1,2,4,3]
# P = 1, difference = |3 − 10| = 7
# P = 2, difference = |4 − 9| = 5
# P = 3, difference = |6 − 7| = 1
# P = 4, difference = |10 − 3| = 7
# function should return 1, as explained above.
# N is an integer within the range [2..100,000];
# each element of array A is an integer within the range [−1,000..1,000].
# Testing
A = [-10, -20, -30, -40, 100]  # minimal split difference is 20, at split point P = 1
# NOTE(review): `solution` is not defined anywhere in this copy of the file --
# the implementation appears to have been stripped; confirm it exists before running.
print(solution(A))
# Detected time complexity: O(N)
| 31.5 | 125 | 0.560317 | # A non-empty array A consisting of N integers is given. Array A represents numbers on a tape.
# Any integer P, such that 0 < P < N, splits this tape into two non-empty parts:
# A[0], A[1], ..., A[P − 1] and A[P], A[P + 1], ..., A[N − 1].
# The difference between the two parts is the value of: |(A[0] + A[1] + ... + A[P − 1]) − (A[P] + A[P + 1] + ... + A[N − 1])|
# In other words, it is the absolute difference between the sum of the first part and the sum of the second part.
# A = [3,1,2,4,3]
# P = 1, difference = |3 − 10| = 7
# P = 2, difference = |4 − 9| = 5
# P = 3, difference = |6 − 7| = 1
# P = 4, difference = |10 − 3| = 7
# function should return 1, as explained above.
# N is an integer within the range [2..100,000];
# each element of array A is an integer within the range [−1,000..1,000].
def solution(A):
    """Return the minimal tape-split difference for array ``A``.

    For each split point P (0 < P < len(A)) the difference is
    ``|sum(A[:P]) - sum(A[P:])|``; this returns the minimum over all P.
    ``A`` must contain at least two elements.

    Runs in O(N) time and O(1) extra space: the original implementation
    collected every difference in a list, but a running minimum suffices.
    """
    left = A[0]
    right = sum(A[1:])
    best = abs(left - right)
    # Slide the split point from P = 1 to P = len(A) - 1, updating both
    # partial sums in O(1) per step.
    for value in A[1:-1]:
        left += value
        right -= value
        diff = abs(left - right)
        if diff < best:
            best = diff
            if best == 0:  # a perfect split cannot be beaten; stop early
                break
    return best
# Testing
A = [-10, -20, -30, -40, 100]  # minimal split difference is 20, at split point P = 1
print(solution(A))
# Detected time complexity: O(N)
| 323 | 0 | 23 |
b3198f38460196e85f4db1f61f3cb0834ada74e4 | 6,370 | py | Python | ridt/tests/systemtests/test_st02.py | riskaware-ltd/ridt | c0288a2f814b2749bdf73de7157f7477ca271aff | [
"MIT"
] | null | null | null | ridt/tests/systemtests/test_st02.py | riskaware-ltd/ridt | c0288a2f814b2749bdf73de7157f7477ca271aff | [
"MIT"
] | 9 | 2020-09-18T08:22:39.000Z | 2021-07-20T09:39:59.000Z | ridt/tests/systemtests/test_st02.py | riskaware-ltd/ridt | c0288a2f814b2749bdf73de7157f7477ca271aff | [
"MIT"
] | 1 | 2021-06-22T21:53:20.000Z | 2021-06-22T21:53:20.000Z | import unittest
import os
import json
from ridt.config import ConfigFileParser
from ridt.config.ridtconfig import ConsistencyError
from ridt.config.configfileparser import ConfigFileParserValidationError
class ST02(unittest.TestCase):
    """System Test 02. The objective is to be able to parse monitor
    points/lines/planes parallel to the principle axes from the JSON file,
    to be evaluated by the Eddy Diffusion model."""

    def setUp(self) -> None:
        """setUp method that instantiates the
        :class:`~.RIDTConfig` class."""
        this_dir = os.path.dirname(os.path.abspath(__file__))
        self.config_path = os.path.join(this_dir, "st06/config.json")
        # Keep a pristine copy of the config so tearDown can restore the file
        # after tests that deliberately write a corrupted version to disk.
        with open(self.config_path) as f:
            self.default = json.load(f)
        path = os.path.join(this_dir, "../../default/config.json")
        with ConfigFileParser(path) as cfp:
            self.c = cfp

    def tearDown(self) -> None:
        """Restore the on-disk config file to its original contents.

        Several tests below overwrite ``self.config_path`` with a corrupted
        configuration; without this restore step those corruptions would leak
        into subsequent tests and later runs of the suite.
        """
        with open(self.config_path, "w") as f:
            json.dump(self.default, f)

    def test_verify(self):
        """Verifies that the :class:`~.RIDTConfig` class contains the
        monitor locations attributes."""
        self.assertEqual(
            hasattr(self.c, "models"), True)
        self.assertEqual(
            hasattr(self.c.models, "eddy_diffusion"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion, "monitor_locations"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "points"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "lines"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "planes"), True)

    def test_verify_points(self):
        """Verifies that the :class:`~.Point` class contains the
        correct attributes."""
        for point in self.c.models.eddy_diffusion.monitor_locations.points.values():
            self.assertEqual(hasattr(point, "x"), True)
            self.assertEqual(hasattr(point, "y"), True)
            self.assertEqual(hasattr(point, "z"), True)

    def test_verify_lines(self):
        """Verifies that the :class:`~.Line` class contains the
        correct attributes."""
        for line in self.c.models.eddy_diffusion.monitor_locations.lines.values():
            self.assertEqual(hasattr(line, "point"), True)
            self.assertEqual(hasattr(line.point, "x"), True)
            self.assertEqual(hasattr(line.point, "y"), True)
            self.assertEqual(hasattr(line.point, "z"), True)
            self.assertEqual(hasattr(line, "parallel_axis"), True)

    def test_verify_planes(self):
        """Verifies that the :class:`~.Plane` class contains the
        correct attributes."""
        for plane in self.c.models.eddy_diffusion.monitor_locations.planes.values():
            self.assertEqual(hasattr(plane, "axis"), True)
            self.assertEqual(hasattr(plane, "distance"), True)

    def test_line_dims(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if the line does not lie in the bounds of the container
        or is not parallel to one of the principle axes."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            # Push every monitor line one unit outside the container.
            for lines in config["models"]["eddy_diffusion"]["monitor_locations"]["lines"].values():
                lines["point"]["x"] = config["dimensions"]["x"] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_line_axis(self):
        """Checks to see if the :class:`.ConfigFileParserValidationError`
        error triggers if a line's parallel_axis is not one of the principle
        axes."""
        with self.assertRaises(ConfigFileParserValidationError):
            with open(self.config_path) as f:
                config = json.load(f)
            for lines in config["models"]["eddy_diffusion"]["monitor_locations"]["lines"].values():
                lines["parallel_axis"] = "test"
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_points(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if the point does not lie in the bounds of the container."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            for points in config["models"]["eddy_diffusion"]["monitor_locations"]["points"].values():
                points["x"] = config["dimensions"]["x"] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_planes_axis(self):
        """Checks to see if the :class:`.ConfigFileParserValidationError`
        error triggers if a plane's axis is not a valid axis pair."""
        with self.assertRaises(ConfigFileParserValidationError):
            with open(self.config_path) as f:
                config = json.load(f)
            for planes in config["models"]["eddy_diffusion"]["monitor_locations"]["planes"].values():
                planes["axis"] = "test"
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_planes_distance(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if a plane lies outside of the bounds of the container."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            for planes in config["models"]["eddy_diffusion"]["monitor_locations"]["planes"].values():
                # A plane's distance is measured along the axis NOT named by
                # its two-letter ``axis`` value; exceed that dimension.
                axis = [x for x in ["x", "y", "z"] if x not in list(planes["axis"])]
                planes["distance"] = config["dimensions"][axis[0]] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)
if __name__ == "__main__":
    # Run the ST02 suite when this file is executed directly.
    unittest.main()
| 39.079755 | 101 | 0.619466 | import unittest
import os
import json
from ridt.config import ConfigFileParser
from ridt.config.ridtconfig import ConsistencyError
from ridt.config.configfileparser import ConfigFileParserValidationError
class ST02(unittest.TestCase):
    """System Test 02. The objective is to be able to parse monitor
    points/lines/planes parallel to the principal axes from the JSON file,
    to be evaluated by the Eddy Diffusion model."""

    def setUp(self) -> None:
        """setUp method that instantiates the
        :class:`~.RIDTConfig` class.

        Loads the mutable test config (remembered in ``self.default`` so
        :meth:`tearDown` can restore it) and parses the package default
        config into ``self.c`` for the attribute-verification tests."""
        this_dir = os.path.dirname(os.path.abspath(__file__))
        self.config_path = os.path.join(this_dir, "st06/config.json")
        with open(self.config_path) as f:
            self.default = json.load(f)
        this_dir = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(this_dir, "../../default/config.json")
        with ConfigFileParser(path) as cfp:
            self.c = cfp

    def tearDown(self) -> None:
        """Write the pristine JSON back so mutations made by the
        bounds-checking tests do not leak between tests."""
        with open(self.config_path, "w") as w:
            json.dump(self.default, w)

    def test_verify(self):
        """Verifies that the :class:`~.RIDTConfig` class contains the
        monitor locations attributes."""
        self.assertEqual(
            hasattr(self.c, "models"), True)
        self.assertEqual(
            hasattr(self.c.models, "eddy_diffusion"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion, "monitor_locations"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "points"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "lines"), True)
        self.assertEqual(
            hasattr(self.c.models.eddy_diffusion.monitor_locations, "planes"), True)

    def test_verify_points(self):
        """Verifies that the :class:`~.Point` class contains the
        correct attributes."""
        for point in self.c.models.eddy_diffusion.monitor_locations.points.values():
            self.assertEqual(hasattr(point, "x"), True)
            self.assertEqual(hasattr(point, "y"), True)
            self.assertEqual(hasattr(point, "z"), True)

    def test_verify_lines(self):
        """Verifies that the :class:`~.Line` class contains the
        correct attributes."""
        for line in self.c.models.eddy_diffusion.monitor_locations.lines.values():
            self.assertEqual(hasattr(line, "point"), True)
            self.assertEqual(hasattr(line.point, "x"), True)
            self.assertEqual(hasattr(line.point, "y"), True)
            self.assertEqual(hasattr(line.point, "z"), True)
            self.assertEqual(hasattr(line, "parallel_axis"), True)

    def test_verify_planes(self):
        """Verifies that the :class:`~.Plane` class contains the
        correct attributes."""
        for plane in self.c.models.eddy_diffusion.monitor_locations.planes.values():
            self.assertEqual(hasattr(plane, "axis"), True)
            self.assertEqual(hasattr(plane, "distance"), True)

    def test_line_dims(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if the line does not lie in the bounds of the container."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            # Move every line's anchor point past the container's x extent.
            for lines in config["models"]["eddy_diffusion"]["monitor_locations"]["lines"].values():
                lines["point"]["x"] = config["dimensions"]["x"] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_line_axis(self):
        """Checks that :class:`.ConfigFileParserValidationError` is raised
        when a line's ``parallel_axis`` is not a valid axis name.

        NOTE(review): the original docstring claimed this checked
        :class:`.ConsistencyError` for an out-of-bounds line, but the body
        corrupts the axis *name* and asserts the schema-validation error.
        """
        with self.assertRaises(ConfigFileParserValidationError):
            with open(self.config_path) as f:
                config = json.load(f)
            for lines in config["models"]["eddy_diffusion"]["monitor_locations"]["lines"].values():
                lines["parallel_axis"] = "test"
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_points(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if the point does not lie in the bounds of the container."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            for points in config["models"]["eddy_diffusion"]["monitor_locations"]["points"].values():
                points["x"] = config["dimensions"]["x"] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_planes_axis(self):
        """Checks that :class:`.ConfigFileParserValidationError` is raised
        when a plane's ``axis`` is not a valid axis-pair string.

        NOTE(review): the original docstring claimed this checked
        :class:`.ConsistencyError` for an out-of-bounds plane, but the body
        corrupts the axis *name* and asserts the schema-validation error.
        """
        with self.assertRaises(ConfigFileParserValidationError):
            with open(self.config_path) as f:
                config = json.load(f)
            for planes in config["models"]["eddy_diffusion"]["monitor_locations"]["planes"].values():
                planes["axis"] = "test"
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)

    def test_planes_distance(self):
        """Checks to see if the :class:`.ConsistencyError` error
        triggers if a plane lies outside of the bounds of the container."""
        with self.assertRaises(ConsistencyError):
            with open(self.config_path) as f:
                config = json.load(f)
            for planes in config["models"]["eddy_diffusion"]["monitor_locations"]["planes"].values():
                # The plane's offset axis is whichever of x/y/z is absent
                # from its axis-pair string.
                axis = [x for x in ["x", "y", "z"] if x not in list(planes["axis"])]
                planes["distance"] = config["dimensions"][axis[0]] + 1
            with open(self.config_path, "w") as f:
                json.dump(config, f)
            ConfigFileParser(self.config_path)
# Allow the system tests to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| 93 | 0 | 27 |
feadbcdbbac786c3b04fd65e5a611ba0ead397a6 | 693 | py | Python | setup.py | smkia/neuroCombat | d1a70c5f93284ec32a29bf2ba8c5addeea1089c7 | [
"MIT"
] | null | null | null | setup.py | smkia/neuroCombat | d1a70c5f93284ec32a29bf2ba8c5addeea1089c7 | [
"MIT"
] | null | null | null | setup.py | smkia/neuroCombat | d1a70c5f93284ec32a29bf2ba8c5addeea1089c7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools

# Read the README so it can be published as the package's long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    author="Jean-Philippe Fortin, Nick Cullen, Tim Robert-Fitzgerald",
    # Fix: stray trailing comma inside the email string literal removed.
    author_email='fortin946@gmail.com',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
    description="ComBat algorithm for harmonizing multi-site imaging data",
    # Fix: long_description was read from README.md but never passed to
    # setup(), so PyPI would show no project description.
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT license",
    url="https://github.com/Jfortin1/neuroCombat",
    project_urls={
        "Github": "https://github.com/Jfortin1/neuroCombat",
    },
    name='neuroCombat',
    packages=['neuroCombat'],
    version='0.2.11',
    zip_safe=False,
)
| 26.653846 | 73 | 0.676768 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools

# Read the README so it can be published as the package's long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    author="Jean-Philippe Fortin, Nick Cullen, Tim Robert-Fitzgerald",
    # Fix: stray trailing comma inside the email string literal removed.
    author_email='fortin946@gmail.com',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
    description="ComBat algorithm for harmonizing multi-site imaging data",
    # Fix: long_description was read from README.md but never passed to
    # setup(), so PyPI would show no project description.
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT license",
    url="https://github.com/Jfortin1/neuroCombat",
    project_urls={
        "Github": "https://github.com/Jfortin1/neuroCombat",
    },
    name='neuroCombat',
    packages=['neuroCombat'],
    version='0.2.11',
    zip_safe=False,
)
| 0 | 0 | 0 |