blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e257e337febe1511c2d9627bf695efa744fc08f | b3e9a8963b9aca334b93b95bc340c379544e1046 | /euler/53.py | 848c9371fe27dc537186b3b102125dfaaa2271db | [] | no_license | protocol7/euler.py | 86ea512c2c216968e6c260b19469c0c8d038feb7 | e2a8e46a9b07e6d0b039a5496059f3bf73aa5441 | refs/heads/master | 2022-09-08T22:49:47.486631 | 2022-08-23T20:07:00 | 2022-08-23T20:07:00 | 169,478,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | #!/usr/bin/env python3
from functools import lru_cache
@lru_cache(maxsize=None)
def factorial(n):
if n == 1:
return 1
return n * factorial(n-1)
assert 24 == factorial(4)
def combos(n, r):
return factorial(n) / (factorial(r) * factorial(n - r))
assert 1144066 == combos(23, 10)
def find():
c = 0
for n in range(1, 101):
for r in range(1, n):
if combos(n, r) > 1000000:
c += 1
return c
assert 4075 == find()
| [
"niklas@protocol7.com"
] | niklas@protocol7.com |
258d8ef0453118d55ac07e5b1516bb5578fe9f11 | 646cadb1c72ef4a060343baf2fcbe271958b6878 | /tigerjython/Pi2GoEx/Ligth1a.py | 36d10f7d5715c2c72b027fbe61929c76909918e4 | [] | no_license | tigerjython/tjinstall | bd75cf8e4ae27b639a13865ef1ec5710391a2938 | aab61519b5299c2ab4f423c6fc5d8ea7c7860a99 | refs/heads/master | 2021-01-17T08:53:50.386905 | 2018-01-12T06:56:28 | 2018-01-12T06:56:28 | 40,659,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # Light1a.py
from raspibrick import *
def onDark(value):
print "dark event with v:", value
def onBright(value):
print "bright event with v:", value
robot = Robot()
ls = LightSensor(LS_FRONT_LEFT)
while not robot.isEscapeHit():
continue
robot.exit()
print "All done" | [
"support@tigerjython.com"
] | support@tigerjython.com |
a51bcd3afd92f603fd92b6ce8517474dbb4ae72b | d741f71eb48b23fdda1339daee10ccb039da1ee6 | /leetcode7.py | f66dd37a370eca0129bd9f10c89bc749bd2596fd | [] | no_license | HawkinYap/Leetcode | a5b42bf3aa41e7f1ba60a5a804909035fa8e1ec9 | 5e7bc7368db4dfcd8597dc0462a8a0b5bfd46e54 | refs/heads/master | 2020-06-30T07:15:24.453955 | 2020-04-18T07:38:22 | 2020-04-18T07:38:22 | 199,663,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
strx = str(x)
if strx[0] == '-':
strx = strx[len(strx)-1:0:-1]
if int(strx) < 2 ** 31-1:
return(-int(strx))
else:
return(0)
else:
strx = strx[::-1]
if int(strx) < 2 ** 31-1:
return(int(strx))
else:
return(0)
if __name__ == '__main__':
num = 123
solution = Solution()
print(solution.reverse(num)) | [
"Gallowsgogo@gmail.com"
] | Gallowsgogo@gmail.com |
a0babf5d131fe0f8d29fd070ae89e73f9667fea3 | 9e4f5cf305e818caccb543b9b0c60876afc682c2 | /Python-Matic-SDK/examples/deploy.py | 7f36ee16da523d8d51697000b451f539e59d4dba | [
"MIT"
] | permissive | bellyfat/Matic-for-python-developers | 1ed2062e1826d7847ade449d47a311901c1b53dd | ea33737a89384ce9e6f6dc97b12f0a608994e28d | refs/heads/main | 2023-07-16T04:48:19.934602 | 2021-09-12T15:21:09 | 2021-09-12T15:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from maticvigil.EVCore import EVCore
evc = EVCore(verbose=False)
r = evc.deploy(
contract_file='microblog.sol',
contract_name='Microblog',
inputs={
'_ownerName': 'anomit',
'_blogTitle': 'TheBlog'
}
)
print('Contract Address deployed at')
print(r['contract'])
| [
"noreply@github.com"
] | bellyfat.noreply@github.com |
cc01ee431f998edf052d5fb58191cf1d5e445aa9 | aa91f6e8d59286f65e7f6ed065823c80b7694439 | /scripts/lab/fasttext/f1.py | 80a8f85cdbe7ed8c70a4524cd5fa968237443bd0 | [
"MIT"
] | permissive | davidyuqiwei/davidyu_stock | 7f93bcc2c50a0e2c356e3b517dbf7e2e7467093f | c234911f49d5980f2dff651333f8ca957333e094 | refs/heads/master | 2023-01-07T02:08:04.330559 | 2023-01-02T16:31:37 | 2023-01-02T16:31:37 | 163,694,812 | 13 | 2 | null | 2022-12-18T03:55:10 | 2018-12-31T20:07:30 | Jupyter Notebook | UTF-8 | Python | false | false | 8,678 | py | # _*_ coding:utf-8 _*_
'''
@Author: Ruan Yang
@Date: 2018.12.9
@Purpose: 处理wikizh文本的二分类问题,判断语句是否通顺
@Attention: 本例中的负样本是 shuffle 正样例得到,所以容易形成分类
@算法:CNN
@本例是二分类问题
'''
import codecs
paths=r"/home/davidyu/gits/fastText-Study"
train_data_name="train.txt"
test_data_name="test.txt"
x_train=[]
x_test=[]
y_train=[]
y_test=[]
x_train_positive=0
x_train_negative=0
x_test_positive=0
x_test_negative=0
with codecs.open(paths+train_data_name,"r","utf-8") as f1,\
codecs.open(paths+test_data_name,"r","utf-8") as f2:
for line in f1:
words=line.strip().split("\t")
if words[0] == "__label__1":
y_train.append([0,1]) # [0,1] 表示正样例
x_train_positive += 1
else:
y_train.append([1,0]) # [1,0] 表示负样例
x_train_negative += 1
x_train.append(words[1])
for line in f2:
words=line.strip().split("\t")
if words[0] == "__label__1":
y_test.append([0,1])
x_test_positive += 1
else:
y_test.append([1,0])
x_test_negative += 1
x_test.append(words[1])
print("#----------------------------------------------------------#")
print("训练集总数:{}".format(len(x_train)))
print("训练集中正样本个数:{}".format(x_train_positive))
print("训练集中负样本个数:{}".format(x_train_negative))
print("测试集总数:{}".format(len(x_test)))
print("测试集中正样本个数:{}".format(x_test_positive))
print("测试集中负样本个数:{}".format(x_test_negative))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("将输入文本转换成 index - word 对应关系,并输出词汇表")
x_text=x_train+x_test # 总输入文本
y_labels=y_train+y_test
'''
from tensorflow.contrib import learn
import tensorflow as tf
import numpy as np
import collections
max_document_length=200
min_frequency=1
vocab = learn.preprocessing.VocabularyProcessor(max_document_length,min_frequency, tokenizer_fn=list)
x = np.array(list(vocab.fit_transform(x_text)))
vocab_dict = collections.OrderedDict(vocab.vocabulary_._mapping)
with codecs.open(r"C:\Users\RY\Desktop\vocabulary.txt","w","utf-8") as f:
for key,value in vocab_dict.items():
f.write("{} {}\n".format(key,value))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("数据混洗")
np.random.seed(10)
y=np.array(y_labels)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
test_sample_percentage=0.2
test_sample_index = -1 * int(test_sample_percentage * float(len(y)))
x_train, x_test = x_shuffled[:test_sample_index], x_shuffled[test_sample_index:]
y_train, y_test = y_shuffled[:test_sample_index], y_shuffled[test_sample_index:]
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("读取預训练词向量矩阵")
pretrainpath=r"E:\中科大MS\預训练模型\\"
embedding_index={}
with codecs.open(pretrainpath+"sgns.wiki.word","r","utf-8") as f:
#for line in f:
# if len(line.strip().split(" "))==2:
# nwords=int(line.strip().split(" ")[0])
# ndims=int(line.strip().split(" ")[1])
# else:
# values=line.split()
# words=values[0]
# coefs=np.asarray(values[1:],dtype="float32")
# embedding_index[word]=coefs
line=f.readline()
nwords=int(line.strip().split(" ")[0])
ndims=int(line.strip().split(" ")[1])
for line in f:
values=line.split()
words=values[0]
coefs=np.asarray(values[1:],dtype="float32")
embedding_index[words]=coefs
print("預训练模型中Token总数:{} = {}".format(nwords,len(embedding_index)))
print("預训练模型的维度:{}".format(ndims))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("将vocabulary中的 index-word 对应关系映射到 index-word vector形式")
embedding_matrix=[]
notfoundword=0
for word in vocab_dict.keys():
if word in embedding_index.keys():
embedding_matrix.append(embedding_index[word])
else:
notfoundword += 1
embedding_matrix.append(np.random.uniform(-1,1,size=ndims))
embedding_matrix=np.array(embedding_matrix,dtype=np.float32) # 必须使用 np.float32
print("词汇表中未找到单词个数:{}".format(notfoundword))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("构建CNN模型.................")
print("Embedding layer --- Conv1D layer --- Dense layer --- Dense layer")
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
max_sentence_length=200
embedding_dims=ndims
input_length=max_sentence_length
batch_size = 64
filters = 250
kernel_size = 3
hidden_dims = 250
dropout=0.5
num_classes=2
epochs = 2
model = Sequential()
model.add(Embedding(len(vocab_dict),
embedding_dims,
weights=[embedding_matrix],
input_length=max_sentence_length,
trainable=False))
model.add(Dropout(dropout))
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Dropout(dropout))
model.add(Activation('relu'))
model.add(Dense(num_classes))
model.add(Activation('sigmoid'))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("编译模型")
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("模型拟合")
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
print("#----------------------------------------------------------#")
print("\n")
print("#----------------------------------------------------------#")
print("模型评估")
# 训练得分和准确度
score,acc=model.evaluate(x_test,y_test,batch_size=batch_size)
print("#---------------------------------------------------#")
print("预测得分:{}".format(score))
print("预测准确率:{}".format(acc))
print("#---------------------------------------------------#")
print("\n")
# 模型预测
predictions=model.predict(x_test)
print("#---------------------------------------------------#")
print("测试集的预测结果,对每个类有一个得分/概率,取值大对应的类别")
print(predictions)
print("#---------------------------------------------------#")
print("\n")
# 模型预测类别
predict_class=model.predict_classes(x_test)
print("#---------------------------------------------------#")
print("测试集的预测类别")
print(predict_class)
print("#---------------------------------------------------#")
print("\n")
# 模型保存
model.save(r"C:\Users\RY\Desktop\wikizh_cnn.h5")
print("#---------------------------------------------------#")
print("保存模型")
print("#---------------------------------------------------#")
print("\n")
# 模型总结
print("#---------------------------------------------------#")
print("输出模型总结")
print(model.summary())
print("#---------------------------------------------------#")
print("\n")
# 模型的配置文件
config=model.get_config()
print("#---------------------------------------------------#")
print("输出模型配置信息")
print(config)
print("#---------------------------------------------------#")
print("\n")
| [
"davidyuqiwei@outlook.com"
] | davidyuqiwei@outlook.com |
a05d56db574368bf8580a4bd6667b10b1c1cae4d | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/smsdroid/testcase/firstcases/testcase7_000.py | 9a66eb49f61b413902721cb5c99ff03f5fba9cc3 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,530 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.ub0r.android.smsdroid',
'appActivity' : 'de.ub0r.android.smsdroid.ConversationListActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.ub0r.android.smsdroid/de.ub0r.android.smsdroid.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def testingSeekBar(driver, str, value):
try :
if(not checkWindow(driver)) :
element = seekForNearestSeekBar(driver, str)
else :
element = driver.find_element_by_class_name("android.widget.SeekBar")
if (None != element):
settingSeekBar(driver, element, value)
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
except NoSuchElementException:
time.sleep(1)
def seekForNearestSeekBar(driver, str):
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_class_name("android.widget.SeekBar")
return innere
break
except NoSuchElementException:
continue
def settingSeekBar(driver, element, value) :
x = element.rect.get("x")
y = element.rect.get("y")
width = element.rect.get("width")
height = element.rect.get("height")
TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
y = value
def clickInMultiList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
nowvalue = element.get_attribute("checked")
if (nowvalue != "true") :
element.click()
if checkWindow(driver) :
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase7_000
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"66560866\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"7:52 AM\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Call 2131231\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"7_000\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.ub0r.android.smsdroid'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
635db05bfab567250fafbb0dbbf2098cf6aebaad | 2324dea2cb3003c8ab7e8fd80588d44973eb8c77 | /Euler_1_31a.py | 92b658671b702b3c61f6fb98880583468f309a1e | [] | no_license | MikeOcc/MyProjectEulerFiles | 5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56 | 4d066d52380aade215636953589bf56d6b88f745 | refs/heads/master | 2021-01-16T18:45:44.133229 | 2015-05-27T18:28:43 | 2015-05-27T18:28:43 | 5,876,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | #
# Euler 31
#
#
from itertools import *
#print list(product([200,100,50,20,10,5,2,1] , repeat = 200))
cur = [100, 50, 20, 10, 5, 2, 1]
fac = [ 2, 4, 10, 20, 40, 100, 200]
val = 200
ctr = 8
cntdown = 8
cnt = 200
for i in range(0,2):
cnt-=i*cur[0]
print cnt,cur[0]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "Level 1:"
for j in range(0,4):
if cnt == 0:break
print cnt,cur[1]
cnt-=j*cur[1]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "Level 2:"
for k in range(0,11):
if cnt == 0:break
cnt-=k*cur[2]
print cnt,cur[0]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "Level 3:"
for l in range(0,21):
if cnt == 0:break
cnt-=l*cur[3]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "Level 4:"
for m in range(1,41):
if cnt == 0:break
cnt-=m*cur[4]
print cnt,cur[0]
if cnt == 0:
ctr += 1;print ctr
break
print "Level 5:"
for n in range(0,101):
if cnt == 0:break
cnt-=n*cur[5]
print cnt,cur[5]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "Level 6:"
for o in range(0,201):
if cnt == 0:break
cnt-=o*cur[6]
print cnt,cur[6]
if cnt == 0:
cnt = 200
ctr += 1;print ctr
break
print "total number of permutations is " , ctr
| [
"mike.occhipinti@mlsassistant.com"
] | mike.occhipinti@mlsassistant.com |
64900c6259cb79e029298d9d679b01edffaba0e0 | 66b13f9aa90bb0091684334c68a574c390fb58a1 | /gen-cards.py | 2f95a50ff3eee68a49acce58d153ab71aa31fb67 | [
"MIT"
] | permissive | sugar-activities/4246-activity | d67c8038698b4fd5dab19ff9bf9ea2c3647692e8 | 5ddd5fe21b2ddfc5f926e434b7f8a23e20f54263 | refs/heads/master | 2021-01-19T23:14:32.545612 | 2017-04-21T05:05:53 | 2017-04-21T05:05:53 | 88,937,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,603 | py | # -*- coding: utf-8 -*-
#Copyright (c) 2009, Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
RED_STROKE = "#FF6040"
RED_FILL = "#FFC4B8"
BLUE_STROKE = "#0060C8"
BLUE_FILL = "#ACC8E4"
GREEN_STROKE = "#00B418"
GREEN_FILL = "#AFE8A8"
PURPLE_STROKE = "#780078"
PURPLE_FILL = "#E4AAE4"
color_pairs = ([RED_STROKE,RED_FILL],
[GREEN_STROKE,GREEN_FILL],
[BLUE_STROKE,BLUE_FILL],
[PURPLE_STROKE,PURPLE_FILL])
fill_styles = ("none","gradient","solid")
card_types = ("X","O","C")
def background(f,fill):
f.write("<rect width=\"74.5\" height=\"124.5\" rx=\"11\" ry=\"9\" x=\"0.25\" y=\"0.25\"\n")
f.write("style=\"fill:" + fill + ";fill-opacity:1;stroke:#000000;stroke-width:0.5\" />\n")
def header(f,fill):
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n")
f.write("<!-- Created with Emacs -->\n")
f.write("<svg\n")
f.write(" xmlns:svg=\"http://www.w3.org/2000/svg\"\n")
f.write(" xmlns=\"http://www.w3.org/2000/svg\"\n")
f.write(" version=\"1.0\"\n")
f.write(" width=\"75\"\n")
f.write(" height=\"125\">\n")
background(f,fill)
f.write("<g>\n")
def footer(f):
f.write("</g>\n")
f.write("</svg>\n")
def circle(f, y, style, stroke, fill):
f.write("<circle cx=\"27\" cy=\"11\" r=\"16\"\n")
f.write(" transform=\"translate(11," + str(y+11) + ")\"\n")
if style == "none":
f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
elif style == "gradient":
f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
else:
f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
f.write("<circle cx=\"27\" cy=\"11\" r=\"8\"\n")
f.write(" transform=\"translate(11," + str(y+11) + ")\"\n")
f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
def check(f, y, style, stroke, fill):
f.write("<path d=\"m 28.3575,70.160499 -5.861,5.861 -5.861,-5.866001 -4.102,-4.1 c -0.747,-0.747999 -1.212,-1.784999 -1.212,-2.93 0,-2.288998 1.854,-4.145998 4.146,-4.145998 1.143,0 2.18,0.465 2.93,1.214 l 4.099,4.101999 14.102,-14.102998 c 0.754,-0.749 1.787,-1.214 2.934,-1.214 2.289,0 4.146,1.856001 4.146,4.145001 0,1.146 -0.467,2.18 -1.217,2.932 l -14.104,14.104997 z\"\n")
f.write(" transform=\"translate(10," + str(y-40) + ")\"\n")
if style == "none":
f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
elif style == "gradient":
f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
else:
f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
def cross(f, y, style, stroke, fill):
f.write("<path d=\"m 33.3585,62.5035 10.102,10.1 c 0.752,0.75 1.217,1.783 1.217,2.932 0,2.287 -1.855,4.143 -4.146,4.143 -1.145,0 -2.178,-0.463 -2.932,-1.211 l -10.102,-10.103 -10.1,10.1 c -0.75,0.75 -1.787,1.211 -2.934,1.211 -2.284,0 -4.143,-1.854 -4.143,-4.141 0,-1.146 0.465,-2.184 1.212,-2.934 l 10.104,-10.102 -10.102,-10.1 c -0.747,-0.748 -1.212,-1.785 -1.212,-2.93 0,-2.289 1.854,-4.146 4.146,-4.146 1.143,0 2.18,0.465 2.93,1.214 l 10.099,10.102 10.102,-10.103 c 0.754,-0.749 1.787,-1.214 2.934,-1.214 2.289,0 4.146,1.856 4.146,4.145 0,1.146 -0.467,2.18 -1.217,2.932 l -10.104,10.105 z\"\n")
f.write(" transform=\"translate(10," + str(y-40) + ")\"\n")
if style == "none":
f.write(" style=\"fill:#FFFFFF;stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
elif style == "gradient":
f.write(" style=\"fill:" + fill + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
else:
f.write(" style=\"fill:" + stroke + ";stroke:" + stroke + \
";stroke-width:1.8;\" />\n")
def check_card(f, n, style, stroke, fill):
if n == 1:
check(f, 41.5,style, stroke, fill)
elif n == 2:
check(f, 21.5,style, stroke, fill)
check(f, 61.5,style, stroke, fill)
else:
check(f, 1.5,style, stroke, fill)
check(f, 41.5,style, stroke, fill)
check(f, 81.5,style, stroke, fill)
def cross_card(f, n, style, stroke, fill):
if n == 1:
cross(f, 41.5,style, stroke, fill)
elif n == 2:
cross(f, 21.5,style, stroke, fill)
cross(f, 61.5,style, stroke, fill)
else:
cross(f, 1.5,style, stroke, fill)
cross(f, 41.5,style, stroke, fill)
cross(f, 81.5,style, stroke, fill)
def circle_card(f, n, style, stroke, fill):
if n == 1:
circle(f, 41.5,style, stroke, fill)
elif n == 2:
circle(f, 21.5,style, stroke, fill)
circle(f, 61.5,style, stroke, fill)
else:
circle(f, 1.5,style, stroke, fill)
circle(f, 41.5,style, stroke, fill)
circle(f, 81.5,style, stroke, fill)
def open_file(i):
return file("images/card-"+str(i)+".svg", "w")
def close_file(f):
f.close()
i = 0
for t in card_types:
for c in color_pairs:
for n in range(1,4):
for s in fill_styles:
i += 1
f = open_file(i)
header(f,c[1])
if t == "O":
circle_card(f,n,s,c[0],c[1])
elif t == "C":
check_card(f,n,s,c[0],c[1])
else:
cross_card(f,n,s,c[0],c[1])
footer(f)
close_file(f)
| [
"ignacio@sugarlabs.org"
] | ignacio@sugarlabs.org |
ea08b87bbcf0c0cf423313d7133d5767e513cc77 | e73f0bd1e15de5b8cb70f1d603ceedc18c42b39b | /adventOfCode/2018/25/25.py | 361d1e18d2d39530b5332aaf11d459fb81790990 | [] | no_license | thran/the_code | cbfa3b8be86c3b31f76f6fbd1deb2013d3326a4a | ba73317ddc42e10791a829cc6e1a3460cc601c44 | refs/heads/master | 2023-01-05T14:39:16.708461 | 2022-12-25T08:37:39 | 2022-12-25T08:37:39 | 160,978,160 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | import queue
import pandas as pd
import numpy as np
df = pd.read_csv('input.txt', header=None)
distances = df.loc[:, :4]
df['selected'] = False
df['constellation'] = None
value = 3
c = 0
while (~df.selected).sum() > 0:
x = df.index[~df.selected][0]
q = queue.Queue()
q.put(x)
c += 1
df.loc[x, 'selected'] = True
df.loc[x, 'constellation'] = c
while not q.empty():
i = q.get()
d = df.loc[i]
new = (np.abs(distances - d[:4]).sum(axis=1) <= 3) & ~df.selected
df.loc[new, 'selected'] = True
df.loc[new, 'constellation'] = c
for n in df[new].index.values:
q.put(n)
print(df)
print(len(df.constellation.unique()))
| [
"thran@centrum.cz"
] | thran@centrum.cz |
ae9068dfc07c4682726c4eb7283a242f767290cd | 17ebca3c537e27bb1dff0d8f047ad63c1d7ebcf0 | /Tests/goldman_02.22.2021.py | ddb9353ab5e51c092092c223b1b2d5b46d2d4272 | [] | no_license | reading-stiener/For-the-love-of-algos | ad95283b55bfaf112302bf187fe0cefd6104f3f7 | d6f0d0985ac124eeb5ad1caee8563d3d0f186a25 | refs/heads/master | 2023-03-07T10:51:31.851212 | 2021-02-23T05:27:08 | 2021-02-23T05:27:08 | 292,468,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | def numOfIds(pool):
count_eights = pool.count("8")
count_rest = len(pool) - count_eights
group_tens = count_rest // 10
if count_eights <= group_tens:
return count_eights
else:
eights_less = 11 - count_rest % 10
if count_eights - group_tens >= eights_less:
return group_tens + 1 + (count_eights - group_tens - eights_less) // 11
return group_tens
def maxInversions1(arr):
count = 0
n = len(arr)
for i in range(n):
for j in range(i+1, n):
if arr[i] > arr[j]:
for k in range(j+1, n):
if arr[j] > arr[k]:
count += 1
return count
def maxInversions(arr):
count = 0
n = len(arr)
for i in range(1, n-1):
lt_count, gt_count = 0, 0
for j in range(i+1, n):
if arr[i] > arr[j]:
lt_count += 1
for k in range(0, i):
if arr[i] < arr[k]:
gt_count += 1
count += lt_count * gt_count
return count
if __name__ == "__main__":
print(maxInversions([5,3,4,2,1])) | [
"apradha1@conncoll.edu"
] | apradha1@conncoll.edu |
d53bc612dcccd471686a70bf5243cfdd00224af9 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_menus/_MAINMENU_PMC_Rigging/28.Constraint/10.Tangent Constraint( curve, upObject, targets ).py | 189af8a0a6847beda077c61b234044b4bb945a5c | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from sgMaya import sgCmds
import pymel.core
sels = pymel.core.ls( sl=1 )
curve = sels[0]
upObject = sels[1]
targets = sels[2:]
for target in targets:
sgCmds.tangentConstraint( curve, upObject, target ) | [
"kimsung9k@naver.com"
] | kimsung9k@naver.com |
6fc1ef57d9dea2611a35ab06fd8ea26f26f3f82d | a1615563bb9b124e16f4163f660d677f3224553c | /LI/lib/python3.8/site-packages/numpy/distutils/fcompiler/gnu.py | 68d1501eee6a23b74bd3de943e69a47a3e0c3071 | [
"MIT",
"BSD-3-Clause",
"GPL-3.0-or-later",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-only",
"GCC-exception-3.1"
] | permissive | honeybhardwaj/Language_Identification | 2a247d98095bd56c1194a34a556ddfadf6f001e5 | 1b74f898be5402b0c1a13debf595736a3f57d7e7 | refs/heads/main | 2023-04-19T16:22:05.231818 | 2021-05-15T18:59:45 | 2021-05-15T18:59:45 | 351,470,447 | 5 | 4 | MIT | 2021-05-15T18:59:46 | 2021-03-25T14:42:26 | Python | UTF-8 | Python | false | false | 20,226 | py | import re
import os
import sys
import warnings
import platform
import tempfile
import hashlib
import base64
import subprocess
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
from distutils.version import LooseVersion
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
    """Return True when running a 64-bit Python interpreter on native Windows."""
    if sys.platform != "win32":
        return False
    bits, _linkage = platform.architecture()
    return bits == "64bit"
class GnuFCompiler(FCompiler):
    """numpy.distutils wrapper for the legacy GNU Fortran 77 compiler (g77)."""
    compiler_type = 'gnu'
    compiler_aliases = ('g77', )
    description = 'GNU Fortran 77 compiler'
    def gnu_version_match(self, version_string):
        """Handle the different versions of GNU fortran compilers"""
        # Strip warning(s) that may be emitted by gfortran
        while version_string.startswith('gfortran: warning'):
            version_string = version_string[version_string.find('\n') + 1:]
        # Gfortran versions from after 2010 will output a simple string
        # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
        # gfortrans may still return long version strings (``-dumpversion`` was
        # an alias for ``--version``)
        if len(version_string) <= 20:
            # Try to find a valid version string
            m = re.search(r'([0-9.]+)', version_string)
            if m:
                # g77 provides a longer version string that starts with GNU
                # Fortran
                if version_string.startswith('GNU Fortran'):
                    return ('g77', m.group(1))
                # gfortran only outputs a version string such as #.#.#, so check
                # if the match is at the start of the string
                elif m.start() == 0:
                    return ('gfortran', m.group(1))
        else:
            # Output probably from --version, try harder:
            m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
            if m:
                return ('gfortran', m.group(1))
            m = re.search(
                r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
            if m:
                v = m.group(1)
                if v.startswith('0') or v.startswith('2') or v.startswith('3'):
                    # the '0' is for early g77's
                    return ('g77', v)
                else:
                    # at some point in the 4.x series, the ' 95' was dropped
                    # from the version string
                    return ('gfortran', v)
        # If still nothing, raise an error to make the problem easy to find.
        err = 'A valid Fortran version was not found in this string:\n'
        raise ValueError(err + version_string)
    def version_match(self, version_string):
        """Return the g77 version number, or None if this is not g77."""
        v = self.gnu_version_match(version_string)
        if not v or v[0] != 'g77':
            return None
        return v[1]
    possible_executables = ['g77', 'f77']
    executables = {
        'version_cmd' : [None, "-dumpversion"],
        'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
        'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
        'compiler_fix' : None,
        'linker_so' : [None, "-g", "-Wall"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"],
        'linker_exe' : [None, "-g", "-Wall"]
    }
    module_dir_switch = None
    module_include_switch = None
    # Cygwin: f771: warning: -fPIC ignored for target (all code is
    # position independent)
    if os.name != 'nt' and sys.platform != 'cygwin':
        pic_flags = ['-fPIC']
    # use -mno-cygwin for g77 when Python is not Cygwin-Python
    if sys.platform == 'win32':
        for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
            executables[key].append('-mno-cygwin')
    g2c = 'g2c'
    suggested_f90_compiler = 'gnu95'
    def get_flags_linker_so(self):
        """Return linker flags for building a shared library / macOS bundle."""
        opt = self.linker_so[1:]
        if sys.platform == 'darwin':
            target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
            # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
            # and leave it alone. But, distutils will complain if the
            # environment's value is different from the one in the Python
            # Makefile used to build Python. We let disutils handle this
            # error checking.
            if not target:
                # If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
                # we try to get it first from sysconfig and then
                # fall back to setting it to 10.9 This is a reasonable default
                # even when using the official Python dist and those derived
                # from it.
                import sysconfig
                target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
                if not target:
                    target = '10.9'
                    s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
                    warnings.warn(s, stacklevel=2)
                os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
            opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
        else:
            opt.append("-shared")
        if sys.platform.startswith('sunos'):
            # SunOS often has dynamically loaded symbols defined in the
            # static library libg2c.a The linker doesn't like this. To
            # ignore the problem, use the -mimpure-text flag. It isn't
            # the safest thing, but seems to work. 'man gcc' says:
            # ".. Instead of using -mimpure-text, you should compile all
            # source code with -fpic or -fPIC."
            opt.append('-mimpure-text')
        return opt
    def get_libgcc_dir(self):
        """Return the directory containing libgcc, or None if it cannot be found."""
        try:
            output = subprocess.check_output(self.compiler_f77 +
                                             ['-print-libgcc-file-name'])
        except (OSError, subprocess.CalledProcessError):
            pass
        else:
            output = filepath_from_subprocess_output(output)
            return os.path.dirname(output)
        return None
    def get_libgfortran_dir(self):
        """Return the directory holding the gfortran runtime library, or None."""
        if sys.platform[:5] == 'linux':
            libgfortran_name = 'libgfortran.so'
        elif sys.platform == 'darwin':
            libgfortran_name = 'libgfortran.dylib'
        else:
            libgfortran_name = None
        libgfortran_dir = None
        if libgfortran_name:
            find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
            try:
                output = subprocess.check_output(
                    self.compiler_f77 + find_lib_arg)
            except (OSError, subprocess.CalledProcessError):
                pass
            else:
                output = filepath_from_subprocess_output(output)
                libgfortran_dir = os.path.dirname(output)
        return libgfortran_dir
    def get_library_dirs(self):
        """Return extra library search directories needed at link time."""
        opt = []
        if sys.platform[:5] != 'linux':
            d = self.get_libgcc_dir()
            if d:
                # if windows and not cygwin, libg2c lies in a different folder
                if sys.platform == 'win32' and not d.startswith('/usr/lib'):
                    d = os.path.normpath(d)
                    path = os.path.join(d, "lib%s.a" % self.g2c)
                    if not os.path.exists(path):
                        root = os.path.join(d, *((os.pardir, ) * 4))
                        d2 = os.path.abspath(os.path.join(root, 'lib'))
                        path = os.path.join(d2, "lib%s.a" % self.g2c)
                        if os.path.exists(path):
                            opt.append(d2)
                opt.append(d)
        # For Macports / Linux, libgfortran and libgcc are not co-located
        lib_gfortran_dir = self.get_libgfortran_dir()
        if lib_gfortran_dir:
            opt.append(lib_gfortran_dir)
        return opt
    def get_libraries(self):
        """Return runtime library names to link against (g2c variant, gcc, ...)."""
        opt = []
        d = self.get_libgcc_dir()
        if d is not None:
            # Prefer the PIC variant of libg2c when it exists next to libgcc.
            g2c = self.g2c + '-pic'
            f = self.static_lib_format % (g2c, self.static_lib_extension)
            if not os.path.isfile(os.path.join(d, f)):
                g2c = self.g2c
        else:
            g2c = self.g2c
        if g2c is not None:
            opt.append(g2c)
        c_compiler = self.c_compiler
        if sys.platform == 'win32' and c_compiler and \
           c_compiler.compiler_type == 'msvc':
            opt.append('gcc')
        if sys.platform == 'darwin':
            opt.append('cc_dynamic')
        return opt
    def get_flags_debug(self):
        return ['-g']
    def get_flags_opt(self):
        v = self.get_version()
        if v and v <= '3.3.3':
            # With this compiler version building Fortran BLAS/LAPACK
            # with -O3 caused failures in lib.lapack heevr,syevr tests.
            opt = ['-O2']
        else:
            opt = ['-O3']
        opt.append('-funroll-loops')
        return opt
    def _c_arch_flags(self):
        """ Return detected arch flags from CFLAGS """
        import sysconfig
        try:
            cflags = sysconfig.get_config_vars()['CFLAGS']
        except KeyError:
            return []
        arch_re = re.compile(r"-arch\s+(\w+)")
        arch_flags = []
        for arch in arch_re.findall(cflags):
            arch_flags += ['-arch', arch]
        return arch_flags
    def get_flags_arch(self):
        return []
    def runtime_library_dir_option(self, dir):
        """Return the linker option embedding *dir* as a runtime search path."""
        if sys.platform == 'win32':
            # Linux/Solaris/Unix support RPATH, Windows does not
            raise NotImplementedError
        # TODO: could use -Xlinker here, if it's supported
        assert "," not in dir
        if sys.platform == 'darwin':
            return f'-Wl,-rpath,{dir}'
        elif sys.platform[:3] == 'aix':
            # AIX RPATH is called LIBPATH
            return f'-Wl,-blibpath:{dir}'
        else:
            return f'-Wl,-rpath={dir}'
class Gnu95FCompiler(GnuFCompiler):
    """numpy.distutils wrapper for modern GNU Fortran 95+ (gfortran)."""
    compiler_type = 'gnu95'
    compiler_aliases = ('gfortran', )
    description = 'GNU Fortran 95 compiler'
    def version_match(self, version_string):
        """Return the gfortran version number, or None if this is not gfortran."""
        v = self.gnu_version_match(version_string)
        if not v or v[0] != 'gfortran':
            return None
        v = v[1]
        if LooseVersion(v) >= "4":
            # gcc-4 series releases do not support -mno-cygwin option
            pass
        else:
            # use -mno-cygwin flag for gfortran when Python is not
            # Cygwin-Python
            if sys.platform == 'win32':
                for key in [
                        'version_cmd', 'compiler_f77', 'compiler_f90',
                        'compiler_fix', 'linker_so', 'linker_exe'
                ]:
                    self.executables[key].append('-mno-cygwin')
        return v
    possible_executables = ['gfortran', 'f95']
    executables = {
        'version_cmd' : ["<F90>", "-dumpversion"],
        'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
                          "-fno-second-underscore"],
        'compiler_f90' : [None, "-Wall", "-g",
                          "-fno-second-underscore"],
        'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
                          "-fno-second-underscore"],
        'linker_so' : ["<F90>", "-Wall", "-g"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"],
        'linker_exe' : [None, "-Wall"]
    }
    module_dir_switch = '-J'
    module_include_switch = '-I'
    if sys.platform[:3] == 'aix':
        executables['linker_so'].append('-lpthread')
        if platform.architecture()[0][:2] == '64':
            for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
                executables[key].append('-maix64')
    g2c = 'gfortran'
    def _universal_flags(self, cmd):
        """Return a list of -arch flags for every supported architecture."""
        if not sys.platform == 'darwin':
            return []
        arch_flags = []
        # get arches the C compiler gets.
        c_archs = self._c_arch_flags()
        if "i386" in c_archs:
            c_archs[c_archs.index("i386")] = "i686"
        # check the arches the Fortran compiler supports, and compare with
        # arch flags from C compiler
        for arch in ["ppc", "i686", "x86_64", "ppc64"]:
            if _can_target(cmd, arch) and arch in c_archs:
                arch_flags.extend(["-arch", arch])
        return arch_flags
    def get_flags(self):
        flags = GnuFCompiler.get_flags(self)
        arch_flags = self._universal_flags(self.compiler_f90)
        if arch_flags:
            # Prepend so the -arch flags precede the regular compile flags.
            flags[:0] = arch_flags
        return flags
    def get_flags_linker_so(self):
        flags = GnuFCompiler.get_flags_linker_so(self)
        arch_flags = self._universal_flags(self.linker_so)
        if arch_flags:
            flags[:0] = arch_flags
        return flags
    def get_library_dirs(self):
        opt = GnuFCompiler.get_library_dirs(self)
        if sys.platform == 'win32':
            c_compiler = self.c_compiler
            if c_compiler and c_compiler.compiler_type == "msvc":
                target = self.get_target()
                if target:
                    d = os.path.normpath(self.get_libgcc_dir())
                    root = os.path.join(d, *((os.pardir, ) * 4))
                    path = os.path.join(root, "lib")
                    mingwdir = os.path.normpath(path)
                    if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
                        opt.append(mingwdir)
        # For Macports / Linux, libgfortran and libgcc are not co-located
        lib_gfortran_dir = self.get_libgfortran_dir()
        if lib_gfortran_dir:
            opt.append(lib_gfortran_dir)
        return opt
    def get_libraries(self):
        opt = GnuFCompiler.get_libraries(self)
        if sys.platform == 'darwin':
            opt.remove('cc_dynamic')
        if sys.platform == 'win32':
            c_compiler = self.c_compiler
            if c_compiler and c_compiler.compiler_type == "msvc":
                if "gcc" in opt:
                    i = opt.index("gcc")
                    opt.insert(i + 1, "mingwex")
                    opt.insert(i + 1, "mingw32")
            c_compiler = self.c_compiler
            if c_compiler and c_compiler.compiler_type == "msvc":
                # MSVC builds link nothing here (wrapper DLLs handle it below).
                return []
            else:
                pass
        return opt
    def get_target(self):
        """Return the compiler's target triplet parsed from ``-v`` output, or ""."""
        try:
            output = subprocess.check_output(self.compiler_f77 + ['-v'])
        except (OSError, subprocess.CalledProcessError):
            pass
        else:
            output = filepath_from_subprocess_output(output)
            m = TARGET_R.search(output)
            if m:
                return m.group(1)
        return ""
    def _hash_files(self, filenames):
        """Return a base32 SHA-1 digest (padding stripped) over the files' contents."""
        h = hashlib.sha1()
        for fn in filenames:
            with open(fn, 'rb') as f:
                while True:
                    block = f.read(131072)
                    if not block:
                        break
                    h.update(block)
        text = base64.b32encode(h.digest())
        text = text.decode('ascii')
        return text.rstrip('=')
    def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
                          chained_dlls, is_archive):
        """Create a wrapper shared library for the given objects
        Return an MSVC-compatible lib
        """
        c_compiler = self.c_compiler
        if c_compiler.compiler_type != "msvc":
            raise ValueError("This method only supports MSVC")
        # Hash of inputs makes the wrapper name stable and cache-friendly.
        object_hash = self._hash_files(list(objects) + list(chained_dlls))
        if is_win64():
            tag = 'win_amd64'
        else:
            tag = 'win32'
        basename = 'lib' + os.path.splitext(
            os.path.basename(objects[0]))[0][:8]
        root_name = basename + '.' + object_hash + '.gfortran-' + tag
        dll_name = root_name + '.dll'
        def_name = root_name + '.def'
        lib_name = root_name + '.lib'
        dll_path = os.path.join(extra_dll_dir, dll_name)
        def_path = os.path.join(output_dir, def_name)
        lib_path = os.path.join(output_dir, lib_name)
        if os.path.isfile(lib_path):
            # Nothing to do
            return lib_path, dll_path
        if is_archive:
            objects = (["-Wl,--whole-archive"] + list(objects) +
                       ["-Wl,--no-whole-archive"])
        self.link_shared_object(
            objects,
            dll_name,
            output_dir=extra_dll_dir,
            extra_postargs=list(chained_dlls) + [
                '-Wl,--allow-multiple-definition',
                '-Wl,--output-def,' + def_path,
                '-Wl,--export-all-symbols',
                '-Wl,--enable-auto-import',
                '-static',
                '-mlong-double-64',
            ])
        # No PowerPC!
        if is_win64():
            specifier = '/MACHINE:X64'
        else:
            specifier = '/MACHINE:X86'
        # MSVC specific code
        lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
        if not c_compiler.initialized:
            c_compiler.initialize()
        c_compiler.spawn([c_compiler.lib] + lib_args)
        return lib_path, dll_path
    def can_ccompiler_link(self, compiler):
        # MSVC cannot link objects compiled by GNU fortran
        return compiler.compiler_type not in ("msvc", )
    def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
        """
        Convert a set of object files that are not compatible with the default
        linker, to a file that is compatible.
        """
        if self.c_compiler.compiler_type == "msvc":
            # Compile a DLL and return the lib for the DLL as
            # the object. Also keep track of previous DLLs that
            # we have compiled so that we can link against them.
            # If there are .a archives, assume they are self-contained
            # static libraries, and build separate DLLs for each
            archives = []
            plain_objects = []
            for obj in objects:
                if obj.lower().endswith('.a'):
                    archives.append(obj)
                else:
                    plain_objects.append(obj)
            chained_libs = []
            chained_dlls = []
            for archive in archives[::-1]:
                lib, dll = self._link_wrapper_lib(
                    [archive],
                    output_dir,
                    extra_dll_dir,
                    chained_dlls=chained_dlls,
                    is_archive=True)
                chained_libs.insert(0, lib)
                chained_dlls.insert(0, dll)
            if not plain_objects:
                return chained_libs
            lib, dll = self._link_wrapper_lib(
                plain_objects,
                output_dir,
                extra_dll_dir,
                chained_dlls=chained_dlls,
                is_archive=False)
            return [lib] + chained_libs
        else:
            raise ValueError("Unsupported C compiler")
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
os.close(fid)
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
    # Manual smoke test: report the detected Fortran compiler versions.
    from distutils import log
    from numpy.distutils import customized_fcompiler
    log.set_verbosity(2)
    print(customized_fcompiler('gnu').get_version())
    try:
        # g95 may be absent on this machine; print the failure instead of crashing.
        print(customized_fcompiler('g95').get_version())
    except Exception as e:
        print(e)
| [
"honey.bhardwaj.18cse@bmu.edu.in"
] | honey.bhardwaj.18cse@bmu.edu.in |
d1034770b6e251685d6bebfce77b1a4c9de647a3 | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/mall/settings_20211012161345.py | b5f7915f5c98d58a5332fffcaf4ae14c3dbc5b92 | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,187 | py | """
Django settings for mall project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import django_heroku
import dj_database_url
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'django-insecure-#l0ij4e$3v@&xi3i#y$19f#_@z(yv+5yw$kc+02!-)g%ny%oi8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'category',
    'accounts',
    'store',
    'carts',
    'whitenoise.runserver_nostatic',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mall.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project-specific processors: category menu and cart counter.
                'category.context_processors.menu_links',
                'carts.context_processors.counter',
            ],
        },
    },
]
WSGI_APPLICATION = 'mall.wsgi.application'
# Custom user model defined in the `accounts` app.
AUTH_USER_MODEL = 'accounts.Account'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Overlay settings parsed by dj-database-url from the environment (Heroku-style).
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# STATIC_ROOT = BASE_DIR / 'staticfiles'
# STATIC_ROOT = BASE_DIR /'static'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# media files configuration
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR /'media'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# WHITENOISE_USE_FINDERS = True
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Activate Django-Heroku.
# django_heroku.settings(locals())
"tonykanyingah@gmail.com"
] | tonykanyingah@gmail.com |
6cc0eea8dd978ecbdfed41def4734537c2ada346 | 4bcb9dd0701dacd001cace60e0d6543c86b4ef4b | /PostYourFoodAd/migrations/0003_auto_20170422_1232.py | 709fc44f49897fcf4f4e28ae44ad572807f80baf | [] | no_license | kbidve/Hostelied_commit | 8576d317060021fec68cbcf5051b2bc32e461aba | 2d439ab979dcb38c9cfa156799d194ab50f964bf | refs/heads/master | 2021-01-22T20:34:52.827418 | 2017-05-19T14:46:45 | 2017-05-19T14:46:45 | 85,331,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-22 12:32
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create `Thali_Details` and extend `Mess_Info` with address/location/owner fields."""
    dependencies = [
        ('UserAdministrator', '0002_auto_20170328_2311'),
        ('PostYourFoodAd', '0002_auto_20170422_0828'),
    ]
    operations = [
        migrations.CreateModel(
            name='Thali_Details',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vegetable', models.IntegerField(default=1)),
                ('roti', models.IntegerField(default=1)),
                ('dal', models.IntegerField(default=1)),
                ('rice', models.IntegerField(default=1)),
                ('salad', models.CharField(default='some string', max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='mess_info',
            name='address',
            field=models.CharField(default='some string', max_length=200),
        ),
        migrations.AddField(
            model_name='mess_info',
            name='description',
            field=models.CharField(default='some string', max_length=500),
        ),
        migrations.AddField(
            model_name='mess_info',
            name='location',
            # GeoDjango point (WGS84, srid=4326); nullable.
            field=django.contrib.gis.db.models.fields.PointField(blank=True, null=True, srid=4326),
        ),
        migrations.AddField(
            model_name='mess_info',
            name='user_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='mess', to='UserAdministrator.UserInfo'),
        ),
        migrations.AddField(
            model_name='thali_details',
            name='thali_details',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='thali_details', to='PostYourFoodAd.Mess_Info'),
        ),
    ]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
fc3413eab8a3777a066cf659f7a1f321ad1121b6 | cb7c3673ad937c282a39be74d0aee8628e75928d | /tests/test_scripts/test_gen_jsonld.py | d38e0436a0414fd74dea9cbb7fa269db34e04ed9 | [
"CC0-1.0"
] | permissive | bpow/linkml | 649d6d48f39a8c51efa92fba7eb25c1d8854b472 | ab83c0caee9c02457ea5a748e284dee6b547fcd6 | refs/heads/main | 2023-05-05T18:46:04.501897 | 2021-05-13T21:17:03 | 2021-05-13T21:17:03 | 371,163,928 | 0 | 0 | CC0-1.0 | 2021-05-26T20:42:13 | 2021-05-26T20:42:12 | null | UTF-8 | Python | false | false | 4,620 | py | import os
import re
import unittest
# This has to occur post ClickTestCase
from functools import reduce
from typing import List, Tuple
import click
from rdflib import Graph, URIRef
from linkml import METAMODEL_NAMESPACE
from linkml.generators.jsonldcontextgen import ContextGenerator
from linkml.generators import jsonldgen
from tests.test_scripts.environment import env
from tests.utils.clicktestcase import ClickTestCase
cwd = os.path.dirname(__file__)
meta_context = 'file:./output/gencontext/meta.jsonld'
# Volatile metadata fields stripped from generated JSON-LD before comparison.
repl: List[Tuple[str, str]] = [
    (r'"source_file_size": [0-9]+', ''),
    (r'"source_file_date": "[^"]+"', ''),
    (r'"generation_date": "[^"]+"', ''),
    (r'"source_file": "[^"]+"', '')
]
def filtr(txt: str) -> str:
    """Apply every (pattern, replacement) pair in ``repl`` to *txt*, in order."""
    for pattern, replacement in repl:
        txt = re.sub(pattern, replacement, txt)
    return txt
class GenJSONLDTestCase(ClickTestCase):
    """CLI tests for the `gen-jsonld` generator (jsonldgen)."""
    testdir = "genjsonld"
    click_ep = jsonldgen.cli
    prog_name = "gen-jsonld"
    env = env
    def test_help(self):
        self.do_test("--help", 'help')
    def test_meta(self):
        self.temp_file_path('meta.jsonld')
        self.do_test(f"--context {meta_context}", 'meta.jsonld', filtr=filtr)
        self.do_test(f'-f jsonld --context {meta_context}', 'meta.jsonld', filtr=filtr)
        # 'xsv' is not a supported output format and must raise BadParameter.
        self.do_test(f'-f xsv --context {meta_context}', 'meta_error',
                     expected_error=click.exceptions.BadParameter)
    def check_size(self, g: Graph, g2: Graph, root: URIRef, expected_classes: int, expected_slots: int,
                   expected_types: int, expected_subsets: int, expected_enums: int, model: str) -> None:
        """
        Assert that both graphs carry the expected number of metamodel elements.
        :param g: first graph (parsed from the generated JSON-LD)
        :param g2: second graph (re-parsed from the serialized Turtle)
        :param root: subject URI of the schema whose elements are counted
        :param expected_classes: expected number of meta:classes triples
        :param expected_slots: expected number of meta:slots triples
        :param expected_types: expected number of meta:types triples
        :param expected_subsets: expected number of meta:subsets triples
        :param expected_enums: expected number of meta:enums triples
        :param model: model name used in assertion failure messages
        :return: None; fails the test on any count mismatch
        """
        for graph in [g, g2]:
            n_classes = len(list(graph.objects(root, METAMODEL_NAMESPACE.classes)))
            n_slots = len(list(graph.objects(root, METAMODEL_NAMESPACE.slots)))
            n_types = len(list(graph.objects(root, METAMODEL_NAMESPACE.types)))
            n_subsets = len(list(graph.objects(root, METAMODEL_NAMESPACE.subsets)))
            n_enums = len(list(graph.objects(root, METAMODEL_NAMESPACE.enums)))
            self.assertEqual(expected_classes, n_classes, f"Expected {expected_classes} classes in {model}")
            self.assertEqual(expected_slots, n_slots, f"Expected {expected_slots} slots in {model}")
            self.assertEqual(expected_types, n_types, f"Expected {expected_types} types in {model}")
            self.assertEqual(expected_subsets, n_subsets, f"Expected {expected_subsets} subsets in {model}")
            self.assertEqual(expected_enums, n_enums, f"Expected {expected_enums} enums in {model}")
    def test_meta_output(self):
        """ Generate a context AND a jsonld for the metamodel and make sure it parses as RDF """
        tmp_jsonld_path = self.temp_file_path('metajson.jsonld')
        tmp_rdf_path = self.temp_file_path('metardf.ttl')
        tmp_meta_context_path = self.temp_file_path('metacontext.jsonld')
        # Generate an image of the metamodel
        gen = ContextGenerator(env.meta_yaml, importmap=env.import_map)
        base = gen.namespaces[gen.schema.default_prefix]
        if str(base)[-1] not in '/#':
            base += '/'
        schema = base + "meta"
        # Generate context
        with open(tmp_meta_context_path, 'w') as tfile:
            tfile.write(gen.serialize())
        # Generate JSON
        with open(tmp_jsonld_path, 'w') as tfile:
            tfile.write(jsonldgen.JSONLDGenerator(env.meta_yaml, fmt=jsonldgen.JSONLDGenerator.valid_formats[0],
                                                  importmap=env.import_map).serialize(context=tmp_meta_context_path))
        # Convert JSON to TTL
        g = Graph()
        g.load(tmp_jsonld_path, format="json-ld")
        g.serialize(tmp_rdf_path, format="ttl")
        g.bind('meta', METAMODEL_NAMESPACE)
        new_ttl = g.serialize(format="turtle").decode()
        # Make sure that the generated TTL matches the JSON-LD (probably not really needed, as this is more of a test
        # of rdflib than our tooling but it doesn't hurt
        new_g = Graph()
        new_g.parse(data=new_ttl, format="turtle")
        # Make sure that both match the expected size (classes, slots, types, and model name for error reporting)
        self.check_size(g, new_g, URIRef(schema), 17, 122, 14, 1, 1, "meta")
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| [
"solbrig@jhu.edu"
] | solbrig@jhu.edu |
a063a23ba0ef1a9b820232f028f24b1b463d0067 | a6fd4108016deab24ed5e4d528a4c009b597bf01 | /info_manager/migrations/0005_auto_20190927_1057.py | db9f59b6a8145a322760a05e004c10fbf5330d38 | [
"Apache-2.0"
] | permissive | UniversitaDellaCalabria/info-manager | 2da8f4087ed180c05a710a7d17dff7e71814dac9 | f4be641467f65d591ec28bbdf6ed681cdfed0e80 | refs/heads/master | 2022-12-04T17:56:56.349256 | 2022-01-05T22:50:12 | 2022-01-05T22:50:12 | 198,625,639 | 1 | 0 | Apache-2.0 | 2022-11-22T09:47:21 | 2019-07-24T11:51:11 | Python | UTF-8 | Python | false | false | 648 | py | # Generated by Django 2.2.2 on 2019-09-27 08:57
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add `created`/`modified` audit timestamps to the `Item` model."""
    dependencies = [
        ('info_manager', '0004_itemtranslation'),
    ]
    operations = [
        migrations.AddField(
            model_name='item',
            name='created',
            # One-off default (timezone.now) backfills existing rows; not kept.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='item',
            name='modified',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"giuseppe.demarco@unical.it"
] | giuseppe.demarco@unical.it |
a9947e37c78fdb42ea17249f91dbf7f64fc41328 | a708f1d36586d2b01c99f2cb44aa4612b10192f6 | /bfs/733图像渲染.py | 7dabfbbb5f104bfb03e623af54d96f324c7e0529 | [] | no_license | LeopoldACC/Algorithm | 2477e8a371e9cdc5a47b582ca2a454539b96071e | fc1b0bec0e28d31e9a6ff722b3a66eacb0278148 | refs/heads/master | 2023-01-25T02:28:14.422447 | 2020-12-03T15:01:10 | 2020-12-03T15:01:10 | 197,297,197 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
if not image:
return image
dirs = [(-1,0),(1,0),(0,-1),(0,1)]
q = collections.deque([(sr,sc)])
ori = image[sr][sc]
m,n = len(image),len(image[0])
visit = set()
while q:
x,y = q.popleft()
image[x][y] = newColor
visit.add((x,y))
for dx,dy in dirs:
nx,ny = x+dx,y+dy
if 0<=nx<m and 0<=ny<n and image[nx][ny] == ori and (nx,ny) not in visit:
q.append((nx,ny))
return image
| [
"zhenggong9831@gmail.com"
] | zhenggong9831@gmail.com |
99fc9c9e5797d077f8302e1cc617697317de6463 | e23778e9cbedd6a9fcd28eb4263dab33da4a2d5a | /clowningaround/urls.py | 80d331a8cd46688fc9c8832a1a1843527b1e6924 | [] | no_license | eustone/clowingaround | 92667e2580c80dddf4489a1724d44e8c9823e215 | 1723d9aa5645510f711f0ab6478174b0aa66d84e | refs/heads/master | 2022-12-09T02:35:24.270583 | 2020-04-03T07:08:38 | 2020-04-03T07:08:38 | 252,653,240 | 0 | 0 | null | 2022-12-08T03:58:11 | 2020-04-03T06:44:31 | Python | UTF-8 | Python | false | false | 1,065 | py | """clowningaround URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include,path
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('api/v1/appointments/', include('appointments.urls', namespace='appointments')),
path('auth/', include('djoser.urls')),
path('', include('users.urls')),
]
| [
"emalaya@gmail.com"
] | emalaya@gmail.com |
fc54124378398c17d473cfeb673ecf5b28789142 | cdf4e1b9db499766780d28762f6c71ac2e438a90 | /getting_file_dates.py | fff141b456cacfb572b9bc589156326889adbecf | [] | no_license | sa-i/20200414py3interm | cd33af18bbb6b4ac98308905244cf71b014666e9 | 6bcb0a871f3894f4eb1a53b792ea5cc849d302e7 | refs/heads/master | 2022-05-09T08:52:11.242286 | 2020-04-17T21:57:45 | 2020-04-17T21:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | #!/usr/bin/env python
import os
from datetime import datetime
# Print the last-modified date of every .py file in the current directory.
for entry in os.listdir():
    if not entry.endswith('.py'):
        continue
    modified = datetime.fromtimestamp(os.path.getmtime(entry))
    print(modified.date(), entry)
| [
"jstrickler@gmail.com"
] | jstrickler@gmail.com |
b8e48ca6481e0eb739d7e2645271b4d774c19b9a | 27c04f9daf823c851bef35f91d261c677295d00c | /backend/asgi.py | 185382269e27551e31283442e62bb9840e7e8111 | [] | no_license | wlgud0402/crawling_estate_stock_backend | 865e6ed8811892c0823a8ac2cd11dba45dcdca2e | dfe2e06ccc3e993fd8cf9fe641d1aed097e375ad | refs/heads/master | 2023-04-29T16:13:03.224377 | 2021-05-25T15:48:57 | 2021-05-25T15:48:57 | 349,053,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = get_asgi_application()
| [
"wlgudrlgus@naver.com"
] | wlgudrlgus@naver.com |
dac54cc515d226209428a3bc5cc34eb7813b2419 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jedi/third_party/typeshed/third_party/2/tornado/httpclient.pyi | 16bdb2856786d54de4bf56a7c7a95877725004ff | [
"MIT",
"Apache-2.0"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 3,219 | pyi | from typing import Any
from tornado.util import Configurable
class HTTPClient:
def __init__(self, async_client_class=..., **kwargs) -> None: ...
def __del__(self): ...
def close(self): ...
def fetch(self, request, **kwargs): ...
class AsyncHTTPClient(Configurable):
@classmethod
def configurable_base(cls): ...
@classmethod
def configurable_default(cls): ...
def __new__(cls, io_loop=..., force_instance=..., **kwargs): ...
io_loop: Any
defaults: Any
def initialize(self, io_loop, defaults=...): ...
def close(self): ...
def fetch(self, request, callback=..., raise_error=..., **kwargs): ...
def fetch_impl(self, request, callback): ...
@classmethod
def configure(cls, impl, **kwargs): ...
class HTTPRequest:
proxy_host: Any
proxy_port: Any
proxy_username: Any
proxy_password: Any
url: Any
method: Any
body_producer: Any
auth_username: Any
auth_password: Any
auth_mode: Any
connect_timeout: Any
request_timeout: Any
follow_redirects: Any
max_redirects: Any
user_agent: Any
decompress_response: Any
network_interface: Any
streaming_callback: Any
header_callback: Any
prepare_curl_callback: Any
allow_nonstandard_methods: Any
validate_cert: Any
ca_certs: Any
allow_ipv6: Any
client_key: Any
client_cert: Any
ssl_options: Any
expect_100_continue: Any
start_time: Any
def __init__(
self,
url,
method=...,
headers=...,
body=...,
auth_username=...,
auth_password=...,
auth_mode=...,
connect_timeout=...,
request_timeout=...,
if_modified_since=...,
follow_redirects=...,
max_redirects=...,
user_agent=...,
use_gzip=...,
network_interface=...,
streaming_callback=...,
header_callback=...,
prepare_curl_callback=...,
proxy_host=...,
proxy_port=...,
proxy_username=...,
proxy_password=...,
allow_nonstandard_methods=...,
validate_cert=...,
ca_certs=...,
allow_ipv6=...,
client_key=...,
client_cert=...,
body_producer=...,
expect_100_continue=...,
decompress_response=...,
ssl_options=...,
) -> None: ...
@property
def headers(self): ...
@headers.setter
def headers(self, value): ...
@property
def body(self): ...
@body.setter
def body(self, value): ...
class HTTPResponse:
request: Any
code: Any
reason: Any
headers: Any
buffer: Any
effective_url: Any
error: Any
request_time: Any
time_info: Any
def __init__(
self, request, code, headers=..., buffer=..., effective_url=..., error=..., request_time=..., time_info=..., reason=...
) -> None: ...
body: bytes
def rethrow(self): ...
class HTTPError(Exception):
code: Any
response: Any
def __init__(self, code, message=..., response=...) -> None: ...
class _RequestProxy:
request: Any
defaults: Any
def __init__(self, request, defaults) -> None: ...
def __getattr__(self, name): ...
def main(): ...
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
162669aaa9caa5ed418da539dc6c63763eae311f | 61361b4338c7434ae5fcfb225ef327c0e5bc7b5e | /etsydb/etsy/migrations/0003_auto_20151206_0002.py | b89556d6baba547e97cf83f04c93ade61426a01d | [] | no_license | mdamien/scrapy-etsy | a253284f6bcfc1cd04f551ddfb6d0cdb9bf0a9c3 | 3c379b4dafa181f11007c2e6902bb2689140a335 | refs/heads/master | 2021-01-10T07:31:03.894077 | 2016-01-23T22:32:14 | 2016-01-23T22:32:14 | 47,045,325 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-06 00:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('etsy', '0002_auto_20151206_0001'),
]
operations = [
migrations.AlterField(
model_name='product',
name='rating_count',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='product',
name='rating_score',
field=models.FloatField(null=True),
),
]
| [
"damien@dam.io"
] | damien@dam.io |
803f20b222f2a786b77c9b6d52e761c321aa2191 | 5ea136ca2e8066b77b39afdf15e3d0e6bc74761f | /scripts/generate-solve-files.py | dd2e19d28cb4d8b659ccf6ea89552ca96714641b | [] | no_license | reneang17/ttbar | 4023421846a70c22c13a2962520f7723ad35636b | 75f4fff1b5d79af097ea04aab437e2963215a232 | refs/heads/master | 2020-06-12T15:07:11.918815 | 2019-06-28T22:24:07 | 2019-06-28T22:24:07 | 194,339,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,010 | py | #!/usr/bin/env python3
#
# todo:
#
import argparse
import os
import re
import pickle
import stat
import pyIdSolver
parser = argparse.ArgumentParser()
parser.add_argument("file", \
help="list of prototypes to be solved")
parser.add_argument("--num", "-n", action="store_true",\
help="reduce with numerical values")
args = parser.parse_args()
#-------------------------------------------------------------------------------
# Generate batch scripts to be run on cluster
#-------------------------------------------------------------------------------
def generate_batch_files(proto_list, parstr):
proto_re = re.compile('PR')
proto_re = re.compile('PR(\d+)')
# iterate over integral files (hence effectively the integrals to solve)
for i in range(0,len(proto_list)):
PRname = "PR{0}".format(proto_list[i])
fname = "batch-integral{0}.sh".format(i)
# determine list of required databases
matching_file_list = []
matchingsfile = "{0}matchings".format(PRname)
db_numbers = []
with open(matchingsfile) as fh:
tmpstr = fh.read()
db_numbers = proto_re.findall(tmpstr)
# content
content = "#\n"
content += "INTEGRALFILE=tmpintegral{0}\n".format(i)
content += "OUTFILE=out{0}\n".format(i)
content += "STDOUTFILE=stdout{0}\n".format(i)
content += "\n"
content += "DIR=/afs/cern.ch/user/s/sapeta/workspace/work/ttbar/idsolver/nnlo\n"
content += "\n"
content += "cp $DIR/solve_integrals .\n"
content += "cp $DIR/$INTEGRALFILE .\n"
for dbnum in db_numbers:
content += "cp $DIR/idPR{0}.dat .\n".format(dbnum)
content += "cp $DIR/PR{0}inc.dat .\n".format(dbnum)
content += "\n"
content += "form() {\n"
content += " /afs/cern.ch/user/s/sapeta/local/bin/form \"$@\"\n"
content += " }\n"
content += "export -f form\n"
content += "\n"
content += "time $DIR/solve_integrals $INTEGRALFILE $OUTFILE {0} > $STDOUTFILE\n".\
format(parstr)
content += "\n"
content += "cp $INTEGRALFILE $DIR/\n"
content += "cp $OUTFILE $DIR/\n"
content += "cp $STDOUTFILE $DIR/\n"
with open(fname, "w") as batchfile:
batchfile.write(content)
os.chmod(fname, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
def split_integrals_into_sublists(file):
integrals = []
prototype_numbers = []
proto_re = re.compile('PR(\d+)')
with open(file) as fh:
integrals = fh.read().split()
#sublists = \
#[integrals[x:x+args.npack] for x in range(0,len(integrals),args.npack)]
for i in range(0,len(integrals)):
fname = "tmpintegral{0}".format(i)
print(fname, integrals[i])
with open(fname, "w") as integral_file:
n = int(proto_re.findall(integrals[i])[0])
prototype_numbers.append(n)
integral_file.write(integrals[i]+"\n")
return prototype_numbers
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# MAIN PART OF THE SCRIPT
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
parstr = "ep ap b c qT2"
if args.num:
parstr = "ep=1/2749 ap=1/3089 b=1/1453 c=1/2857 qT2=1/3557"
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
proto_list = split_integrals_into_sublists(args.file)
#-----------------------------------------------------------------------------
# generate batch run files
#-----------------------------------------------------------------------------
generate_batch_files(proto_list,parstr)
| [
"reneang17@gmail.com"
] | reneang17@gmail.com |
908924e7f0fc7872418cbaa367802ed95561b18e | b08b7b47124c7cbc0c3afab6f55f727a7d2bd059 | /src/utils/common.py | 257b0d4a53bf6b8eb18da8f97e1aa53c9b10cb7f | [] | no_license | bigshanedogg/kaggle-jigsaw-competition | 356bb33ecd105bdd9431be0fb5e1fc5b4f8ae58f | 77ee7044f1f26525bdd128be437916c2a63924da | refs/heads/main | 2023-08-31T17:15:34.274167 | 2021-11-15T15:27:23 | 2021-11-15T15:27:23 | 428,316,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,185 | py | import os
import re
import json
import random
import shutil
import string
import numpy as np
import pandas as pd
import torch
from datetime import datetime
from soynlp.normalizer import repeat_normalize
def show_five_nums(data, verbose=True):
quartiles = np.percentile(data, [25, 50, 75])
min_v = np.min(data)
max_v = np.max(data)
avg = np.mean(data)
if verbose:
print("Min: {min_v:.3f}\tMax: {max_v:.3f}\tAvg: {avg:.3f}\tQ1: {q1:.3f}\tQ2: {q2:.3f}\tQ3: {q3:.3f}".format(min_v=min_v, max_v=max_v, avg=avg, q1=quartiles[0], q2=quartiles[1], q3=quartiles[2]))
return min_v, max_v, quartiles[0], quartiles[1], quartiles[2], avg
def set_device(obj, device):
def _set_model_device(model, device):
if device is None: device = "cpu"
if device == "cpu" or (isinstance(device, torch.device) and device.type == "cpu"):
non_blocking = False
else:
torch.cuda.set_device(device)
non_blocking = True
# set device
model = model.to(device, non_blocking=non_blocking)
message = "Setting model device: {device}".format(device=device)
print(message)
return model
def _set_optimizer_device(optimizer, device):
for param in optimizer.state.values():
if isinstance(param, torch.Tensor):
param.data = param.data.to(device)
if param._grad is not None:
param._grad.data = param._grad.data.to(device)
elif isinstance(param, dict):
for subparam in param.values():
if isinstance(subparam, torch.Tensor):
subparam.data = subparam.data.to(device)
if subparam._grad is not None:
subparam._grad.data = subparam._grad.data.to(device)
return optimizer
def _set_criterions_device(criterions, device):
if device is None:
device = "cpu"
# set device
for k, v in criterions.items():
v.set_device(device=device)
criterions[k] = v
message = "Setting criterions device: {device}".format(device=device)
print(message)
return criterions
if isinstance(obj, torch.nn.modules.Module):
obj = _set_model_device(model=obj, device=device)
elif isinstance(obj, torch.optim.Optimizer):
obj = _set_optimizer_device(optimizer=obj, device=device)
elif isinstance(obj, dict):
obj = _set_criterions_device(criterions=obj, device=device)
return obj
def convert_to_tensor(data, device):
# if isinstance(data, list):
# data = np.array(data)
# data = torch.from_numpy(data).to(device)
data = torch.tensor(data, dtype=torch.long).to(device)
return data
def convert_to_numpy(tensor: torch.Tensor):
tensor = tensor.cpu().detach().numpy()
return tensor
def is_primitive(obj):
return not hasattr(obj, '__dict__')
def get_now_str(str_format="%Y%m%d_%H%M%S"):
now_str = datetime.now().strftime(str_format)
return now_str
def get_nth_index(obj, value, n):
indices = [idx for idx, element in enumerate(obj) if element == value]
if len(indices) == 0 or len(indices) < n:
return -1
else:
return indices[n]
def get_last_index(obj, value):
indices = [idx for idx, element in enumerate(obj) if element == value]
if len(indices) == 0:
return -1
else:
return indices[-1]
def get_randint_except(low, high, except_value):
assert except_value in range(low, high), "'except_value' must be between low and high."
assert high > low, "high must be greater than low."
assert not (except_value == low and low + 1 == high), \
"except_value cannot be the only value in range: except_value:{ev}, low:{l}, high:{h".format(ev=except_value, l=low, h=high)
value = np.random.randint(low=low, high=high)
while value == except_value:
value = np.random.randint(low=low, high=high)
return value
def clean_text(text):
pattern = re.compile(f'[^ .,?!/@$%~%·∼()\x00-\x7Fㄱ-ㅣ가-힣]+')
text = pattern.sub(' ', text)
text = repeat_normalize(text, num_repeats=2)
text = text.strip()
return text
def shuffle_related_lists(lists):
is_equal_length = True
length = len(lists[0])
for i in range(1, len(lists)):
if length != len(lists[i]):
is_equal_length = False
break
assert is_equal_length, "All lists must have equal lengthes"
shuffled = [[] for _ in range(0, len(lists))]
indices = list(range(0, length))
random.shuffle(indices)
for idx in indices:
for i, _list in enumerate(lists):
shuffled[i].append(_list[idx])
return shuffled
def shuffle_dictionary_lists(dictionaries):
is_equal_length = True
length = -1
for dictionary in dictionaries:
for k,v in dictionary.items():
if length < 0: length = len(v)
if length != len(v):
is_equal_length = False
break
if not is_equal_length: break
assert is_equal_length, "All lists must have equal lengthes"
shuffled = [{k:[] for k,v in dictionary.items()} for dictionary in dictionaries]
indices = list(range(0, length))
random.shuffle(indices)
for dictionary_idx, dictionary in enumerate(dictionaries):
for k,v in dictionary.items():
shuffled[dictionary_idx][k] = [v[idx] for idx in indices]
return shuffled
def lambda_lr_transformer(current_step, num_warmup_steps, d_model):
arg1 = (current_step + 1) ** -0.5
arg2 = (current_step + 1) * (num_warmup_steps ** -1.5)
lr = (d_model ** -0.5) * min(arg1, arg2)
return lr
def read_all_files_from_dir(data_dir, extension, encoding="utf-8"):
data = []
file_path_list = os.listdir(data_dir)
for file_path in file_path_list:
if not file_path.endswith(extension): continue
with open(data_dir+file_path, "r", encoding=encoding) as fp:
if extension.endswith("txt"):
for row in fp:
data.append(row)
elif extension.endswith("json"):
rows = json.load(fp)
data += rows
return data
def is_empty_row_in_dict(data):
mask = [len(v)<=0 for k, v in data.items()]
flag = any(mask)
return flag
def is_valid_file(path):
if not os.path.exists(path): return False
if not os.path.isfile(path): return False
return True
def is_valid_path(path):
if not os.path.exists(path): return False
if not os.path.isdir(path): return False
return True
def init_path(path, reset=False):
if os.path.exists(path) and reset: shutil.rmtree(path)
if not os.path.exists(path): os.makedirs(path)
if not path.endswith("/"): path += "/"
return path
def is_cpu_device(device):
if isinstance(device, str) and device == "cpu":
return True
elif isinstance(device, torch.device) and device.type == "cpu":
return True
else:
return False
def get_device_index(device, default=0):
device_index = default
if is_cpu_device(device):
device_index = default
elif isinstance(device, torch.device):
device_index = device.index
elif isinstance(device, str):
if re.search("[a-zA-Z]", device) is not None:
device = re.sub("[^0-9]", "", device)
device_index = int(device)
elif isinstance(device, int):
device_index = device
return device_index
def get_available_devices():
devices = []
if torch.cuda.is_available():
num_cuda_devices = torch.cuda.device_count()
for device_index in range(0, num_cuda_devices):
try:
_capability = torch.cuda.get_device_capability(device_index)
device = "cuda:{idx}".format(idx=device_index)
devices.append(device)
except AssertionError as ex:
continue
return devices
def set_seed(seed):
'''Sets the seed of the entire notebook so results are the same every time we run.
This is for REPRODUCIBILITY.'''
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When running on the CuDNN backend, two further options must be set
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Set a fixed value for the hash seed
os.environ['PYTHONHASHSEED'] = str(seed)
def read_file(file_path, extension, encoding):
rows = None
if extension == "txt":
rows = read_txt(path=file_path, encoding=encoding)
elif extension == "json":
rows = read_json(path=file_path, encoding=encoding)
elif extension == "csv":
rows = read_csv(path=file_path)
return rows
def read_txt(path, encoding):
data = []
with open(path, "r", encoding=encoding) as fp:
for row in fp:
data.append(row)
return data
def read_json(path, encoding):
data = []
with open(path, "r", encoding=encoding) as fp:
data = json.load(fp)
return data
def read_csv(path):
df = pd.read_csv(path)
data = df.values.tolist()
return data
def split_segment_by_turn(utterances, turn_ids):
sequence = []
segment = []
first_turn_id = turn_ids[0]
replied = False
for idx, (turn_id, utterance) in enumerate(zip(turn_ids, utterances)):
if replied and turn_id == first_turn_id:
sequence.append(segment)
segment = []
replied = False
segment.append(utterance)
if turn_id != first_turn_id: replied = True
sequence.append(segment)
return sequence
def get_top_n_values(values, top_n=5, descending=True):
output = [(idx, _score) for idx, _score in enumerate(values)]
if descending:
output = sorted(output, key=lambda x: -x[1])
else:
output = sorted(output, key=lambda x: x[1])
if top_n > 0:
output = output[:top_n]
return output
def get_hash_name(size=12, chars=string.ascii_lowercase + string.digits):
return ''.join(random.SystemRandom().choice(chars) for _ in range(size))
| [
"bigshane319@gmail.com"
] | bigshane319@gmail.com |
f4b5e75239b0d12d4143ea60ff0aa770e29c357b | 5b2c0019174094e38138004b0b8a23a04190e7a4 | /venv/Lib/site-packages/IPython/external/path/_path.py | 359185039f37c3b13a2093f9892c0987f70db226 | [
"BSD-3-Clause"
] | permissive | Kiiwi/Syssel | 70b500b4fdff4216006d27f1be79a444475b51b4 | 83705e3fd0edf40f09df950d5ce91c95586573f5 | refs/heads/master | 2022-11-08T19:31:09.660481 | 2016-05-19T21:54:30 | 2016-05-19T21:54:30 | 58,882,760 | 0 | 1 | BSD-3-Clause | 2022-10-24T11:26:42 | 2016-05-15T20:26:27 | Python | UTF-8 | Python | false | false | 39,855 | py | #
# Copyright (c) 2010 Mikhail Gusarov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
""" path.py - An object representing a path to a file or directory.
Original author:
Jason Orendorff <jason.orendorff\x40gmail\x2ecom>
Current maintainer:
Jason R. Coombs <jaraco@jaraco.com>
Contributors:
Mikhail Gusarov <dottedmag@dottedmag.net>
Marc Abramowitz <marc@marc-abramowitz.com>
Jason R. Coombs <jaraco@jaraco.com>
Jason Chu <jchu@xentac.net>
Vojislav Stojkovic <vstojkovic@syntertainment.com>
Example::
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
path.py requires Python 2.5 or later.
"""
from __future__ import with_statement
import sys
import warnings
import os
import fnmatch
import glob
import shutil
import codecs
import hashlib
import errno
import tempfile
import functools
import operator
import re
try:
import win32security
except ImportError:
pass
try:
import pwd
except ImportError:
pass
################################
# Monkey patchy python 3 support
# Give Python 2 and Python 3 a common vocabulary: on Python 3 the 2.x
# names below do not exist, so alias them to their 3.x equivalents.
try:
    basestring
except NameError:
    basestring = str
try:
    unicode
except NameError:
    unicode = str
# os.getcwdu (unicode cwd) exists only on Python 2; on Python 3
# os.getcwd already returns str (unicode).
try:
    getcwdu = os.getcwdu
except AttributeError:
    getcwdu = os.getcwd
# u('...') yields a unicode string on both major versions without using
# the u'' literal prefix (a syntax error on Python 3.0-3.2).
if sys.version < '3':
    def u(x):
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        return x
# Octal permission constants spelled as decimal so the same source
# parses under both Python 2 (0777) and Python 3 (0o777) octal syntax.
o777 = 511
o766 = 502
o666 = 438
o554 = 364
################################

__version__ = '4.3'
__all__ = ['path']
class TreeWalkWarning(Warning):
    """Non-fatal problem encountered while walking a directory tree.

    Issued via warnings.warn() by walk(), walkdirs() and walkfiles()
    when errors='warn' and an entry cannot be listed or accessed.
    """
    pass
def simple_cache(func):
    """
    Memoize results of the 'using_module' classmethod, keyed on the
    module argument only.

    When Python 3.2 is available, use functools.lru_cache instead.
    """
    cache = {}

    def wrapper(cls, module):
        # EAFP: look the module up first; compute and store on a miss.
        try:
            return cache[module]
        except KeyError:
            result = func(cls, module)
            cache[module] = result
            return result
    return wrapper
class ClassProperty(property):
    """A property that can be read on the class itself.

    The wrapped getter is expected to be a classmethod; __get__ binds it
    to the owner class (ignoring any instance) and invokes it.
    """
    def __get__(self, cls, owner):
        # 'cls' here is the instance (or None for class access); 'owner'
        # is the class the attribute was looked up on.  Bind the stored
        # classmethod to the owner class and call it.
        return self.fget.__get__(None, owner)()
class multimethod(object):
    """
    Acts like a classmethod when invoked from the class and like an
    instancemethod when invoked from the instance.
    """
    def __init__(self, func):
        # func's signature is (cls, first, *rest); see path.joinpath.
        self.func = func

    def __get__(self, instance, owner):
        # Class access (instance is None): bind only the class, so the
        # caller supplies the leading value explicitly.
        # Instance access: bind the class and the instance, so the
        # instance becomes the first argument after the class.
        return (
            functools.partial(self.func, owner) if instance is None
            else functools.partial(self.func, owner, instance)
        )
class path(unicode):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
module = os.path
"The path module to use for path operations."
    def __init__(self, other=''):
        # The string payload is handled by unicode.__new__; __init__
        # only rejects None, which would otherwise be silently coerced
        # to the string 'None'.
        if other is None:
            raise TypeError("Invalid initial value for path: None")
    @classmethod
    @simple_cache
    def using_module(cls, module):
        """Return a path subclass whose operations use *module*.

        *module* is a path-manipulation module such as ``posixpath`` or
        ``ntpath``; it overrides the class-level ``module`` attribute.
        Results are memoized per module by @simple_cache.
        """
        subclass_name = cls.__name__ + '_' + module.__name__
        bases = (cls,)
        ns = {'module': module}
        return type(subclass_name, bases, ns)
    @ClassProperty
    @classmethod
    def _next_class(cls):
        """
        What class should be used to construct new instances from this class
        """
        # By default each derived path is of the same class as its
        # source; subclasses may override to return something else.
        return cls
# --- Special Python methods.
def __repr__(self):
return '%s(%s)' % (type(self).__name__, super(path, self).__repr__())
# Adding a path and a string yields a path.
def __add__(self, more):
try:
return self._next_class(super(path, self).__add__(more))
except TypeError: # Python bug
return NotImplemented
def __radd__(self, other):
if not isinstance(other, basestring):
return NotImplemented
return self._next_class(other.__add__(self))
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self._next_class(self.module.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
    def __enter__(self):
        # Context manager: chdir into this directory for the duration of
        # the with-block, remembering where we came from.  Note this is
        # a process-global side effect.
        self._old_dir = self.getcwd()
        os.chdir(self)
        return self
    def __exit__(self, *_):
        # Restore the working directory saved by __enter__, regardless
        # of whether the with-block raised (exception info is ignored).
        os.chdir(self._old_dir)
    @classmethod
    def getcwd(cls):
        """ Return the current working directory as a path object. """
        # getcwdu is os.getcwdu on Python 2 and os.getcwd on Python 3;
        # see the compatibility shims at module level.
        return cls(getcwdu())
#
# --- Operations on path strings.
def abspath(self):
return self._next_class(self.module.abspath(self))
def normcase(self):
return self._next_class(self.module.normcase(self))
def normpath(self):
return self._next_class(self.module.normpath(self))
def realpath(self):
return self._next_class(self.module.realpath(self))
def expanduser(self):
return self._next_class(self.module.expanduser(self))
def expandvars(self):
return self._next_class(self.module.expandvars(self))
def dirname(self):
return self._next_class(self.module.dirname(self))
def basename(self):
return self._next_class(self.module.basename(self))
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
@property
def namebase(self):
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
"""
base, ext = self.module.splitext(self.name)
return base
@property
def ext(self):
""" The file extension, for example '.py'. """
f, ext = self.module.splitext(self)
return ext
@property
def drive(self):
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
"""
drive, r = self.module.splitdrive(self)
return self._next_class(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example,
path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = self.module.split(self)
return self._next_class(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = self.module.splitdrive(self)
return self._next_class(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = self.module.splitext(self)
return self._next_class(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
    def splitunc(self):
        # Split off the UNC mount point, returning (unc, rest).
        # NOTE(review): relies on self.module.splitunc, which posixpath
        # does not provide (and newer Pythons dropped from ntpath too) —
        # verify availability before relying on this method.
        unc, rest = self.module.splitunc(self)
        return self._next_class(unc), rest
    @property
    def uncshare(self):
        """
        The UNC mount point for this path.
        This is empty for paths on local drives.
        """
        # NOTE(review): like splitunc(), this depends on
        # self.module.splitunc being present, which is only the case for
        # Windows-style path modules — verify before relying on it.
        unc, r = self.module.splitunc(self)
        return self._next_class(unc)
    @multimethod
    def joinpath(cls, first, *others):
        """
        Join first to zero or more path components, adding a separator
        character (first.module.sep) if needed.  Returns a new instance of
        first._next_class.
        """
        # @multimethod: callable both as path.joinpath(a, b) and as
        # p.joinpath(b); in the instance form, the instance arrives here
        # as 'first'.
        if not isinstance(first, cls):
            first = cls(first)
        return first._next_class(first.module.join(first, *others))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, ``'/'`` or ``'C:\\'``). The other items in
the list will be strings.
``path.path.joinpath(*result)`` will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self, start='.'):
""" Return this path as a relative path,
based from start, which defaults to the current working directory.
"""
cwd = self._next_class(start)
return cwd.relpathto(self)
    def relpathto(self, dest):
        """ Return a relative path from self to dest.

        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        # Work on absolute versions of both endpoints, comparing
        # components case-insensitively (normcase) but emitting dest's
        # original casing.
        origin = self.abspath()
        dest = self._next_class(dest).abspath()
        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()
        if orig_list[0] != self.module.normcase(dest_list[0]):
            # Can't get here from there.
            return dest
        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != self.module.normcase(dest_seg):
                break
            i += 1
        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = self.module.join(*segments)
        return self._next_class(relpath)
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, ``d.dirs('build-*')``.
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
``d.files('*.pyc')``.
"""
return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.

        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().

        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.

        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")

        try:
            childList = self.listdir()
        except Exception:
            # Listing failures are handled according to the errors
            # policy; 'warn'/'ignore' abandon this subtree.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise

        # Pre-order: each entry is yielded before its contents.  The
        # pattern only filters what is *yielded*; non-matching
        # directories are still descended into.
        for child in childList:
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                # Treat an unreadable entry as a non-directory under
                # 'warn'/'ignore' so the walk can continue.
                if errors == 'ignore':
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise

            if isdir:
                for item in child.walk(pattern, errors):
                    yield item
    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.

        With the optional 'pattern' argument, this yields only
        directories whose names match the given pattern.  For
        example, ``mydir.walkdirs('*test')`` yields only directories
        with names ending in 'test'.

        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")

        try:
            dirs = self.dirs()
        except Exception:
            # Listing failures follow the errors policy; 'warn'/'ignore'
            # abandon this subtree.
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise

        # Pre-order: yield each directory, then recurse into it.  Note
        # the pattern does not prune recursion — subdirectories of
        # non-matching directories are still visited.
        for child in dirs:
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
``mydir.walkfiles('*.tmp')`` yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
    """ Return True if self.name matches the given pattern.

    pattern - A filename pattern with wildcards,
        for example ``'*.py'``.
    """
    # Delegates to the stdlib fnmatch module (shadowed here only as a
    # method name; the module reference still resolves at class scope).
    return fnmatch.fnmatch(self.name, pattern)

def glob(self, pattern):
    """ Return a list of path objects that match the pattern.

    pattern - a path relative to this directory, with wildcards.

    For example, path('/users').glob('*/bin/*') returns a list
    of all the files users have in their bin directories.
    """
    cls = self._next_class
    return [cls(s) for s in glob.glob(self / pattern)]
#
# --- Reading or writing an entire file at once.

def open(self, *args, **kwargs):
    """ Open this file.  Return a file object. """
    return open(self, *args, **kwargs)

def bytes(self):
    """ Open this file, read all bytes, return them as a string. """
    with self.open('rb') as f:
        return f.read()

def chunks(self, size, *args, **kwargs):
    """ Returns a generator yielding chunks of the file, so it can
    be read piece by piece with a simple for loop.

    Any argument you pass after `size` will be passed to `open()`.

    :example:

        >>> for chunk in path("file.txt").chunks(8192):
        ...     print(chunk)

    This will read the file by chunks of 8192 bytes.
    """
    # (Docstring example fixed: the method is `chunks`, not `chunk`.)
    with open(self, *args, **kwargs) as f:
        while True:
            d = f.read(size)
            if not d:
                break
            yield d

def write_bytes(self, bytes, append=False):
    """ Open this file and write the given bytes to it.

    Default behavior is to overwrite any existing file.
    Call p.write_bytes(bytes, append=True) to append instead.
    """
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    with self.open(mode) as f:
        f.write(bytes)
def text(self, encoding=None, errors='strict'):
    r""" Open this file, read it in, return the content as a string.

    This method uses 'U' mode, so '\r\n' and '\r' are automatically
    translated to '\n'.

    Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file.  If present, the content of the file is
            decoded and returned as a unicode object; otherwise
            it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options.  Default is 'strict'.
    """
    if encoding is None:
        # 8-bit
        with self.open('U') as f:
            return f.read()
    else:
        # Unicode
        with codecs.open(self, 'r', encoding, errors) as f:
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode.)
            t = f.read()
        # Normalise every Unicode line-ending form to '\n'.
        return (t.replace(u('\r\n'), u('\n'))
                .replace(u('\r\x85'), u('\n'))
                .replace(u('\r'), u('\n'))
                .replace(u('\x85'), u('\n'))
                .replace(u('\u2028'), u('\n')))
def write_text(self, text, encoding=None, errors='strict',
               linesep=os.linesep, append=False):
    r""" Write the given text to this file.

    The default behavior is to overwrite any existing file;
    to append instead, use the 'append=True' keyword argument.

    There are two differences between path.write_text() and
    path.write_bytes(): newline handling and Unicode handling.
    See below.

    Parameters:

      - text - str/unicode - The text to be written.

      - encoding - str - The Unicode encoding that will be used.
        This is ignored if 'text' isn't a Unicode string.

      - errors - str - How to handle Unicode encoding errors.
        Default is 'strict'.  See help(unicode.encode) for the
        options.  This is ignored if 'text' isn't a Unicode
        string.

      - linesep - keyword argument - str/unicode - The sequence of
        characters to be used to mark end-of-line.  The default is
        os.linesep.  You can also specify None; this means to
        leave all newlines as they are in 'text'.

      - append - keyword argument - bool - Specifies what to do if
        the file already exists (True: append to the end of it;
        False: overwrite it.)  The default is False.

    --- Newline handling.

    write_text() converts all standard end-of-line sequences
    ('\n', '\r', and '\r\n') to your platform's default end-of-line
    sequence (see os.linesep; on Windows, for example, the
    end-of-line marker is '\r\n').

    If you don't like your platform's default, you can override it
    using the 'linesep=' keyword argument.  If you specifically want
    write_text() to preserve the newlines as-is, use 'linesep=None'.

    This applies to Unicode text the same as to 8-bit text, except
    there are three additional standard Unicode end-of-line sequences:
    u'\x85', u'\r\x85', and u'\u2028'.

    (This is slightly different from when you open a file for
    writing with fopen(filename, "w") in C or open(filename, 'w')
    in Python.)

    --- Unicode

    If 'text' isn't Unicode, then apart from newline handling, the
    bytes are written verbatim to the file.  The 'encoding' and
    'errors' arguments are not used and must be omitted.

    If 'text' is Unicode, it is first converted to bytes using the
    specified 'encoding' (or the default encoding if 'encoding'
    isn't specified).  The 'errors' argument applies only to this
    conversion.
    """
    if isinstance(text, unicode):
        if linesep is not None:
            # Convert all standard end-of-line sequences to
            # ordinary newline characters.
            text = (text.replace(u('\r\n'), u('\n'))
                    .replace(u('\r\x85'), u('\n'))
                    .replace(u('\r'), u('\n'))
                    .replace(u('\x85'), u('\n'))
                    .replace(u('\u2028'), u('\n')))
            text = text.replace(u('\n'), linesep)
        if encoding is None:
            encoding = sys.getdefaultencoding()
        bytes = text.encode(encoding, errors)
    else:
        # It is an error to specify an encoding if 'text' is
        # an 8-bit string.
        assert encoding is None

        if linesep is not None:
            text = (text.replace('\r\n', '\n')
                    .replace('\r', '\n'))
            text = text.replace('\n', linesep)
        # BUGFIX: previously `bytes` was only assigned inside the
        # `linesep is not None` branch, so calling write_text() on an
        # 8-bit string with linesep=None raised NameError below.
        bytes = text
    self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
    r""" Open this file, read all lines, return them in a list.

    Optional arguments:
        encoding - The Unicode encoding (or character set) of
            the file.  The default is None, meaning the content
            of the file is read as 8-bit characters and returned
            as a list of (non-Unicode) str objects.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options.  Default is 'strict'
        retain - If true, retain newline characters; but all newline
            character combinations ('\r', '\n', '\r\n') are
            translated to '\n'.  If false, newline characters are
            stripped off.  Default is True.

    This uses 'U' mode.
    """
    if encoding is None and retain:
        # Fast path: universal-newline read already yields '\n' endings.
        with self.open('U') as f:
            return f.readlines()
    else:
        # Delegate decoding/newline normalisation to self.text().
        return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
                linesep=os.linesep, append=False):
    r""" Write the given lines of text to this file.

    By default this overwrites any existing file at this path.

    This puts a platform-specific newline sequence on every line.
    See 'linesep' below.

        lines - A list of strings.

        encoding - A Unicode encoding to use.  This applies only if
            'lines' contains any Unicode strings.

        errors - How to handle errors in Unicode encoding.  This
            also applies only to Unicode strings.

        linesep - The desired line-ending.  This line-ending is
            applied to every line.  If a line already has any
            standard line ending ('\r', '\n', '\r\n', u'\x85',
            u'\r\x85', u'\u2028'), that will be stripped off and
            this will be used instead.  The default is os.linesep,
            which is platform-dependent ('\r\n' on Windows, '\n' on
            Unix, etc.)  Specify None to write the lines as-is,
            like file.writelines().

    Use the keyword argument append=True to append lines to the
    file.  The default is to overwrite the file.  Warning:
    When you use this with Unicode data, if the encoding of the
    existing data in the file is different from the encoding
    you specify with the encoding= parameter, the result is
    mixed-encoding data, which can really confuse someone trying
    to read the file later.
    """
    if append:
        mode = 'ab'
    else:
        mode = 'wb'
    with self.open(mode) as f:
        for line in lines:
            isUnicode = isinstance(line, unicode)
            if linesep is not None:
                # Strip off any existing line-end and add the
                # specified linesep string.
                if isUnicode:
                    if line[-2:] in (u('\r\n'), u('\x0d\x85')):
                        line = line[:-2]
                    elif line[-1:] in (u('\r'), u('\n'),
                                       u('\x85'), u('\u2028')):
                        line = line[:-1]
                else:
                    if line[-2:] == '\r\n':
                        line = line[:-2]
                    elif line[-1:] in ('\r', '\n'):
                        line = line[:-1]
                line += linesep
            if isUnicode:
                if encoding is None:
                    encoding = sys.getdefaultencoding()
                line = line.encode(encoding, errors)
            f.write(line)
def read_md5(self):
    """ Calculate the md5 hash for this file.

    This reads through the entire file.
    """
    return self.read_hash('md5')

def _hash(self, hash_name):
    """ Returns a hash object for the file at the current path.

    `hash_name` should be a hash algo name such as 'md5' or 'sha1'
    that's available in the `hashlib` module.
    """
    m = hashlib.new(hash_name)
    # Stream in 8 KiB chunks so large files are never fully in memory.
    for chunk in self.chunks(8192):
        m.update(chunk)
    return m

def read_hash(self, hash_name):
    """ Calculate given hash for this file.

    List of supported hashes can be obtained from hashlib package. This
    reads the entire file.
    """
    return self._hash(hash_name).digest()

def read_hexhash(self, hash_name):
    """ Calculate given hash for this file, returning hexdigest.

    List of supported hashes can be obtained from hashlib package. This
    reads the entire file.
    """
    return self._hash(hash_name).hexdigest()
# --- Methods for querying the filesystem.
# N.B. On some platforms, the os.path functions may be implemented in C
# (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
# bound. Playing it safe and wrapping them all in method calls.

def isabs(self):
    return self.module.isabs(self)

def exists(self):
    return self.module.exists(self)

def isdir(self):
    return self.module.isdir(self)

def isfile(self):
    return self.module.isfile(self)

def islink(self):
    return self.module.islink(self)

def ismount(self):
    return self.module.ismount(self)

def samefile(self, other):
    return self.module.samefile(self, other)

def getatime(self):
    return self.module.getatime(self)

atime = property(
    getatime, None, None,
    """ Last access time of the file. """)

def getmtime(self):
    return self.module.getmtime(self)

mtime = property(
    getmtime, None, None,
    """ Last-modified time of the file. """)

def getctime(self):
    return self.module.getctime(self)

ctime = property(
    getctime, None, None,
    """ Creation time of the file. """)

def getsize(self):
    return self.module.getsize(self)

size = property(
    getsize, None, None,
    """ Size of the file, in bytes. """)

if hasattr(os, 'access'):
    def access(self, mode):
        """ Return true if current user has access to this path.

        mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
        """
        return os.access(self, mode)

def stat(self):
    """ Perform a stat() system call on this path. """
    return os.stat(self)

def lstat(self):
    """ Like path.stat(), but do not follow symbolic links. """
    return os.lstat(self)
def __get_owner_windows(self):
    r"""
    Return the name of the owner of this file or directory. Follow
    symbolic links.

    Return a name of the form ur'DOMAIN\User Name'; may be a group.
    """
    desc = win32security.GetFileSecurity(
        self, win32security.OWNER_SECURITY_INFORMATION)
    sid = desc.GetSecurityDescriptorOwner()
    account, domain, typecode = win32security.LookupAccountSid(None, sid)
    return domain + u('\\') + account

def __get_owner_unix(self):
    """
    Return the name of the owner of this file or directory. Follow
    symbolic links.
    """
    st = self.stat()
    return pwd.getpwuid(st.st_uid).pw_name

def __get_owner_not_implemented(self):
    raise NotImplementedError("Ownership not available on this platform.")

# Pick the implementation at class-definition time based on which
# platform support module was importable.
if 'win32security' in globals():
    get_owner = __get_owner_windows
elif 'pwd' in globals():
    get_owner = __get_owner_unix
else:
    get_owner = __get_owner_not_implemented

owner = property(
    get_owner, None, None,
    """ Name of the owner of this file or directory. """)

if hasattr(os, 'statvfs'):
    def statvfs(self):
        """ Perform a statvfs() system call on this path. """
        return os.statvfs(self)

if hasattr(os, 'pathconf'):
    def pathconf(self, name):
        return os.pathconf(self, name)
#
# --- Modifying operations on files and directories

def utime(self, times):
    """ Set the access and modified times of this file. """
    os.utime(self, times)
    return self

def chmod(self, mode):
    os.chmod(self, mode)
    return self

if hasattr(os, 'chown'):
    def chown(self, uid=-1, gid=-1):
        os.chown(self, uid, gid)
        return self

def rename(self, new):
    os.rename(self, new)
    return self._next_class(new)

def renames(self, new):
    os.renames(self, new)
    return self._next_class(new)

#
# --- Create/delete operations on directories

def mkdir(self, mode=o777):
    os.mkdir(self, mode)
    return self

def mkdir_p(self, mode=o777):
    """ Like mkdir(), but no error if the directory already exists. """
    try:
        self.mkdir(mode)
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.EEXIST:
            raise
    return self

def makedirs(self, mode=o777):
    os.makedirs(self, mode)
    return self

def makedirs_p(self, mode=o777):
    """ Like makedirs(), but no error if the directory already exists. """
    try:
        self.makedirs(mode)
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.EEXIST:
            raise
    return self

def rmdir(self):
    os.rmdir(self)
    return self

def rmdir_p(self):
    """ Like rmdir(), but no error if the directory is absent/non-empty. """
    try:
        self.rmdir()
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
            raise
    return self

def removedirs(self):
    os.removedirs(self)
    return self

def removedirs_p(self):
    try:
        self.removedirs()
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
            raise
    return self
# --- Modifying operations on files

def touch(self):
    """ Set the access/modified times of this file to the current time.
    Create the file if it does not exist.
    """
    fd = os.open(self, os.O_WRONLY | os.O_CREAT, o666)
    os.close(fd)
    os.utime(self, None)
    return self

def remove(self):
    os.remove(self)
    return self

def remove_p(self):
    """ Like remove(), but no error if the file does not exist. """
    try:
        self.unlink()
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.ENOENT:
            raise
    return self

def unlink(self):
    os.unlink(self)
    return self

def unlink_p(self):
    self.remove_p()
    return self

# --- Links

if hasattr(os, 'link'):
    def link(self, newpath):
        """ Create a hard link at 'newpath', pointing to this file. """
        os.link(self, newpath)
        return self._next_class(newpath)

if hasattr(os, 'symlink'):
    def symlink(self, newlink):
        """ Create a symbolic link at 'newlink', pointing here. """
        os.symlink(self, newlink)
        return self._next_class(newlink)

if hasattr(os, 'readlink'):
    def readlink(self):
        """ Return the path to which this symbolic link points.

        The result may be an absolute or a relative path.
        """
        return self._next_class(os.readlink(self))

    def readlinkabs(self):
        """ Return the path to which this symbolic link points.

        The result is always an absolute path.
        """
        p = self.readlink()
        if p.isabs():
            return p
        else:
            # Relative targets are resolved against this link's parent.
            return (self.parent / p).abspath()
#
# --- High-level functions from shutil
# These are bound directly; shutil treats path instances as plain strings.

copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
    move = shutil.move
rmtree = shutil.rmtree

def rmtree_p(self):
    """ Like rmtree(), but no error if the tree does not exist. """
    try:
        self.rmtree()
    except OSError:
        _, e, _ = sys.exc_info()
        if e.errno != errno.ENOENT:
            raise
    return self

def chdir(self):
    os.chdir(self)

cd = chdir

#
# --- Special stuff from os

if hasattr(os, 'chroot'):
    def chroot(self):
        os.chroot(self)

if hasattr(os, 'startfile'):
    def startfile(self):
        os.startfile(self)
        return self
class tempdir(path):
    """
    A temporary directory via tempfile.mkdtemp, and constructed with the
    same parameters that you can use as a context manager.

    Example:

        with tempdir() as d:
            # do stuff with the path object "d"

        # here the directory is deleted automatically
    """

    @ClassProperty
    @classmethod
    def _next_class(cls):
        # Derived paths (joins, parents, ...) are plain `path` objects,
        # not tempdirs, so they never trigger cleanup semantics.
        return path

    def __new__(cls, *args, **kwargs):
        dirname = tempfile.mkdtemp(*args, **kwargs)
        return super(tempdir, cls).__new__(cls, dirname)

    def __init__(self, *args, **kwargs):
        # The mkdtemp arguments were consumed in __new__; swallow them
        # here so the (immutable) string base __init__ is not called
        # with unexpected parameters.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only remove the directory on a clean exit; on error the files
        # are left in place for post-mortem inspection.
        if not exc_value:
            self.rmtree()
def _permission_mask(mode):
"""
Convert a Unix chmod symbolic mode like 'ugo+rwx' to a function
suitable for applying to a mask to affect that change.
>>> mask = _permission_mask('ugo+rwx')
>>> oct(mask(o554))
'o777'
>>> oct(_permission_mask('gw-x')(o777))
'o766'
"""
parsed = re.match('(?P<who>[ugo]+)(?P<op>[-+])(?P<what>[rwx]+)$', mode)
if not parsed:
raise ValueError("Unrecognized symbolic mode", mode)
spec_map = dict(r=4, w=2, x=1)
spec = reduce(operator.or_, [spec_map[perm]
for perm in parsed.group('what')])
# now apply spec to each in who
shift_map = dict(u=6, g=3, o=0)
mask = reduce(operator.or_, [spec << shift_map[subj]
for subj in parsed.group('who')])
op = parsed.group('op')
# if op is -, invert the mask
if op == '-':
mask ^= o777
op_map = {'+': operator.or_, '-': operator.and_}
return functools.partial(op_map[op], mask)
| [
"tommy.lee.ryan@gmail.com"
] | tommy.lee.ryan@gmail.com |
80a8ad40352c21f370fe34ef623717fcd0fb0e12 | a6894d17fdbceb56d4364f0e279d03b16a181396 | /working-env/lib/python2.5/TurboGears-1.0.2.2-py2.5.egg/turbogears/i18n/data/gl_ES.py | cd66c622a6504223bf630350451cb5556c01c232 | [] | no_license | thraxil/gtreed | c1c5a19178c1f50ff5e61887b13ff7b004da1d25 | ca228848364edb204b15a7411fd6192379781c78 | refs/heads/master | 2020-04-18T03:02:15.468044 | 2008-12-10T20:02:12 | 2008-12-10T20:02:12 | 88,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # Formatting configuration for locale gl_ES
# Locale display data for gl_ES (Galician / Spain).
# Keys are ISO language / country codes; values are localized display names.
languages={'gl': 'galego'}
countries={'ES': u'Espa\xf1a'}
# Full and abbreviated month names, January first.
months=['Xaneiro', 'Febreiro', 'Marzo', 'Abril', 'Maio', u'Xu\xf1o', 'Xullo', 'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Decembro']
abbrMonths=['Xan', 'Feb', 'Mar', 'Abr', 'Mai', u'Xu\xf1', 'Xul', 'Ago', 'Set', 'Out', 'Nov', 'Dec']
# Full and abbreviated weekday names, Monday first.
days=['Luns', 'Martes', u'M\xe9rcores', 'Xoves', 'Venres', u'S\xe1bado', 'Domingo']
abbrDays=['Lun', 'Mar', u'M\xe9r', 'Xov', 'Ven', u'S\xe1b', 'Dom']
# strftime-style patterns; the doubled %%(...)s placeholders are later
# substituted with the localized name lists above.
dateFormats={'medium': '%%(abbrmonthname)s %d,%y', 'full': '%%(dayname)s %d %%(monthname)s %Y', 'long': '%d %%(monthname)s %Y', 'short': '%d/%m/%y'}
# Symbols for number formatting (decimal comma, dot as thousands separator).
numericSymbols={'group': '.', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'}
"anders@columbia.edu"
] | anders@columbia.edu |
234fe0b3f16df5e57da1016934fe2f8075019def | 2a31c353c77acc1f134780d81d1be9d7eedfaf75 | /sampleapp/urls.py | 34eb58e25b776a19ee4244acc8478f05ce18ee7d | [] | no_license | CCCodes/Simple-Django-App | 9eb43cd1eca40f031fa4f592a97a632fe892521b | f65f2fecaaf352f01dd9ee6580fc017024eff6bd | refs/heads/master | 2020-03-22T02:35:27.957759 | 2018-07-02T02:23:32 | 2018-07-02T02:23:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from sampleapp import views
app_name = "sampleapp"

# Route table for the sample app.  The deprecated regex-based
# `django.conf.urls.url()` routes (removed in Django 4.0) are replaced by
# equivalent literal `path()` routes; matching behaviour is identical for
# these anchored, wildcard-free patterns.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('login/', views.login, name='login'),
    path('login_failed/', views.login_failed, name='login_failed'),
    path('login_submit/', views.login_submit, name='login_submit'),
    path('select/', views.select, name='select'),
    path('output/', views.output, name='output'),
]
| [
"caitlinchou@gmail.com"
] | caitlinchou@gmail.com |
d3f138c72c0428b9ca0d2101a5a8994f16946352 | 19b9b6062c491060a63078b5b9947deb4e4d132b | /Checker/SignalCheck.py | 2cc1b065fafc01542c5779027af26776930e7c0d | [] | no_license | JefferyPaul/StrategyAnalyzer | bcd8baed306ab68f829143025db40178181d229b | 2c7dfc8d615cc8433df0b1108ecbb39b929e0ddc | refs/heads/master | 2020-04-07T20:11:41.002909 | 2018-09-10T01:04:39 | 2018-09-10T01:04:39 | 158,678,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,337 | py | import os
import pandas as pd
from datetime import *
from Checker.getConfig import get_config
from DataManager import get_data_path
from Shower.BandShower import BandShower
from Shower.TargetPositionShower import TargetPositionShower
def set_start_date(str_start):
    """Parse a 'YYYY-MM-DD' start-date string.

    An empty string means "no lower bound" and maps to the sentinel
    2000-01-01.  Raises ValueError on a malformed non-empty string.
    """
    if not str_start:
        return datetime(2000, 1, 1)
    return datetime.strptime(str_start, "%Y-%m-%d")
def set_end_date(str_end):
    """Parse a 'YYYY-MM-DD' end-date string.

    An empty string means "no upper bound" and maps to the sentinel
    2020-01-01.  Raises ValueError on a malformed non-empty string.
    """
    if not str_end:
        return datetime(2020, 1, 1)
    return datetime.strptime(str_end, "%Y-%m-%d")
'''
1 SimulationSignals VS SimulationSignals
2 SimulationSignals VS liveSignals
The two cases differ only in the file names read by Trader-Signal.py;
the data structures and the comparison workflow are identical.
'''
if __name__ == '__main__':
    #
    # 1. Read configuration.
    dict_config = get_config()
    path_input_folder = dict_config["path_input_folder"]
    path_output_folder = dict_config["path_output_folder"]
    list_strategies = dict_config["strategies"]
    list_traders = dict_config["traders"]
    compare_match_mode = dict_config["compare_match_mode"]
    show_mode = dict_config["show_mode"]
    compare_mode = dict_config["compare_mode"]
    dt_round_level = dict_config["dt_round_level"]
    position_normal_or_std = dict_config["position_normal_or_std"]
    # Dates may be given either as 'YYYY-MM-DD' strings or as datetimes.
    if type(dict_config["start_date"]) == str:
        start_date = set_start_date(dict_config["start_date"])
    else:
        start_date = dict_config["start_date"]
    if type(dict_config["end_date"]) == str:
        end_date = set_end_date((dict_config["end_date"]))
    else:
        end_date = dict_config["end_date"]

    # 2. Locate the data files for the requested strategies/traders.
    df_data_file_path = pd.DataFrame(
        get_data_path(list_strategies, list_traders, path_input_folder))
    print(df_data_file_path["Path"])

    # 3/4. Collect the data and render the comparison charts.
    py_start_time_t = datetime.now().strftime("%H:%M:%S")
    py_start_time = datetime.now().strftime("%Y%m%d_%H%M%S")
    print(py_start_time)

    # compare_match_mode:
    #   "1" - same trader, compare across strategies
    #   "2" - same strategy, compare across traders
    #   "3" - no pairing, show each strategy/trader entry on its own
    if compare_match_mode == "1":
        df_data_file_path_gb = df_data_file_path.groupby("TraderA")
    elif compare_match_mode == "2":
        df_data_file_path_gb = df_data_file_path.groupby("Strategy")
    elif compare_match_mode == "3":
        df_data_file_path_gb = df_data_file_path.groupby("strategy_traderA")
    else:
        print("compare_match_mode is Wrong, changed in mode '3'")
        df_data_file_path_gb = df_data_file_path.groupby("strategy_traderA")

    # Iterate over every comparison group.
    for invar_item, df_data_file_path_i in df_data_file_path_gb:
        # show_mode: "1" TargetPosition only, "2" Band only, "3" both.
        # compare_mode: "2" renders everything ("Single"); otherwise only
        # groups with enough traders to compare are rendered ("Compare").
        if show_mode == "1" or show_mode == "3":
            tp = TargetPositionShower(invar_item, df_data_file_path_i, start_date, end_date, dt_round_level,
                                      position_normal_or_std)
            if compare_mode == "2":
                grid = tp.show_target_position("Single")
            else:
                grid = tp.show_target_position("Compare")
            # NOTE(review): an empty grid skips the band chart of this
            # group too (original behaviour) -- confirm intentional.
            if grid == "":
                continue
            output_path_folder = r"%s/%s" % (path_output_folder, py_start_time)
            if not os.path.exists(output_path_folder):
                os.mkdir(output_path_folder)
            grid.render(
                r"%s/%s-targetPosition.html" % (
                    output_path_folder,
                    invar_item)
            )
            print(" %s - TargetPosition Done " % invar_item)
        if show_mode == "2" or show_mode == "3":
            tp = BandShower(invar_item, df_data_file_path_i, start_date, end_date, dt_round_level)
            if compare_mode == "2":
                grid = tp.show_band("Signal")
            else:
                grid = tp.show_band("Compare")
            if grid == "":
                continue
            output_path_folder = r"%s/%s" % (path_output_folder, py_start_time)
            if not os.path.exists(output_path_folder):
                os.mkdir(output_path_folder)
            grid.render(
                r"%s/%s-band.html" % (
                    output_path_folder,
                    invar_item)
            )
            print(" %s - band Done " % invar_item)

    print(" Start at : %s " % py_start_time_t)
    print(" Finished at : %s " % datetime.now().strftime("%H:%M:%S"))
    print(" ALL FINISHED")
| [
"595837423@qq.com"
] | 595837423@qq.com |
e3f22ef6f52e667dd7bb0c49d12ce580026b23a1 | 56a0762c741bcac3ab1172eb6114a9e59a48a5df | /mensajes/urls.py | c121166b0614ecbba8e0d369dfd323fc6757e6d3 | [
"MIT"
] | permissive | jjmartinr01/gauss3 | 54af1735a035a566f237d8e0fd9a6fe4447845a2 | 41a23d35c763890d8f729c9d63ac073673689400 | refs/heads/master | 2023-08-23T06:40:51.033857 | 2023-08-08T11:50:50 | 2023-08-08T11:50:50 | 171,710,013 | 1 | 0 | MIT | 2023-02-15T18:43:56 | 2019-02-20T16:35:03 | HTML | UTF-8 | Python | false | false | 520 | py | # -*- coding: utf-8 -*-
from django.urls import path
from . import views
# Route table for the "mensajes" (messages) app.  Each route maps to a view
# function of the same name; no `name=` kwargs are given, so these routes
# cannot be reversed by name.
urlpatterns = [
    path('correo/', views.correo),
    path('responder_mensaje/', views.responder_mensaje),
    path('mensaje_importante/', views.mensaje_importante),
    path('enviados/', views.enviados),
    path('recibidos/', views.recibidos),
    path('ajax_mensajes/', views.ajax_mensajes),
    path('borrar_avisos/', views.borrar_avisos),
    path('get_avisos/', views.get_avisos),
    path('redactar_mensaje/', views.redactar_mensaje),
]
| [
"jmar0269@gmail.com"
] | jmar0269@gmail.com |
fbeb3df0ad93d859ac3ddaaaa8cf30f6e5c85e3f | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /classes/_torsion5.py | 83b99fba600d303db2edd39535fc5154b743f3b9 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | from xcp2k.inputsection import InputSection
from _point39 import _point39
class _torsion5(InputSection):
    """Generated wrapper for a CP2K ``TORSION`` input section.

    Holds the ATOMS keyword and a repeatable list of POINT subsections.
    """

    def __init__(self):
        InputSection.__init__(self)
        self.Atoms = None
        self.POINT_list = []
        self._name = "TORSION"
        # Maps attribute names to the keyword names emitted in the input file.
        self._keywords = {'Atoms': 'ATOMS'}
        self._repeated_subsections = {'POINT': '_point39'}
        self._aliases = {'Points': 'Atoms'}
        self._attributes = ['POINT_list']

    def POINT_add(self, section_parameters=None):
        """Append a new POINT subsection and return it.

        `section_parameters`, when given, is stored on the subsection if it
        supports a `Section_parameters` attribute.
        """
        new_section = _point39()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        self.POINT_list.append(new_section)
        return new_section

    @property
    def Points(self):
        """
        See documentation for Atoms
        """
        return self.Atoms

    @Points.setter
    def Points(self, value):
        self.Atoms = value
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b23442ddc1decb7f8b5ac16595474fb0958b92f7 | 26605ec8a8bdd64a45af7d444d097d9e2f832dc9 | /electrum_xazab/plugins/keepkey/cmdline.py | d9c0b09fa26ff935158f1a8c786bcf62bd7adc2b | [
"MIT"
] | permissive | nunumichael/electrum-xazab | b67f821fd4a19e924d8ad902f076223df9b7511f | f128c765f451b418a418f9cd8b8e24fd8f66df74 | refs/heads/master | 2023-05-05T05:30:03.935745 | 2021-05-26T19:12:47 | 2021-05-26T19:12:47 | 370,091,240 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from electrum_xazab.plugin import hook
from .keepkey import KeepKeyPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(KeepKeyPlugin):
    """Command-line flavour of the KeepKey plugin.

    A single shared CmdLineHandler serves every keystore and window,
    since there is only one terminal to interact with.
    """

    handler = CmdLineHandler()

    @hook
    def init_keystore(self, keystore):
        # Only attach the handler to keystores this plugin manages.
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler

    def create_handler(self, window):
        return self.handler
| [
"71531505+xazab@users.noreply.github.com"
] | 71531505+xazab@users.noreply.github.com |
a61fb1cc5cb3816c0998f5905ac5942446afa481 | f246b414cce8687d6e5d1bb77cd94132b89580a2 | /commerce/auctions/migrations/0001_initial.py | 2a4e4b16150603161e70e2399aa01a2c0e0217c6 | [] | no_license | thewolfcommander/cs50-web | edbccd29b0b649852c7af73d5ecba4f51fa47ad3 | beead0967d36ef398b699601c8ebae646827556d | refs/heads/master | 2022-12-23T08:03:52.729833 | 2020-10-01T14:14:31 | 2020-10-01T14:14:31 | 300,309,278 | 2 | 2 | null | 2020-10-01T14:29:44 | 2020-10-01T14:29:42 | null | UTF-8 | Python | false | false | 5,777 | py | # Generated by Django 3.1 on 2020-09-06 02:37
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial schema for the auctions app: a custom User
    # (AbstractUser clone plus a watchlist M2M), Listing, Comment and Bid.
    # Only the indentation was restored; field definitions are unchanged.

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, verbose_name='Title')),
                ('description', models.TextField(verbose_name='Description')),
                ('price', models.DecimalField(decimal_places=2, max_digits=15, verbose_name='Starting Bid')),
                ('image', models.URLField(blank=True, null=True, verbose_name='Image URL')),
                ('category', models.CharField(blank=True, choices=[('BOOKS', 'Books'), ('MUSIC', 'Music'), ('MOVIES', 'Movies'), ('GAMES', 'Games'), ('COMPUTERS', 'Computers'), ('ELECTRONICS', 'Electronics'), ('KITCHEN', 'Kitchen'), ('HOME', 'Home'), ('HEALTH', 'Health'), ('PETS', 'Pets'), ('TOYS', 'Toys'), ('FASHION', 'Fashion'), ('SHOES', 'Shoes'), ('SPORTS', 'Sports'), ('BABY', 'Baby'), ('TRAVEL', 'Travel')], max_length=200, null=True, verbose_name='Category')),
                ('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='listings', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(default='', verbose_name='Comment')),
                ('timestamp', models.DateTimeField(auto_now_add=True, null=True)),
                ('commenter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
                ('listing', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='auctions.listing')),
            ],
        ),
        migrations.CreateModel(
            name='Bid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True, null=True)),
                ('bid_price', models.DecimalField(decimal_places=2, max_digits=15, null=True, verbose_name='Bid Price')),
                ('bidder', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bids', to=settings.AUTH_USER_MODEL)),
                ('listing', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bids', to='auctions.listing')),
            ],
        ),
        migrations.AddField(
            model_name='user',
            name='watchlist',
            field=models.ManyToManyField(blank=True, related_name='watchlist', to='auctions.Listing'),
        ),
    ]
| [
"nstu778@aucklanduni.ac.nz"
] | nstu778@aucklanduni.ac.nz |
43222f5e5d245a3e43971f99b03671da295988e0 | 0706632fa39c3c5ea0ae36b65c427f9220e5e480 | /maml_rl/samplers/multi_task_sampler.py | 1fc93f21d1c1c54b1e53d91af333d0822489b348 | [
"MIT"
] | permissive | jasleen1722/pytorch-maml-rl | 24aa60b6b3ed4d972fee4e33f17df5fe04fdf0bc | 243214b17da2ebfa152bba784778884b46a7e349 | refs/heads/master | 2022-11-14T09:15:55.882981 | 2020-05-28T10:39:00 | 2020-05-28T10:39:00 | 280,620,859 | 1 | 0 | MIT | 2020-07-18T09:06:46 | 2020-07-18T09:06:45 | null | UTF-8 | Python | false | false | 13,271 | py | import torch
import torch.multiprocessing as mp
import asyncio
import threading
import time
from datetime import datetime, timezone
from copy import deepcopy
from maml_rl.samplers.sampler import Sampler, make_env
from maml_rl.envs.utils.sync_vector_env import SyncVectorEnv
from maml_rl.episode import BatchEpisodes
from maml_rl.utils.reinforcement_learning import reinforce_loss
def _create_consumer(queue, futures, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
while True:
data = queue.get()
if data is None:
break
index, step, episodes = data
future = futures if (step is None) else futures[step]
if not future[index].cancelled():
loop.call_soon_threadsafe(future[index].set_result, episodes)
class MultiTaskSampler(Sampler):
"""Vectorized sampler to sample trajectories from multiple environements.
Parameters
----------
env_name : str
Name of the environment. This environment should be an environment
registered through `gym`. See `maml.envs`.
env_kwargs : dict
Additional keywork arguments to be added when creating the environment.
batch_size : int
Number of trajectories to sample from each task (ie. `fast_batch_size`).
policy : `maml_rl.policies.Policy` instance
The policy network for sampling. Note that the policy network is an
instance of `torch.nn.Module` that takes observations as input and
returns a distribution (typically `Normal` or `Categorical`).
baseline : `maml_rl.baseline.LinearFeatureBaseline` instance
The baseline. This baseline is an instance of `nn.Module`, with an
additional `fit` method to fit the parameters of the model.
env : `gym.Env` instance (optional)
An instance of the environment given by `env_name`. This is used to
sample tasks from. If not provided, an instance is created from `env_name`.
seed : int (optional)
Random seed for the different environments. Note that each task and each
environement inside every process use different random seed derived from
this value if provided.
num_workers : int
Number of processes to launch. Note that the number of processes does
not have to be equal to the number of tasks in a batch (ie. `meta_batch_size`),
and can scale with the amount of CPUs available instead.
"""
def __init__(self,
env_name,
env_kwargs,
batch_size,
policy,
baseline,
env=None,
seed=None,
num_workers=1):
super(MultiTaskSampler, self).__init__(env_name,
env_kwargs,
batch_size,
policy,
seed=seed,
env=env)
self.num_workers = num_workers
self.task_queue = mp.JoinableQueue()
self.train_episodes_queue = mp.Queue()
self.valid_episodes_queue = mp.Queue()
policy_lock = mp.Lock()
self.workers = [SamplerWorker(index,
env_name,
env_kwargs,
batch_size,
self.env.observation_space,
self.env.action_space,
self.policy,
deepcopy(baseline),
self.seed,
self.task_queue,
self.train_episodes_queue,
self.valid_episodes_queue,
policy_lock)
for index in range(num_workers)]
for worker in self.workers:
worker.daemon = True
worker.start()
self._waiting_sample = False
self._event_loop = asyncio.get_event_loop()
self._train_consumer_thread = None
self._valid_consumer_thread = None
def sample_tasks(self, num_tasks):
return self.env.unwrapped.sample_tasks(num_tasks)
def sample_async(self, tasks, **kwargs):
if self._waiting_sample:
raise RuntimeError('Calling `sample_async` while waiting '
'for a pending call to `sample_async` '
'to complete. Please call `sample_wait` '
'before calling `sample_async` again.')
for index, task in enumerate(tasks):
self.task_queue.put((index, task, kwargs))
num_steps = kwargs.get('num_steps', 1)
futures = self._start_consumer_threads(tasks,
num_steps=num_steps)
self._waiting_sample = True
return futures
def sample_wait(self, episodes_futures):
if not self._waiting_sample:
raise RuntimeError('Calling `sample_wait` without any '
'prior call to `sample_async`.')
async def _wait(train_futures, valid_futures):
# Gather the train and valid episodes
train_episodes = await asyncio.gather(*[asyncio.gather(*futures)
for futures in train_futures])
valid_episodes = await asyncio.gather(*valid_futures)
return (train_episodes, valid_episodes)
samples = self._event_loop.run_until_complete(_wait(*episodes_futures))
self._join_consumer_threads()
self._waiting_sample = False
return samples
def sample(self, tasks, **kwargs):
futures = self.sample_async(tasks, **kwargs)
return self.sample_wait(futures)
@property
def train_consumer_thread(self):
if self._train_consumer_thread is None:
raise ValueError()
return self._train_consumer_thread
@property
def valid_consumer_thread(self):
if self._valid_consumer_thread is None:
raise ValueError()
return self._valid_consumer_thread
def _start_consumer_threads(self, tasks, num_steps=1):
# Start train episodes consumer thread
train_episodes_futures = [[self._event_loop.create_future() for _ in tasks]
for _ in range(num_steps)]
self._train_consumer_thread = threading.Thread(target=_create_consumer,
args=(self.train_episodes_queue, train_episodes_futures),
kwargs={'loop': self._event_loop},
name='train-consumer')
self._train_consumer_thread.daemon = True
self._train_consumer_thread.start()
# Start valid episodes consumer thread
valid_episodes_futures = [self._event_loop.create_future() for _ in tasks]
self._valid_consumer_thread = threading.Thread(target=_create_consumer,
args=(self.valid_episodes_queue, valid_episodes_futures),
kwargs={'loop': self._event_loop},
name='valid-consumer')
self._valid_consumer_thread.daemon = True
self._valid_consumer_thread.start()
return (train_episodes_futures, valid_episodes_futures)
def _join_consumer_threads(self):
if self._train_consumer_thread is not None:
self.train_episodes_queue.put(None)
self.train_consumer_thread.join()
if self._valid_consumer_thread is not None:
self.valid_episodes_queue.put(None)
self.valid_consumer_thread.join()
self._train_consumer_thread = None
self._valid_consumer_thread = None
def close(self):
if self.closed:
return
for _ in range(self.num_workers):
self.task_queue.put(None)
self.task_queue.join()
self._join_consumer_threads()
self.closed = True
class SamplerWorker(mp.Process):
def __init__(self,
index,
env_name,
env_kwargs,
batch_size,
observation_space,
action_space,
policy,
baseline,
seed,
task_queue,
train_queue,
valid_queue,
policy_lock):
super(SamplerWorker, self).__init__()
env_fns = [make_env(env_name, env_kwargs=env_kwargs)
for _ in range(batch_size)]
self.envs = SyncVectorEnv(env_fns,
observation_space=observation_space,
action_space=action_space)
self.envs.seed(None if (seed is None) else seed + index * batch_size)
self.batch_size = batch_size
self.policy = policy
self.baseline = baseline
self.task_queue = task_queue
self.train_queue = train_queue
self.valid_queue = valid_queue
self.policy_lock = policy_lock
def sample(self,
index,
num_steps=1,
fast_lr=0.5,
gamma=0.95,
gae_lambda=1.0,
device='cpu'):
# Sample the training trajectories with the initial policy and adapt the
# policy to the task, based on the REINFORCE loss computed on the
# training trajectories. The gradient update in the fast adaptation uses
# `first_order=True` no matter if the second order version of MAML is
# applied since this is only used for sampling trajectories, and not
# for optimization.
params = None
for step in range(num_steps):
train_episodes = self.create_episodes(params=params,
gamma=gamma,
gae_lambda=gae_lambda,
device=device)
train_episodes.log('_enqueueAt', datetime.now(timezone.utc))
# QKFIX: Deep copy the episodes before sending them to their
# respective queues, to avoid a race condition. This issue would
# cause the policy pi = policy(observations) to be miscomputed for
# some timesteps, which in turns makes the loss explode.
self.train_queue.put((index, step, deepcopy(train_episodes)))
with self.policy_lock:
loss = reinforce_loss(self.policy, train_episodes, params=params)
params = self.policy.update_params(loss,
params=params,
step_size=fast_lr,
first_order=True)
# Sample the validation trajectories with the adapted policy
valid_episodes = self.create_episodes(params=params,
gamma=gamma,
gae_lambda=gae_lambda,
device=device)
valid_episodes.log('_enqueueAt', datetime.now(timezone.utc))
self.valid_queue.put((index, None, deepcopy(valid_episodes)))
def create_episodes(self,
params=None,
gamma=0.95,
gae_lambda=1.0,
device='cpu'):
episodes = BatchEpisodes(batch_size=self.batch_size,
gamma=gamma,
device=device)
episodes.log('_createdAt', datetime.now(timezone.utc))
episodes.log('process_name', self.name)
t0 = time.time()
for item in self.sample_trajectories(params=params):
episodes.append(*item)
episodes.log('duration', time.time() - t0)
self.baseline.fit(episodes)
episodes.compute_advantages(self.baseline,
gae_lambda=gae_lambda,
normalize=True)
return episodes
def sample_trajectories(self, params=None):
observations = self.envs.reset()
with torch.no_grad():
while not self.envs.dones.all():
observations_tensor = torch.from_numpy(observations)
pi = self.policy(observations_tensor, params=params)
actions_tensor = pi.sample()
actions = actions_tensor.cpu().numpy()
new_observations, rewards, _, infos = self.envs.step(actions)
batch_ids = infos['batch_ids']
yield (observations, actions, rewards, batch_ids)
observations = new_observations
def run(self):
while True:
data = self.task_queue.get()
if data is None:
self.envs.close()
self.task_queue.task_done()
break
index, task, kwargs = data
self.envs.reset_task(task)
self.sample(index, **kwargs)
self.task_queue.task_done()
| [
"tristan.deleu@gmail.com"
] | tristan.deleu@gmail.com |
9b6e4467cb0a35180ca9666a768fe8b78f8e4250 | ec062c479c09ce250c3e23ff47f144f423b55648 | /py/Lib/site-packages/azure/mgmt/compute/compute/v2016_04_30_preview/operations/images_operations.py | 9036334dcb789747cc350335931fcc85d57e17d3 | [] | no_license | betisb/InputParser | c442ffc877a941bd5b7aac4d843a4d21594d8e96 | 68747d69e04d126f7ea679f93a291a6de244a95f | refs/heads/master | 2021-07-13T05:05:19.479329 | 2019-05-28T16:56:53 | 2019-05-28T16:56:53 | 188,087,891 | 0 | 2 | null | 2020-07-24T00:14:31 | 2019-05-22T17:52:13 | Python | UTF-8 | Python | false | false | 19,041 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class ImagesOperations(object):
"""ImagesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-04-30-preview"
self.config = config
def create_or_update(
self, resource_group_name, image_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param parameters: Parameters supplied to the Create Image operation.
:type parameters: :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Image')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Image', response)
if response.status_code == 201:
deserialized = self._deserialize('Image', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, image_name, custom_headers=None, raw=False, **operation_config):
"""Deletes an Image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`OperationStatusResponse
<azure.mgmt.compute.compute.v2016_04_30_preview.models.OperationStatusResponse>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, image_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets an image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param image_name: The name of the image.
:type image_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'imageName': self._serialize.url("image_name", image_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Image', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets the list of images under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>`
:rtype: :class:`ImagePaged
<azure.mgmt.compute.compute.v2016_04_30_preview.models.ImagePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ImagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ImagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the list of Images in the subscription. Use nextLink property in
the response to get the next page of Images. Do this till nextLink is
not null to fetch all the Images.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Image
<azure.mgmt.compute.compute.v2016_04_30_preview.models.Image>`
:rtype: :class:`ImagePaged
<azure.mgmt.compute.compute.v2016_04_30_preview.models.ImagePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ImagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ImagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
| [
"betis.h.a@live.com"
] | betis.h.a@live.com |
6b4bbf999b1e3971e8818a0b461d50577e4be523 | 3803bbc41c561b80d3ff6b79d45f00a29d868706 | /src/13ionetcdf/abinit.src | 51df8130fc9ba981594670600ac33c9a5c41ecc4 | [] | no_license | RADI-ux/abinit-cmake | 820cc3d6887b9e57b515d493c1a4cdc55e646dea | 54f0edb964b4f0153db532c7687db10e70ea80e2 | refs/heads/master | 2020-09-05T05:05:09.372589 | 2009-02-22T20:59:57 | 2009-02-22T22:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | src | # -*- Python -*-
#
# Copyright (c) 2005-2008 ABINIT Group (Yann Pouillon)
# All rights reserved.
#
# This file is part of the ABINIT software package. For license information,
# please see the COPYING file in the top-level directory of the ABINIT source
# distribution.
#
#
# Source files making up the 3ionetcdf library
#
# Source file attributes
ABI_SRC_NIL = 0 # No attribute
ABI_SRC_BLT = 1 # The file is built by a script
ABI_SRC_DEP = 2 # The file depends on other files
# Source files
sources = [
"abi_etsf_init.F90",
"abi_etsf_electrons_put.F90",
"abi_etsf_geo_put.F90",
"handle_err_netcdf.F90",
"hdr_io_etsf.F90",
"ini_wf_etsf.F90",
"ini_wf_netcdf.F90",
"write_header_moldynnetcdf.F90",
"write_moldynvaluenetcdf.F90"]
| [
"ondrej@certik.cz"
] | ondrej@certik.cz |
d6caa39c10d52e910422dd0d943485dcab3e9a75 | e5799f58d30361dd783f2932474c86cb7b0bbab0 | /calculator/views.py | 7a272276e19d3f6cd0deeeca93028fab19865b45 | [] | no_license | Shekharnunia/simple-calculator | a28aacfede97e9022bc7e9e59b278b8e62a681a3 | 006c8196acd5284e28cbe451ab5a5b0314975378 | refs/heads/master | 2022-11-18T05:43:32.816169 | 2020-07-20T11:28:51 | 2020-07-20T11:28:51 | 281,097,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | import operator
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
class CalculatorAPIView(APIView):
def post(self, request, *args, **kwargs):
print(request.data)
value1 = int(request.data.get("value1", None))
value2 = int(request.data.get("value2", None))
operation = request.data.get("operation", 1)
ops = {'+' : operator.add, '-' : operator.sub, '*' : operator.mul}
if operation in ops:
output = ops[operation](value1, value2)
return Response(output, status=status.HTTP_200_OK)
return Response(status=status.HTTP_400_BAD_REQUEST)
def get(self, request, format=False):
value1 = int(request.GET.get("value1", None))
value2 = int(request.GET.get("value2", None))
operation = request.GET.get("operation", 1)
ops = {'add' : operator.add, '-' : operator.sub, '*' : operator.mul}
print(value1, value2, operation)
print(len(operation))
print(operation in ops)
if operation in ops:
output = ops[operation](value1, value2)
return Response(output, status=status.HTTP_200_OK)
return Response(status=status.HTTP_400_BAD_REQUEST)
| [
"shekharnunia@gmail.com"
] | shekharnunia@gmail.com |
f66ad320b398c7d248337244ae82393096b99540 | 268c588de53d48f2e48c694535e27c1be104229d | /Adapter_Pattern.py | b5fd93232e521c89f70478c8b19fe8abe2a22854 | [] | no_license | wax8280/Python_Design_Patterns | def64b1662924807946a9847ac1bf0437382a716 | 88fb08ad3605fb06166bf45d814f5b85a37364b5 | refs/heads/master | 2021-01-11T01:21:14.964828 | 2016-10-14T15:40:42 | 2016-10-14T15:40:42 | 70,715,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # coding:utf-8
class Synthesizer:
def __init__(self, name):
self.name = name
def __str__(self):
return 'the {} synthesizer'.format(self.name)
def play(self):
return 'is playing an electronic song'
class Human:
def __init__(self, name):
self.name = name
def __str__(self):
return '{} the human'.format(self.name)
def speak(self):
return 'says hello'
class Computer:
def __init__(self, name):
self.name = name
def __str__(self):
return 'the {} computer'.format(self.name)
def execute(self):
return 'executes a program'
class Adapter:
def __init__(self, obj, adapted_methods):
self.obj = obj
self.__dict__.update(adapted_methods)
def __str__(self):
return str(self.obj)
def main():
objects = [Computer('Asus')]
synth = Synthesizer('moog')
human = Human('Bob')
objects.append(Adapter(human, dict(execute=human.speak)))
objects.append(Adapter(synth, dict(execute=synth.play)))
for i in objects:
print('{} {}'.format(str(i), i.execute()))
if __name__ == "__main__":
main()
| [
"wax8280@163.com"
] | wax8280@163.com |
7d4aff7df2367bb22dc9f41b31a08713bd0699f1 | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_penicillin.py | 0cb436b425b437de661d400636d1bc4084227041 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.penicillin import penicillin
def test_penicillin():
"""Test module penicillin.py by downloading
penicillin.csv and testing shape of
extracted data has 144 rows and 3 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = penicillin(test_path)
try:
assert x_train.shape == (144, 3)
except:
shutil.rmtree(test_path)
raise()
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
70e519043cf0a431f68a0786fbad374223501e77 | 3da69696601b2b3ad7bc1285a5f0343c7eafea80 | /lc888.py | 9b44f944a4450fb681eae24b3aa24a73adda0b00 | [] | no_license | GeorgyZhou/Leetcode-Problem | ee586463a2e4e75c910c095bdc057f1be70b5c1b | d6fac85a94a7188e93d4e202e67b6485562d12bd | refs/heads/master | 2021-06-30T15:58:04.698200 | 2020-12-18T22:55:49 | 2020-12-18T22:55:49 | 66,054,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | class Solution(object):
def fairCandySwap(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
sum_a = sum(A)
sum_b = sum(B)
target = (sum_a + sum_b) / 2
diff = target - sum_b
set_a = set(A)
for b in B:
if b + diff in set_a:
return [b + diff, b]
| [
"michaelchouqj@gmail.com"
] | michaelchouqj@gmail.com |
f65794dc3bfb3a87865b42bd60309bdff9092190 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2020/IterationStatement[0,0].LabelledEvaluation.spec | 24b97f4ffaca37ee1ec9e0e2e4054f282340c901 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 556 | spec | 1. Let _V_ be *undefined*.
1. Repeat,
1. Let _stmtResult_ be the result of evaluating |Statement|.
1. If LoopContinues(_stmtResult_, _labelSet_) is *false*, return Completion(UpdateEmpty(_stmtResult_, _V_)).
1. If _stmtResult_.[[Value]] is not ~empty~, set _V_ to _stmtResult_.[[Value]].
1. Let _exprRef_ be the result of evaluating |Expression|.
1. Let _exprValue_ be ? GetValue(_exprRef_).
1. If ! ToBoolean(_exprValue_) is *false*, return NormalCompletion(_V_). | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
c41f02c8804faa4da500fd7196ad6460a39b89d9 | 8d49683cd799ed66bc9dd197c197d1e1c7a73120 | /src/gamesbyexample/tutorialguess2.py | dc65f038f09cfaa639c5bba236608793939f3f78 | [
"MIT"
] | permissive | trujilloshane/PythonStdioGames | d2e53ec22121b30e4b5317e46ed685831492d9c3 | 83ac9cd367f688539b77f67f0d44433fc6fdcbdf | refs/heads/master | 2020-12-13T15:12:26.917070 | 2020-01-06T09:38:39 | 2020-01-06T09:38:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Tutorial: Guess the Number, by Al Sweigart al@inventwithpython.com
# Part 2 of a tutorial to make a "Guess the Number" game, bit by bit.
# Try copying the code in this program on your own and running the
# program before moving on to part 3. (You don't have to copy the
# comments.)
import random
# Pick the number the player will (in a later part) be asked to guess.
secretNumber = random.randint(1, 20)
print('Hello! What is your name?')
playerName = input()
print('It is good to meet you, ' + playerName)
print('I am thinking of a number from 1 to 20.')
print('Take a guess.')
guess = input()  # Read but not compared yet; the comparison arrives in part 3.
print('My secret number was', secretNumber)
"asweigart@gmail.com"
] | asweigart@gmail.com |
e9758f17759c46976528fab01887e7701ee4fe9d | 3147604001706f368a5cd73317a99a95e1501aca | /tensorflow_federated/python/core/impl/compiler/transformations.py | 31561a38f5b66ed52d275a69c98f81161d4e6998 | [
"Apache-2.0"
] | permissive | mlimwxxnn/federated | 7c4eb05c91ea3df905d55204bce9fdc07a3340da | 4da4cf1453a118087580b7d739c53a71e4e63bd4 | refs/heads/master | 2022-11-24T03:44:57.840507 | 2020-07-23T23:52:51 | 2020-07-23T23:53:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,293 | py | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of composite transformation functions.
A composite transformation is one that applies multiple atomic transformation to
an AST either pointwise or serially.
"""
from typing import Mapping
from absl import logging
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.impl import tree_to_cc_transformations
from tensorflow_federated.python.core.impl.compiler import building_block_factory
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import compiled_computation_transforms
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.compiler import tree_analysis
from tensorflow_federated.python.core.impl.compiler import tree_transformations
from tensorflow_federated.python.core.impl.types import type_analysis
def prepare_for_rebinding(comp):
  """Prepares `comp` for extracting rebound variables.

  Currently, this means replacing all called lambdas and inlining all blocks.
  This does not necessarly guarantee that the resulting computation has no
  called lambdas, it merely reduces a level of indirection here. This reduction
  has proved sufficient for identifying variables which are about to be rebound
  in the top-level lambda, necessarily when compiler components factor work out
  from a single function into multiple functions. Since this function makes no
  guarantees about sufficiency, it is the responsibility of the caller to
  ensure that no unbound variables are introduced during the rebinding.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` from which all
      occurrences of a given variable need to be extracted and rebound.

  Returns:
    Another instance of `building_blocks.ComputationBuildingBlock` which has
    had all called lambdas replaced by blocks, all blocks inlined and all
    selections from tuples collapsed.
  """
  # TODO(b/146430051): Follow up here and consider removing or enforcing more
  # strict output invariants when `remove_called_lambdas_and_blocks` is moved
  # in here.
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  # Names are uniquified first; the inlining transforms below rely on unique
  # reference names (see the similar ordering in
  # `remove_called_lambdas_and_blocks`).
  comp, _ = tree_transformations.uniquify_reference_names(comp)
  comp, _ = tree_transformations.replace_called_lambda_with_block(comp)
  block_inliner = tree_transformations.InlineBlock(comp)
  selection_replacer = tree_transformations.ReplaceSelectionFromTuple()
  transforms = [block_inliner, selection_replacer]
  symbol_tree = transformation_utils.SymbolTree(
      transformation_utils.ReferenceCounter)

  def _transform_fn(comp, symbol_tree):
    """Transform function chaining inlining and collapsing selections."""
    modified = False
    for transform in transforms:
      # Global transforms need visibility into the symbol bindings in scope at
      # this node; local transforms only look at the node itself.
      if transform.global_transform:
        comp, transform_modified = transform.transform(comp, symbol_tree)
      else:
        comp, transform_modified = transform.transform(comp)
      modified = modified or transform_modified
    return comp, modified

  return transformation_utils.transform_postorder_with_symbol_bindings(
      comp, _transform_fn, symbol_tree)
def remove_called_lambdas_and_blocks(comp):
  """Removes any called lambdas and blocks from `comp`.

  This function first resolves any higher-order functions, so that replacing
  called lambdas with blocks and then inlining the block locals cannot result
  in more called lambdas. It then performs this sequence of transformations,
  taking care to inline selections from tuples before inlining the rest of
  the block locals to prevent possible combinatorial growth of the generated
  AST.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` from which we
      want to remove called lambdas and blocks.

  Returns:
    A transformed version of `comp` which has no called lambdas or blocks, and
    no extraneous selections from tuples.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  comp, names_uniquified = tree_transformations.uniquify_reference_names(comp)
  comp, fns_resolved = tree_transformations.resolve_higher_order_functions(comp)
  comp, lambdas_replaced = tree_transformations.replace_called_lambda_with_block(
      comp)
  # Names are re-uniquified after each transform which may have introduced
  # duplicate bindings; the inlining passes below rely on unique names.
  if fns_resolved or lambdas_replaced:
    comp, _ = tree_transformations.uniquify_reference_names(comp)
  comp, sels_removed = tree_transformations.inline_selections_from_tuple(comp)
  if sels_removed:
    comp, _ = tree_transformations.uniquify_reference_names(comp)
  comp, locals_inlined = tree_transformations.inline_block_locals(comp)
  modified = names_uniquified or fns_resolved or lambdas_replaced or sels_removed or locals_inlined
  return comp, modified
def _generate_simple_tensorflow(comp):
  """Naively parses `comp` into TensorFlow, with no deduplication attempted.

  Args:
    comp: Building block to back with a generated TensorFlow computation.

  Returns:
    The TensorFlow-backed equivalent of `comp`.
  """
  with_identities, _ = tree_transformations.insert_called_tf_identity_at_leaves(
      comp)
  parsed, _ = transformation_utils.transform_postorder(
      with_identities, tree_to_cc_transformations.TFParser())
  return parsed
def construct_tensorflow_calling_lambda_on_concrete_arg(
    parameter: building_blocks.Reference,
    body: building_blocks.ComputationBuildingBlock,
    concrete_arg: building_blocks.ComputationBuildingBlock):
  """Generates TensorFlow invoking a lambda on a concrete argument.

  Builds a TensorFlow block encapsulating the logic of calling the function
  with parameter `parameter` and body `body` on the argument `concrete_arg`;
  every occurrence of `parameter` in `body` thereby refers to `concrete_arg`.

  Via the guarantee made in `compiled_computation_transforms.StructCalledGraphs`
  the computations defining `concrete_arg` are executed exactly once in the
  generated TensorFlow.

  Args:
    parameter: Instance of `building_blocks.Reference` naming the parameter of
      the function to generate and invoke.
    body: `building_blocks.ComputationBuildingBlock` serving as the function
      body.
    concrete_arg: `building_blocks.ComputationBuildingBlock` to pass as the
      argument; must have a type signature equivalent to `parameter`'s.

  Returns:
    A called `building_blocks.CompiledComputation` as described above.

  Raises:
    TypeError: If the arguments have the wrong types, or the type signature
      of `concrete_arg` does not match that of `parameter`.
  """
  py_typecheck.check_type(parameter, building_blocks.Reference)
  py_typecheck.check_type(body, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(concrete_arg,
                          building_blocks.ComputationBuildingBlock)
  parameter.type_signature.check_equivalent_to(concrete_arg.type_signature)
  tf_lambda = _generate_simple_tensorflow(
      building_blocks.Lambda(parameter.name, parameter.type_signature, body))
  return _generate_simple_tensorflow(
      building_blocks.Call(tf_lambda, concrete_arg))
def _replace_references_in_comp_with_selections_from_arg(
    comp: building_blocks.ComputationBuildingBlock,
    arg_ref: building_blocks.Reference, name_to_output_index: Mapping[str,
                                                                      int]):
  """Rewrites every reference in `comp` as an indexed selection on `arg_ref`.

  The index for each reference name is looked up in `name_to_output_index`.
  """

  def _rebind(subcomp):
    if not subcomp.is_reference():
      return subcomp, False
    output_index = name_to_output_index[subcomp.name]
    return building_blocks.Selection(source=arg_ref, index=output_index), True

  rebound, _ = transformation_utils.transform_postorder(comp, _rebind)
  return rebound
def _construct_tensorflow_representing_single_local_assignment(
    arg_ref, arg_class, previous_output, name_to_output_index):
  """Builds TensorFlow assigning one block-local class onto the running output.

  Forms a tuple which forwards every element of `previous_output` and appends
  the computations of `arg_class` (with their references rebound via
  `name_to_output_index`), then generates TensorFlow capturing that tuple's
  logic.

  Args:
    arg_ref: `building_blocks.Reference` standing in for `previous_output`
      inside the lambda body to be parsed to TensorFlow (present for name
      safety).
    arg_class: List of building blocks depending on the block local being
      processed or any preceding one; one of the classes produced by
      `group_block_locals_by_namespace`.
    previous_output: The result of parsing the preceding block-local bindings
      in the same manner.
    name_to_output_index: Mapping from block-local names to their index in the
      generated output; read here, never modified.

  Returns:
    A called `building_blocks.CompiledComputation` representing the tuple
    described above.
  """
  num_previous = len(previous_output.type_signature)
  forwarded = [
      building_blocks.Selection(source=arg_ref, index=i)
      for i in range(num_previous)
  ]
  rebound = []
  for local_comp in arg_class:
    rebound.append(
        _replace_references_in_comp_with_selections_from_arg(
            local_comp, arg_ref, name_to_output_index))
  combined = building_blocks.Tuple(forwarded + rebound)
  return construct_tensorflow_calling_lambda_on_concrete_arg(
      arg_ref, combined, previous_output)
def _get_unbound_ref(block):
  """Returns a `Reference` to the single unbound variable of `block`, if any.

  Returns `None` when `block` has no unbound references; raises when it has
  more than one.
  """
  unbound_map = transformation_utils.get_map_of_unbound_references(block)
  refs_unbound_at_top = unbound_map[block]
  if not refs_unbound_at_top:
    return None
  if len(refs_unbound_at_top) > 1:
    raise ValueError('`create_tensorflow_representing_block` must be passed '
                     'a block with at most a single unbound reference; '
                     'encountered the block {} with {} unbound '
                     'references.'.format(block, len(refs_unbound_at_top)))
  ref_name = next(iter(refs_unbound_at_top))
  ref_type = None

  def _record_type(inner_comp):
    nonlocal ref_type
    if inner_comp.is_reference() and inner_comp.name == ref_name:
      ref_type = inner_comp.type_signature
    return inner_comp, False

  transformation_utils.transform_postorder(block, _record_type)
  return building_blocks.Reference(ref_name, ref_type)
def _check_parameters_for_tf_block_generation(block):
  """Validates that `block` is eligible for parsing into TensorFlow graphs."""
  py_typecheck.check_type(block, building_blocks.Block)
  for _, local_value in block.locals:
    is_called_graph = (
        local_value.is_call() and
        local_value.function.is_compiled_computation())
    if not is_called_graph:
      raise ValueError(
          'create_tensorflow_representing_block may only be called '
          'on a block whose local variables are all bound to '
          'called TensorFlow computations; encountered a local '
          'bound to {}'.format(local_value))

  def _validate_result_node(inner_comp):
    allowed = (
        inner_comp.is_reference() or inner_comp.is_selection() or
        inner_comp.is_struct())
    if not allowed:
      raise ValueError(
          'create_tensorflow_representing_block may only be called '
          'on a block whose result contains only Selections, '
          'Tuples and References; encountered the building block '
          '{}.'.format(inner_comp))
    return inner_comp, False

  transformation_utils.transform_postorder(
      block.result, _validate_result_node)
def create_tensorflow_representing_block(block):
  """Generates non-duplicated TensorFlow for Block locals binding called graphs.

  Assuming that the argument `block` satisfies the following conditions:

  1. The local variables in `block` are all called graphs, with arbitrary
     arguments.
  2. The result of the Block contains tuples, selections and references,
     but nothing else.

  Then `create_tensorflow_representing_block` will generate a structure, which
  may contain tensorflow functions, calls to tensorflow functions, and
  references, but which have generated this TensorFlow code without duplicating
  work done by referencing the block locals.

  Args:
    block: Instance of `building_blocks.Block`, whose local variables are all
      called instances of `building_blocks.CompiledComputation`, and whose
      result contains only instances of `building_blocks.Reference`,
      `building_blocks.Selection` or `building_blocks.Tuple`.

  Returns:
    A transformed version of `block`, which has pushed references to the called
    graphs in the locals of `block` into TensorFlow.

  Raises:
    TypeError: If `block` is not an instance of `building_blocks.Block`.
    ValueError: If the locals of `block` are anything other than called graphs,
      or if the result of `block` contains anything other than selections,
      references and tuples.
  """
  _check_parameters_for_tf_block_generation(block)
  name_generator = building_block_factory.unique_name_generator(block)

  def _construct_reference_representing(comp_to_represent):
    """Helper closing over `name_generator` for name safety."""
    arg_type = comp_to_represent.type_signature
    arg_name = next(name_generator)
    return building_blocks.Reference(arg_name, arg_type)

  top_level_ref = _get_unbound_ref(block)
  named_comp_classes = tree_transformations.group_block_locals_by_namespace(
      block)
  if top_level_ref:
    # The unbound reference becomes element 0 of the running output tuple
    # (see `name_to_output_index` below), followed by the first class of
    # block locals.
    first_comps = [x[1] for x in named_comp_classes[0]]
    tup = building_blocks.Tuple([top_level_ref] + first_comps)
    graph_tup = _generate_simple_tensorflow(tup)
    output_comp = construct_tensorflow_calling_lambda_on_concrete_arg(
        top_level_ref, graph_tup, top_level_ref)
    name_to_output_index = {top_level_ref.name: 0}
  else:
    output_comp = building_block_factory.create_compiled_empty_tuple()
    name_to_output_index = {}
  block_local_names = [x[0] for x in block.locals]

  def _update_name_to_output_index(name_class):
    """Helper closing over `name_to_output_index` and `block_local_names`."""
    # Newly processed locals are appended to the end of the running output
    # tuple, so their indices start after everything recorded so far.
    offset = len(name_to_output_index.keys())
    for idx, comp_name in enumerate(name_class):
      for var_name in block_local_names:
        if var_name == comp_name:
          name_to_output_index[var_name] = idx + offset

  if top_level_ref:
    first_names = [x[0] for x in named_comp_classes[0]]
    _update_name_to_output_index(first_names)
    remaining_comp_classes = named_comp_classes[1:]
  else:
    remaining_comp_classes = named_comp_classes[:]
  # Each iteration folds one namespace class of locals into the running
  # TensorFlow computation, threading the previous output through so that no
  # local's work is duplicated.
  for named_comp_class in remaining_comp_classes:
    if named_comp_class:
      comp_class = [x[1] for x in named_comp_class]
      name_class = [x[0] for x in named_comp_class]
      arg_ref = _construct_reference_representing(output_comp)
      output_comp = _construct_tensorflow_representing_single_local_assignment(
          arg_ref, comp_class, output_comp, name_to_output_index)
      _update_name_to_output_index(name_class)
  # Finally rebind the block's result against the accumulated output tuple.
  arg_ref = _construct_reference_representing(output_comp)
  result_replaced = _replace_references_in_comp_with_selections_from_arg(
      block.result, arg_ref, name_to_output_index)
  comp_called = construct_tensorflow_calling_lambda_on_concrete_arg(
      arg_ref, result_replaced, output_comp)
  return comp_called, True
def remove_duplicate_called_graphs(comp):
  """Deduplicates called graphs for a subset of TFF AST constructs.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` whose called
      graphs we wish to deduplicate, according to `tree_analysis.trees_equal`.
      For `comp` to be eligible here, it must be either a lambda itself whose
      body contains no lambdas or blocks, or another computation containing no
      lambdas or blocks. This restriction is necessary because
      `remove_duplicate_called_graphs` makes no effort to ensure that it is not
      pulling references out of their defining scope, except for the case where
      `comp` is a lambda itself. This function exits early and logs a warning if
      this assumption is violated. Additionally, `comp` must contain only
      computations which can be represented in TensorFlow, IE, satisfy the type
      restriction in `type_analysis.is_tensorflow_compatible_type`.

  Returns:
    Either a called instance of `building_blocks.CompiledComputation` or a
    `building_blocks.CompiledComputation` itself, depending on whether `comp`
    is of non-functional or functional type respectively. Additionally, returns
    a boolean to match the `transformation_utils.TransformSpec` pattern.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  tree_analysis.check_has_unique_names(comp)
  name_generator = building_block_factory.unique_name_generator(comp)
  if comp.is_lambda():
    comp_to_check = comp.result
  else:
    comp_to_check = comp
  # Bail out (rather than raise) when the eligibility assumption documented
  # above is violated; callers fall back to slower TF generation.
  if tree_analysis.contains_types(comp_to_check, (
      building_blocks.Block,
      building_blocks.Lambda,
  )):
    logging.warning(
        'The preprocessors have failed to remove called lambdas '
        'and blocks; falling back to less efficient, but '
        'guaranteed, TensorFlow generation with computation %s.', comp)
    return comp, False
  # Accumulates (fresh_name, called_graph) pairs; equal subtrees share a name.
  leaf_called_graphs = []

  def _pack_called_graphs_into_block(inner_comp):
    """Packs deduplicated bindings to called graphs in `leaf_called_graphs`."""
    if inner_comp.is_call() and inner_comp.function.is_compiled_computation():
      for (name, x) in leaf_called_graphs:
        if tree_analysis.trees_equal(x, inner_comp):
          # An equal called graph was already bound; reuse its name.
          return building_blocks.Reference(name,
                                           inner_comp.type_signature), True
      new_name = next(name_generator)
      leaf_called_graphs.append((new_name, inner_comp))
      return building_blocks.Reference(new_name,
                                       inner_comp.type_signature), True
    return inner_comp, False

  if comp.is_lambda():
    # Only the lambda's body is rewritten, so its parameter binding survives.
    transformed_result, _ = transformation_utils.transform_postorder(
        comp.result, _pack_called_graphs_into_block)
    packed_into_block = building_blocks.Block(leaf_called_graphs,
                                              transformed_result)
    parsed, _ = create_tensorflow_representing_block(packed_into_block)
    tff_func = building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
                                      parsed)
    tf_parser_callable = tree_to_cc_transformations.TFParser()
    comp, _ = tree_transformations.insert_called_tf_identity_at_leaves(tff_func)
    tf_generated, _ = transformation_utils.transform_postorder(
        comp, tf_parser_callable)
  else:
    transformed_result, _ = transformation_utils.transform_postorder(
        comp, _pack_called_graphs_into_block)
    packed_into_block = building_blocks.Block(leaf_called_graphs,
                                              transformed_result)
    tf_generated, _ = create_tensorflow_representing_block(packed_into_block)
  return tf_generated, True
class RemoveDuplicatesAndApplyTransform(transformation_utils.TransformSpec):
  """Deduplicates before applying an interim transform, then repacks."""

  def __init__(self, comp: building_blocks.ComputationBuildingBlock,
               interim_transform_spec: transformation_utils.TransformSpec):
    """Constructs a new instance.

    Args:
      comp: Instance of `building_blocks.ComputationBuildingBlock` on which to
        apply the transform.
      interim_transform_spec: Instance of `transformation_utils.TransformSpec`
        whose `transform` method must take a `building_blocks.Tuple` and return
        a named tuple type, to be applied after deduplication.

    Raises:
      TypeError: If types do not match.
      ValueError: If the `uri` has an unexpected value.
    """
    super().__init__()
    py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
    py_typecheck.check_type(interim_transform_spec,
                            transformation_utils.TransformSpec)
    # Generates names guaranteed not to clash with any already in `comp`.
    self._name_generator = building_block_factory.unique_name_generator(comp)
    self._interim_transform = interim_transform_spec

  def should_transform(self, comp):
    # Only struct-typed comps the interim transform itself would accept.
    return self._interim_transform.should_transform(comp) and comp.is_struct()

  def _construct_deduped_tuple_and_selection_map(self, comp):
    # `selection_map[i]` is the index into `deduped_tuple` of the element
    # equal (by `tree_analysis.trees_equal`) to the i-th element of `comp`.
    deduped_tuple = []
    selection_map = []
    for called_intrinsic in comp:
      index_in_deduped_tuple = None
      for idx, previous_called_intrinsic in enumerate(deduped_tuple):
        if tree_analysis.trees_equal(called_intrinsic,
                                     previous_called_intrinsic):
          index_in_deduped_tuple = idx
      if index_in_deduped_tuple is None:
        deduped_tuple.append(called_intrinsic)
        index_in_deduped_tuple = len(deduped_tuple) - 1
      selection_map.append(index_in_deduped_tuple)
    return deduped_tuple, selection_map

  def transform(self, comp):
    if not self.should_transform(comp):
      return comp, False
    deduped_tuple, selection_map = self._construct_deduped_tuple_and_selection_map(
        comp)
    transform_applied, _ = self._interim_transform.transform(
        building_blocks.Tuple(deduped_tuple))
    transform_applied.type_signature.check_struct()
    if len(comp) == len(deduped_tuple):
      # Fall back if no optimization is made.
      return transform_applied, True
    # Otherwise re-expand the deduplicated result back to the original arity:
    # wrap it in a lambda whose body selects each original slot out of the
    # transformed tuple according to `selection_map`.
    lam_arg = building_blocks.Reference(
        next(self._name_generator), transform_applied.type_signature)
    replacement_tuple = []
    for i in selection_map:
      selected = building_blocks.Selection(lam_arg, index=i)
      replacement_tuple.append(selected)
    tup = building_blocks.Tuple(replacement_tuple)
    lam = building_blocks.Lambda(lam_arg.name, lam_arg.type_signature, tup)
    return building_blocks.Call(lam, transform_applied), True
def dedupe_and_merge_tuple_intrinsics(comp, uri):
  r"""Merges tuples of called intrinsics into one called intrinsic.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` to transform.
    uri: URI of the intrinsic to merge; forwarded to
      `tree_transformations.MergeTupleIntrinsics`.

  Returns:
    A `(transformed_comp, modified)` pair as produced by
    `transformation_utils.transform_postorder`.
  """
  # TODO(b/147359721): The application of the function below is a workaround to
  # a known pattern preventing TFF from deduplicating, effectively because tree
  # equality won't determine that [a, a][0] and [a, a][1] are actually the same
  # thing. A fuller fix is planned, but requires increasing the invariants
  # respected further up the TFF compilation pipelines. That is, in order to
  # reason about sufficiency of our ability to detect duplicates at this layer,
  # we would very much prefer to be operating in the subset of TFF effectively
  # representing local computation.

  def _remove_selection_from_block_holding_tuple(comp):
    """Reduces selection from a block holding a tuple."""
    if (comp.is_selection() and comp.source.is_block() and
        comp.source.result.is_struct()):
      if comp.index is None:
        # Named selection: translate the name into its positional index.
        names = [
            x[0]
            for x in anonymous_tuple.iter_elements(comp.source.type_signature)
        ]
        index = names.index(comp.name)
      else:
        index = comp.index
      return building_blocks.Block(comp.source.locals,
                                   comp.source.result[index]), True
    return comp, False

  comp, _ = transformation_utils.transform_postorder(
      comp, _remove_selection_from_block_holding_tuple)
  transform_spec = tree_transformations.MergeTupleIntrinsics(comp, uri)
  dedupe_and_merger = RemoveDuplicatesAndApplyTransform(comp, transform_spec)
  return transformation_utils.transform_postorder(comp,
                                                  dedupe_and_merger.transform)
def optimize_tensorflow_graphs(comp, grappler_config_proto):
  """Performs any static optimization on TensorFlow subcomputations.

  Args:
    comp: Building block whose TensorFlow subcomputations should be optimized.
    grappler_config_proto: Grappler configuration driving the optimization.

  Returns:
    A `(transformed_comp, modified)` pair as produced by
    `transformation_utils.transform_postorder`.
  """
  optimizer = compiled_computation_transforms.TensorFlowOptimizer(
      grappler_config_proto)
  return transformation_utils.transform_postorder(comp, optimizer.transform)
class TensorFlowGenerator(transformation_utils.TransformSpec):
  """TransformSpec which generates TensorFlow to represent local computation.

  Any TFF computation which declares as its parameters and return values only
  instances of `computation_types.SequenceType`,
  `computation_types.NamedTupleType`, and `computation_types.TensorType`s, and
  not capturing any references from an outer scope or containing any intrinsics,
  can be represented by a TensorFlow computation. This TransformSpec identifies
  computations such computations and generates a semantically equivalent
  TensorFlow computation.
  """

  def __init__(self):
    # Fallback parser used when the preprocessing path below fails; see the
    # TODO in `transform`.
    self._naive_tf_parser = tree_to_cc_transformations.TFParser()

  def transform(self, local_function):
    if not self.should_transform(local_function):
      return local_function, False
    refs_removed, _ = remove_called_lambdas_and_blocks(local_function)
    parsed_to_tf, _ = remove_duplicate_called_graphs(refs_removed)
    if parsed_to_tf.is_compiled_computation() or (
        parsed_to_tf.is_call() and
        parsed_to_tf.function.is_compiled_computation()):
      return parsed_to_tf, True
    # TODO(b/146430051): We should only end up in this case if
    # `remove_called_lambdas_and_blocks` above is in its failure mode, IE,
    # failing to resolve references due to too-deep indirection; we should
    # remove this extra case and simply raise if we fail here when we fix the
    # attached bug.
    called_graphs_inserted, _ = tree_transformations.insert_called_tf_identity_at_leaves(
        parsed_to_tf)
    compiled_comp, _ = transformation_utils.transform_postorder(
        called_graphs_inserted, self._naive_tf_parser)
    return compiled_comp, True

  def should_transform(self, comp):
    # Eligible types: TF-compatible non-functional types, or functions whose
    # parameter and result are both TF-compatible.
    if not (type_analysis.is_tensorflow_compatible_type(comp.type_signature) or
            (comp.type_signature.is_function() and
             type_analysis.is_tensorflow_compatible_type(
                 comp.type_signature.parameter) and
             type_analysis.is_tensorflow_compatible_type(
                 comp.type_signature.result))):
      return False
    elif comp.is_compiled_computation() or (
        comp.is_call() and comp.function.is_compiled_computation()):
      # These represent the final result of TF generation; no need to transform.
      return False
    unbound_refs = transformation_utils.get_map_of_unbound_references(
        comp)[comp]
    if unbound_refs:
      # We cannot represent these captures without further information.
      return False
    if tree_analysis.contains_types(comp, building_blocks.Intrinsic):
      return False
    return True
def compile_local_computation_to_tensorflow(comp):
  """Compiles any fully specified local function to a TensorFlow computation.

  Computations which are already the product of TensorFlow generation — a
  compiled computation, or a call to one — are returned unchanged.

  Args:
    comp: Building block to compile to TensorFlow.

  Returns:
    A `(transformed_comp, modified)` pair.
  """
  already_tensorflow = comp.is_compiled_computation() or (
      comp.is_call() and comp.function.is_compiled_computation())
  if already_tensorflow:
    # Short-circuit: nothing left to generate.
    return comp, False
  generator = TensorFlowGenerator()
  return transformation_utils.transform_preorder(comp, generator.transform)
def transform_to_call_dominant(
comp: building_blocks.ComputationBuildingBlock
) -> transformation_utils.TransformReturnType:
"""Normalizes computations into Call-Dominant Form.
A computation is in call-dominant form if the following conditions are true:
1. Every intrinsic which will be invoked to execute the computation appears
as a top-level let binding (modulo an encapsulating global lambda).
2. Each of these intrinsics is depended upon by the output. This implies in
particular that any intrinsic which is not depended upon by the output is
removed.
3. All reference bindings have unique names.
In an intermediate step, this function invokes
`tree_transformations.resolve_higher_order_functions` in order to ensure that
the function member of every `building_blocks.Call` must be either: a
`building_blocks.CompiledComputation`; a `building_blocks.Intrinsic`;
a `building_blocks.Lambda` with non-functional return type; a reference to
a function bound as parameter to an uncalled `building_blocks.Lambda`;
or a (possibly nested) selection from a reference to the parameter of
an such an uncalled `building_blocks.Lambda`.
Note that if no lambda takes a functional parameter, the final case in
the enumeration above is additionally disallowed.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` to transform.
Returns:
A two-tuple, whose first element is a building block representing the same
logic as `comp`, and whose second is a boolean indicating whether or not
any transformations were in fact run.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
def _check_calls_are_concrete(comp):
"""Encodes condition for completeness of direct extraction of calls.
After checking this condition, all functions which are semantically called
(IE, functions which will be invoked eventually by running the computation)
are called directly, and we can simply extract them by pattern-matching on
`building_blocks.Call`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` to check for
condition that functional argument of `Call` constructs contains only
the enumeration in the top-level docstring.
Raises:
ValueError: If `comp` fails this condition.
"""
symbol_tree = transformation_utils.SymbolTree(
transformation_utils.ReferenceCounter)
def _check_for_call_arguments(comp_to_check, symbol_tree):
if not comp_to_check.is_call():
return comp_to_check, False
functional_arg = comp_to_check.function
if functional_arg.is_compiled_computation(
) or functional_arg.is_intrinsic():
return comp_to_check, False
elif functional_arg.is_lambda():
if type_analysis.contains(functional_arg.type_signature.result,
lambda x: x.is_function()):
raise ValueError('Called higher-order functions are disallowed in '
'transforming to call-dominant form, as they may '
'break the reliance on pattern-matching to extract '
'called intrinsics. Encountered a call to the'
'lambda {l} with type signature {t}.'.format(
l=functional_arg,
t=functional_arg.type_signature))
return comp_to_check, False
elif functional_arg.is_reference():
# This case and the following handle the possibility that a lambda
# declares a functional parameter, and this parameter is invoked in its
# body.
payload = symbol_tree.get_payload_with_name(functional_arg.name)
if payload is None:
return comp, False
if payload.value is not None:
raise ValueError('Called references which are not bound to lambda '
'parameters are disallowed in transforming to '
'call-dominant form, as they may break the reliance '
'on pattern-matching to extract called intrinsics. '
'Encountered a call to the reference {r}, which is '
'bound to the value {v} in this computation.'.format(
r=functional_arg, v=payload.value))
elif functional_arg.is_selection():
concrete_source = functional_arg.source
while concrete_source.is_selection():
concrete_source = concrete_source.source
if concrete_source.is_reference():
payload = symbol_tree.get_payload_with_name(concrete_source.name)
if payload is None:
return comp, False
if payload.value is not None:
raise ValueError('Called selections from references which are not '
'bound to lambda parameters are disallowed in '
'transforming to call-dominant form, as they may '
'break the reliance on pattern-matching to '
'extract called intrinsics. Encountered a call to '
'the reference {r}, which is bound to the value '
'{v} in this computation.'.format(
r=functional_arg, v=payload.value))
return comp, False
else:
raise ValueError('Called selections are only permitted in '
'transforming to call-comiunant form the case that '
'they select from lambda parameters; encountered a '
'call to selection {s}.'.format(s=functional_arg))
else:
raise ValueError('During transformation to call-dominant form, we rely '
'on the assumption that all called functions are '
'either: compiled computations; intrinsics; lambdas '
'with nonfuntional return types; or selections from '
'lambda parameters. Encountered the called function '
'{f} of type {t}.'.format(
f=functional_arg, t=type(functional_arg)))
transformation_utils.transform_postorder_with_symbol_bindings(
comp, _check_for_call_arguments, symbol_tree)
def _inline_functions(comp):
function_type_reference_names = []
def _populate_function_type_ref_names(comp):
if comp.is_reference() and comp.type_signature.is_function():
function_type_reference_names.append(comp.name)
return comp, False
transformation_utils.transform_postorder(comp,
_populate_function_type_ref_names)
return tree_transformations.inline_block_locals(
comp, variable_names=set(function_type_reference_names))
def _extract_calls_and_blocks(comp):
def _predicate(comp):
return comp.is_call()
block_extracter = tree_transformations.ExtractComputation(comp, _predicate)
return transformation_utils.transform_postorder(comp,
block_extracter.transform)
def _resolve_calls_to_concrete_functions(comp):
"""Removes symbol bindings which contain functional types."""
comp, refs_renamed = tree_transformations.uniquify_reference_names(comp)
comp, fns_resolved = tree_transformations.resolve_higher_order_functions(
comp)
comp, called_lambdas_replaced = tree_transformations.replace_called_lambda_with_block(
comp)
comp, selections_inlined = tree_transformations.inline_selections_from_tuple(
comp)
if selections_inlined:
comp, _ = tree_transformations.uniquify_reference_names(comp)
comp, fns_inlined = _inline_functions(comp)
comp, locals_removed = tree_transformations.remove_unused_block_locals(comp)
modified = (
refs_renamed or fns_resolved or called_lambdas_replaced or
selections_inlined or fns_inlined or locals_removed)
return comp, modified
  # First normalize the tree so every call targets a concrete function, then
  # assert that property before running the cleanup passes below.
  comp, modified = _resolve_calls_to_concrete_functions(comp)
  _check_calls_are_concrete(comp)
  # Cleanup passes toward call-dominant form; each returns (comp, bool).
  for transform in [
      _extract_calls_and_blocks,
      # Extraction can leave some tuples packing references to clean up. Leaving
      # would not violate CDF, but we prefer to do this for cleanliness.
      tree_transformations.inline_selections_from_tuple,
      tree_transformations.merge_chained_blocks,
      tree_transformations.remove_duplicate_block_locals,
      tree_transformations.remove_unused_block_locals,
      tree_transformations.uniquify_reference_names,
  ]:
    comp, transformed = transform(comp)
    modified = modified or transformed
  return comp, modified
| [
"tensorflow.copybara@gmail.com"
] | tensorflow.copybara@gmail.com |
919de8bda5555e026279ff964d9c8272e55f685d | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/performance_statistics_description.py | 0f598e45cec3f2d9bde0398e97bdd6052bb9f0e3 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def PerformanceStatisticsDescription(vim, *args, **kwargs):
    '''Data object to capture all information needed to describe a sample inventory.'''
    obj = vim.client.factory.create('{urn:vim25}PerformanceStatisticsDescription')
    # do some validation checking...
    # (NOTE: this generated guard can never fire -- lengths are non-negative)
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = []
    optional = ['intervals', 'dynamicProperty', 'dynamicType']
    allowed = required + optional
    # Positional arguments are mapped onto attribute names in declaration order.
    for attr_name, attr_value in zip(allowed, args):
        setattr(obj, attr_name, attr_value)
    # Keyword arguments must name a known attribute.
    for attr_name, attr_value in kwargs.items():
        if attr_name not in allowed:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (attr_name, ", ".join(allowed)))
        setattr(obj, attr_name, attr_value)
    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
5774b8d69dcd3e00d299f03348bf1b4f69ab5b72 | 6a6d8c0c8ddd6f5a1c03788f35320dd4b82314ea | /yamtbx/command_line/resolution_shells.py | cf6f26cf672b843e46f52bd2e3242f51bb60570a | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"MIT"
] | permissive | nsls-ii-mx/yamtbx | b817a131a8f6f515db99bc1743f81218997ac4ed | 311cf5a20e27a035a9e89c2abcb3c7d5e3684d67 | refs/heads/master | 2021-01-11T12:05:38.166937 | 2017-01-24T16:26:44 | 2017-01-24T16:26:44 | 76,574,177 | 1 | 0 | null | 2016-12-15T16:00:06 | 2016-12-15T16:00:06 | null | UTF-8 | Python | false | false | 1,555 | py | # LIBTBX_SET_DISPATCHER_NAME yamtbx.resolution_shells
def run(d_max, d_min, nbins, power, quiet=False):
    """Split the resolution range d_max..d_min (Angstrom) into `nbins` shells.

    Shell boundaries are equally spaced in 1/d^power (power=2: XDS style,
    power=3: scalepack style).

    Returns a list of nbins+1 boundaries in Angstrom, starting at d_max.
    """
    step = (d_min ** (-power) - d_max ** (-power)) / float(nbins)
    start = 1. / (d_max ** power)
    # List comprehension instead of map/xrange: the original was Python 2
    # only (print statements, xrange, and d_vals[1:] fails on a py3 map).
    d_vals = [(start + i * step) ** (-1. / power) for i in range(nbins + 1)]
    if not quiet:
        print("%d resolution shells (%.3f - %.3f A) split by 1/d^%d"
              % (nbins, d_max, d_min, power))
        print(" ".join("%.3f" % d for d in d_vals))
        print("")
        print("For XSCALE,")
        print(" RESOLUTION_SHELLS= %s" % " ".join("%.3f" % d for d in d_vals[1:]))
    return d_vals
# run()
if __name__ == "__main__":
    import sys
    import optparse
    # Command line: yamtbx.resolution_shells [-n NBINS] [-p POWER] d_max d_min
    parser = optparse.OptionParser(prog="yamtbx.resolution_shells",
                                   description="Show resolution shells",
                                   usage="usage: %prog [options] d_max d_min")
    parser.add_option("-n","--nshells", action="store", dest="nbins", type=int, default=9,
                      help="Number of shells (default: 9)")
    parser.add_option("-p","--power", action="store", dest="pow", type=int, default=2,
                      help="Split shells by 1/d^power. 2: xds style (default); 3: scalepack style")
    opts, args = parser.parse_args(sys.argv[1:])
    # Exactly two positional arguments (the resolution limits) are required.
    if len(args) != 2:
        parser.print_help()
        quit()
    try:
        d_max, d_min = map(float, args)
    except:
        parser.print_help()
        quit()
    # Accept the limits in either order; d_max is the low-resolution limit.
    if d_max < d_min: d_max, d_min = d_min, d_max
    run(d_max, d_min, opts.nbins, opts.pow)
| [
"keitaroyam@users.noreply.github.com"
] | keitaroyam@users.noreply.github.com |
6dbd9a7835aecb05ef4225d9b2774b2348f87fd2 | 1b3c32f1de0b0fb88f181ae1e1f47f00fcea576f | /setup.py | 2e3c13130fccf12524868690d84853acdaa41aa7 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unicode"
] | permissive | source-foundry/font-unicode | 0df33bda7774c926b1258e7003962a62ab2a1b4f | 74cc56f5674f41ee09f47f2c8f3dda0349a1ff73 | refs/heads/master | 2021-01-10T03:40:43.528760 | 2016-02-15T03:46:19 | 2016-02-15T03:46:19 | 43,691,845 | 8 | 2 | null | 2017-03-05T21:18:33 | 2015-10-05T15:08:31 | Python | UTF-8 | Python | false | false | 2,314 | py | import os
import re
from setuptools import setup, find_packages
def docs_read(fname):
    """Return the contents of docs/<fname>, resolved relative to this file.

    Opened in a ``with`` block so the handle is closed deterministically
    (the original left the file object open until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), 'docs', fname)) as f:
        return f.read()
def version_read():
    """Return the package version "major.minor.patch" parsed from
    lib/fontunicode/settings.py.

    Raises ValueError with a clear message when a version field cannot be
    found (the original crashed with AttributeError on a None match).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 'lib', 'fontunicode', 'settings.py')
    with open(settings_path) as f:  # close the handle (original leaked it)
        settings_file = f.read()
    parts = []
    for field in ('major_version', 'minor_version', 'patch_version'):
        # Raw string: bare \s and \d in plain literals are invalid escape
        # sequences (DeprecationWarning today, an error in future Pythons).
        match = re.search(r"""%s\s*?=\s*?["']{1}(\d+)["']{1}""" % field,
                          settings_file)
        if match is None:
            raise ValueError("could not find %s in %s" % (field, settings_path))
        # (\d+) always captures at least one digit, so the original's
        # len() == 0 fallbacks were dead code and are dropped.
        parts.append(match.group(1))
    return ".".join(parts)
# Package metadata and entry points for the font-unicode command line tool.
setup(
    name='font-unicode',
    # The version string is parsed out of lib/fontunicode/settings.py.
    version=version_read(),
    description='Command line Unicode character code point and character name search',
    long_description=(docs_read('README.rst')),
    url='https://github.com/source-foundry/font-unicode',
    license='MIT license',
    author='Christopher Simpkins',
    author_email='chris@sourcefoundry.org',
    platforms=['any'],
    # Installs the `font-unicode` console script pointing at fontunicode.app:main.
    entry_points = {
        'console_scripts': [
            'font-unicode = fontunicode.app:main'
        ],
    },
    packages=find_packages("lib"),
    package_dir={'': 'lib'},
    install_requires=['commandlines'],
    keywords='unicode, font, fonts, typeface, typefaces, type, type design, type development, character, code point, name, search',
    include_package_data=True,
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows'
    ],
)
"git.simpkins@gmail.com"
] | git.simpkins@gmail.com |
1269ca1daa7c5c39a3505ef9cf0ed5ba02d2c6ff | 09409b6910f7d58a28e46b90b111d5ff3d5442cc | /VSRTorch/Framework/Environment.py | ee43920616307bf06becbc22dbd67ab4f8913c6b | [
"MIT"
] | permissive | moyulization/VideoSuperResolution | 5600ae1cc0638226c3f5683b84e6731ba5e56f10 | dc8bf94aa65c1a4e92e6024ca77b402f5b252fcf | refs/heads/master | 2020-05-23T04:49:06.309103 | 2019-05-07T11:31:24 | 2019-05-07T11:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,557 | py | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/5/7 下午5:21
import logging
from pathlib import Path
import numpy as np
import torch
def _make_ckpt_name(name, step):
return '{}_ep{:04d}.pth'.format(name, step)
def _parse_ckpt_name(name):
if not name:
return 0
model_name, epochs = Path(name).stem.split('.')[0].split('_')
return int(epochs[2:])
class Env:
  """Pytorch model runtime Env-ironment.
  Args:
    model: a Model object (note it's NOT nn.Module), representing a container
      of multiple nn.Module objects. See `VSRTorch.Models.Model` for details.
    work_dir: a folder path, working directory of this environment.
    log_level: logging verbosity level.
    pre_train_model: (optional) a path to .pth file to restore the model.
  Usage:
    Use `with` syntax to enter the Env:
    >>> with Env(...) as e: ...
  """
  def __init__(self, model, work_dir, log_level='DEBUG', pre_train_model=None):
    self._m = model
    self._saved = Path(work_dir) / 'save'
    self._logd = Path(work_dir) / 'log'
    self._restored = False
    self._logger = logging.getLogger("VSR")
    self._logger.setLevel(log_level)
    # Empty string when no warm-start file is given; validated in _startup().
    self._pth = Path(pre_train_model or '')
  def _startup(self):
    """Create the save/log folders, validate the warm-start checkpoint path
    and attach a file handler when DEBUG logging is enabled."""
    self._saved.mkdir(parents=True, exist_ok=True)
    self._logd.mkdir(parents=True, exist_ok=True)
    # Fall back to auto-discovery of checkpoints if the path is invalid.
    if not self._pth.exists() or not self._pth.is_file():
      self._pth = None
    # NOTE(review): a new FileHandler is added on every _startup() call, so
    # re-entering the Env would attach duplicate handlers -- confirm intended.
    if self._logger.isEnabledFor(logging.DEBUG):
      hdl = logging.FileHandler(self._logd / 'training.txt')
      self._logger.addHandler(hdl)
  def _close(self):
    # Teardown hook for __exit__; currently nothing to release.
    pass
  def __enter__(self):
    """Create session of tensorflow and build model graph"""
    self._startup()
    self.model.display()
    return self
  def __exit__(self, exc_type, exc_val, exc_tb):
    """Close session"""
    self._close()
  @property
  def model(self):
    """The wrapped Model container object."""
    return self._m
  def _find_last_ckpt(self, pattern):
    """Return the most recently modified checkpoint in the save dir matching
    the glob `pattern`, or None when there is no match."""
    # restore the latest checkpoint in save dir
    # sort as modification time
    ckpt = sorted(self._saved.glob(pattern), key=lambda x: x.stat().st_mtime_ns)
    return ckpt[-1].resolve() if ckpt else None
  def _restore_model(self, epoch=None, pth=None, map_location=None):
    """Load weights for every sub-module (and, when no explicit `pth` is
    given, optimizer states too). Returns the largest epoch parsed from the
    restored checkpoint names, or 0 when nothing was restored."""
    last_epoch = 0
    for key, model in self.model.modules.items():
      if pth is None:
        # Either the exact epoch's checkpoint, or the newest one for `key`.
        if epoch is None:
          ckpt = f'*{key}*.pth'
        else:
          ckpt = _make_ckpt_name(key, epoch)
        fp = self._find_last_ckpt(ckpt)
      else:
        fp = pth
      if fp:
        self._logger.info(f"Restoring params for {key} from {fp}.")
        try:
          last_epoch = max(_parse_ckpt_name(str(fp)), last_epoch)
        except ValueError:
          last_epoch = 0
        try:
          model.load_state_dict(torch.load(str(fp), map_location=map_location))
        except RuntimeError:
          # Incompatible state dict (e.g. architecture change): warn and skip.
          self._logger.warning(f"Couldn't restore state for {key} from {fp}.")
    if pth is None:
      # Optimizer states are only restored in auto-discovery mode.
      for key, opt in self.model.opts.items():
        fp = self._saved / f'{key}.pth'
        try:
          opt.load_state_dict(torch.load(str(fp)))
        except (ValueError, FileNotFoundError):
          self._logger.warning(f"trying to restore state for optimizer {key}, "
                               "but failed.")
    return last_epoch
  def _save_model(self, step):
    """Write every sub-module's weights (tagged with epoch `step`) and every
    optimizer's state into the save dir."""
    for key, model in self.model.modules.items():
      fp = self._saved / _make_ckpt_name(key, step)
      torch.save(model.state_dict(), str(fp))
    for key, opt in self.model.opts.items():
      fp = self._saved / f'{key}.pth'
      torch.save(opt.state_dict(), str(fp))
  def _restore(self, epoch=None, map_location=None):
    """Restore the model once and memoize the resulting epoch number."""
    # restore graph
    if self._restored:
      return self.last_epoch
    self.last_epoch = self._restore_model(epoch, self._pth, map_location)
    self._restored = True
    return self.last_epoch
  def set_seed(self, seed):
    """set a seed for RNG
    Note: RNG in torch and numpy is different.
    """
    np.random.seed(seed)
    torch.manual_seed(seed)
  def export(self, export_dir='.', version=1):
    """export ONNX model.
    Args:
      export_dir: path to save onnx files.
      version: (optional) a child-folder to control output versions.
    """
    # Bump the version until an unused child folder is found.
    export_path = Path(export_dir) / str(version)
    while export_path.exists():
      version += 1 # step ahead 1 version
      export_path = Path(export_dir) / str(version)
    export_path.mkdir(exist_ok=False, parents=True)
    self.model.export(export_path)
    self._logger.info(f"Export ONNX to {str(export_path)}")
| [
"twytwy12345@live.com"
] | twytwy12345@live.com |
4b7b32f5c4bdb6bfa3a50322f262394885ae6996 | 139af68b78734a6bc53bd942ffa05476baf3d71d | /Python Basic 2020/scholarship.py | 46bf7b78b9d1c70997de4b88288fd892fcd3cf19 | [] | no_license | MiroVatov/Python-SoftUni | 7fe3fc0a3928848c5317fb120f789c773bfc117e | 0d0d6f116281b4de8c413d254386e27d992d047b | refs/heads/main | 2023-08-24T09:44:31.261137 | 2021-10-18T14:04:03 | 2021-10-18T14:04:03 | 317,510,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | import math
# SoftUni "Scholarship" problem: choose between a social scholarship and an
# excellence scholarship based on income, average grade and minimum salary.
income = float(input())
average_grades = float(input())
min_salary = float(input())
# Social scholarship is 35% of the minimum salary; the excellence
# scholarship pays 25 BGN per grade point. Both are rounded down.
social_scholarship = math.floor(0.35 * min_salary)
scholarship = math.floor(average_grades * 25)
if income < min_salary:
    # BUG FIX: the original condition `4.5 >= average_grades > 5.50` could
    # never be true; social-only eligibility is 4.5 <= grade < 5.5 (grades
    # of 5.5 and above are compared against the excellence award below).
    if 4.5 <= average_grades < 5.50:
        print(f'You get a Social scholarship {social_scholarship} BGN')
    elif average_grades < 4.5:
        print('You cannot get a scholarship!')
if income > min_salary:
    # BUG FIX: the original tested `<= 5.5` first, so a grade of exactly 5.5
    # printed "cannot" even though the excellence branch used `>= 5.5`.
    if average_grades < 5.5:
        print('You cannot get a scholarship!')
    else:
        print(f'You get a scholarship for excellent results {scholarship} BGN')
if income < min_salary:
    if average_grades >= 5.5:
        # NOTE(review): equal amounts print nothing, and income == min_salary
        # is never handled -- both mirror the original behaviour; confirm
        # against the problem statement before changing.
        if social_scholarship > scholarship:
            print(f'You get a Social scholarship {social_scholarship} BGN')
        elif scholarship > social_scholarship:
            print(f'You get a scholarship for excellent results {scholarship} BGN')
| [
"noreply@github.com"
] | MiroVatov.noreply@github.com |
923b08f989cc5e5cf10261fb953a99ce009c5723 | 738b6d6ec4572f5848940b6adc58907a03bda6fb | /tests/pymcell4_positive/1910_get_molecule_ids_w_pattern/model.py | 4028ae43db75f8bc3d72eae558a1763e960ec613 | [
"Unlicense",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | mcellteam/mcell_tests | 09cd1010a356e0e07c88d7e044a73c5606c6e51a | 34d2d967b75d56edbae999bf0090641850f4f4fe | refs/heads/master | 2021-12-24T02:36:24.987085 | 2021-09-24T14:19:41 | 2021-09-24T14:19:41 | 174,733,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | #!/usr/bin/env python3
# WARNING: This is an automatically generated file and will be overwritten
# by CellBlender on the next model export.
import sys
import os
# Directory containing this model file; BNGL paths are relative to it.
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
# ---- import mcell module located in directory ----
# ---- specified by system variable MCELL_PATH ----
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
    lib_path = os.path.join(MCELL_PATH, 'lib')
    if os.path.exists(os.path.join(lib_path, 'mcell.so')) or \
        os.path.exists(os.path.join(lib_path, 'mcell.pyd')):
        sys.path.append(lib_path)
    else:
        print("Error: Python module mcell.so or mcell.pyd was not found in "
              "directory '" + lib_path + "' constructed from system variable "
              "MCELL_PATH.")
        sys.exit(1)
else:
    print("Error: system variable MCELL_PATH that is used to find the mcell "
          "library was not set.")
    sys.exit(1)
import mcell as m
# NOTE(review): duplicate import below (harmless); generated artifact.
import mcell as m
# ---- model parameters ----
# load parameters from BNGL
params = m.bngl_utils.load_bngl_parameters('model.bngl')
# ---- simulation setup ----
ITERATIONS = 1
TIME_STEP = 1e-06
DUMP = False
EXPORT_DATA_MODEL = True
SEED = 1
# create main model object
model = m.Model()
model.load_bngl('model.bngl')
# ---- configuration ----
model.config.time_step = TIME_STEP
model.config.seed = SEED
model.config.total_iterations = ITERATIONS
model.notifications.rxn_and_species_report = False
model.config.partition_dimension = 2
model.config.subpartition_dimension = 0.2
model.initialize()
model.run_iterations(1)
# Expected initial molecule counts, taken from the BNGL parameter block.
num_AR = params['num_AR']
num_AS = params['num_AS']
num_BT = params['num_BT']
num_BU = params['num_BU']
num_ASBT = params['num_ASBT']
num_ARBU = params['num_ARBU']
# Query molecule ids with BNGL patterns of increasing specificity:
# bare molecule, component state, and fully bonded complexes.
ids_A = model.get_molecule_ids(pattern = m.Complex('A'))
ids_B = model.get_molecule_ids(pattern = m.Complex('B'))
ids_AR = model.get_molecule_ids(pattern = m.Complex('A(a~R)'))
ids_AS = model.get_molecule_ids(pattern = m.Complex('A(a~S)'))
ids_BT = model.get_molecule_ids(pattern = m.Complex('B(b~T)'))
ids_BU = model.get_molecule_ids(pattern = m.Complex('B(b~U)'))
ids_ASBT = model.get_molecule_ids(pattern = m.Complex('A(a~S,b!1).B(b~T,a!1)'))
ids_ARBU = model.get_molecule_ids(pattern = m.Complex('A(a~R,b!1).B(b~U,a!1)'))
ids_AB = model.get_molecule_ids(pattern = m.Complex('A(b!1).B(a!1)'))
# Each pattern must match exactly the molecules instantiated from the BNGL
# seed species; the sums account for free and bound variants.
assert len(ids_A) == num_AR + num_AS + num_ASBT + num_ARBU
assert len(ids_B) == num_BT + num_BU + num_ASBT + num_ARBU
assert len(ids_AR) == num_AR + num_ARBU
assert len(ids_AS) == num_AS + num_ASBT
assert len(ids_BT) == num_BT + num_ASBT
assert len(ids_BU) == num_BU + num_ARBU
assert len(ids_ASBT) == num_ASBT
assert len(ids_ARBU) == num_ARBU
assert len(ids_AB) == num_ASBT + num_ARBU
model.end_simulation()
| [
"ahusar@salk.edu"
] | ahusar@salk.edu |
9c1ad872a0bf0ef2e608edf22975f93cf2a85ed0 | 91be8144fb27840eab01f75b249493a0db80d416 | /statistics/settings.py | c64577ea484a40cc0a4270de350a0c82af53406f | [] | no_license | xutaoding/statistics | dee28454c9ebb21aa5f88862dcd2c3c8013bc68d | f38b2e93c1dc239291ea70fc2fea14d23d0a14ef | refs/heads/master | 2016-09-12T17:03:44.965553 | 2016-05-11T07:58:10 | 2016-05-11T07:58:10 | 56,758,825 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | """
Django settings for statistics project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 's2+9_=21jl9vi0rjri&evcai!d)hzts+e^eu8jo1#6_q4ucs=z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): '*' accepts any Host header; restrict to real hostnames.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'apps.news'
]
# NOTE(review): CSRF middleware is commented out below -- confirm that POST
# endpoints are protected some other way before going live.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'statistics.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'statistics.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): no default database is configured (entries commented out);
# any ORM access will fail until one is defined.
DATABASES = {
    # 'default': {
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    # }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"xutao.ding@chinascopefinancial.com"
] | xutao.ding@chinascopefinancial.com |
f9b7c3a7b6915c1378baab9f04fb52f655f514bf | e7a804e5e68c4019262a5cb619ba80ef34614ae3 | /pybind/slxos/v16r_1_00b/interface/ethernet/port_profile_to_interface_associations/port_profile_port/__init__.py | b023102edcb4930cf7a9d1b5d50c8f0c970d5752 | [
"Apache-2.0"
] | permissive | shivharis/pybind | 787978726f7efa7e4662d32ebe0075f36f6ff2f4 | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | refs/heads/master | 2021-06-10T14:37:04.186120 | 2017-01-24T22:13:25 | 2017-01-24T22:13:25 | 70,860,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,136 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import port_to_port_profile_domain_association
import port_to_port_profile_associations
class port_profile_port(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/port-profile-to-interface-associations/port-profile-port. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__port_to_port_profile_domain_association','__port_to_port_profile_associations',)
_yang_name = 'port-profile-port'
_rest_name = 'port-profile-port'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__port_to_port_profile_associations = YANGDynClass(base=YANGListType("port_to_port_profile_association",port_to_port_profile_associations.port_to_port_profile_associations, yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-to-port-profile-association', extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}), is_container='list', yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)
self.__port_to_port_profile_domain_association = YANGDynClass(base=port_to_port_profile_domain_association.port_to_port_profile_domain_association, is_container='container', presence=False, yang_name="port-to-port-profile-domain-association", rest_name="domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'port-to-port-profile-domain-association-callpoint', u'info': u'Associate a port-profile-domain with an interface.', u'alt-name': u'domain', u'sort-priority': u'120'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'port-profile-to-interface-associations', u'port-profile-port']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'port-profile-port']
def _get_port_to_port_profile_domain_association(self):
"""
Getter method for port_to_port_profile_domain_association, mapped from YANG variable /interface/ethernet/port_profile_to_interface_associations/port_profile_port/port_to_port_profile_domain_association (container)
YANG Description: Associate a port-profile-domain with an interface.
"""
return self.__port_to_port_profile_domain_association
def _set_port_to_port_profile_domain_association(self, v, load=False):
"""
Setter method for port_to_port_profile_domain_association, mapped from YANG variable /interface/ethernet/port_profile_to_interface_associations/port_profile_port/port_to_port_profile_domain_association (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_to_port_profile_domain_association is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_to_port_profile_domain_association() directly.
YANG Description: Associate a port-profile-domain with an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_to_port_profile_domain_association.port_to_port_profile_domain_association, is_container='container', presence=False, yang_name="port-to-port-profile-domain-association", rest_name="domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'port-to-port-profile-domain-association-callpoint', u'info': u'Associate a port-profile-domain with an interface.', u'alt-name': u'domain', u'sort-priority': u'120'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_to_port_profile_domain_association must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_to_port_profile_domain_association.port_to_port_profile_domain_association, is_container='container', presence=False, yang_name="port-to-port-profile-domain-association", rest_name="domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'port-to-port-profile-domain-association-callpoint', u'info': u'Associate a port-profile-domain with an interface.', u'alt-name': u'domain', u'sort-priority': u'120'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)""",
})
self.__port_to_port_profile_domain_association = t
if hasattr(self, '_set'):
self._set()
def _unset_port_to_port_profile_domain_association(self):
self.__port_to_port_profile_domain_association = YANGDynClass(base=port_to_port_profile_domain_association.port_to_port_profile_domain_association, is_container='container', presence=False, yang_name="port-to-port-profile-domain-association", rest_name="domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'port-to-port-profile-domain-association-callpoint', u'info': u'Associate a port-profile-domain with an interface.', u'alt-name': u'domain', u'sort-priority': u'120'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='container', is_config=True)
def _get_port_to_port_profile_associations(self):
"""
Getter method for port_to_port_profile_associations, mapped from YANG variable /interface/ethernet/port_profile_to_interface_associations/port_profile_port/port_to_port_profile_associations (list)
YANG Description: The list of automatic port profiles. Each row
provides the name of the port profile associated
with an interface.
"""
return self.__port_to_port_profile_associations
def _set_port_to_port_profile_associations(self, v, load=False):
"""
Setter method for port_to_port_profile_associations, mapped from YANG variable /interface/ethernet/port_profile_to_interface_associations/port_profile_port/port_to_port_profile_associations (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_to_port_profile_associations is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_to_port_profile_associations() directly.
YANG Description: The list of automatic port profiles. Each row
provides the name of the port profile associated
with an interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("port_to_port_profile_association",port_to_port_profile_associations.port_to_port_profile_associations, yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-to-port-profile-association', extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}), is_container='list', yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_to_port_profile_associations must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("port_to_port_profile_association",port_to_port_profile_associations.port_to_port_profile_associations, yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-to-port-profile-association', extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}), is_container='list', yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)""",
})
self.__port_to_port_profile_associations = t
if hasattr(self, '_set'):
self._set()
  def _unset_port_to_port_profile_associations(self):
    # Rebind the private YANG list member to a fresh, empty YANGDynClass
    # default (pyangbind-generated; mirrors the initializer in __init__).
    self.__port_to_port_profile_associations = YANGDynClass(base=YANGListType("port_to_port_profile_association",port_to_port_profile_associations.port_to_port_profile_associations, yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='port-to-port-profile-association', extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}), is_container='list', yang_name="port-to-port-profile-associations", rest_name="profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Associate a list of port-profiles with an interface.', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'120', u'cli-suppress-list-no': None, u'alt-name': u'profile', u'cli-full-command': None, u'hidden': u'full', u'callpoint': u'port-to-port-profile-associations-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-port-profile', defining_module='brocade-port-profile', yang_type='list', is_config=True)

  # pyangbind-generated public properties wrapping the private containers,
  # and the element map used by the serialization machinery.
  port_to_port_profile_domain_association = __builtin__.property(_get_port_to_port_profile_domain_association, _set_port_to_port_profile_domain_association)
  port_to_port_profile_associations = __builtin__.property(_get_port_to_port_profile_associations, _set_port_to_port_profile_associations)
  _pyangbind_elements = {'port_to_port_profile_domain_association': port_to_port_profile_domain_association, 'port_to_port_profile_associations': port_to_port_profile_associations, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
ae8e9c862609271c8b66204f40532e2e21281027 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyfile/pyfm-011/pyfmlight.py | 7ddadbd3b74d728a4c752e8270718342b39d2480 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | #!/usr/bin/env python
# 3D File Manager in Python OpenGL, light helper routines
#
#import math, sys, rand
#import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
def light(self):
    """Configure a single fixed OpenGL light (GL_LIGHT0) and depth testing.

    Sets mid-grey ambient/diffuse/specular terms and a directional light
    (w component 0.0) shining along the viewer's z axis, enables lighting,
    GL_LIGHT0 and the depth test, and sets the clear colour (black) and
    clear depth for the scene.

    ``self`` is unused; the function only mutates global GL state (it is
    written to be attachable as a method of the file-manager object).

    Cleanup versus the original: the many commented-out alternative values
    and the two locals (light model ambient / local-viewer) whose only uses
    were commented out have been removed.
    """
    light_ambient = [0.5, 0.5, 0.5, 1.0]
    light_diffuse = [0.5, 0.5, 0.5, 1.0]
    light_specular = [.5, .5, .5, 1.0]
    # Fourth component 0.0 makes this a directional light: the vector is a
    # direction, not a position.
    light_position = [0.0, 0.0, 1.0, 0.0]

    # Initialise the lighting properties.
    glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse)
    glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular)
    glLightfv(GL_LIGHT0, GL_POSITION, light_position)

    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glEnable(GL_DEPTH_TEST)

    # Black background; depth buffer cleared to the far plane.
    glClearColor(.0, .0, .0, 1.0)
    glClearDepth(1.0)
| [
"peterglen99@gmail.com"
] | peterglen99@gmail.com |
a35fdea02255d765351560a273e9c3223a934f95 | 4293c8d2e0e8eb7d21e2706ecfdbbe6d80244f5d | /pfurl/tests/test_message.py | 2758baaaaefb60743929b3a867b21ba13d954cc3 | [
"MIT"
] | permissive | FNNDSC/pfurl | 52352a4c9085ee620e509bd5e0b20c82913e52ad | c37e57b5dc03a81a15e566f2d325a7dd1047ac10 | refs/heads/master | 2021-11-22T22:25:54.776531 | 2021-10-28T17:01:27 | 2021-10-28T17:01:27 | 87,982,205 | 1 | 10 | MIT | 2021-01-18T07:40:41 | 2017-04-11T21:45:59 | Python | UTF-8 | Python | false | false | 2,860 | py | from unittest import TestCase
from pfurl import Message
from pfurl import Colors
class TestMessage(TestCase):
    """Smoke test for pfurl's Message logger.

    Drives one test method through colour output, syslog tagging,
    column-width formatting, verbosity filtering, tee-ing to a file,
    retargeting the output stream, tag strings and incremental message
    construction.  The test only asserts that none of these calls crash.
    """
    def test_message_constructor(self):
        # Two independent Message objects exercised side by side.
        message1 = Message()
        message2 = Message()
        message1.syslog(True)
        message1(Colors.RED + Colors.WHITE_BCKGRND + 'hello world!\n' + Colors.NO_COLOUR)
        # Send message via datagram to 'pangea' on port '1701'.
        # message1.to('pangea:1701')
        # message1('hello, pangea!\n');
        # message1('this has been sent over a datagram socket...\n')
        # Now for some column width specs and 'debug' type messages
        # These will all display on the console since debug=5 and the
        # message1.verbosity(10) means that all debug tagged messages with
        # level less-than-or-equal-to 10 will be passed.
        message1.to('stdout')
        message1.verbosity(10)
        message1('starting process 1...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message1('parsing process 1 outputs...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message1('preparing final report...', lw=90, debug=5)
        message1('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2.to('/tmp/message2.log')
        message2.tee(True)
        # A verbosity level of message2.verbosity(1) and a
        # message2.to(sys.stdout) will not output any of the
        # following since the debug level for each message
        # is set to '5'. The verbosity should be at least
        # message2.verbosity(5) for output to appear on the
        # console.
        #
        # If message2.tee(True) and message2.to('/tmp/message2.log')
        # then all messages will be displayed regardless
        # of the internal verbosity level.
        message2.verbosity(1)
        message2('starting process 1...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2('parsing process 1 outputs...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        message2('preparing final report...', lw=90, debug=5)
        message2('[ ok ]\n', rw=20, syslog=False, debug=5)
        message1.to('/tmp/test.log')
        message1('and now to /tmp/test.log\n')
        # NOTE(review): this file handle is never explicitly closed --
        # presumably Message takes ownership of it; confirm against pfurl.
        message2.to(open('/tmp/test2.log', 'a'))
        message2('another message to /tmp/test2.log\n')
        message2.tagstring('MARK-->')
        message2('this text is tagged\n')
        message2('and so is this text\n')
        # A message can also be built up over several append() calls and
        # emitted by calling the object with no arguments.
        message1.clear()
        message1.append('this is message ')
        message1.append('that is constructed over several ')
        message1.append('function calls...\n')
        message1.to('stdout')
        message1()
        message2.tag(False)
        message2('goodbye!\n')
        # didn't crash
        self.assertTrue(True)
| [
"rudolph.pienaar@gmail.com"
] | rudolph.pienaar@gmail.com |
5b607d6b9cee4ca29ffb02c954d4974d9d659227 | eb297ff1e0011438fd184cc338b3fb86859b81c9 | /Chapter 2/2-09.py | f6650c9ed58648f9a33f971fed80805763e78249 | [] | no_license | mynameisbenzo/PythonCrashCourse | c73a4505d9cdfe4df78e3ed01adb3491debf8a9b | 831a9962a3c6cab53ecfdb1d2cceb0dd2d9c5a0a | refs/heads/master | 2021-04-12T08:13:51.772957 | 2018-05-02T05:54:57 | 2018-05-02T05:54:57 | 126,091,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # adding whitespace is fine, but what about taking whitespace out?
favLanguage = 'python '
# whether you see it or not, that space at the end of python is currently being printed
print(favLanguage)
# let's strip that space with rstrip()
favLanguage = favLanguage.rstrip()
# now instead of printing 'python ', we are currently printing 'python'
print(favLanguage)
# using lstrip()
favLanguage = ' python'
favLanguage = favLanguage.lstrip()
# will print 'python' not ' python'
print(favLanguage)
# using strip()
favLanguage = ' python'
favLanguage = favLanguage.strip()
# will print 'python' not ' python '
print(favLanguage) | [
"lorhernandez@csumb.edu"
] | lorhernandez@csumb.edu |
627d5af8cce30f6aae3454e7506d35ace02be8cd | a1abb7681dffe3e9f0b651ed2769c7decc31300e | /src/room.py | c287a8aab8badc4360cd94615f47fa31972f85c3 | [
"MIT"
] | permissive | pyrobit/UnderwaterBattles | 65010faba9e13bbae8a1032da9ee76b767e24981 | 01519291401600bad6dfbbfaecfacbce2219f7c8 | refs/heads/main | 2023-04-23T05:52:20.125325 | 2021-04-30T16:41:17 | 2021-04-30T16:41:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,622 | py | from math import pi, hypot
import pygame as pg
from random import uniform
from objects.bubble import Bubble
from objects.body import Body
from objects.mobs import Mother
from objects.mob_guns import GunBossLeg
from gui.text_box import TextBox
from special_effects import add_effect
from data.config import *
from data.colors import WHITE
from data.paths import FONT_1
from data.mobs import BOSS_SKELETON_BODY
from data.player import BG_RADIUS_00
from room_generator import BOSS_PIECES
class Room:
    """Stores, updates and draws all objects in the current room:

        - mobs;
        - bullets;
        - homing bullets (kept separately: they have health and can be shot down);
        - bubbles;
        - special effects (drawn either below or above the room objects);
        - room hint text;
        - Boss Skeleton body.

    It also keeps per-room parameters:

        - radius of the player's gravitational field, within which bubbles are
          attracted to the player;
        - screen rectangle centred on the player (mobs are drawn only if they
          intersect it);
        - boss disposition state ('in current room', 'in neighbour room',
          'far away'), used to decide whether to draw the boss skeleton.
          Mobs in a neighbour room normally aren't drawn, but the Boss
          Skeleton is so large it is drawn even from the neighbour room.
    """
    # NOTE(review): everything below is a *class* attribute, shared by every
    # Room instance.  The game presumably creates a single Room and rebinds
    # these lists via reset()/the update methods -- confirm before ever
    # creating a second instance.
    mobs = []
    bullets = []
    # Homing bullets are different from regular bullets. They have their own health
    # and can be knocked down by the player's bullets. Therefore, they are kept in a separate list.
    homing_bullets = []
    bubbles = []
    bottom_effects = []  # bottom effects are being drawn before all room objects are drawn
    top_effects = []  # top effects are being drawn after all room objects are drawn
    hint_text = None
    boss_skeleton = Body(BOSS_SKELETON_BODY)
    # This is a list that temporarily stores mobs for the next room while the player is being transported.
    # It is needed in order to separately draw and update mobs in the previous room and mobs
    # in the next room during the player's transportation. After the end of transportation,
    # the main list of mobs is replaced with a temporary list, and the temporary list is cleared.
    new_mobs = []
    # Additional parameters of the room
    gravity_radius = BG_RADIUS_00
    screen_rect = pg.Rect(0, 0, SCR_W, SCR_H)
    boss_state = BOSS_IN_NEIGHBOUR_ROOM
    def __init__(self):
        # Start with an empty hint and the boss skeleton parked at screen centre.
        self.set_hint_text('')
        self.boss_skeleton.update(SCR_W2, SCR_H2, 0, (0, 0), 0.5 * pi)
    @property
    def boss_defeated(self):
        # True only in the boss room, once every mob (the boss included) is gone.
        return self.boss_state == BOSS_IN_CURRENT_ROOM and not self.mobs
    def reset(self):
        """ Method is called when a new game is started.
        Resets all room data.
        """
        self.bubbles = []
        self.bullets = []
        self.homing_bullets = []
        self.top_effects = []
        self.bottom_effects = []
        self.mobs = []
        self.new_mobs = []
        self.boss_state = BOSS_IS_FAR_AWAY
    def set_params_after_transportation(self):
        """Clears all lists of objects in room and replaces the list
        of mobs to list of new mobs for this room.
        After that the temporary list of new mobs is cleared.
        """
        self.bubbles = []
        self.bullets = []
        self.homing_bullets = []
        self.top_effects = []
        self.bottom_effects = []
        self.mobs = self.new_mobs.copy()
        self.new_mobs = []
    def update_boss_state(self):
        """Updates the boss disposition state due to
        transportation of the player to the next room.
        """
        if self.boss_state == BOSS_IN_CURRENT_ROOM:
            self.boss_state = BOSS_IN_NEIGHBOUR_ROOM
        elif (self.boss_state == BOSS_IN_NEIGHBOUR_ROOM and
              any(mob.name in BOSS_PIECES for mob in self.new_mobs)):
            self.boss_state = BOSS_IN_CURRENT_ROOM
        else:
            self.boss_state = BOSS_IS_FAR_AWAY
    def set_hint_text(self, text):
        """
        :param text: list of strings
        sets background room hint text, explaining the rules of the game
        """
        # NOTE(review): __init__ passes a plain '' here although the docstring
        # says list of strings -- TextBox apparently accepts both; confirm.
        self.hint_text = TextBox(text, FONT_1, int(47 * SCR_H/600), True,
                                 WHITE, (2/3 * SCR_H, 11/60 * SCR_H))
    def update_bullets(self, dt):
        for bullet in self.bullets:
            bullet.update(dt)
        # filter out bullets that hit the target or are outside the room and
        # make sure there are not more than 100 bullets (for performance reasons)
        self.bullets = list(filter(lambda b: not b.is_outside and
                                             not b.hit_the_target,
                                   self.bullets))[:100]
    def update_homing_bullets(self, player_x, player_y, dt):
        for bullet in self.homing_bullets:
            bullet.update(dt, player_x, player_y)
        # filter out homing bullets that hit the target or were shot down
        self.homing_bullets = list(filter(lambda b: b.health > 0 and
                                                    not b.hit_the_target,
                                          self.homing_bullets))
    def update_bubbles(self, x, y, dt):
        for bubble in self.bubbles:
            bubble.update(x, y, dt)
        # filter out bubbles that are outside the room
        self.bubbles = list(filter(lambda b: not b.is_outside, self.bubbles))
    def update_effects(self, dt):
        for effect in self.top_effects:
            effect.update(dt)
        for effect in self.bottom_effects:
            effect.update(dt)
        # filter out effects that are not longer running
        self.top_effects = list(filter(lambda e: e.running, self.top_effects))
        self.bottom_effects = list(filter(lambda e: e.running, self.bottom_effects))
    def handle_bullet_explosion(self, bul_x, bul_y):
        """
        Changes mobs' states according to their positions relative
        to the explosion, and adds some special effects.
        """
        # Damage every mob within 500 px of the blast centre.
        for mob in self.mobs:
            if hypot(bul_x - mob.pos[0], bul_y - mob.pos[1]) <= 500:
                mob.health -= 25
                mob.update_body_look()
                add_effect('BigHitLines', self.top_effects, *mob.pos)
        add_effect('PowerfulExplosion', self.bottom_effects, bul_x, bul_y)
        add_effect('Flash', self.top_effects)
    def move_objects(self, offset):
        """
        Method is called when the player is being transported
        to the next room. The objects of previous room become
        moved by the given offset to be drawn properly during
        player's transportation
        """
        for bubble in self.bubbles:
            bubble.move(*offset)
        for mob in self.mobs:
            mob.move(*offset)
        for bullet in self.bullets:
            bullet.move(*offset)
        for bullet in self.homing_bullets:
            bullet.move(*offset)
        # Re-centre the boss skeleton, then shift it only if the boss sits in
        # the neighbouring room (it is drawn across room boundaries).
        self.boss_skeleton.update(SCR_W2, SCR_H2, 0, (0, 0), 0.5 * pi)
        if self.boss_state == BOSS_IN_NEIGHBOUR_ROOM:
            self.boss_skeleton.move(*offset)
    def set_gravity_radius(self, gravity_radius):
        """ Sets the new radius of player's gravitational field. """
        # Only meaningful while mobs remain; once the room is cleared,
        # maximize_gravity() takes over.
        if self.mobs:
            self.gravity_radius = gravity_radius
            for bubble in self.bubbles:
                bubble.gravity_r = gravity_radius
    def maximize_gravity(self):
        """
        Method is called when all mobs in the room are killed.
        The radius of player's gravitational field is set equal to
        the diameter of room, so that every bubble starts
        gravitating to player regardless of his position in the room.
        Also speeds of bubbles are maximized to reach player faster.
        """
        for bubble in self.bubbles:
            bubble.gravity_r = 2 * ROOM_RADIUS
            bubble.maximize_vel()
    def update_mobs(self, target, dt):
        """Updates mobs in the room. All mobs receive a list of bullets
        as input in order to add new generated bullets to it.
        Some mobs are capable of creating new mobs.
        New mobs generated by them are put into the list of generated mobs
        Then the main list of mobs is extended with this list of generated mobs.
        """
        generated_mobs = []
        for mob in self.mobs:
            # Boss legs fire homing bullets; every other gun fires regular ones.
            if isinstance(mob.gun, GunBossLeg):
                mob.update(target, self.homing_bullets, self.screen_rect, dt)
            else:
                mob.update(target, self.bullets, self.screen_rect, dt)
            if isinstance(mob, Mother):
                generated_mobs.extend(mob.generate_mob(dt))
        self.mobs.extend(generated_mobs)
        # filter out the mobs that are killed by player
        for mob in self.mobs:
            if mob.health <= 0:
                self.add_bubbles(mob.pos, mob.bubbles)
        self.mobs = list(filter(lambda m: m.health > 0, self.mobs))
    def update_new_mobs(self, player_x, player_y, dt):
        """
        Method updates positions and bodies of all mobs of the room,
        player is being transported to.
        """
        target = (player_x, player_y)
        for mob in self.new_mobs:
            mob.update_pos(dt)
            mob.gamma = mob.count_gamma()
            mob.update_body(self.screen_rect, dt, target)
    def set_screen_rect(self, pos):
        """Sets the center of room screen-rectangle equal to the player's new pos. """
        self.screen_rect.center = pos
    def update(self, player_pos, dt):
        """Updates all objects in the room and room parameters. """
        self.set_screen_rect(player_pos)
        self.update_mobs(player_pos, dt)
        self.update_bubbles(*player_pos, dt)
        self.update_bullets(dt)
        self.update_homing_bullets(*player_pos, dt)
        self.update_effects(dt)
        if not self.mobs:
            self.maximize_gravity()
    def add_bubbles(self, mob_pos, mob_bubbles):
        """Method is called when a mob is killed.
        Adds mob's bubbles to the list of bubbles.
        """
        for name, n in mob_bubbles.items():
            for i in range(n):
                # Each bubble leaves the mob's position at a random angle.
                bubble = Bubble(*mob_pos, uniform(0, 2 * pi),
                                self.gravity_radius, name)
                self.bubbles.append(bubble)
    # The draw_* methods below each render one layer of the room; dx/dy is
    # the camera offset subtracted from world coordinates.
    def draw_hint_text(self, surface, dx, dy):
        self.hint_text.draw(surface, dx, dy)
    def draw_bubbles(self, surface, dx, dy):
        for bubble in self.bubbles:
            bubble.draw(surface, dx, dy)
    def draw_mobs(self, surface, dx, dy):
        for mob in self.mobs:
            mob.draw(surface, dx, dy, self.screen_rect)
    def draw_new_mobs(self, surface, dx, dy):
        # Mobs of the next room, drawn only during player transportation.
        for mob in self.new_mobs:
            mob.draw(surface, dx, dy, self.screen_rect)
    def draw_boss_skeleton(self, surface, dx, dy):
        # Drawn from the current *and* the neighbouring room (it is huge).
        if self.boss_state != BOSS_IS_FAR_AWAY:
            self.boss_skeleton.draw(surface, dx, dy)
    def draw_bombs(self, surface, dx, dy):
        # Stationary bullets (vel == 0) are bombs and form their own layer.
        for bullet in self.bullets:
            if bullet.vel == 0:
                bullet.draw(surface, dx, dy)
    def draw_bullets(self, surface, dx, dy):
        for bullet in self.bullets:
            if bullet.vel != 0:
                bullet.draw(surface, dx, dy)
        for bullet in self.homing_bullets:
            bullet.draw(surface, dx, dy)
    def draw_top_effects(self, surface, dx, dy):
        for effect in self.top_effects:
            effect.draw(surface, dx, dy)
    def draw_bottom_effects(self, surface, dx, dy):
        for effect in self.bottom_effects:
            effect.draw(surface, dx, dy)
"ildar.239@mail.ru"
] | ildar.239@mail.ru |
358135f3c10986d829cb49c921930c45ce321063 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/negativeInteger/Schema+Instance/NISTXML-SV-IV-atomic-negativeInteger-minExclusive-1-1.py | b2bf10a8b4a038f05f850d22b1b89862949d679d | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 333 | py | from output.models.nist_data.atomic.negative_integer.schema_instance.nistschema_sv_iv_atomic_negative_integer_min_exclusive_1_xsd.nistschema_sv_iv_atomic_negative_integer_min_exclusive_1 import NistschemaSvIvAtomicNegativeIntegerMinExclusive1
# Generated instance document for the NIST negativeInteger/minExclusive
# schema test: wraps a single negativeInteger value that must lie above the
# schema's minExclusive facet.
obj = NistschemaSvIvAtomicNegativeIntegerMinExclusive1(
    value=-999999999999999998
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
7d7bef5c7b17b9b279306a74373faadcaf8172a8 | e20a786fb69b361e1ddfa509df63713371fa1eae | /examples/random_bot_example.py | eb37b882de7d027d8f75eefe133e8e5fa9a3bbe4 | [
"Apache-2.0"
] | permissive | greentec/ffai | d7fec6192c75f996c77f714cef7d76e06d44b0af | 3a966a395e5d48c94377cf8dd367897f205d3f9b | refs/heads/master | 2020-06-19T10:34:14.722778 | 2019-06-24T14:15:20 | 2019-06-24T14:15:20 | 196,679,040 | 1 | 0 | Apache-2.0 | 2019-07-13T04:42:58 | 2019-07-13T04:42:57 | null | UTF-8 | Python | false | false | 1,980 | py | #!/usr/bin/env python3
from ffai.core.game import *
from ffai.core.model import *
from ffai.ai.registry import register_bot, make_bot
import numpy as np
class MyRandomBot(Agent):
    """Baseline agent that samples uniformly among the available actions."""

    def __init__(self, name, seed=None):
        super().__init__(name)
        self.my_team = None
        self.rnd = np.random.RandomState(seed)

    def new_game(self, game, team):
        # Remember which side we control for the duration of this game.
        self.my_team = team

    def act(self, game):
        """Return a random legal action, skipping PLACE_PLAYER choices."""
        # Keep drawing action types until we land on something other than
        # PLACE_PLAYER, which this bot never handles.
        chosen = self.rnd.choice(game.state.available_actions)
        while chosen.action_type == ActionType.PLACE_PLAYER:
            chosen = self.rnd.choice(game.state.available_actions)
        # Pick a random position and/or player when the action offers any.
        where = None if len(chosen.positions) == 0 else self.rnd.choice(chosen.positions)
        whom = None if len(chosen.players) == 0 else self.rnd.choice(chosen.players)
        # Hand the assembled action object back to the framework.
        return Action(chosen.action_type, pos=where, player=whom)

    def end_game(self, game):
        # Stateless bot: nothing to record when the game ends.
        pass
# Register the bot to the framework so make_bot() below can look it up by name.
register_bot('my-random-bot', MyRandomBot)
if __name__ == "__main__":
    # Load configurations, rules, arena and teams
    config = get_config("ff-11-bot-bowl-i.json")
    ruleset = get_rule_set(config.ruleset)
    arena = get_arena(config.arena)
    home = get_team_by_id("human-1", ruleset)
    away = get_team_by_id("human-2", ruleset)
    config.competition_mode = False
    config.debug_mode = False
    # Play 10 games between two copies of the random bot
    game_times = []  # NOTE(review): never appended to -- presumably leftover from a timing experiment
    for i in range(10):
        away_agent = make_bot("my-random-bot")
        home_agent = make_bot("my-random-bot")
        game = Game(i, home, away, home_agent, away_agent, config, arena=arena, ruleset=ruleset)
        game.config.fast_mode = True
        print("Starting game", (i+1))
        game.init()  # NOTE(review): with bot agents this appears to run the game to completion -- confirm
        print("Game is over")
| [
"njustesen@gmail.com"
] | njustesen@gmail.com |
69aceb2215e2178aacdb6e054f0e95c840410ad3 | 19333aed645a4721108a67ba4d90292d2383314e | /52totalNQueens.py | 0168c944b93eb3ec6b25f8e5e9373388f24a80ec | [] | no_license | qinzhouhit/leetcode | f39f8720a19259310293c1eb2975ce459f7cf702 | 502e121cc25fcd81afe3d029145aeee56db794f0 | refs/heads/master | 2023-06-24T09:18:25.715450 | 2021-07-21T07:21:42 | 2021-07-21T07:21:42 | 189,679,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | '''
keys:
Solutions:
Similar:
T:
S:
'''
from typing import List
class Solution:
    def totalNQueens(self, n: int) -> int:
        """Return the number of distinct solutions to the n-queens puzzle.

        Backtracks row by row; column and diagonal occupancy is tracked in
        O(1) arrays indexed by c, r + c and r - c.

        Bug fix: the original passed the running count down the recursion
        (``backtrack(r + 1, ct)``) and discarded the return value, so every
        call for n >= 2 reported 0.  Each frame now returns the number of
        completions it found and the caller accumulates them.
        """

        def could_place(r, c):
            # A square is safe when its column and both diagonals are free.
            return not (cols[c] + anti_diagonal[r - c] + diagonal[r + c])

        def place_queen(r, c):
            cols[c] = 1
            anti_diagonal[r - c] = 1
            diagonal[r + c] = 1

        def remove_queen(r, c):
            cols[c] = 0
            anti_diagonal[r - c] = 0
            diagonal[r + c] = 0

        def backtrack(r=0):
            # Count solutions for rows r..n-1 given the current occupancy.
            ct = 0
            for c in range(n):
                if could_place(r, c):
                    place_queen(r, c)
                    if r + 1 == n:          # queen placed on the last row
                        ct += 1
                    else:
                        ct += backtrack(r + 1)
                    remove_queen(r, c)
            return ct

        cols = [0] * n
        diagonal = [0] * (2 * n - 1)
        anti_diagonal = [0] * (2 * n - 1)
        return backtrack(0)
"qinzhouhit@gmail.com"
] | qinzhouhit@gmail.com |
aaf95bc377b4f6ac5571ff8ba34a024808a717cf | 786de89be635eb21295070a6a3452f3a7fe6712c | /psana_test/tags/V00-08-19/src/unitTests.py | 5fbaf4dccb077c9ee876b5b02f4684485a8f535a | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,300 | py | #--------------------------------------------------------------------------
# Description:
# Test script for psana_test
#
#------------------------------------------------------------------------
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import os
import tempfile
import unittest
import subprocess as sb
import psana
import pickle
import shutil
import numpy as np
from psana_test import epicsPvToStr
import psana_test.psanaTestLib as ptl
import psana_test.liveModeSimLib as liveModeLib
from AppUtils.AppDataPath import AppDataPath
import multiprocessing
DATADIR = ptl.getTestDataDir()
OUTDIR = "data/psana_test"
#------------------
# Utility functions
#------------------
def getLinesBeforeAndAfterPos(string, pos, linesBefore, linesAfter):
    '''Return (startPos, endPos) bracketing ``pos`` in ``string``.

    startPos is the index of the newline ``linesBefore`` lines before pos
    (or 0 if the start of the string is reached first); endPos is the index
    of the newline ``linesAfter`` lines after pos (or len(string) if the
    end is reached first).  Both counts are clamped to at least 1.

    Bug fixes versus the original:
      * the clamps used min(1, n), which silently capped both counts at one
        line no matter what the caller asked for -- they now use max(1, n);
      * the forward scan restarted str.find at the newline it had just
        found, so it could never advance past the first newline; later
        iterations now search from one character past the previous match.
    '''
    linesBefore = max(1, linesBefore)
    linesAfter = max(1, linesAfter)

    # Walk backwards: rfind's end argument is exclusive, so each call
    # naturally moves past the newline found on the previous iteration.
    startPos = pos
    for _ in range(linesBefore):
        startPos = string.rfind('\n', 0, startPos)
        if startPos == -1:
            startPos = 0
            break

    # Walk forwards: after the first match, start one character past it,
    # otherwise find() would keep returning the same newline.
    endPos = pos
    for i in range(linesAfter):
        endPos = string.find('\n', endPos if i == 0 else endPos + 1)
        if endPos == -1:
            endPos = len(string)
            break
    return startPos, endPos
def getH5OutfileName(path):
    """Map an input file path to the matching .h5 output path inside OUTDIR."""
    stem, _ = os.path.splitext(os.path.basename(path))
    return os.path.join(OUTDIR, stem + '.h5')
#-------------------------------
# Unit test class definition --
#-------------------------------
class Psana( unittest.TestCase ) :
    def setUp(self) :
        """
        Method called to prepare the test fixture. This is called immediately
        before calling the test method; any exception raised by this method
        will be considered an error rather than a test failure.

        Verifies the shared input/output directories exist and initialises
        per-test flags.
        """
        assert os.path.exists(DATADIR), "Data dir: %s does not exist, cannot run unit tests" % DATADIR
        assert os.path.exists(OUTDIR), "Output directory: %s does not exist, can't run unit tests" % OUTDIR
        self.cleanUp = True    # delete intermediate files if True
        self.verbose = False   # print psana output, commands, etc. if True
    def tearDown(self) :
        """
        Method called immediately after the test method has been called and
        the result recorded. This is called even if the test method raised
        an exception, so the implementation in subclasses may need to be
        particularly careful about checking internal state. Any exception raised
        by this method will be considered an error rather than a test failure.
        This method will only be called if the setUp() succeeds, regardless
        of the outcome of the test method.
        """
        # Nothing shared to clean up; individual tests remove their own temp files.
        pass
def runPsanaOnCfg(self,cfgfile=None,cmdLineOptions='', errorCheck=True, linesBefore=10, linesAfter=5):
'''Runs psana, takes cfgfile object as well as cmdLineOptions.
If errorCheck is True it tests that lower case psana output does not include: fatal, error,
segmentation fault, seg falut, traceback
returns the pair of stdout, stderr from the psana run
'''
assert cfgfile is not None or cmdLineOptions is not '', "one of cfgfile or cmdLineOptions must be set"
assert isinstance(cmdLineOptions,str), "extraOpts for psana command line is %r, not a str" % cmdLineOptions
cfgFileStr = ''
if cfgfile is not None:
cfgfile.flush()
cfgFileStr = '-c %s' % cfgfile.name
psana_cmd = "psana %s %s" % (cmdLineOptions,cfgFileStr)
p = sb.Popen(psana_cmd,shell=True,stdout=sb.PIPE, stderr=sb.PIPE)
o,e = p.communicate()
if self.verbose:
print "===== psana cmd ======"
print psana_cmd
if cfgfile is not None:
print "===== psana cfg file ===="
print cfgfile.read()
print "===== psana stdout ===="
print o
print "===== psana stderr ===="
print e
sys.stdout.flush()
if errorCheck:
for output,source in zip([o,e],['stdout','stderr']):
lowerOutput = output.lower()
for errorTerm in ['fatal', 'error', 'segmentation fault', 'seg fault', 'traceback']:
pos = lowerOutput.find(errorTerm)
if pos >= 0:
startPos, endPos = getLinesBeforeAndAfterPos(lowerOutput,pos,linesBefore,linesAfter)
self.assertTrue(False,msg="'%s' found in psana output: ...\n%s\n..." % \
(errorTerm, output[startPos:endPos]))
return o,e
    def h5Translate(self, inFile, outFile, cmdLineOptions=''):
        """Run the psana hdf5 Translator module on inFile, producing outFile.

        Probes readability of the input and writability of the output path up
        front (with clearer failure messages than psana itself would give),
        then runs psana with the Translator.H5Output module and asserts the
        output file was produced.  Uses the Python 2 ``file()`` builtin.
        """
        cmdLine = cmdLineOptions
        cmdLine += " -m Translator.H5Output"
        cmdLine += " -o Translator.H5Output.output_file=%s" % outFile
        cmdLine += " -o Translator.H5Output.overwrite=True"
        cmdLine += " %s" % inFile
        self.assertTrue(os.path.exists(inFile), "test data file: %s not found" % inFile)
        # Check we can actually open the input for reading.
        try:
            fin = file(inFile,'r')
        except:
            self.assertTrue(False,msg="test data exists, but this program cannot read it")
        fin.close()
        self.assertTrue(os.path.exists(os.path.split(outFile)[0]),msg="output directory does not exist")
        # Check we can create the output file, then remove the probe so the
        # translator itself creates it.
        try:
            fout = file(outFile,'w')
        except:
            self.assertTrue(False,msg="program cannot write the file: %s check for permission issues" % outFile)
        fout.close()
        os.unlink(outFile)
        self.runPsanaOnCfg(cmdLineOptions=cmdLine)
        self.assertTrue(os.path.exists(outFile), msg="Translation did not produce outfile: %s" % outFile)
    def test_Index(self):
        """Round-trips psana EventTime objects through pickle and checks that
        indexed (idx-mode) access retrieves the expected events, both directly
        and via the calib-cycle interface.
        """
        # test the pickling of EventTime objects
        ds = psana.DataSource('dir=/reg/g/psdm/data_test/multifile/test_005_xcstut13:exp=xcstut13:run=999')
        savetimes = []
        nevent=0
        for evt in ds.events():
            evtid = evt.get(psana.EventId)
            evttime = evtid.idxtime()
            if nevent != 1: #only save events 0,2,3
                savetimes.append(evttime)
            nevent+=1
            if nevent>=4:
                break
        myfile = os.path.join(OUTDIR,'savetimes.pkl')
        f=open(myfile,'wb')
        pickle.dump(savetimes, f)
        f.close()
        # check that we get the right events back using the pickled EventTime objects
        f=open(myfile,'rb')
        times = pickle.load(f)
        f.close()
        os.remove(myfile)
        # Reopen the same data in indexed (:idx) mode.
        ds = psana.DataSource('dir=/reg/g/psdm/data_test/multifile/test_005_xcstut13:exp=xcstut13:run=999:idx')
        run = ds.runs().next()
        # Expected (fiducial, sec, nsec) for saved events 0, 2 and 3.
        expectFid = [5366,11177,14060]
        expectSec = [1339858956,1339858972,1339858980]
        expectNsec = [671607864,816395836,826443448]
        for i in range(len(times)):
            # NOTE(review): `id` shadows the builtin of the same name.
            id = run.event(times[i]).get(psana.EventId)
            self.assertEqual(id.fiducials(), expectFid[i], msg="incorrect fiducials from indexing. found %d, expect %d" % (id.fiducials(), expectFid[i]))
            self.assertEqual(id.time()[0], expectSec[i], msg="incorrect seconds from indexing. found %d, expect %d" % (id.time()[0],expectSec[i]))
            self.assertEqual(id.time()[1], expectNsec[i], msg="incorrect nanoseconds from indexing. found %d, expect %d" % (id.time()[1],expectNsec[i]))
        self.assertEqual(run.nsteps(), 5, msg="incorrect number of calibsteps from indexing. found %d, expect 5" % run.nsteps())
        # test that the calibcycle interface can also get a correct event
        calibtimes = run.times(2)
        self.assertEqual(len(calibtimes), 1, msg="incorrect number of events in calibstep. found %d, expect 1" % len(calibtimes))
        id = run.event(calibtimes[0]).get(psana.EventId)
        self.assertEqual(id.fiducials(), expectFid[1], msg="incorrect fiducials from calibcycle-indexing. found %d, expect %d" % (id.fiducials(), expectFid[1]))
        self.assertEqual(id.time()[0], expectSec[1], msg="incorrect seconds from calibcycle-indexing. found %d, expect %d" % (id.time()[0],expectSec[1]))
        self.assertEqual(id.time()[1], expectNsec[1], msg="incorrect nanoseconds from calibcycle-indexing. found %d, expect %d" % (id.time()[1],expectNsec[1]))
    def test_MoreRecentEpicsStored(self):
        '''When the same epics pv is recorded from several sources, or several times in the same source,
        the most recent one should be stored. test_073 is a case where this occurs, and before the code
        was changed to add the most recent one, it was the earlier one that was stored.
        The earlier one, from pvid 192, has stamp.sec=767233751 stamp.nsec= 40108031
        while the later one, from pvid 9 stamp.sec=767233751 stamp.nsec=140115967
        '''
        TEST_73 = os.path.join(DATADIR,'test_073_cxi_cxid5514_e423-r0049-s00-c00.xtc')
        assert os.path.exists(TEST_73), "input file: %s does not exist, can't run test" % TEST_73
        # Clear any config file left over from another test.
        psana.setConfigFile('')
        ds = psana.DataSource(TEST_73)
        epicsStore = ds.env().epicsStore()
        ds.events().next() # advance to event 0
        pvName = 'CXI:R56:SHV:VHS2:CH1:CurrentMeasure'
        pv = epicsStore.getPV(pvName)
        self.assertFalse(pv is None, msg="could not get %s from epics store" % pvName)
        # The timestamp must be that of the *later* recording (pvid 9).
        self.assertEqual(pv.stamp().nsec(), 140115967, msg="pv %s does not have expected nano-seconds" % pvName)
        self.assertEqual(pv.stamp().sec(), 767233751, msg="pv %s does not have expected seconds" % pvName)
    def test_EpicsIssues1(self):
        '''Test a number of epics-store issues:
        * That an epics pv that is accidentally masked by an alias is accessible.
          In the test file, test_010, there is an alias CXI:SC2:MZM:09:ENCPOSITIONGET that masks that pv.
          Check that the pv is accessible.
        * For hdf5 input, pv's with the same pvid from different sources are read back
          properly.
        * For hdf5 input, pv's with the same pvid from different sources cannot both have aliases
          (this is a current limitation) test that only one alias is available. If this limitation is
          fixed, this test should be changed to test both aliases are available and work properly.
        * The aliases available for the xtc input and the hdf5 input will be different. Test for
          expected values (aliases in the xtc should be available when psana processes the xtc
          except for aliases that have the same name as an existing pv).
          In hdf5, additional aliases get removed as per the above test, can't have two aliases with
          the same pvId (current limitation, change test if fixed).
        '''
        TEST_10 = os.path.join(DATADIR,'test_010_cxi_cxia4113_e325-r0002-s00-c00.xtc')
        # helper: for every (alias -> (pvname, pvid)) entry, if the alias resolves,
        # the pv must resolve too and both must dump to identical strings.
        # NOTE(review): Python 2 API (iteritems) — this file predates py3.
        def checkAliases(self, aliases, estore, label):
            for source, aliasDict in aliases.iteritems():
                for alias, pvnamePvIdPair in aliasDict.iteritems():
                    pvname,pvId = pvnamePvIdPair
                    aliasPv = estore.getPV(alias)
                    pv = estore.getPV(pvname)
                    if aliasPv is not None:
                        self.assertFalse(pv is None,
                                         msg="%s: source=%s\n can only get pv: %s through alias: %s" % (label, source, pvname, alias))
                        aliasStr = epicsPvToStr(aliasPv)
                        pvStr = epicsPvToStr(pv)
                        self.assertEqual(aliasStr,pvStr,
                                         msg="%s: source=%s\n alias and pv data disagree for alias=%s pv=%s\n%s\%s" % (label, source, alias, pv, aliasStr, pvStr))
                    if hasattr(pv, 'pvName'):
                        self.assertEqual(pvname, pv.pvName(),msg="%s: source=%s\n pvName=%s disagrees with pvName=%s in epics data" % (label, source, pvname,pv.pvName()))
        # some aliases in the xtc file for TEST_010
        aliases={}
        #  alias -> (pvName, pvId)
        aliases['EpicsArch.0:NoDevice.1']={'KB1 Horiz Foucssing Mirror Roll and Pitch':('CXI:KB1:MMS:07.RBV',0),
                                           'KB1 Horiz Foucssing Mirror Roll and Pitch-CXI:KB1:MMS:08.RBV':('CXI:KB1:MMS:08.RBV',1),
                                           'KB1 Vert Foucssing Mirror Pitch':('CXI:KB1:MMS:11.RBV',2),
                                           'KB1 Vert Foucssing Mirror Pitch-CXI:SC2:MZM:08:ENCPOSITIONGET':('CXI:SC2:MZM:08:ENCPOSITIONGET',3),
                                           'KB1 Vert Foucssing Mirror Pitch-CXI:SC2:MZM:09:ENCPOSITIONGET':('CXI:SC2:MZM:09:ENCPOSITIONGET',4)
                                           }
        aliases['EpicsArch.0:NoDevice.0']={'e-beam duration in fs':('SIOC:SYS0:ML01:AO971', 0),
                                           'x-ray beam duration in fs':('SIOC:SYS0:ML01:AO972', 1),
                                           'x-ray power in GW, where the power is defined as x-ray pulse en':('SIOC:SYS0:ML01:AO973', 2),
                                           'DG3 Spectrometer':('CXI:USR:MMS:20.RBV', 3),
                                           'DG3 Spectrometer-CXI:DG3:PIC:01.RBV':('CXI:DG3:PIC:01.RBV', 4),
                                           'DG3 Spectrometer-CXI:DG3:PIC:02.RBV':('CXI:DG3:PIC:02.RBV', 5),
                                           'DG3 Spectrometer-CXI:DG3:PIC:03.RBV':('CXI:DG3:PIC:03.RBV', 6),
                                           'DG3 Spectrometer-CXI:SC2:PIC:04.RBV':('CXI:SC2:PIC:04.RBV', 7),
                                           'DG3 Spectrometer-CXI:SC2:PIC:05.RBV':('CXI:SC2:PIC:05.RBV', 8),
                                           'DG3 Spectrometer-CXI:SC2:PIC:06.RBV':('CXI:SC2:PIC:06.RBV', 9),
                                           'fine motors + focus':('CXI:USR:MMS:09.RBV', 10),
                                           'fine motors + focus-CXI:USR:MMS:10.RBV':('CXI:USR:MMS:10.RBV', 11),
                                           'fine motors + focus-CXI:USR:MMS:11.RBV':('CXI:USR:MMS:11.RBV', 12),
                                           'fine motors + focus-CXI:USR:MMS:12.RBV':('CXI:USR:MMS:12.RBV', 13),
                                           'LEDs':('CXI:USR:SC2:ANLINOUT:00:DEVICE:NAME', 14),
                                           'CXI:SC2:MZM:07:ENCPOSITIONGET':('CXI:SC2:MZM:07:ENCPOSITIONGET', 44),
                                           'CXI:SC2:MZM:08:ENCPOSITIONGET':('CXI:SC2:MZM:08:ENCPOSITIONGET', 45),
                                           # next entry is the masking alias that psana should remove:
                                           # 'CXI:SC2:MZM:09:ENCPOSITIONGET':('CXI:SC2:MZM:10:ENCPOSITIONGET', 46),
                                           'CXI:SC2:MZM:09:ENCPOSITIONGET-CXI:SC2:MZM:12:ENCPOSITIONGET':('CXI:SC2:MZM:12:ENCPOSITIONGET', 47),
                                           '140k Y Stepper':('CXI:DG2:MMS:17.RBV',225),
                                           'DG4 IPM/PIM':('CXI:DG4:MMS:01.RBV',226),
                                           'DG4 IPM/PIM-CXI:DG4:MMS:02.RBV':('CXI:DG4:MMS:02.RBV',227),
                                           'DG4 IPM/PIM-CXI:DG4:MMS:03.RBV':('CXI:DG4:MMS:03.RBV',228),
                                           'DG4 IPM/PIM-CXI:DG4:MMS:04.RBV':('CXI:DG4:MMS:04.RBV',229),
                                           'DG4 IPM/PIM-CXI:DG4:MMS:05.RBV':('CXI:DG4:MMS:05.RBV',230),
                                           }
        h5_outfile = getH5OutfileName(TEST_10)
        self.h5Translate(TEST_10, h5_outfile, cmdLineOptions='-n 1')
        # this is the pv that is masked by an alias
        src1_pvid_4 = {'pvname':'CXI:SC2:MZM:09:ENCPOSITIONGET',
                       'beginJobValue':0.0,
                       'event0value':0.0,
                       'event0stamp':(743137317, 444140000),
                       'alias':'KB1 Vert Foucssing Mirror Pitch-CXI:SC2:MZM:09:ENCPOSITIONGET'}
        # this is a different pv from a different source that has the same pvid
        src0_pvid_4 = {'pvname':'CXI:DG3:PIC:01.RBV',
                       'beginJobValue':1.0005e-02,
                       'event0value':1.0005e-02,
                       'event0stamp':(742173122, 724943000),
                       'alias': 'DG3 Spectrometer-CXI:DG3:PIC:01.RBV'}
        # ---- test xtc input ----
        psana.setConfigFile('')
        dsXtc = psana.DataSource(TEST_10)
        estore = dsXtc.env().epicsStore()
        # the alias 'CXI:SC2:MZM:09:ENCPOSITIONGET' to the pv 'CXI:SC2:MZM:10:ENCPOSITIONGET'
        # masks the original pv, psana should be removing this alias:
        self.assertEqual('',estore.alias('CXI:SC2:MZM:10:ENCPOSITIONGET'), msg="xtc: psana is not removing alias 'CXI:SC2:MZM:09:ENCPOSITIONGET' for 'CXI:SC2:MZM:10:ENCPOSITIONGET' that masks pv with alias name")
        # are pv's there
        pv0 = estore.getPV(src0_pvid_4['pvname'])
        pv1 = estore.getPV(src1_pvid_4['pvname'])
        self.assertTrue(pv0 is not None, msg="xtc: src0 pvid4 pvname=%s not found during beginJob" % src0_pvid_4['pvname'])
        self.assertTrue(pv1 is not None, msg="xtc: src1 pvid4 pvname=%s not found during beginJob" % src1_pvid_4['pvname'])
        # right value in beginJob?
        self.assertEqual(pv0.value(0), src0_pvid_4['beginJobValue'],msg="xtc: src0 pvid4 pvname=%s beginJob value wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.value(0), src1_pvid_4['beginJobValue'],msg="xtc: src1 pvid4 pvname=%s beginJob value wrong" % src1_pvid_4['pvname'])
        # are aliases there?
        alias0 = estore.getPV(src0_pvid_4['alias'])
        alias1 = estore.getPV(src1_pvid_4['alias'])
        self.assertTrue(alias0 is not None, msg="xtc: src0 pvid4 alias=%s not found during beginJob" % src0_pvid_4['alias'])
        self.assertTrue(alias1 is not None, msg="xtc: src1 pvid4 alias=%s not found during beginJob" % src1_pvid_4['alias'])
        # do aliases have right value?
        self.assertEqual(alias0.value(0), src0_pvid_4['beginJobValue'],msg="xtc: src0 pvid4 pvname=%s beginJob value wrong from alias=%s" % (src0_pvid_4['pvname'], src0_pvid_4['alias']))
        self.assertEqual(alias1.value(0), src1_pvid_4['beginJobValue'],msg="xtc: src1 pvid4 pvname=%s beginJob value wrong from alias=%s" % (src1_pvid_4['pvname'], src1_pvid_4['alias']))
        # check expected number of aliases and pvNames, have not verified that they are all correct, 199 and 227
        # are what was observed when test was written
        self.assertEqual(len(estore.aliases()), 199, msg="xtc: estore does not have expected number of aliases")
        self.assertEqual(len(estore.pvNames()), 227, msg="xtc: estore does not have expected number of pvNames")
        checkAliases(self, aliases, estore, "xtc configure")
        # go to the next event (NOTE(review): Python 2 iterator .next())
        dsXtc.events().next()
        pv0 = estore.getPV(src0_pvid_4['pvname'])
        pv1 = estore.getPV(src1_pvid_4['pvname'])
        self.assertTrue(pv0 is not None, msg="xtc: src0 pvid4 pvname=%s not found during event 0" % src0_pvid_4['pvname'])
        self.assertTrue(pv1 is not None, msg="xtc: src1 pvid4 pvname=%s not found during event 0" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.value(0), src0_pvid_4['event0value'],msg="xtc: src0 pvid4 pvname=%s event0 value wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.value(0), src1_pvid_4['event0value'],msg="xtc: src1 pvid4 pvname=%s event0 value wrong" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.stamp().sec(), src0_pvid_4['event0stamp'][0],msg="xtc: src0 pvid4 pvname=%s event0 stamp sec wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.stamp().sec(), src1_pvid_4['event0stamp'][0],msg="xtc: src1 pvid4 pvname=%s event0 stamp sec wrong" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.stamp().nsec(), src0_pvid_4['event0stamp'][1],msg="xtc: src0 pvid4 pvname=%s event0 stamp sec wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.stamp().nsec(), src1_pvid_4['event0stamp'][1],msg="xtc: src1 pvid4 pvname=%s event0 stamp sec wrong" % src1_pvid_4['pvname'])
        alias0 = estore.getPV(src0_pvid_4['alias'])
        alias1 = estore.getPV(src1_pvid_4['alias'])
        checkAliases(self, aliases, estore, "configure")
        self.assertTrue(alias0 is not None, msg="xtc: src0 pvid4 alias=%s not found during event0" % src0_pvid_4['alias'])
        self.assertTrue(alias1 is not None, msg="xtc: src1 pvid4 alias=%s not found during event0" % src1_pvid_4['alias'])
        self.assertEqual(alias0.value(0), src0_pvid_4['event0value'],msg="xtc: src0 pvid4 pvname=%s beginJob value wrong from alias=%s" % (src0_pvid_4['pvname'], src0_pvid_4['alias']))
        self.assertEqual(alias1.value(0), src1_pvid_4['event0value'],msg="xtc: src1 pvid4 pvname=%s beginJob value wrong from alias=%s" % (src1_pvid_4['pvname'], src1_pvid_4['alias']))
        # drop references to the xtc data source before opening the hdf5 file
        del alias1
        del alias0
        del pv1
        del pv0
        del estore
        del dsXtc
        # ---- test hdf5 input (the file translated above) ----
        psana.setConfigFile('')
        dsH5 = psana.DataSource(h5_outfile)
        estore = dsH5.env().epicsStore()
        # check expected number of aliases and pvNames, have not verified that they are all correct, 193 and 227
        # are what was observed when test was written
        self.assertEqual(len(estore.aliases()), 193, msg="h5: estore does not have expected number of aliases")
        self.assertEqual(len(estore.pvNames()), 227, msg="h5: estore does not have expected number of pvNames")
        checkAliases(self, aliases, estore, "h5 configure")
        # are pv's there with right value?
        pv0 = estore.getPV(src0_pvid_4['pvname'])
        pv1 = estore.getPV(src1_pvid_4['pvname'])
        self.assertTrue(pv0 is not None, msg="h5: src0 pvid4 pvname=%s not found during beginJob" % src0_pvid_4['pvname'])
        self.assertTrue(pv1 is not None, msg="h5: src1 pvid4 pvname=%s not found during beginJob" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.value(0), src0_pvid_4['beginJobValue'],msg="h5: src0 pvid4 pvname=%s beginJob value wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.value(0), src1_pvid_4['beginJobValue'],msg="h5: src1 pvid4 pvname=%s beginJob value wrong" % src1_pvid_4['pvname'])
        alias0 = estore.getPV(src0_pvid_4['alias'])
        alias1 = estore.getPV(src1_pvid_4['alias'])
        # these aliases share the same pvid, due to limitation in psana-translate, there should only be one of them
        self.assertTrue((alias0 is None) or (alias1 is None), msg="h5: one of the src0 pvid4 and scr1 pvid4 aliases is not none")
        self.assertFalse((alias0 is None) and (alias1 is None), msg="h5: both the scr0 pvid4 and src1 pvid4 aliases are none")
        # go to the next event
        dsH5.events().next()
        pv0 = estore.getPV(src0_pvid_4['pvname'])
        pv1 = estore.getPV(src1_pvid_4['pvname'])
        self.assertTrue(pv0 is not None, msg="src0 pvid4 pvname=%s not found during event 0" % src0_pvid_4['pvname'])
        self.assertTrue(pv1 is not None, msg="src1 pvid4 pvname=%s not found during event 0" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.value(0), src0_pvid_4['event0value'],msg="src0 pvid4 pvname=%s event0 value wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.value(0), src1_pvid_4['event0value'],msg="src1 pvid4 pvname=%s event0 value wrong" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.stamp().sec(), src0_pvid_4['event0stamp'][0],msg="src0 pvid4 pvname=%s event0 stamp sec wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.stamp().sec(), src1_pvid_4['event0stamp'][0],msg="src1 pvid4 pvname=%s event0 stamp sec wrong" % src1_pvid_4['pvname'])
        self.assertEqual(pv0.stamp().nsec(), src0_pvid_4['event0stamp'][1],msg="src0 pvid4 pvname=%s event0 stamp sec wrong" % src0_pvid_4['pvname'])
        self.assertEqual(pv1.stamp().nsec(), src1_pvid_4['event0stamp'][1],msg="src1 pvid4 pvname=%s event0 stamp sec wrong" % src1_pvid_4['pvname'])
        if self.cleanUp: os.unlink(h5_outfile)
    def test_s80merge(self):
        '''tests if the s80 stream is merged properly.
        Events whose fiducials appear in the lists below must contain data from
        both the DAQ streams and the s80 stream, only the DAQ streams, or only
        the s80 stream respectively.
        '''
        DAQ_fiducials_with_matching_s80 = [0x0DAE5, # s0
                                           0x0DAFD, # s0
                                           0x0DADF, # s1
                                           0x0DAD9, # s5
                                           0x0DAEB, # s5
                                           0x0DAF7  # s5
                                           ]
        DAQ_fiducials_with_no_matching_s80 = [0x0D95C, # s0
                                              0x0DAD6, # s0
                                              0x13137, # s1
                                              0x0D956, # s1
                                              0x0DAD3, # s1
                                              0x0DAF4, # s1
                                              0x1312B, # s1
                                              0x0D95F, # s5
                                              0x0DAC1, # s5
                                              0x1313A, # s5
                                              0x0E805, # s5
                                              0x0E80E, # s0
                                              ]
        s80_fiducials_with_no_DAQ = [56049, 56067, 78243]
        # helper: event has a frame from every DAQ camera AND the s80 xtcav camera
        def eventHasDaqAndS80Data(evt, opal0, opal1, opal2, orca, xtcav):
            if evt.get(psana.Camera.FrameV1, opal0) is None: return False
            if evt.get(psana.Camera.FrameV1, opal1) is None: return False
            if evt.get(psana.Camera.FrameV1, opal2) is None: return False
            if evt.get(psana.Camera.FrameV1, orca) is None: return False
            if evt.get(psana.Camera.FrameV1, xtcav) is None: return False
            return True
        # helper: all DAQ cameras present, xtcav (s80) absent
        def eventHasOnlyDaq(evt, opal0, opal1, opal2, orca, xtcav):
            if evt.get(psana.Camera.FrameV1, opal0) is None: return False
            if evt.get(psana.Camera.FrameV1, opal1) is None: return False
            if evt.get(psana.Camera.FrameV1, opal2) is None: return False
            if evt.get(psana.Camera.FrameV1, orca) is None: return False
            if evt.get(psana.Camera.FrameV1, xtcav) is not None: return False
            return True
        # helper: only the xtcav (s80) camera present
        def eventHasOnlyS80(evt, opal0, opal1, opal2, orca, xtcav):
            if evt.get(psana.Camera.FrameV1, opal0) is not None: return False
            if evt.get(psana.Camera.FrameV1, opal1) is not None: return False
            if evt.get(psana.Camera.FrameV1, opal2) is not None: return False
            if evt.get(psana.Camera.FrameV1, orca) is not None: return False
            if evt.get(psana.Camera.FrameV1, xtcav) is None: return False
            return True
        psana.setConfigFile('')
        dataSourceDir = os.path.join(ptl.getMultiFileDataDir(),'test_004_xppa1714')
        ds = psana.DataSource('exp=xppa1714:run=157:dir=%s' % dataSourceDir)
        # by using the aliases, we test that psana is processing the alias list from
        # both the s80 and the DAQ streams
        # opal0 = psana.Source('DetInfo(XppEndstation.0:Opal1000.0)')
        # opal1 = psana.Source('DetInfo(XppEndstation.0:Opal1000.1)')
        # opal2 = psana.Source('DetInfo(XppEndstation.0:Opal1000.2)')
        # orca = psana.Source('DetInfo(XppEndstation.0:OrcaFl40.0)')
        # xtcav = psana.Source('DetInfo(XrayTransportDiagnostic.0:Opal1000.0)')
        opal0 = psana.Source('opal_0')
        opal1 = psana.Source('opal_1')
        opal2 = psana.Source('opal_2')
        orca = psana.Source('orca')
        xtcav = psana.Source('xtcav')
        for calibNumber, calibIter in enumerate(ds.steps()):
            for eventNumber, evt in enumerate(calibIter.events()):
                eventId = evt.get(psana.EventId)
                fid = eventId.fiducials()
                if fid in DAQ_fiducials_with_matching_s80:
                    self.assertTrue(eventHasDaqAndS80Data(evt, opal0, opal1, opal2, orca, xtcav),
                                    msg="fid=%s should have both DAQ and s80" % fid)
                elif fid in DAQ_fiducials_with_no_matching_s80:
                    self.assertTrue(eventHasOnlyDaq(evt, opal0, opal1, opal2, orca, xtcav),
                                    "fid=%s should have only DAQ data" % fid)
                elif fid in s80_fiducials_with_no_DAQ:
                    self.assertTrue(eventHasOnlyS80(evt, opal0, opal1, opal2, orca, xtcav),
                                    "fid=%s should have only s80" % fid)
            # relies on the loop variables leaking out of the inner for: checks the
            # final (calib, event) counters seen for each calib cycle
            self.assertTrue((calibNumber,eventNumber) in [(0,16), (1,8)],
                            msg="should be 16 events in calib 0, and 8 in calib 1")
    def test_mp(self):
        '''parallel child process mode.
        For testing, I'll just run one process with psana_test.dump to get all the
        output in one file. For some reason, I frequently get the error message
        Standard exception caught in runApp(): ExceptionErrno: writing to ready pipe failed: Broken pipe [in function next at PSXtcMPInput/src/DgramSourceWorker.cpp:84
        when I run psana_test.dump in parallel mode. Something flaky about the piping.
        It often seems that I don't get the error message if I run with debug output.
        So to test, I'm running with debug output, and saving the dump into a separate
        file that I compare.
        '''
        dataSourceDir = os.path.join(ptl.getMultiFileDataDir(), 'test_004_xppa1714')
        # test that mp mode gives us what we saw before on DAQ only streams
        dumpOutput = 'unittest_test_mp_mpmode.dump'
        cmd = '''psana -c '' -p 1'''
        cmd += ' -o psana_test.dump.output_file=%s' % dumpOutput
        cmd += (''' -m psana_test.dump exp=xppa1714:run=157:stream=0-20:dir=%s''' % dataSourceDir)
        o,e = ptl.cmdTimeOut(cmd,100)
        # the worker process appends this suffix to the dump file name
        dumpOutput += '.subproc_0'
        md5 = ptl.get_md5sum(dumpOutput)
        # regression value: md5 observed when the test was written
        prev_md5 = 'fbd1b3a999adb4cdef882c7aceb356dd'
        failMsg = 'prev md5=%s\n' % prev_md5
        failMsg += 'curr md5=%s\n' % md5
        failMsg += 'are not equal. cmd:\n'
        failMsg += cmd
        self.assertEqual(prev_md5, md5, msg=failMsg)
        os.unlink(dumpOutput)
        # test that mp mode is the same as not mp mode (DAQ only streams):
        # same expected md5, but run without '-p 1'
        dumpOutput = 'unittest_test_mp_normal.dump'
        cmd = '''psana -c '' -o psana_test.dump.output_file=%s''' % dumpOutput
        cmd += (''' -m psana_test.dump exp=xppa1714:run=157:stream=0-20:dir=%s''' % dataSourceDir)
        o,e = ptl.cmdTimeOut(cmd,100)
        md5 = ptl.get_md5sum(dumpOutput)
        failMsg = 'prev md5=%s\n' % prev_md5
        failMsg += 'curr md5=%s\n' % md5
        failMsg += 'are not equal. cmd:\n'
        failMsg += cmd
        self.assertEqual(prev_md5, md5, msg=failMsg)
        os.unlink(dumpOutput)
def test_jmpToThird(self):
'''tests the special options to set offset for the third event
'''
dataSourceDir = os.path.join(ptl.getMultiFileDataDir(), 'test_002_xppd9714')
ds = psana.setConfigFile('')
ccOffsetsOption = '121184 121184 121184 121184 144524 121184'
ccBaseFilenames = ['e428-r0016-s00-c00.xtc',
'e428-r0016-s01-c00.xtc',
'e428-r0016-s02-c00.xtc',
'e428-r0016-s03-c00.xtc',
'e428-r0016-s04-c00.xtc',
'e428-r0016-s05-c00.xtc']
ccFullFilenames = [os.path.join(dataSourceDir, fname) for fname in ccBaseFilenames]
ccFilenamesOption = ' '.join(ccFullFilenames)
class EventTime(object):
def __init__(self,sec,nsec,fid):
self.sec=sec
self.nsec=nsec
self.fid=fid
def __eq__(self, x):
'''only works for x = psana.EventId.
convenience function for tests below.
'''
tm = x.time()
if self.sec != tm[0]: return False
if self.nsec != tm[1]: return False
fid = x.fiducials()
if self.fid != fid: return False
return True
def __repr__(self):
return 'sec=%d nsec=%d fid=%d' % (self.sec, self.nsec, self.fid)
def psanaEventIdToStr(eventId):
return 'sec=%d nsec=%d fid=%d' % (eventId.time()[0], eventId.time()[1], eventId.fiducials())
firstTime = EventTime(sec=1399774510, nsec=475171424, fid=8130)
eventTimes = []
eventTimes.append(EventTime(sec=1399774513, nsec=616326150, fid=9261))
eventTimes.append(EventTime(sec=1399774516, nsec=691053493, fid=10368))
eventTimes.append(EventTime(sec=1399774519, nsec=890170627, fid=11520))
eventTimes.append(EventTime(sec=1399774523, nsec=339827189, fid=12762))
eventTimes.append(EventTime(sec=1399774526, nsec=630896109, fid=13947))
psana.setOption("PSXtcInput.XtcInputModule.third_event_jump_offsets",ccOffsetsOption)
psana.setOption("PSXtcInput.XtcInputModule.third_event_jump_filenames",ccFilenamesOption)
dsString = 'exp=xppd9714:run=16:dir=%s' % dataSourceDir
ds = psana.DataSource(dsString)
events = ds.events()
for evt, evtTestTime in zip(events, eventTimes):
evtTime = evt.get(psana.EventId)
failMsg = "Fail: data=%s\n test=%s\n first=%s" % \
(psanaEventIdToStr(evtTime),evtTestTime, firstTime)
self.assertTrue(evtTestTime == evtTime, msg=failMsg)
# psana remembers options. If another unit test function is run after this one
# the jump parameters will still be in effect. Set them to null.
psana.setOption("PSXtcInput.XtcInputModule.third_event_jump_offsets",'')
psana.setOption("PSXtcInput.XtcInputModule.third_event_jump_filenames",'')
    def test_storeGetPut(self):
        '''Test configStore and calibStore get/put interfaces:
        lookups with/without a Source, ndarray put/get round trips, the rule
        that a C++-visible object cannot be replaced from Python, and lookups
        qualified by a key string.
        '''
        TEST_42 = os.path.join(DATADIR,'test_042_Translator_t1.xtc')
        assert os.path.exists(TEST_42), "input file: %s does not exist, can't run test" % TEST_42
        psana.setConfigFile('')
        ds = psana.DataSource(TEST_42)
        cfgStore = ds.env().configStore()
        # these are in the test data
        # config store get will match any if no source given
        self.assertIsNotNone(cfgStore.get(psana.ControlData.ConfigV2))
        # these are in there
        self.assertIsNotNone(cfgStore.get(psana.ControlData.ConfigV2,psana.Source('ProcInfo()')))
        self.assertIsNotNone(cfgStore.get(psana.Ipimb.ConfigV2,psana.Source("BldInfo(XppSb2_Ipm)")))
        # mismatches: wrong source, wrong key, wrong type/source pairing
        self.assertIsNone(cfgStore.get(psana.ControlData.ConfigV2, psana.Source('DetInfo(NoDetector.0:Evr.1)')))
        self.assertIsNone(cfgStore.get(psana.ControlData.ConfigV2,"mykey"))
        self.assertIsNone(cfgStore.get(psana.Ipimb.ConfigV2,psana.Source('ProcInfo()')))
        self.assertRaises(TypeError, cfgStore.get, None)
        # put an array in
        ar = np.zeros((2,2))
        cfgStore.put(ar)
        cfgStore.put(ar,psana.Source('ProcInfo()'))
        # retrieve it
        self.assertIsNotNone(cfgStore.get(psana.ndarray_float64_2))
        self.assertIsNotNone(cfgStore.get(psana.ndarray_float64_2,psana.Source('ProcInfo()')))
        # test that we can't replace a C++ visible object from python
        ar2 = np.zeros((2,2))
        self.assertRaises(ValueError,cfgStore.put,ar2)
        self.assertRaises(ValueError,cfgStore.put, ar2,psana.Source('ProcInfo()'))
        # test for calib store as well
        calibStore = ds.env().calibStore()
        calibStore.put(ar)
        calibStore.put(ar,psana.Source('ProcInfo()'))
        self.assertIsNotNone(calibStore.get(psana.ndarray_float64_2))
        self.assertIsNotNone(calibStore.get(psana.ndarray_float64_2, psana.Source('ProcInfo()')))
        # test that we can't replace a C++ visible object from python
        ar2 = np.zeros((2,2))
        self.assertRaises(ValueError,calibStore.put,ar2)
        self.assertRaises(ValueError,calibStore.put, ar2,psana.Source('ProcInfo()'))
        # test key strings
        cfgStore.put(ar,"mykey")
        cfgStore.put(ar,psana.Source('ProcInfo()'),"mykey")
        calibStore.put(ar,"mykey")
        calibStore.put(ar,psana.Source('ProcInfo()'),"mykey")
        self.assertIsNotNone(cfgStore.get(psana.ndarray_float64_2,"mykey"))
        self.assertIsNotNone(cfgStore.get(psana.ndarray_float64_2,psana.Source('ProcInfo()'),"mykey"))
        self.assertIsNotNone(calibStore.get(psana.ndarray_float64_2,"mykey"))
        self.assertIsNotNone(calibStore.get(psana.ndarray_float64_2,psana.Source('ProcInfo()'),"mykey"))
        # too many positional arguments must raise
        self.assertRaises(TypeError, cfgStore.get, psana.ndarray_float64_2,"mykey","anotherString")
def test_eventStrGetPut(self):
'''Test that std::string is getting converted to Python str and vice versa
through the event store
'''
TEST_42 = os.path.join(DATADIR,'test_042_Translator_t1.xtc')
assert os.path.exists(TEST_42), "input file: %s does not exist, can't run test" % TEST_42
psana.setConfigFile('')
psana.setOption('modules','Translator.TestModuleNDArrayString psana_test.PsanaModulePutStr psana_test.PsanaModuleGetStr')
ds = psana.DataSource(TEST_42)
evt = ds.events().next()
# if we got this far, then the psana_test.PsanaModuleGetStr C++ module got the
# string from the Python module psana_test.PsanaModulePutStr.
# So Python -> C++ is working.
# Now we test C++ -> Python. The C++ module Translator.TestModuleNDArrayString
# puts a few strings in the event:
str1 = evt.get(str,'my_string1')
self.assertIsNotNone(str1,msg="could not get str with key='my_string1' from event")
self.assertTrue(str1.startswith('This is event number'), msg="retrieved my_string1 but did not start with 'This is event number'")
# now we test that we can put things in from Python and get them back out
evt.put('testing string','testkey')
testStr = evt.get(str,'testkey')
self.assertEqual(testStr,'testing string', msg="testStr does not have expected value")
    def testLiveMode(self):
        '''Test that live mode works. In particular, we are concerned about
        merging with control streams. Tests:
        delay s80, then it catches up with the DAQ streams
        delay DAQ streams, then they catch up with s80
        This test uses some reduced xtc files produced with the current version of Tomy's small xtc data
        tool. This tool will change, and psana will change to understand proxy types, so we may need to
        recreate the files and rewrite the test at that point.
        Each xtc test file is about 500kb.
        '''
        expname = 'amob5114'
        run = 477
        srcDir = os.path.join(ptl.getMultiFileDataDir(), 'test_009_%s' % expname)
        assert os.path.exists(srcDir), "srcDir=%s doesn't exist" % srcDir
        destDirBase = AppDataPath(os.path.join("psana_test",'liveModeSim')).path()
        assert len(destDirBase)>0, "did not find liveModeSim base dir in the psana_test data dir"
        # make a random directory for the testing that we will remove when done
        destDir = tempfile.mkdtemp(dir=destDirBase)
        offlineStdoutFilename = os.path.join(destDir, 'offline_DumpDgram.stdout')
        offlineStderrFilename = os.path.join(destDir, 'offline_DumpDgram.stderr')
        offlineCmd = 'psana -m psana_examples.DumpDgram '
        offlineCmd += 'exp=%s:run=%d:dir=%s' % (expname, run, srcDir)
        offlineCmd += ' >%s 2>%s' %(offlineStdoutFilename, offlineStderrFilename)
        # get ground truth for what we should see:
        os.system(offlineCmd)
        # make sure no errors (NOTE(review): Python 2 `file()` builtin)
        offlineStderr = file(offlineStderrFilename,'r').read().strip()
        self.assertEqual(offlineStderr,'',msg="There were errors in offline cmd=%s\nstderr=\n%s" % (offlineCmd, offlineStderr))
        # below is the md5sum of what we expect above
        md5sum_offline_mode = '8cc65ac5deeafaeb0316a5e841d9d414'
        new_md5sum_offline = ptl.get_md5sum(offlineStdoutFilename)
        self.assertEqual(md5sum_offline_mode, new_md5sum_offline,
                         msg="output of DumpDgram has changed. Check output of cmd=%s, edit test with new md5sum if neccessary" % offlineCmd)
        ##### - helper function to do live mode comparison - #######
        # Simulates live writing (a child process copies the source xtc files
        # into destDir with the given delays/chunk sizes) while DumpDgram reads
        # them in :live mode; then compares the dump against the offline md5.
        def testLiveModeHelper(self, testlabel, srcDir, destDir, run, md5sum_offline_mode, daqInitialDelay, s80InitialDelay,
                               mbPerWrites, daqDelayBetweenWrites, s80DelayBetweenWrites, offlineStdoutFile):
            # create string arguments for routine; streams 0-12 are DAQ, 80 is the control (s80) stream
            initialDelays = '0-12:%.2f,80:%.2f' % (daqInitialDelay, s80InitialDelay)
            mb_per_writes='0-255:%.2f' % mbPerWrites
            delays_between_writes='0-12:%.2f,80:%.2f' % (daqDelayBetweenWrites, s80DelayBetweenWrites)
            liveModeArgs = ['.inprogress',
                            run,
                            srcDir,
                            destDir,
                            initialDelays,
                            mb_per_writes,
                            '0-255:-1', # read everything
                            delays_between_writes,
                            True, # force overwrite
                            False] # verbose, set this to True when debugging to see how files written
            liveModeProcess = multiprocessing.Process(target=liveModeLib.simLiveMode, args=liveModeArgs)
            dumpStdOutFile = os.path.join(destDir, "livemode-%s.stdout" % testlabel)
            dumpStdErrFile = os.path.join(destDir, "livemode-%s.stderr" % testlabel)
            assert not os.path.exists(dumpStdOutFile), "unexpected, dump file exists in new random dir. filename=%s" % dumpStdOutFile
            assert not os.path.exists(dumpStdErrFile), "unexpected, dump file exists in new random dir. filename=%s" % dumpStdErrFile
            dumpCmd = "psana -m psana_examples.DumpDgram exp=amob5114:run=%d:dir=%s:live" % (run, destDir)
            dumpCmd += " > %s 2> %s" % (dumpStdOutFile, dumpStdErrFile)
            liveModeProcess.start()
            os.system(dumpCmd)
            liveModeProcess.join()
            # we're done. Check for unexpected error output:
            liveDumpStderr = file(dumpStdErrFile,'r').read().strip()
            self.assertEqual(liveDumpStderr, '', msg="%s: There were errors in the live mode DumpDgram\ncmd=%s" % (testlabel, dumpCmd))
            # The only difference between the DumpDgram output on
            # live mode should be mention of the .inprogress files. If we subsititute that
            # back, it should look just like the offline output. We want to check that a lot
            # of datagrams show up from the inprogress files:
            seeInProgressCmd = '''grep '.xtc.inprogress' %s | wc''' % dumpStdOutFile
            inProgressStdout, inProgressStderr = ptl.cmdTimeOut(seeInProgressCmd)
            numberLines = int(inProgressStdout.split()[0])
            expected = 10
            self.assertGreaterEqual(numberLines, expected,
                                    msg="expected at least %d lines with .xtc.inprogress in DumpDamage output, but only found %d\ngrep cmd was:\n%s" % (expected, numberLines, seeInProgressCmd))
            # replace .xtc.inprogress with .xtc so we can compare output
            replacedStdOutFile = os.path.join(destDir, "livemode-%s-sub-inprogress.stdout" % testlabel)
            cmd = '''sed 's/\.xtc\.inprogress/\.xtc/' %s >%s''' % (dumpStdOutFile, replacedStdOutFile)
            os.system(cmd)
            md5replaced = ptl.get_md5sum(replacedStdOutFile)
            self.assertEqual(md5replaced, md5sum_offline_mode,
                             msg="md5 of DumpDgram output with .xtc.inprogress replaced with .xtc is not equal to offline result.\n sed replace cmd:\n%s\n Compare files:\nlive mode=%s\noffline=%s" % \
                             (cmd, replacedStdOutFile, offlineStdoutFile))
        ##### -- end helper function -- ###
        # a test where we delay s80 and then it catches up
        testLiveModeHelper(self,
                           'delay_s80_then_catchup',
                           srcDir, destDir, run,
                           md5sum_offline_mode,
                           daqInitialDelay = 0.3,
                           s80InitialDelay = 1.2,
                           mbPerWrites = .05,
                           daqDelayBetweenWrites=.1,
                           s80DelayBetweenWrites=.06,
                           offlineStdoutFile=offlineStdoutFilename)
        # clear the copied xtc files before the second scenario
        os.system('rm %s' % os.path.join(destDir, '*.xtc'))
        # a test where we delay the DAQ and then it catches up
        testLiveModeHelper(self,
                           'delay_DAQ_then_catchup',
                           srcDir, destDir, run,
                           md5sum_offline_mode,
                           daqInitialDelay = 1.2,
                           s80InitialDelay = .3,
                           mbPerWrites = .05,
                           daqDelayBetweenWrites=.06,
                           s80DelayBetweenWrites=.1,
                           offlineStdoutFile=offlineStdoutFilename)
        # now that we've finished and tests have passed, remove the temp directory under data/psana_test/liveModeSim
        shutil.rmtree(destDir)
if __name__ == "__main__":
    # Run the suite verbosely, ignoring any extra command-line arguments.
    unittest.main(argv=[sys.argv[0]] + ['-v'])
| [
"davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
2c078cc28bf5d3b25dce2dc028c83c1da2334d13 | 7006366dffa1576d54d5b8e619de10d999f9ccd7 | /application.py | 12d8f3fe68f0a4783d6ab98688fc638405061705 | [] | no_license | gaolinjie/webeta | 47e01d98cf20cd892b5005048d9729480e3ead2c | 3038e36abda5118be2b1075ca93f57b79da370b9 | refs/heads/master | 2021-01-10T11:14:34.450441 | 2016-04-09T17:07:04 | 2016-04-09T17:07:04 | 53,676,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2016 webeta
# cat /etc/mime.types
# application/octet-stream crx
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import os.path
import re
import memcache
import torndb
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import handler.index
from tornado.options import define, options
from lib.loader import Loader
from lib.session import Session, SessionManager
from jinja2 import Environment, FileSystemLoader
# Command-line options (tornado.options); overridable via --port=..., --mysql_host=..., etc.
# NOTE(review): the default DB credentials below are hard-coded in source —
# they should be overridden in any non-development deployment.
define("port", default = 80, help = "run on the given port", type = int)
define("mysql_host", default = "localhost", help = "community database host")
define("mysql_database", default = "webeta", help = "community database name")
define("mysql_user", default = "webeta", help = "community database user")
define("mysql_password", default = "webeta", help = "community database password")
class Application(tornado.web.Application):
    """Top-level Tornado application for the webeta site.

    Builds the URL routing table and application settings, then attaches the
    shared resources that every request handler reaches through
    ``self.application``: the MySQL connection, the model loader and models,
    the memcached-backed session manager and a raw memcache client.
    """

    def __init__(self):
        app_settings = {
            "blog_title": u"webeta",
            "template_path": os.path.join(os.path.dirname(__file__), "templates"),
            "static_path": os.path.join(os.path.dirname(__file__), "static"),
            "root_path": os.path.join(os.path.dirname(__file__), "/"),
            "xsrf_cookies": False,
            "cookie_secret": "cookie_secret_code",
            "login_url": "/login",
            "autoescape": None,
            "jinja2": Environment(loader = FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")), trim_blocks = True),
            # path prefixes that may not be taken as user names
            "reserved": ["user", "topic", "home", "setting", "forgot", "login", "logout", "register", "admin"],
            "debug": True,
        }
        static_root = app_settings["static_path"]
        routes = [
            # files served straight from the static directory
            (r"/(favicon\.ico)", tornado.web.StaticFileHandler, dict(path = static_root)),
            (r"/(sitemap.*$)", tornado.web.StaticFileHandler, dict(path = static_root)),
            (r"/(bdsitemap\.txt)", tornado.web.StaticFileHandler, dict(path = static_root)),
            (r"/(orca\.txt)", tornado.web.StaticFileHandler, dict(path = static_root)),
            # site pages
            (r"/", handler.index.IndexHandler),
            (r"/weixin", handler.index.WeixinHandler),
            (r"/shareit", handler.index.ShareItHandler),
            (r"/t/(.*)", handler.index.TopicHandler),
            (r"/addad", handler.index.AddAdHandler),
            (r"/myshares", handler.index.MySharesHandler),
            (r"/myads", handler.index.MyAdsHandler),
            # taobao-related pages and APIs
            (r"/tb/(.*)", handler.index.TaobaoHandler),
            (r"/prompt/(.*)", handler.index.TaobaoPromptHandler),
            (r"/addtb", handler.index.AddTbHandler),
            (r"/get/shop", handler.index.GetShopUUIDHandler),
            (r"/shop/(.*)", handler.index.ShopHandler),
            (r"/api/shop/(.*)", handler.index.GetShopItemsHandler),
            (r"/mytbs", handler.index.MyTabaosHandler),
            (r"/edit/tb/(.*)", handler.index.TaobaoEditHandler),
        ]
        tornado.web.Application.__init__(self, routes, **app_settings)
        # One DB connection shared by every handler.
        self.db = torndb.Connection(
            host = options.mysql_host, database = options.mysql_database,
            user = options.mysql_user, password = options.mysql_password
        )
        # Loader for model modules, plus the models handlers use directly.
        self.loader = Loader(self.db)
        self.user_model = self.loader.use("user.model")
        self.topic_model = self.loader.use("topic.model")
        self.ad_model = self.loader.use("ad.model")
        self.taobao_model = self.loader.use("taobao.model")
        self.shop_model = self.loader.use("shop.model")
        # Memcached-backed session manager and a raw memcache client.
        self.session_manager = SessionManager(app_settings["cookie_secret"], ["127.0.0.1:11211"], 0)
        self.mc = memcache.Client(["127.0.0.1:11211"])
def main():
    """Parse command-line options, then serve the application forever."""
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(options.port)
    # blocks until the IOLoop is stopped
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    # script entry point: start the HTTP server
    main()
| [
"gaolinjie@gmail.com"
] | gaolinjie@gmail.com |
ce2033c9bbcab7449f71973ced2d77fe349b5e39 | d476c93aa2aecd253508da0cc35071e456199318 | /test_autoarray/plot/wrap/base/test_colorbar.py | 44e53778fc90d48d4852ab5580bf6399337c6ff2 | [
"MIT"
] | permissive | Jammy2211/PyAutoArray | 82916f3f5530f938786f61f870df353b26732e37 | 6639dd86d21ea28e942155753ec556752735b4e4 | refs/heads/main | 2023-08-19T10:51:05.520942 | 2023-08-14T09:12:15 | 2023-08-14T09:12:15 | 210,980,464 | 6 | 5 | MIT | 2023-09-13T14:07:39 | 2019-09-26T02:18:10 | Python | UTF-8 | Python | false | false | 1,601 | py | import autoarray.plot as aplt
import matplotlib.pyplot as plt
import numpy as np
def test__loads_values_from_config_if_not_manually_input():
    """Colorbar settings fall back to config defaults unless passed explicitly."""
    # Defaults come from config; manual tick overrides start unset.
    colorbar = aplt.Colorbar()

    assert colorbar.config_dict["fraction"] == 3.0
    # PEP 8: compare to the None singleton with `is`, not `==`.
    assert colorbar.manual_tick_values is None
    assert colorbar.manual_tick_labels is None

    # Manually supplied tick values/labels are stored as given.
    colorbar = aplt.Colorbar(
        manual_tick_values=(1.0, 2.0), manual_tick_labels=(3.0, 4.0)
    )

    assert colorbar.manual_tick_values == (1.0, 2.0)
    assert colorbar.manual_tick_labels == (3.0, 4.0)

    # Flagging the colorbar as part of a subplot switches to the subplot
    # config value for `fraction`...
    colorbar = aplt.Colorbar()
    colorbar.is_for_subplot = True

    assert colorbar.config_dict["fraction"] == 0.1

    # ...unless `fraction` was passed manually, in which case it wins.
    colorbar = aplt.Colorbar(fraction=6.0)
    colorbar.is_for_subplot = True

    assert colorbar.config_dict["fraction"] == 6.0
def test__plot__works_for_reasonable_range_of_values():
    """Smoke-test Colorbar.set / set_with_color_values over typical configs."""
    figure = aplt.Figure()

    # Plain colorbar with large fraction / pad values.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    colorbar = aplt.Colorbar(fraction=1.0, pad=2.0)
    colorbar.set(ax=ax, units=None)
    figure.close()

    # Colorbar with manually supplied tick values and labels.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    colorbar = aplt.Colorbar(
        fraction=0.1,
        pad=0.5,
        manual_tick_values=[0.25, 0.5, 0.75],
        manual_tick_labels=[1.0, 2.0, 3.0],
    )
    colorbar.set(ax=ax, units=aplt.Units())
    figure.close()

    # Colorbar built from an explicit list of color values.
    fig, ax = figure.open()
    plt.imshow(np.ones((2, 2)))
    colorbar = aplt.Colorbar(fraction=0.1, pad=0.5)
    colorbar.set_with_color_values(
        cmap=aplt.Cmap().cmap, color_values=[1.0, 2.0, 3.0], ax=ax, units=None
    )
    figure.close()
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
16a13dac0d327baadd156e0e33859f5aa14afdd1 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/22Jul/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374500587/HTT_22Jul_manzoni_Down_Jobs/Job_166/run_cfg.py | 17a95d827e3dc65c8a222b6cce830c36abd1c45f | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,042 | py | import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/22Jul/down/emb/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374500587/HTT_22Jul_manzoni_Down_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_546.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_547.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012D_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_548.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
f76e79a9d93f4eb209b8e63f9e1991e9feaf8695 | 05536893d069dd87256ba74ecdee06bdf481d44b | /args_api/SSEXP/SSEXP-DBTES.py | 0628cdd66ce0ae757dda369ebcb5cd9ce329ab92 | [] | no_license | TheRockStarDBA/DataBuddy | b2d889e11745d0afe1b39a11aab5945e2bd08cf7 | 38fa7adfdd228e2b2e4b4408393505163c5702e8 | refs/heads/master | 2020-12-25T23:27:02.363977 | 2015-05-28T19:14:12 | 2015-05-28T19:14:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,175 | py | #do not change
aa={'SSEXP_QueryDir.DBTES_Table': [{'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_163000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.'), 'query_sql_dir': ('-Q', '--query_sql_dir', 'C:\\Python27\\data_migrator_1239_12c\\test\\v101\\query\\query_dir_ss', 'Input dir with SQL Server Express query sqls.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise 
Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_QueryFile.DBTES_Table': [{'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_184000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'query_sql_file': ('-q', '--query_sql_file', 'C:\\Python27\\data_migrator_1239_12c\\test\\v101\\query\\ss_query.sql', 'Input file with SQL Server Express query sql.'), 'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files 
(x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_ShardedTable_Limit50.DBTES_Table': [{'lame_duck': ('-l', '--lame_duck', 50, 'Limit rows (lame duck run).'), 'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 3, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 3, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_347000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'from_table': ('-c', '--from_table', 'Timestamp_test_from', 'From table.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 
'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'default': [{'ask_to_truncate': ['-E', '--ask_to_truncate', '', 'Ask to truncate.'], 'shard_pre_etl': ['-2', '--shard_pre_etl', '', 'Path to shard level pre-ETL Python script.'], 'keep_data_file': ['-K', '--keep_data_file', '', 'Keep data dump.'], 'default_spool_dir': ['-F', '--default_spool_dir', '', 'Default data dump dir (default_spool_dir/job_name/timestamp).'], 'arg_6': ['-6', '--arg_6', '', 'Generic string argument 1.'], 'lame_duck': ['-l', '--lame_duck', '', 'Limit rows (lame duck run).'], 'copy_vector': ['-w', '--copy_vector', '', 'Data copy direction.'], 'log_dir': ['-M', '--log_dir', '', 'Log destination.'], 'time_stamp': ['-Y', '--time_stamp', '', 'Timestamp (log_dir/job_name/timestamp).'], 'job_name': ['-B', '--job_name', '', 'Job name (log_dir/job_name).'], 'job_pre_etl': ['-1', '--job_pre_etl', '', 'Path to job level pre-ETL Python script.'], 'num_of_shards': ['-r', '--num_of_shards', '', 'Number of shards.'], 'loader_profile': ['-C', '--loader_profile', '', 'SQL*Loader profile (user defined).'], 'email_to': ['-L', '--email_to', '', 'Email job status.'], 'host_map': ['-5', '--host_map', '', 'Host-to-shard map.'], 'arg_8': ['-8', '--arg_8', '', 'Generic string argument 3.'], 'validate': ['-V', '--validate', '', 'Check if source and target objects 
exist.'], 'arg_7': ['-7', '--arg_7', '', 'Generic string argument 2.'], 'field_term': ['-t', '--field_term', '', 'Field terminator.'], 'pool_size': ['-o', '--pool_size', '', 'Pool size.'], 'column_buckets': ['-0', '--column_buckets', '', 'Wide row support.'], 'job_post_etl': ['-3', '--job_post_etl', '', 'Jobs post-etl script.'], 'truncate_target': ['-U', '--truncate_target', '', 'Truncate target table/partition/subpartition.'], 'shard_post_etl': ['-4', '--shard_post_etl', '', 'Shards post-etl script.'], 'key_on_exit': ['-X', '--key_on_exit', '', 'Ask for an "Enter" key upon exit.']}, {'query_sql_file': ['-q', '--query_sql_file', '', 'Input file with SQL Server Express query sql.'], 'from_db_name': ['-b', '--from_db_name', '', 'SQL Server Express source database.'], 'from_table': ['-c', '--from_table', '', 'From table.'], 'source_client_home': ['-z', '--source_client_home', '', 'Path to SQL Server Express client home.'], 'from_user': ['-j', '--from_user', '', 'SQL Server Express source user.'], 'from_passwd': ['-x', '--from_passwd', '', 'SQL Server Express source user password.'], 'from_db_server': ['-n', '--from_db_server', '', 'SQL Server Express source instance name.'], 'query_sql_dir': ['-Q', '--query_sql_dir', '', 'Input dir with SQL Server Express query sqls.']}, {'to_db_name': ['-d', '--to_db_name', '', 'Target DB2 Enterprise Server database.'], 'target_client_home': ['-Z', '--target_client_home', '', 'Path to DB2 Enterprise Server client home bin dir.'], 'skip_rows': ['-k', '--skip_rows', '', 'Header size. 
Number of rows to skip in input file.'], 'to_user': ['-u', '--to_user', '', 'Target DB2 Enterprise Server db user.'], 'to_passwd': ['-p', '--to_passwd', '', 'Target DB2 Enterprise Server db user password.'], 'to_db_server': ['-s', '--to_db_server', '', 'Target DB2 Enterprise Server db instance name.'], 'to_table': ['-a', '--to_table', '', 'Target DB2 Enterprise Server table.']}], 'SSEXP_QueryFile_Limit15.DBTES_Table': [{'lame_duck': ('-l', '--lame_duck', 15, 'Limit rows (lame duck run).'), 'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_374000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'query_sql_file': ('-q', '--query_sql_file', 'C:\\Python27\\data_migrator_1239_12c\\test\\v101\\query\\ss_query.sql', 'Input file with SQL Server Express query sql.'), 'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 
'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_ShardedTable.DBTES_Table': [{'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 3, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 3, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_250000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'from_table': ('-c', '--from_table', 'Timestamp_test_from', 'From table.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source 
user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_Table_KeepSpoolFile.DBTES_Table': [{'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_286000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'from_table': ('-c', '--from_table', 'Timestamp_test_from', 'From table.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL 
Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_Table_Limit10.DBTES_Table': [{'lame_duck': ('-l', '--lame_duck', 10, 'Limit rows (lame duck run).'), 'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_316000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', 
'--from_db_name', 'master', 'SQL Server Express source database.'), 'from_table': ('-c', '--from_table', 'Timestamp_test_from', 'From table.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_Table.DBTES_Table': [{'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 'time_stamp': ('-Y', '--time_stamp', '20150515_220001_225000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 
'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'from_table': ('-c', '--from_table', 'Timestamp_test_from', 'From table.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}], 'SSEXP_QueryDir_Limit25.DBTES_Table': [{'lame_duck': ('-l', '--lame_duck', 25, 'Limit rows (lame duck run).'), 'field_term': ('-t', '--field_term', '"|"', 'Field terminator.'), 'num_of_shards': ('-r', '--num_of_shards', 1, 'Number of shards.'), 'pool_size': ('-o', '--pool_size', 1, 'Pool size.'), 'copy_vector': ('-w', '--copy_vector', 'ssexp-dbtes', 'Data copy direction.'), 'keep_data_file': ('-K', '--keep_data_file', 1, 'Keep data dump.'), 'default_spool_dir': ('-F', '--default_spool_dir', 'C:\\tmp\\TEST_default_spool', 'Default data dump dir (default_spool_dir/job_name/timestamp).'), 
'time_stamp': ('-Y', '--time_stamp', '20150515_220001_130000', 'Timestamp (log_dir/job_name/timestamp).'), 'host_map': ('-5', '--host_map', '".\\config\\host_map_v2.py"', 'Host-to-shard map.'), 'job_name': ('-B', '--job_name', 'qc_job', 'Job name (log_dir/job_name).'), 'log_dir': ('-M', '--log_dir', 'C:\\Temp\\qc_log', 'Log destination.')}, {'from_db_name': ('-b', '--from_db_name', 'master', 'SQL Server Express source database.'), 'source_client_home': ('-z', '--source_client_home', '"C:\\Program Files\\Microsoft SQL Server\\Client SDK\\ODBC\\110\\Tools\\Binn"', 'Path to SQL Server Express client home.'), 'from_user': ('-j', '--from_user', 'sa', 'SQL Server Express source user.'), 'from_passwd': ('-x', '--from_passwd', '198Morgan', 'SQL Server Express source user password.'), 'from_db_server': ('-n', '--from_db_server', 'ALEX_BUZ-PC\\SQLEXPRESS', 'SQL Server Express source instance name.'), 'query_sql_dir': ('-Q', '--query_sql_dir', 'C:\\Python27\\data_migrator_1239_12c\\test\\v101\\query\\query_dir_ss', 'Input dir with SQL Server Express query sqls.')}, {'to_db_name': ('-d', '--to_db_name', '"SAMPLE"', 'Target DB2 Enterprise Server database.'), 'target_client_home': ('-Z', '--target_client_home', '"C:\\Program Files (x86)\\IBM\\SQLLIB_01\\BIN"', 'Path to DB2 Enterprise Server client home bin dir.'), 'to_user': ('-u', '--to_user', '"ALEX_BUZ"', 'Target DB2 Enterprise Server db user.'), 'to_passwd': ('-p', '--to_passwd', '"198Morgan"', 'Target DB2 Enterprise Server db user password.'), 'to_db_server': ('-s', '--to_db_server', '"DB2"', 'Target DB2 Enterprise Server db instance name.'), 'to_table': ('-a', '--to_table', 'ALEX_BUZ.Timestamp_test_to', 'Target DB2 Enterprise Server table.')}]} | [
"alexbuzunov@gmail.com"
] | alexbuzunov@gmail.com |
74efb24cfcc456c3777f5202802beb8da836dca9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02633/s493088951.py | d53bbb4349c3e01d0bf0e8192f14d379d2450aff | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | x = int(input())
# Find the smallest positive k such that rotating k times by x degrees
# returns to the start, i.e. k * x is a multiple of 360.
# (x is read from stdin on the preceding line.)
k = 1
while (k * x) % 360 != 0:
    k += 1
print(k)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
72d42e23b47cf5c666a99a2493fc2ba13e552941 | a8fffbce7bd4d4e7e91f07b7aaaf0801ca64686e | /0x01-python-if_else_loops_functions/4-print_hexa.py | 19710345b8751a2df176d7845cec2204e07435b4 | [] | no_license | bmuha1/holbertonschool-higher_level_programming | 8f603c07e4b3cb87d89c3a1fff9fd5cdef5bc9f5 | 79cca6ecb77ed8de65b55bcdd715a3a923c5cb3a | refs/heads/master | 2020-07-22T15:52:04.069523 | 2020-02-13T23:29:50 | 2020-02-13T23:29:50 | 207,251,416 | 2 | 7 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | #!/usr/bin/python3
# Print the numbers 0..98, each in decimal and hexadecimal (e.g. "10 = 0xa").
i = 0
while i < 99:
    print('{:d} = 0x{:x}'.format(i, i))
    i += 1
| [
"800@holbertonschool.com"
] | 800@holbertonschool.com |
c48fdcf193858cfb50927df777850acb30ebd52e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4254/codes/1637_1055.py | e44021d6d961a9fd0df484c0a0024cf18b907868 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Teste seu código aos poucos.
# Check whether a projectile launched at speed `vi` and angle `ang`
# lands at horizontal distance `d`, using the ideal range formula
# R = v0^2 * sin(2*theta) / g (flat ground, no air resistance).
from math import *

vi = float(input("Valor da vel. inicial: "))
ang = radians(float(input("Valor do angulo: ")))
d = float(input("Valor da distancia horizontal: "))
g = 9.8
alcance = vi ** 2 * sin(2 * ang) / g
# Compare floats with a tolerance instead of exact equality.
print("sim" if abs(d - alcance) <= 0.1 else "nao")
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
c1f078d04fb1349eb369dd17b1730b136b109c1b | bbf1153379eb6f8d0da97e7f608d8714f69bea2f | /masker.py | 3e8dbdfa38d1bb6e3504834ad4cca692e6ff1937 | [] | no_license | rubythonode/Fairy-zepeto-tech | 234fd96a26ba58f1267d723a7f9f8faeb6584fcc | b7f5e64fe9ae9ddeca91cb5c5a8629d9762f984e | refs/heads/master | 2020-12-23T11:56:46.095451 | 2019-05-11T09:08:12 | 2019-05-11T09:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | import cv2
from imutils.video import VideoStream
import imutils
import dlib
def draw_dlib_rect(frame, rect):
    """Draw a dlib detection rectangle onto *frame* as a blue 2px box."""
    left, top = rect.left(), rect.top()
    right, bottom = rect.right(), rect.bottom()
    # cv2.rectangle takes the two opposite corners directly.
    cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
def main():
    """Webcam loop: detect the largest face per frame, draw a box and a
    centre dot on it, and paste ``mask.png`` centred over the face.

    Press 'q' in the display window to quit.
    """
    detector = dlib.get_frontal_face_detector()
    # predictor = dlib.shape_predictor(
    #     './shape_predictor_68_face_landmarks.dat')
    vs = VideoStream(src=0, resolution=(1280, 960)).start()
    fileStream = False  # webcam stream, so vs.more() is never consulted
    cv2.namedWindow('Frame', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Frame', 1000, 800)
    # Remember the last detection so brief detector dropouts don't flicker.
    prev_face = None
    prev_idx = 0
    PREV_MAX = 100  # max consecutive frames a stale detection may be reused
    mask = cv2.imread('./mask.png')
    mask_h, mask_w, _ = mask.shape
    mask_x, mask_y = mask_w / 2, mask_h / 2  # half-extents, used to centre the overlay
    while True:
        if fileStream and not vs.more():
            break
        frame = vs.read()
        frame = imutils.resize(frame, width=960)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        try:
            rects = detector(gray, 0)
            rects = sorted(
                rects,
                key=lambda rect: rect.width() * rect.height(),
                reverse=True)
            # Take the rectangle (face) with the largest detected area.
            rect = rects[0]
        except IndexError:
            rect = None
        if rect:
            prev_idx = 0
        if not rect:
            if prev_face is not None and prev_idx < PREV_MAX:
                rect = prev_face  # no detection: reuse a recent (< PREV_MAX frames old) result
                prev_idx += 1
        if rect:  # a face is available (including the prev_face fallback)
            prev_face = rect  # remember for later dropouts
            # shape = get_shape(predictor, gray, rect)
            draw_dlib_rect(frame, rect)
            # Face centre. BUGFIX: the y coordinate previously read
            # int(rect.top() + rect.bottom() / 2), i.e. top + bottom/2,
            # due to missing parentheses; mirror the x computation.
            frame_x, frame_y = int(
                (rect.right() + rect.left()) / 2), int((rect.top() + rect.bottom()) / 2)
            cv2.circle(frame, (frame_x, frame_y), 5, (0, 255, 0), -1)
            dx = (frame_x - mask_x)
            dy = (frame_y - mask_y)
            frame[int(dy):int(dy + mask_h), int(dx):int(dx + mask_w)] = mask
        cv2.imshow("Frame", frame)  # show the frame
        # Press 'q' to quit.
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cv2.destroyAllWindows()
    vs.stop()
# Entry point: start the webcam face-masking loop when run as a script.
if __name__ == '__main__':
    main()
| [
"32605822+JunhoYeo@users.noreply.github.com"
] | 32605822+JunhoYeo@users.noreply.github.com |
91f7e272e123bcc82effd0aa0590229161bc20a0 | 177d7066f6a0326ed937a56174d7e2241653929a | /Tree&Graphs/lc733.py | e1405eccb1370ebee80dc19fea3328286363cdba | [] | no_license | jasonusaco/Leetcode-Practice | 276bcdb62b28806b3d297338882f4b1eef56cc13 | 91dc73202eb9952a6064013ef4ed20dfa4137c01 | refs/heads/master | 2020-07-06T08:29:09.419062 | 2019-10-10T01:43:03 | 2019-10-10T01:43:03 | 202,955,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | class Solution:
def floodFill(self, image, sr, sc, newColor):
R=len(image)
C=len(image[0])
oldColor=image[sr][sc]
if oldColor==newColor:
return image
def dfs(r,c):
if image[r][c]==oldColor:
image[r][c]=newColor
if r>=1:
dfs(r-1,c)
if r<R-1:
dfs(r+1,c)
if c>=1:
dfs(r,c-1)
if c<C-1:
dfs(r,c+1)
dfs(sr,sc)
return image
| [
"yangyx@raysdata.com"
] | yangyx@raysdata.com |
63ee5b6e6fb9e38aace4cf21ce53147853078367 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02576/s038383120.py | 9145f7248eee02d00cc812c74f809dbb43d03476 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py |
# n items produced x at a time, t minutes per batch: total time is
# ceil(n / x) * t.  Use pure integer ceil-division instead of the original
# int(n / x * t): float division can lose precision for large values.
n, x, t = map(int, input().split())
print((n + x - 1) // x * t)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
323874b2e2f36c12b541fc62524c7fcb8b55acf7 | e1fada3a9846a5593e3d3d2fdc32b23b832e38b4 | /otx/mpa/seg/stage.py | d590b95c4f69f193b4c83ac34f749ec3408f023a | [
"Apache-2.0"
] | permissive | GalyaZalesskaya/openvino_training_extensions | fd1ebb189900008b16b85568449e5c62d8edbad5 | 6116639caeff100b06a6c10a96c7e7f5951f20c7 | refs/heads/develop | 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 | Apache-2.0 | 2019-10-28T16:16:27 | 2019-08-15T15:41:59 | Python | UTF-8 | Python | false | false | 5,579 | py | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmcv import ConfigDict
from otx.algorithms.segmentation.adapters.mmseg.utils.builder import build_segmentor
from otx.mpa.stage import Stage
from otx.mpa.utils.config_utils import recursively_update_cfg
from otx.mpa.utils.logger import get_logger
logger = get_logger()
class SegStage(Stage):
    """Recipe stage that assembles an MMSegmentation-ready config.

    Each ``configure_*`` method mutates the stage's mmcv ``Config``
    (``self.cfg``) in place; ``configure`` orchestrates them.
    """

    # Builder the base Stage uses to instantiate the segmentation model.
    MODEL_BUILDER = build_segmentor

    def configure(self, model_cfg, model_ckpt, data_cfg, training=True, **kwargs):
        """Create MMCV-consumable config from given inputs"""
        logger.info(f"configure!: training={training}")

        cfg = self.cfg
        self.configure_model(cfg, model_cfg, training, **kwargs)
        self.configure_ckpt(cfg, model_ckpt, kwargs.get("pretrained", None))
        self.configure_data(cfg, training, data_cfg)
        self.configure_task(cfg, training, **kwargs)
        self.configure_hook(cfg)

        return cfg

    def configure_model(self, cfg, model_cfg, training, **kwargs):
        """Merge ``model_cfg`` into ``cfg`` and wire up OpenVINO (MMOV) models.

        Raises:
            ValueError: if ``model_cfg`` lacks a ``model`` attribute, or its
                declared task is not segmentation.
        """
        if model_cfg:
            if hasattr(model_cfg, "model"):
                cfg.merge_from_dict(model_cfg._cfg_dict)
            else:
                raise ValueError(
                    "Unexpected config was passed through 'model_cfg'. "
                    "it should have 'model' attribute in the config"
                )
            # Task defaults to segmentation when the model config omits it.
            cfg.model_task = cfg.model.pop("task", "segmentation")
            if cfg.model_task != "segmentation":
                raise ValueError(f"Given model_cfg ({model_cfg.filename}) is not supported by segmentation recipe")

        # OV-plugin
        ir_model_path = kwargs.get("ir_model_path")
        if ir_model_path:

            def is_mmov_model(k, v):
                # Matches config nodes whose "type" starts with "MMOV"
                # (OpenVINO-backed model wrappers).
                if k == "type" and v.startswith("MMOV"):
                    return True
                return False

            ir_weight_path = kwargs.get("ir_weight_path", None)
            ir_weight_init = kwargs.get("ir_weight_init", False)
            # Inject the IR model/weight paths into every MMOV node.
            recursively_update_cfg(
                cfg,
                is_mmov_model,
                {"model_path": ir_model_path, "weight_path": ir_weight_path, "init_weight": ir_weight_init},
            )

    def configure_data(self, cfg, training, data_cfg, **kwargs):  # noqa: C901
        """Merge ``data_cfg`` and wrap train/val/test datasets as MPASegDataset."""
        # Data
        if data_cfg:
            cfg.merge_from_dict(data_cfg)
        # Dataset
        super().configure_data(cfg, training, **kwargs)
        src_data_cfg = Stage.get_data_cfg(cfg, "train")
        for mode in ["train", "val", "test"]:
            if src_data_cfg.type == "MPASegDataset" and cfg.data.get(mode, False):
                if cfg.data[mode]["type"] != "MPASegDataset":
                    # Wrap original dataset config
                    org_type = cfg.data[mode]["type"]
                    cfg.data[mode]["type"] = "MPASegDataset"
                    cfg.data[mode]["org_type"] = org_type

    def configure_task(self, cfg, training, **kwargs):
        """Adjust settings for task adaptation"""
        if cfg.get("task_adapt", None):
            logger.info(f"task config!!!!: training={training}")
            # "REPLACE" is the default class-adaptation policy.
            task_adapt_op = cfg["task_adapt"].get("op", "REPLACE")

            # Task classes
            self.configure_classes(cfg, task_adapt_op)
            # Ignored mode
            self.configure_ignore(cfg)

    def configure_classes(self, cfg, task_adapt_op):
        """Reconcile checkpoint classes with dataset classes per ``task_adapt_op``.

        Raises:
            KeyError: for an unsupported ``task_adapt_op``
                (only "REPLACE" and "MERGE" are handled).
        """
        # Task classes
        org_model_classes = self.get_model_classes(cfg)
        data_classes = self.get_data_classes(cfg)
        # Segmentation class lists always start with an explicit "background".
        if "background" not in org_model_classes:
            org_model_classes = ["background"] + org_model_classes
        if "background" not in data_classes:
            data_classes = ["background"] + data_classes

        # Model classes
        if task_adapt_op == "REPLACE":
            if len(data_classes) == 1:  # 'background'
                # Dataset declares no real classes; keep the checkpoint's.
                model_classes = org_model_classes.copy()
            else:
                model_classes = data_classes.copy()
        elif task_adapt_op == "MERGE":
            # Union, preserving the checkpoint's ordering first.
            model_classes = org_model_classes + [cls for cls in data_classes if cls not in org_model_classes]
        else:
            raise KeyError(f"{task_adapt_op} is not supported for task_adapt options!")

        cfg.task_adapt.final = model_classes
        cfg.model.task_adapt = ConfigDict(
            src_classes=org_model_classes,
            dst_classes=model_classes,
        )

        # Model architecture: resize every decode head to the final class count.
        if "decode_head" in cfg.model:
            decode_head = cfg.model.decode_head
            if isinstance(decode_head, dict):
                decode_head.num_classes = len(model_classes)
            elif isinstance(decode_head, list):
                for head in decode_head:
                    head.num_classes = len(model_classes)

        # For SupConDetCon
        if "SupConDetCon" in cfg.model.type:
            cfg.model.num_classes = len(model_classes)

        # Task classes, kept for later stages to read back.
        self.org_model_classes = org_model_classes
        self.model_classes = model_classes

    def configure_ignore(self, cfg):
        """Swap in a loss that ignores unlabeled pixels when cfg.ignore is set."""
        # Change to incremental loss (ignore mode)
        if cfg.get("ignore", False):
            cfg_loss_decode = ConfigDict(
                type="CrossEntropyLossWithIgnore",
                use_sigmoid=False,
                loss_weight=1.0,
            )

            # Only the plain FCNHead is rewired; other heads are left as-is.
            if "decode_head" in cfg.model:
                decode_head = cfg.model.decode_head
                if decode_head.type == "FCNHead":
                    decode_head.type = "CustomFCNHead"
                    decode_head.loss_decode = cfg_loss_decode
| [
"noreply@github.com"
] | GalyaZalesskaya.noreply@github.com |
4c19f566aefa3011a9090945688d333dec852953 | d2e4206ce78451b08bc742e4376316077236e418 | /RemoveAllAdjacentDuplicatesinStringGreaterThanEqualToK.py | 5650df59145581dd769a0da1af47d40e884cdfb9 | [] | no_license | coolsgupta/leetcode | cb25a62999bc0fe20fd7b250c5056c4b6cc3bdca | 64ad78da317c783ffc68357524daa38be0074417 | refs/heads/master | 2023-04-20T11:26:25.638424 | 2021-05-12T05:04:56 | 2021-05-12T05:04:56 | 238,100,015 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | class Solution:
def removeDuplicates(self, s: str, k: int) -> str:
    """Remove every run of ``k`` or more equal adjacent characters.

    A run is deleted as soon as it is known to have reached length ``k``
    (when a different character follows it, or at the end of the string).
    Removals can cascade: after a run is dropped, the characters on both
    sides may merge into a new run.

    Fixes an IndexError the original raised on an empty input string
    (it indexed ``stack[-1]`` without checking for emptiness).
    """
    # Stack of [char, run_length] pairs describing the surviving prefix.
    stack = []
    for ch in s:
        if stack and stack[-1][0] == ch:
            # Extend the current run.
            stack[-1][1] += 1
        else:
            # The previous run ends here; drop it if it reached length k.
            if stack and stack[-1][1] >= k:
                stack.pop()
            if stack and stack[-1][0] == ch:
                # Cascade: the newly exposed run continues with ch.
                stack[-1][1] += 1
            else:
                stack.append([ch, 1])
    # The final run never saw a differing successor; check it too.
    if stack and stack[-1][1] >= k:
        stack.pop()
    return ''.join(char * count for char, count in stack)
| [
"cool.sgupta@gmail.com"
] | cool.sgupta@gmail.com |
cc357f6f1e4bea37c08347ba9e8dad0df543adc7 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_aleph5381_a.py | 92a8b86a7abbd771359c6c39ae5adc41704a46a0 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python3
import sys
# rl: read one raw line (including the trailing newline) from stdin.
rl = lambda: sys.stdin.readline()
# The first input line holds the number of test cases.
T = int(rl())
def solve(casei):
    """Read one test case (a number N) and print its answer.

    Prints the last multiple of N counted before every decimal digit
    0-9 has been seen at least once, or "INSOMNIA" when N == 0
    (0, 0, 0, ... never reveals a new digit).

    Fixes the original's ``N is 0`` — identity comparison on an int is
    an implementation-dependent idiom; equality is the correct test.
    """
    line = rl().split()
    N = int(line[0])
    if N == 0:
        print("Case #{}: INSOMNIA".format(casei))
        return
    seen = set()  # decimal digits observed so far, as characters
    x = 0         # current multiple of N
    while len(seen) < 10:
        x += N
        seen.update(str(x))
    print("Case #{}: {}".format(casei, x))
# Solve each test case; Code Jam output numbers cases from 1.
for case_number in range(1, T + 1):
    solve(case_number)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
bef6de11300c0be1b68a28fd535972aa2b0cad51 | 7bb9ffe61491817e999de40266653063ae4f8cfb | /swamp_angel/cc_wrf_swe/allNcFiles_wrf_snowRainSep.py | f6aab080fbbd97a257f332ddc3ba0c95f888f631 | [] | no_license | cycle13/chapter1 | b0e43d7c3297aa24109d0fd00e9bfaa9a95cea2c | 18638f5ef806fa16d7b3b14b43501674478e220e | refs/heads/master | 2022-04-09T23:55:21.936738 | 2020-03-31T01:44:44 | 2020-03-31T01:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,863 | py | av_ncfiles_h_rs = ["C:/Users/HHS/summaTestCases_2.x/output/swampAngel/rainSnow/sa_rs_ljc_12p_h_2006-2007_senatorVariableDecayRate_1.nc",
"C:/Users/HHS/summaTestCases_2.x/output/swampAngel/rainSnow/sa_rs_ljc_12p_h_2007-2008_senatorVariableDecayRate_1.nc",
]
# The original file spelled out every NetCDF path literally.  The file names
# follow a strict pattern (run scheme, forcing suffix, water year), so the
# lists are generated instead; the module-level names and their exact values
# are unchanged.
_SA_OUT = "C:/Users/HHS/summaTestCases_2.x/output/swampAngel/"
_YEARS = ("2006-2007", "2007-2008")


def _rain_snow_files(suffix):
    """Paths for the rain/snow-separation runs (ljc_12p only), one per water year."""
    return [
        _SA_OUT + "rainSnow/sa_rs_ljc_12p_" + suffix + "_" + year
        + "_senatorVariableDecayRate_1.nc"
        for year in _YEARS
    ]


# Model-configuration codes used by the WRF-forced SWE runs, in the original
# file's order (scheme-major, both water years per scheme).
_WRF_SCHEMES = (
    "lsc_13p", "lsh_13p", "lsp_13p",
    "ssc_13p", "ssh_13p", "ssp_13p",
    "ljc_12p", "ljp_12p", "lth_12p",
    "sjh_12p", "sjp_12p", "stp_12p",
)


def _wrf_files(suffix):
    """Paths for the WRF-forced runs: every scheme, both water years."""
    return [
        _SA_OUT + "cc_wrf/sa_swe_" + scheme + "_wrf_" + suffix + "_" + year
        + "_senatorVariableDecayRate_1.nc"
        for scheme in _WRF_SCHEMES
        for year in _YEARS
    ]


# Rain/snow-separation runs, by forcing suffix.
av_ncfiles_T_rs = _rain_snow_files("T")
av_ncfiles_T_al_rs = _rain_snow_files("T_al")
av_ncfiles_T_al_P_rs = _rain_snow_files("T_al_P")

# WRF-forced runs, by forcing suffix (24 paths each: 12 schemes x 2 years).
av_ncfiles_h = _wrf_files("h")
av_ncfiles_T = _wrf_files("T")
av_ncfiles_T_al = _wrf_files("T_al")
av_ncfiles_T_al_P = _wrf_files("T_al_P")
| [
"safa.hamideh@gmail.com"
] | safa.hamideh@gmail.com |
f9e8a99edebd2f573d6d2be807df5f607a9f8f7c | af4abf0a22db1cebae466c56b45da2f36f02f323 | /storage/fase2/team14/storage/Blockchain.py | e165b05dd9bb4ba6701acce857ef42272e491acf | [
"MIT"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 5,175 | py | import json
import hashlib
import os
import csv
def turn_on_safe_mode(database, table):
    """Enable safe mode for a table by creating its (empty) blockchain file."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    with open(path, 'w') as file:
        json.dump({}, file, indent=4)
def turn_off_safe_mode(database, table):
    """Disable safe mode for a table by deleting its blockchain file."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    os.remove(path)
def concat_register(register):
    """Concatenate the string form of every field of *register*.

    The result is the canonical text a block hash is computed from.
    Uses ``str.join`` instead of repeated ``+=``, which is quadratic
    on long registers.
    """
    return ''.join(str(field) for field in register)
def generate_hash(string_data):
    """Return the hex SHA-256 digest of *string_data* (a str)."""
    digest = hashlib.sha256(string_data.encode())
    return digest.hexdigest()
def generate_chain(database, table, registers):
    """Write a fresh hash chain for *registers* to the table's JSON file.

    Block ids start at 1; the genesis block's "previous" is 64 zeros.
    Every block starts with status 0 (unmodified).
    """
    chain = {}
    prev_hash = '0000000000000000000000000000000000000000000000000000000000000000'
    for block_id, register in enumerate(registers, start=1):
        block_hash = generate_hash(concat_register(register))
        chain[block_id] = {'blockId': block_id, 'data': register,
                           'previous': prev_hash, 'hash': block_hash, 'status': 0}
        prev_hash = block_hash
    with open('data/info/safeModeTables/' + database + table + '.json', 'w') as file:
        json.dump(chain, file, indent=4)
def update_block(database, table, newRegister, oldRegister):
    """Replace *oldRegister*'s block data with *newRegister*.

    The block is located by the hash of the old register; its data and
    hash are rewritten and its status set to 1 (modified).  Note that the
    successor's "previous" pointer is deliberately left stale — that
    mismatch is how tampering is later detected/visualized.
    """
    path = 'data/info/safeModeTables/' + database + table + '.json'
    old_hash = generate_hash(concat_register(oldRegister))
    new_hash = generate_hash(concat_register(newRegister))
    with open(path, 'r') as file:
        blockchain = json.load(file)
    for block in blockchain.values():
        if block['hash'] == old_hash:
            block['data'] = newRegister
            block['hash'] = new_hash
            block['status'] = 1
            break
    with open(path, 'w') as file:
        json.dump(blockchain, file, indent=4)
def delete_block(database, table, register):
    """Remove the block whose hash matches *register* from the table's chain."""
    path = 'data/info/safeModeTables/' + database + table + '.json'
    target = generate_hash(concat_register(register))
    with open(path) as file:
        blockchain = json.load(file)
    for block_id, block in blockchain.items():
        if block['hash'] == target:
            # Safe to delete during iteration: we break immediately after.
            del blockchain[block_id]
            break
    with open(path, 'w') as file:
        json.dump(blockchain, file, indent=4)
def insert_block(database, table, register):
    """Append *register* as a new block at the tail of the table's chain.

    NOTE(review): relies on JSON object key order — the last key is
    assumed to be the newest block; keys loaded from JSON are strings.
    """
    path = 'data/info/safeModeTables/' + database + table + '.json'
    with open(path) as file:
        blockchain = json.load(file)
    if blockchain:
        last_id = int(list(blockchain.keys())[-1])
        previous = blockchain[str(last_id)]['hash']
        block_id = last_id + 1
    else:
        # Genesis block: previous hash is 64 zeros.
        previous = '0000000000000000000000000000000000000000000000000000000000000000'
        block_id = 1
    block_hash = generate_hash(concat_register(register))
    blockchain[block_id] = {'blockId': block_id, 'data': register,
                            'previous': previous, 'hash': block_hash, 'status': 0}
    with open(path, 'w') as file:
        json.dump(blockchain, file, indent=4)
def chartBlockchain(database, table):
    """Render the table's blockchain as a Graphviz diagram.

    Writes blockchain.dot, runs Graphviz to produce blockchain.png, and
    then opens the image.  Valid blocks are drawn green; from the first
    block whose status is 1 or whose "previous" pointer does not match
    the preceding block's hash onward, blocks are drawn red.
    """
    blockchain = None
    with open('data/info/safeModeTables/' + database + table + '.json') as file:
        blockchain = json.load(file)
    # NOTE: this local ``file`` shadows the builtin for the rest of the body.
    file = open('blockchain.dot', 'w')
    file.write('digraph blockchain {\n')
    file.write('rankdir=LR;\n')
    file.write('node[shape=box]\n')
    color = '#DCF0C2'  # green = intact; flips to red once the chain breaks
    previous = '0000000000000000000000000000000000000000000000000000000000000000'
    if len(blockchain) > 0:
        for i in blockchain.values():
            # Once a modified block or broken link is found, everything
            # after it is rendered red (color never flips back).
            if color == '#DCF0C2' and (i['status'] == 1 or i['previous'] != previous):
                color = '#F3ABAB'
            # One HTML-like table label per block (labels are in Spanish).
            file.write(str(i['blockId']) + '[label=<')
            file.write('<TABLE BORDER="0" BGCOLOR=' + '"' + color + '" ' +
                       'CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">')
            file.write('<TR><TD>' + 'Bloque: ' + '</TD><TD>' + '# ' + str(i['blockId']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Datos: ' + '</TD><TD>' + str(i['data']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Anterior: ' + '</TD><TD>' + str(i['previous']) + '</TD></TR>')
            file.write('<TR><TD>' + 'Hash: ' + '</TD><TD>' + str(i['hash']) + '</TD></TR>')
            file.write('</TABLE>')
            file.write('>, ];')
            previous = i['hash']
    # Chain consecutive blocks with edges, in key order.
    count = 0
    nodes = list(blockchain.keys())
    for i in nodes:
        if count + 1 < len(nodes):
            file.write(nodes[count] + '->' + nodes[count + 1] + '\n')
        count += 1
    file.write('}')
    file.close()
    os.system("dot -Tpng blockchain.dot -o blockchain.png")
    # Presumably opens the image via the OS file association (Windows) —
    # TODO confirm; this is a no-op/error on other platforms.
    os.system('blockchain.png')
def insert_block_CSV(results, file, database, table):
    """Insert each CSV row whose matching entry in *results* is 0.

    *results* is indexed positionally, one entry per CSV row; *file* is
    the path of the comma-delimited CSV to read.
    """
    with open(file, 'r') as source:
        reader = csv.reader(source, delimiter=',')
        for index, row in enumerate(reader):
            if results[index] == 0:
                insert_block(database, table, row)
| [
"noreply@github.com"
] | joorgej.noreply@github.com |
e40541251a9d5577e3a17f6424b708ed485084f5 | 34ef54c04b369a6161c6f8a649868a47122a2d89 | /.venv/Lib/site-packages/astroid/brain/brain_hashlib.py | a6582de9504bc79b6cb428620bbacdf4b4b44262 | [
"MIT"
] | permissive | abner-lucas/tp-cruzi-db | f70ad269c50a2db24debd1455daeddaa2ebd3923 | 595c5c46794ae08a1f19716636eac7430cededa1 | refs/heads/bioinformatica | 2023-05-18T23:23:23.458394 | 2021-06-14T02:13:17 | 2021-06-14T02:13:17 | 351,864,250 | 2 | 2 | MIT | 2021-06-13T19:52:18 | 2021-03-26T17:40:20 | Python | UTF-8 | Python | false | false | 2,400 | py | # Copyright (c) 2016, 2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2018 David Poirier <david-poirier-csn@users.noreply.github.com>
# Copyright (c) 2018 wgehalo <wgehalo@gmail.com>
# Copyright (c) 2018 Ioana Tagirta <ioana.tagirta@gmail.com>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import astroid
# Gate for the extra hash algorithms mocked below (blake2*, sha3_*, shake_*),
# which exist only on Python 3.6+.
PY36 = sys.version_info >= (3, 6)
def _hashlib_transform():
    """Return an astroid module mocking hashlib's hasher classes.

    The real hash objects are implemented in C and opaque to static
    inference, so a pure-Python stand-in with the same public surface
    (digest/hexdigest/update/copy and the name/block_size/digest_size
    properties) is generated and parsed instead.
    """
    signature = "value=''"
    # Template for one mock hasher class; %(name)s, %(digest)s and
    # %(signature)s are substituted per algorithm below.
    template = """
class %(name)s(object):
    def __init__(self, %(signature)s): pass
    def digest(self):
        return %(digest)s
    def copy(self):
        return self
    def update(self, value): pass
    def hexdigest(self):
        return ''
    @property
    def name(self):
        return %(name)r
    @property
    def block_size(self):
        return 1
    @property
    def digest_size(self):
        return 1
"""
    # The classic algorithms all share the simple ``value=''`` constructor.
    algorithms_with_signature = dict.fromkeys(
        ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"], signature
    )
    if PY36:
        # blake2b/blake2s take a rich keyword-only constructor signature.
        blake2b_signature = "data=b'', *, digest_size=64, key=b'', salt=b'', \
person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \
node_depth=0, inner_size=0, last_node=False"
        blake2s_signature = "data=b'', *, digest_size=32, key=b'', salt=b'', \
person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \
node_depth=0, inner_size=0, last_node=False"
        new_algorithms = dict.fromkeys(
            ["sha3_224", "sha3_256", "sha3_384", "sha3_512", "shake_128", "shake_256"],
            signature,
        )
        algorithms_with_signature.update(new_algorithms)
        algorithms_with_signature.update(
            {"blake2b": blake2b_signature, "blake2s": blake2s_signature}
        )
    # Render one mock class per algorithm and parse them into a module node.
    classes = "".join(
        template % {"name": hashfunc, "digest": 'b""', "signature": signature}
        for hashfunc, signature in algorithms_with_signature.items()
    )
    return astroid.parse(classes)
# Register the mock so astroid extends the "hashlib" module whenever inferred.
astroid.register_module_extender(astroid.MANAGER, "hashlib", _hashlib_transform)
| [
"abnerlucas.cad@gmail.com"
] | abnerlucas.cad@gmail.com |
c66fb5735b664f20d647f3bc49bc6ba93a005a3d | f6703b2afca284bf75e0dbf8f61d77e5251f905c | /euler55.py | 8409d725d3672ffc5ec2ce87dddc8a3c639332e4 | [] | no_license | rwieckowski/project-euler-python | 2a7aa73670b4684f076ad819bfc464aa0778f96c | be9a455058b20adfd32c814effd8753cc9d39890 | refs/heads/master | 2021-01-10T21:10:44.875335 | 2015-06-23T13:29:58 | 2015-06-23T13:29:58 | 37,920,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | """
If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.

Not all numbers produce palindromes so quickly. For example,

    349 + 943 = 1292
    1292 + 2921 = 4213
    4213 + 3124 = 7337

That is, 349 took three iterations to arrive at a palindrome.

Although no one has proved it yet, it is thought that some numbers, like
196, never produce a palindrome. A number that never forms a palindrome
through the reverse and add process is called a Lychrel number. Due to
the theoretical nature of these numbers, and for the purpose of this
problem, we shall assume that a number is Lychrel until proven
otherwise. In addition you are given that for every number below ten
thousand, it will either (i) become a palindrome in less than fifty
iterations, or (ii) no one, with all the computing power that exists,
has managed so far to map it to a palindrome. In fact, 10677 is the
first number to be shown to require over fifty iterations before
producing a palindrome: 4668731596684224866951378664 (53 iterations,
28 digits).

Surprisingly, there are palindromic numbers that are themselves Lychrel
numbers; the first example is 4994.

How many Lychrel numbers are there below ten-thousand?

NOTE: Wording was modified slightly on 24 April 2007 to emphasise the
theoretical nature of Lychrel numbers.
"""
def euler55():
    """Count the Lychrel numbers below ten thousand.

    A number is treated as Lychrel unless repeated reverse-and-add
    reaches a palindrome within fifty iterations (the assumption the
    problem statement grants).  The original stub's doctest expected
    'to-do' while ``pass`` returned None, so it always failed.

    >>> euler55()
    249
    """
    def is_lychrel(n):
        # Apply reverse-and-add up to 50 times; stop early on a palindrome.
        for _ in range(50):
            n += int(str(n)[::-1])
            digits = str(n)
            if digits == digits[::-1]:
                return False
        return True

    return sum(1 for n in range(1, 10000) if is_lychrel(n))
if __name__ == "__main__":
    # Run the embedded doctests when this file is executed as a script.
    import doctest
doctest.testmod() | [
"rwieckowski@ivmx.pl"
] | rwieckowski@ivmx.pl |
c40ee3ce0fccd75e6378521c50021da41918068a | b02a5015ecc61414834c4b24e5f33168eb99070a | /CCscripts/MakeAgreementGraph.py | e6b906884fb9acab5b5c0c84e68a61196f62ba47 | [
"MIT"
] | permissive | mrvollger/SDA | f1aa8edf9989125d7e0c0f6ae159bca495915826 | 3d5e9ec8d1e7ac97121c33c6be80d635392631cf | refs/heads/master | 2023-05-13T05:24:54.665854 | 2023-05-07T23:40:25 | 2023-05-07T23:40:25 | 101,452,926 | 29 | 5 | MIT | 2019-11-21T18:08:13 | 2017-08-26T00:58:01 | Python | UTF-8 | Python | false | false | 3,729 | py | #!/usr/bin/env python
import argparse
import ABPUtils
import numpy as np
import sys
import networkx as nx
import matplotlib.pyplot as plt
# Command-line interface: positional SNV matrix plus output/graph options.
ap = argparse.ArgumentParser(description="Sort by haplotype")
ap.add_argument("mat", help="snv matrix file")
ap.add_argument("--out", help="Output file", default="/dev/stdout")
ap.add_argument("--graph", help="Write graph to this file.", default=None)
ap.add_argument("--alph", help="Alphabet to use, 3 characters: ref,alt,gap", default='.1n')
ap.add_argument("--cov", help="Average coverage.", type=float, default=60)
ap.add_argument("--score_cutoff", help="Prune connections below this score.",type=int, default=15)
#args = ap.parse_args('assembly.consensus.fragments.snv.mat.categorized')
args = ap.parse_args()
alph = list(args.alph)  # three characters: [ref, alt, gap]
mat = open(args.mat)
outFile = open(args.out, 'w')
#freqLine = mat.readline()
#freq = np.array(freqLine.split())
#print freq
# NOTE(review): several of these initial values (groups, groupList,
# coverage) are immediately overwritten below; kept as written.
gtList = []
groups = {}
index = 0
groupList = []
coverage = []
(gt, readNames, groupList, groups) = ABPUtils.ReadGenotypeMatrix(mat)
altList = []
refList = []
# Unpack the alphabet: r = reference call, a = alternate call, g = gap/no-call.
r=alph[0]
a=alph[1]
g=alph[2]
(altList, refList) = ABPUtils.GetRefAltLists(gt)
# Per-site coverage: number of reads with a non-gap call in each column.
coverage = np.array([len(np.where(gt[:,i]!=g)[0]) for i in range(0,gt.shape[1])])
ngt = len(gt)
# Compare distance to members in the group
# NOTE(review): np.array(groups.keys()) assumes Python 2, where keys()
# returns a list; on Python 3 this yields a 0-d object array.
allGroups = np.array(groups.keys())
allScores = []
nScores = []
scoreIndices = []
for i in range(0,ngt):
    innerMat = []
    innerMis = []
    scores = []
    # Score read i against every other read j: +1 for each shared alt
    # call, -1 for each alt/ref disagreement at a shared site.
    for j in range(0,ngt):
        if (j == i):
            continue
        nMatch = len(np.intersect1d(altList[i],altList[j], assume_unique=True))
        nMis = len(np.intersect1d(altList[i],refList[j], assume_unique=True))+\
            len(np.intersect1d(refList[i],altList[j], assume_unique=True))
        scores.append([nMatch - nMis, j])
    minAlt = 0
    minRef = 0
    # Rows sorted by descending agreement score; column 1 keeps the
    # partner read's index.
    scoreMat = np.array(sorted(scores, reverse=True))
    # Only reads carrying both alt and ref calls are informative.
    if (len(altList[i]) > 0 and len(refList[i]) > 0):
        (start,end) = ABPUtils.GetRange(gt[i], g)
        readCov = coverage[start]
        # Keep at most --cov best-scoring partners (fewer at low coverage).
        bestN = min(readCov, int(args.cov))
        readScores = scoreMat[0:bestN,0]
        minScore = min(readScores)
        # Pad the score vector to --cov entries with the minimum score so
        # every output row has the same width.
        scores = np.array([minScore]*int(args.cov))
        scores[0:bestN] = readScores
        outFile.write("\t".join([str(x) for x in scores]) + "\n")
        # record this in a matrix
        scoreIndices.append(i)
        nScores.append(bestN)
        allScores.append(scoreMat[0:bestN,:])
def TripleToHex(x):
    """RGBA tuple of floats in [0, 1] -> '#rrggbbaa' hex colour string."""
    channels = [int(component * 255) for component in x[:4]]
    return "#" + "".join("{:02x}".format(channel) for channel in channels)
def TripleToTuple(x):
    """Format an RGB color (floats in [0, 1]) as an 'r,g,b' byte-value string."""
    scaled = [int(channel * 255) for channel in x[:3]]
    return "{},{},{}".format(*scaled)
# Optionally emit the read-similarity graph (nodes = reads, coloured by
# group; edges = neighbour pairs whose score passes --score_cutoff).
if (args.graph is not None):
    # NOTE(review): this rebinds `g` (previously the gap symbol) to the
    # graph object; the gap symbol is no longer used below, but the
    # shadowing is fragile if more code is added.
    g = nx.Graph()
    nColors = len(groups.keys())
    groupNames = groups.keys()
    cm = plt.get_cmap("Set1")
    # One evenly spaced colormap colour per group, as '#rrggbbaa' strings.
    colors = [TripleToHex(cm(int(i*float(float(cm.N)/nColors)))) for i in range(0,len(groupNames))]
    print colors
    groupCM = { groupNames[i]: colors[i] for i in range(0,len(colors))}
    print groupCM
    for i in range(0,ngt):
        g.add_node(i, color=groupCM[groupList[i]])
    # Walk the sparse score records; scoreIndices holds the reads that were
    # actually scored, in increasing order.
    idx = 0
    for i in range(0,ngt):
        if (idx >= len(scoreIndices)):
            break
        if (scoreIndices[idx] != i):
            continue
        for j in range(0,len(allScores[idx])):
            # Scores are sorted descending, so stop at the first one below
            # the cutoff.
            if (allScores[idx][j][0] < args.score_cutoff):
                break
            g.add_edge(i,allScores[idx][j][1])
        idx+=1
    # Pick the output format from the file name.
    if (args.graph.find("gml") >= 0):
        nx.write_gml(g, args.graph)
    elif (args.graph.find("gexf") >= 0):
        nx.write_gexf(g, args.graph)
    elif (args.graph.find("graphml") >= 0):
        nx.write_graphml(g, args.graph)
| [
"mrvollger@gmail.com"
] | mrvollger@gmail.com |
9eae891cd50c3307935c030b24af13498a8781a3 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Handy/CenteringPolicy.py | 1f4d15a624e31aae89848e0823774339bcdc6110 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 12,809 | py | # encoding: utf-8
# module gi.repository.Handy
# from /usr/lib64/girepository-1.0/Handy-0.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.overrides.Gtk as __gi_overrides_Gtk
import gi.repository.Gtk as __gi_repository_Gtk
import gobject as __gobject
class CenteringPolicy(__gobject.GEnum):
    """Generated stub for the libhandy ``HdyCenteringPolicy`` GEnum.

    GEnum subclasses :class:`int`, so nearly all members below are the
    standard ``int`` API restored by the stub generator; only the ``LOOSE``
    and ``STRICT`` members at the bottom are Handy-specific.
    """

    def as_integer_ratio(self):  # real signature unknown; restored from __doc__
        """
        Return integer ratio.

        Return a pair of integers, whose ratio is exactly equal to the original int
        and with a positive denominator.

        >>> (10).as_integer_ratio()
        (10, 1)
        >>> (-10).as_integer_ratio()
        (-10, 1)
        >>> (0).as_integer_ratio()
        (0, 1)
        """
        pass

    def bit_length(self):  # real signature unknown; restored from __doc__
        """
        Number of bits necessary to represent self in binary.

        >>> bin(37)
        '0b100101'
        >>> (37).bit_length()
        6
        """
        pass

    def conjugate(self, *args, **kwargs):  # real signature unknown
        """ Returns self, the complex conjugate of any int. """
        pass

    def from_bytes(self, *args, **kwargs):  # real signature unknown
        """
        Return the integer represented by the given array of bytes.

        bytes
          Holds the array of bytes to convert.  The argument must either
          support the buffer protocol or be an iterable object producing bytes.
          Bytes and bytearray are examples of built-in objects that support the
          buffer protocol.
        byteorder
          The byte order used to represent the integer.  If byteorder is 'big',
          the most significant byte is at the beginning of the byte array.  If
          byteorder is 'little', the most significant byte is at the end of the
          byte array.  To request the native byte order of the host system, use
          `sys.byteorder' as the byte order value.
        signed
          Indicates whether two's complement is used to represent the integer.
        """
        pass

    def to_bytes(self, *args, **kwargs):  # real signature unknown
        """
        Return an array of bytes representing an integer.

        length
          Length of bytes object to use.  An OverflowError is raised if the
          integer is not representable with the given number of bytes.
        byteorder
          The byte order used to represent the integer.  If byteorder is 'big',
          the most significant byte is at the beginning of the byte array.  If
          byteorder is 'little', the most significant byte is at the end of the
          byte array.  To request the native byte order of the host system, use
          `sys.byteorder' as the byte order value.
        signed
          Determines whether two's complement is used to represent the integer.
          If signed is False and a negative integer is given, an OverflowError
          is raised.
        """
        pass

    def __abs__(self, *args, **kwargs):  # real signature unknown
        """ abs(self) """
        pass

    def __add__(self, *args, **kwargs):  # real signature unknown
        """ Return self+value. """
        pass

    def __and__(self, *args, **kwargs):  # real signature unknown
        """ Return self&value. """
        pass

    def __bool__(self, *args, **kwargs):  # real signature unknown
        """ self != 0 """
        pass

    def __ceil__(self, *args, **kwargs):  # real signature unknown
        """ Ceiling of an Integral returns itself. """
        pass

    def __delattr__(self, *args, **kwargs):  # real signature unknown
        """ Implement delattr(self, name). """
        pass

    def __dir__(self, *args, **kwargs):  # real signature unknown
        """ Default dir() implementation. """
        pass

    def __divmod__(self, *args, **kwargs):  # real signature unknown
        """ Return divmod(self, value). """
        pass

    def __eq__(self, *args, **kwargs):  # real signature unknown
        """ Return self==value. """
        pass

    def __float__(self, *args, **kwargs):  # real signature unknown
        """ float(self) """
        pass

    def __floordiv__(self, *args, **kwargs):  # real signature unknown
        """ Return self//value. """
        pass

    def __floor__(self, *args, **kwargs):  # real signature unknown
        """ Flooring an Integral returns itself. """
        pass

    def __format__(self, *args, **kwargs):  # real signature unknown
        pass

    def __getattribute__(self, *args, **kwargs):  # real signature unknown
        """ Return getattr(self, name). """
        pass

    def __getnewargs__(self, *args, **kwargs):  # real signature unknown
        pass

    def __ge__(self, *args, **kwargs):  # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs):  # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs):  # real signature unknown
        """ Return hash(self). """
        pass

    def __index__(self, *args, **kwargs):  # real signature unknown
        """ Return self converted to an integer, if self is suitable for use as an index into a list. """
        pass

    def __init_subclass__(self, *args, **kwargs):  # real signature unknown
        """
        This method is called when a class is subclassed.

        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    def __int__(self, *args, **kwargs):  # real signature unknown
        """ int(self) """
        pass

    def __invert__(self, *args, **kwargs):  # real signature unknown
        """ ~self """
        pass

    def __le__(self, *args, **kwargs):  # real signature unknown
        """ Return self<=value. """
        pass

    def __lshift__(self, *args, **kwargs):  # real signature unknown
        """ Return self<<value. """
        pass

    def __lt__(self, *args, **kwargs):  # real signature unknown
        """ Return self<value. """
        pass

    def __mod__(self, *args, **kwargs):  # real signature unknown
        """ Return self%value. """
        pass

    def __mul__(self, *args, **kwargs):  # real signature unknown
        """ Return self*value. """
        pass

    def __neg__(self, *args, **kwargs):  # real signature unknown
        """ -self """
        pass

    @staticmethod  # known case of __new__
    def __new__(*args, **kwargs):  # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs):  # real signature unknown
        """ Return self!=value. """
        pass

    def __or__(self, *args, **kwargs):  # real signature unknown
        """ Return self|value. """
        pass

    def __pos__(self, *args, **kwargs):  # real signature unknown
        """ +self """
        pass

    def __pow__(self, *args, **kwargs):  # real signature unknown
        """ Return pow(self, value, mod). """
        pass

    def __radd__(self, *args, **kwargs):  # real signature unknown
        """ Return value+self. """
        pass

    def __rand__(self, *args, **kwargs):  # real signature unknown
        """ Return value&self. """
        pass

    def __rdivmod__(self, *args, **kwargs):  # real signature unknown
        """ Return divmod(value, self). """
        pass

    def __reduce_ex__(self, *args, **kwargs):  # real signature unknown
        """ Helper for pickle. """
        pass

    def __reduce__(self, *args, **kwargs):  # real signature unknown
        pass

    def __repr__(self, *args, **kwargs):  # real signature unknown
        """ Return repr(self). """
        pass

    def __rfloordiv__(self, *args, **kwargs):  # real signature unknown
        """ Return value//self. """
        pass

    def __rlshift__(self, *args, **kwargs):  # real signature unknown
        """ Return value<<self. """
        pass

    def __rmod__(self, *args, **kwargs):  # real signature unknown
        """ Return value%self. """
        pass

    def __rmul__(self, *args, **kwargs):  # real signature unknown
        """ Return value*self. """
        pass

    def __ror__(self, *args, **kwargs):  # real signature unknown
        """ Return value|self. """
        pass

    def __round__(self, *args, **kwargs):  # real signature unknown
        """
        Rounding an Integral returns itself.

        Rounding with an ndigits argument also returns an integer.
        """
        pass

    def __rpow__(self, *args, **kwargs):  # real signature unknown
        """ Return pow(value, self, mod). """
        pass

    def __rrshift__(self, *args, **kwargs):  # real signature unknown
        """ Return value>>self. """
        pass

    def __rshift__(self, *args, **kwargs):  # real signature unknown
        """ Return self>>value. """
        pass

    def __rsub__(self, *args, **kwargs):  # real signature unknown
        """ Return value-self. """
        pass

    def __rtruediv__(self, *args, **kwargs):  # real signature unknown
        """ Return value/self. """
        pass

    def __rxor__(self, *args, **kwargs):  # real signature unknown
        """ Return value^self. """
        pass

    def __setattr__(self, *args, **kwargs):  # real signature unknown
        """ Implement setattr(self, name, value). """
        pass

    def __sizeof__(self, *args, **kwargs):  # real signature unknown
        """ Returns size in memory, in bytes. """
        pass

    def __str__(self, *args, **kwargs):  # real signature unknown
        """ Return str(self). """
        pass

    def __subclasshook__(self, *args, **kwargs):  # real signature unknown
        """
        Abstract classes can override this to customize issubclass().

        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass

    def __sub__(self, *args, **kwargs):  # real signature unknown
        """ Return self-value. """
        pass

    def __truediv__(self, *args, **kwargs):  # real signature unknown
        """ Return self/value. """
        pass

    def __trunc__(self, *args, **kwargs):  # real signature unknown
        """ Truncating an Integral returns itself. """
        pass

    def __xor__(self, *args, **kwargs):  # real signature unknown
        """ Return self^value. """
        pass

    denominator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the denominator of a rational number in lowest terms"""

    imag = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the imaginary part of a complex number"""

    numerator = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the numerator of a rational number in lowest terms"""

    real = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """the real part of a complex number"""

    value_name = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    value_nick = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    # The actual HdyCenteringPolicy enum members.
    LOOSE = 0
    STRICT = 1
    __class__ = type
    __dict__ = None  # (!) real value is "mappingproxy({'__module__': 'gi.repository.Handy', '__dict__': <attribute '__dict__' of 'CenteringPolicy' objects>, '__doc__': None, '__gtype__': <GType HdyCenteringPolicy (93893295010912)>, '__enum_values__': {0: <enum HDY_CENTERING_POLICY_LOOSE of type Handy.CenteringPolicy>, 1: <enum HDY_CENTERING_POLICY_STRICT of type Handy.CenteringPolicy>}, '__info__': gi.EnumInfo(CenteringPolicy), 'LOOSE': <enum HDY_CENTERING_POLICY_LOOSE of type Handy.CenteringPolicy>, 'STRICT': <enum HDY_CENTERING_POLICY_STRICT of type Handy.CenteringPolicy>})"
    __enum_values__ = {
        0: 0,
        1: 1,
    }
    __gtype__ = None  # (!) real value is '<GType HdyCenteringPolicy (93893295010912)>'
    __info__ = gi.EnumInfo(CenteringPolicy)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
64683810f9c43df3333a32d6404f1f6af85bc005 | f1c20d0836f4815b81c895ffe22a29005db3746d | /backend/main/settings/base.py | ed93056e2f01bdbdcf212ab4fe28dc7d14f35e97 | [] | no_license | pavelm2007/leadersofdigital_2020_04 | 6ceacf0858ea46bd73c5a0e0ab120cae802e85bd | 0132d1b3361518b109b0632daaf13ed8e849192d | refs/heads/main | 2023-04-04T21:12:54.890040 | 2021-04-17T20:37:02 | 2021-04-17T20:37:02 | 358,649,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | # https://docs.djangoproject.com/en/1.10/ref/settings/
import os
from decouple import config # noqa
# Project root: three levels up from this settings module (main/settings/base.py).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def base_dir_join(*args):
    """Return *args* joined onto the project base directory."""
    parts = (BASE_DIR,) + tuple(args)
    return os.path.join(*parts)
SITE_ID = 1

SECURE_HSTS_PRELOAD = True

# NOTE(review): DEBUG=True alongside HSTS preload suggests this is a dev
# base that production settings override -- confirm in the prod settings.
DEBUG = True

ADMINS = (("Admin", "foo@example.com"),)

# Custom user model (apps/users).
AUTH_USER_MODEL = "users.User"

ALLOWED_HOSTS = []

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django_js_reverse",
    "webpack_loader",
    "import_export",
    "apps.common",
    "apps.users",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    # WhiteNoise serves static files directly from the app server.
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "main.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [base_dir_join("templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                # Expose Sentry DSN and deploy SHA to all templates.
                "apps.common.context_processors.sentry_dsn",
                "apps.common.context_processors.commit_sha",
            ],
        },
    },
]

WSGI_APPLICATION = "main.wsgi.application"

AUTH_PASSWORD_VALIDATORS = [
    {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", },
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", },
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", },
]

# Internationalization / localization.
LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True
STATICFILES_DIRS = (base_dir_join("../frontend"),)

# Webpack
WEBPACK_LOADER = {
    "DEFAULT": {
        "CACHE": False,  # on DEBUG should be False
        "STATS_FILE": base_dir_join("../webpack-stats.json"),
        "POLL_INTERVAL": 0.1,
        # Raw strings: these are regex patterns, and "\." in a plain string
        # literal is an invalid escape sequence (DeprecationWarning today,
        # a SyntaxError in future Python versions).
        "IGNORE": [r".+\.hot-update.js", r".+\.map"],
    }
}

# Celery
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACKS_LATE = True
CELERY_TIMEZONE = TIME_ZONE

# Sentry error reporting and the deployed commit (from Heroku dyno metadata).
SENTRY_DSN = config("SENTRY_DSN", default="")
COMMIT_SHA = config("HEROKU_SLUG_COMMIT", default="")
| [
"pavelm2007@yandex.ru"
] | pavelm2007@yandex.ru |
64649915912e14ca161b5972b75805aaa8f7bc29 | c92b6c0b59d25018de5b51c6d88f4764e5b713d7 | /ligo/skymap/conftest.py | 102ff41df148897c8cdc94c7f0d69d5122e5549c | [] | no_license | lpsinger/ligo.skymap | 9ecb3480859a3bc7e09332118aa151b47cf50dc8 | 35d451804acb859141a39296f8d6f760802fc78c | refs/heads/main | 2023-08-30T21:01:00.223367 | 2023-08-21T14:03:13 | 2023-08-21T14:03:13 | 124,963,286 | 24 | 18 | null | 2023-07-08T12:53:23 | 2018-03-12T23:17:14 | Python | UTF-8 | Python | false | false | 2,321 | py | # This file is used to configure the behavior of pytest when using the Astropy
# test infrastructure. It needs to live inside the package in order for it to
# get picked up when running the tests inside an interpreter using
# packagename.test
import warnings
from astropy.version import version as astropy_version
import pytest
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
    """Customize the pytest-astropy-header version report, when available."""
    if not ASTROPY_HEADER:
        return

    config.option.astropy_header = True

    # Adjust which package versions are displayed when running the tests.
    PYTEST_HEADER_MODULES.pop('Pandas', None)
    for label, module in (('astropy', 'astropy'),
                          ('astropy-healpix', 'astropy_healpix'),
                          ('healpy', 'healpy'),
                          ('reproject', 'reproject')):
        PYTEST_HEADER_MODULES[label] = module

    from . import __version__
    TESTED_VERSIONS['ligo.skymap'] = __version__
# Uncomment the last two lines in this block to treat all DeprecationWarnings as
# exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
# as follow (although default should work for most cases).
# To ignore some packages that produce deprecation warnings on import
# (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
# 'setuptools'), add:
# modules_to_ignore_on_import=['module_1', 'module_2']
# To ignore some specific deprecation warning messages for Python version
# MAJOR.MINOR or later, add:
# warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# from astropy.tests.helper import enable_deprecations_as_exceptions # noqa
# enable_deprecations_as_exceptions()
@pytest.fixture(autouse=True)
def ignore_unclosed_file_warnings():
    """Suppress unclosed-file ResourceWarnings for every test.

    Many of the command-line tools in :mod:`ligo.skymap.tool` use
    :class:`argparse.FileType` and therefore leave files opened, so these
    warnings would only drown out more interesting warning types.
    """
    warnings.filterwarnings(action='ignore',
                            message='unclosed file .*',
                            category=ResourceWarning)
| [
"leo.singer@ligo.org"
] | leo.singer@ligo.org |
dedd346d54e7685a9a5faf73d1ec612f64bd2a8b | 02e5b1240db2ef04b4f8b661a9ac4ce060144d74 | /experiments/debug_algorithm_comparison.py | a478002d69b94f10e3cab276763d6d6087c35cfb | [
"MIT"
] | permissive | jayeshchoudhari/pyhawkes | b3b143a5040730826c23a9b3703159dbeb9bf21d | f4b0e6e3ce7f74e647f0ed2254ea334c22d6e82b | refs/heads/master | 2021-06-12T12:55:54.740142 | 2017-03-27T06:47:16 | 2017-03-27T06:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | """
Compare the various algorithms on a synthetic dataset.
"""
import cPickle
import os
import copy
import gzip
import numpy as np
# Use the non-interactive Agg backend if running on a server without the
# DISPLAY variable; must happen before pyplot is first imported.
if "DISPLAY" not in os.environ:
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
# import brewer2mpl
# colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
# goodcolors = np.array([0,1,2,4,6,7,8])
# colors = np.array(colors)[goodcolors]
import harness
def load_data(data_path, test_path):
    """Load the training events, held-out test events and ground-truth model.

    Both files are gzipped pickles of ``(spikes, model)`` pairs; the model
    stored with the test set is discarded.
    """
    def unpickle(path):
        with gzip.open(path, 'r') as handle:
            return cPickle.load(handle)

    S, true_model = unpickle(data_path)
    S_test, _ = unpickle(test_path)
    return S, S_test, true_model
def plot_pred_ll_vs_time(models, results, burnin=0,
                         std_ll=np.nan,
                         true_ll=np.nan):
    """Plot predictive log likelihood vs wall-clock time for each method.

    :param models:  list of method labels, parallel to ``results``.
    :param results: per-method result objects; assumed to expose
                    ``timestamps`` and ``test_lls`` arrays -- TODO confirm
                    against the harness module.
    :param burnin:  number of leading samples to drop from each trace.
    :param std_ll:  horizontal reference line for the standard Hawkes model.
    :param true_ll: horizontal reference line for the true model.
    """
    from hips.plotting.layout import create_figure
    from hips.plotting.colormaps import harvard_colors

    # Make the ICML figure
    fig = create_figure((4,3))
    ax = fig.add_subplot(111)
    col = harvard_colors()
    plt.grid()

    t_start = 0
    t_stop = 0
    for i, (model, result) in enumerate(zip(models, results)):
        plt.plot(result.timestamps[burnin:], result.test_lls[burnin:], lw=2, color=col[i], label=model)

        # Update time limits
        t_start = min(t_start, result.timestamps[burnin:].min())
        t_stop = max(t_stop, result.timestamps[burnin:].max())

    # plt.legend(loc="outside right")

    # Plot the standard Hawkes test ll as a flat reference line
    plt.plot([t_start, t_stop], std_ll*np.ones(2), lw=2, color=col[len(models)], label="Std.")

    # Plot the true ll as a flat dashed reference line
    plt.plot([t_start, t_stop], true_ll*np.ones(2), '--k', lw=2, label="True")

    ax.set_xlim(t_start, t_stop)
    ax.set_xlabel("time [sec]")
    ax.set_ylabel("Pred. Log Lkhd.")
    plt.show()
def plot_impulse_responses(models, results):
    """Plot a K x K grid of weighted impulse responses for each fitted model.

    Uses the last posterior sample of each result.  NOTE(review): relies on
    the module-level ``K`` set in the __main__ block -- confirm before
    reusing this function elsewhere.
    """
    from hips.plotting.layout import create_figure
    from hips.plotting.colormaps import harvard_colors

    # Make the ICML figure
    fig = create_figure((6,6))
    col = harvard_colors()
    plt.grid()

    y_max = 0
    for i, (model, result) in enumerate(zip(models, results)):
        smpl = result.samples[-1]
        W = smpl.W_effective

        # Continuous-time models return (time grid, impulse array);
        # discrete-time models return just the impulse array.
        if "continuous" in str(smpl.__class__).lower():
            t, irs = smpl.impulses
            for k1 in xrange(K):
                for k2 in xrange(K):
                    plt.subplot(K,K,k1*K + k2 + 1)
                    plt.plot(t, W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)
        else:
            irs = smpl.impulses
            for k1 in xrange(K):
                for k2 in xrange(K):
                    plt.subplot(K,K,k1*K + k2 + 1)
                    plt.plot(W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)

        y_max = max(y_max, (W*irs).max())

    # Give every panel the same y-axis limits.
    for k1 in xrange(K):
        for k2 in xrange(K):
            plt.subplot(K,K,k1*K+k2+1)
            plt.ylim(0,y_max*1.05)

    plt.show()
# def run_comparison(data_path, test_path, output_dir, T_train=None, seed=None):
#     """
#     Run the comparison on the given data file
#     :param data_path:
#     :return:
#     """
if __name__ == "__main__":
    # Experiment configuration: K processes, C latent classes, T time bins.
    seed = None
    run = 1
    K = 4
    C = 1
    T = 1000
    T_train = 1000
    T_test = 1000
    data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
    test_path = os.path.join("data", "synthetic", "synthetic_test_K%d_C%d_T%d.pkl.gz" % (K,C,T_test))
    output_dir = os.path.join("results", "synthetic_K%d_C%d_T%d" % (K,C,T_train), "run%03d" % run)

    # run_comparison(data_path, test_path, output_dir, T_train=T_train, seed=seed)
    if seed is None:
        seed = np.random.randint(2**32)

    print "Setting seed to ", seed
    np.random.seed(seed)

    assert os.path.exists(os.path.dirname(output_dir)), "Output directory does not exist!"

    S, S_test, true_model = load_data(data_path, test_path)
    # If T_train is given, only use a fraction of the dataset
    if T_train is not None:
        S = S[:T_train,:]

    # Use the true basis / network so all methods share the same priors
    dt, dt_max = true_model.dt, true_model.dt_max
    basis = true_model.basis
    network = true_model.network

    # First fit the standard (non-Bayesian) Hawkes model via BFGS
    results = []
    output_path = os.path.join(output_dir, "std.pkl.gz")
    std_results = \
        harness.fit_standard_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
                                               model_args={"basis": basis, "alpha": 1.0, "beta": 1.0})
    std_model = std_results.samples[0]
    # results.append(std_results)

    # Now fit the Bayesian models with MCMC or VB,
    # initializing with the standard model.  The four lists below are
    # parallel: label, fitting function, inference kwargs, model kwargs.
    models = [
        "SS-DTH (Gibbs)",
        #"SS-CTH (Gibbs)",
        "MoG-DTH (VB)",
        "MoG-DTH (SVI)"
    ]
    methods = [
        harness.fit_spikeslab_network_hawkes_gibbs,
        #harness.fit_ct_network_hawkes_gibbs,
        harness.fit_network_hawkes_vb,
        harness.fit_network_hawkes_svi
    ]
    inf_args = [
        {"N_samples": 3000, "standard_model": std_model},
        #{"N_samples": 1000, "standard_model": std_model},
        {"N_samples": 3000, "standard_model": std_model},
        {"N_samples": 3000, "standard_model": std_model}
    ]
    model_args = [
        {"basis": basis, "network": copy.deepcopy(network), "weight_hypers": {"parallel_resampling": False}},
        #{"network": copy.deepcopy(network), "impulse_hypers" : {"mu_0": 0., "lmbda_0": 2.0, "alpha_0": 2.0, "beta_0" : 1.0}},
        {"basis": basis, "network": copy.deepcopy(network)},
        {"basis": basis, "network": copy.deepcopy(network)},
    ]
    assert len(models) == len(methods) == len(inf_args) == len(model_args)

    for model, method, iargs, margs in zip(models, methods, inf_args, model_args):
        output_path = os.path.join(output_dir, model.lower() + ".pkl.gz")
        results.append(method(S, S_test, dt, dt_max, output_path,
                              model_args=margs,
                              **iargs))

    # Plot the results
    plt.ion()
    plot_pred_ll_vs_time(models, results, burnin=1,
                         std_ll=std_results.test_lls[-1],
                         true_ll=true_model.heldout_log_likelihood(S_test))

    # Plot impulse responses
    # plot_impulse_responses(models, results)
| [
"scott.linderman@gmail.com"
] | scott.linderman@gmail.com |
8f4254578ec4c86546e4c356104dd919b4821bc6 | 6d80ce7a1f44ddf5741fd190ddfe0d9be8e5f162 | /model/recognition_model/MORAN_V2/models/fracPickup.py | c77c4b2472d2cf2ef4c6501bff03d3cb0602d5ac | [
"MIT"
] | permissive | dun933/FudanOCR | dd8830ca4b8ebb08acd31326fcf5aa3c961886a0 | fd79b679044ea23fd9eb30691453ed0805d2e98b | refs/heads/master | 2021-04-03T19:50:47.646099 | 2020-03-16T08:43:59 | 2020-03-16T08:43:59 | 248,391,401 | 1 | 0 | MIT | 2020-03-19T02:23:11 | 2020-03-19T02:23:10 | null | UTF-8 | Python | false | false | 1,448 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import numpy.random as npr
class fracPickup(nn.Module):
    """Fractional pickup layer (MORAN): randomly blends the horizontal
    sampling coordinates of neighbouring columns of a (N, C, 1, W) feature
    map, then resamples the map with ``grid_sample``.

    NOTE(review): ``fracPickup_num`` is hard-coded to 0 in ``forward``, so
    the random blending loop is disabled and the layer currently resamples
    the input on an (approximately) identity grid.
    """

    def __init__(self, CUDA=True):
        # ``CUDA`` controls whether the sampling grid is moved to the GPU.
        super(fracPickup, self).__init__()
        self.cuda = CUDA

    def forward(self, x):
        # Expect a 4-D feature map with a collapsed height axis: (N, C, 1, W).
        x_shape = x.size()
        assert len(x_shape) == 4
        assert x_shape[2] == 1

        # Number of random neighbour-blends to perform; 0 disables the loop.
        fracPickup_num = 0

        h_list = 1.
        # Normalized x-coordinates in [-1, 1], one per input column.
        w_list = np.arange(x_shape[3])*2./(x_shape[3]-1)-1
        for i in range(fracPickup_num):
            idx = int(npr.rand()*len(w_list))
            # Skip border columns: blending needs a left neighbour.
            if idx <= 0 or idx >= x_shape[3]-1:
                continue
            # Blend coordinates idx-1 and idx by a random factor in [0, 0.25).
            beta = npr.rand()/4.
            value0 = (beta*w_list[idx] + (1-beta)*w_list[idx-1])
            value1 = (beta*w_list[idx-1] + (1-beta)*w_list[idx])
            # Modified
            w_list[idx-1] = value1
            w_list[idx] = value0

        # Build an (N, 1, W, 2) sampling grid for nn.functional.grid_sample.
        grid = np.meshgrid(
            w_list,
            h_list,
            indexing='ij'
        )
        grid = np.stack(grid, axis=-1)
        grid = np.transpose(grid, (1, 0, 2))
        grid = np.expand_dims(grid, 0)
        grid = np.tile(grid, [x_shape[0], 1, 1, 1])
        grid = torch.from_numpy(grid).type(x.data.type())
        if self.cuda:
            grid = grid.cuda()
        # Variable is a no-op wrapper on modern PyTorch; kept for
        # compatibility with the original (pre-0.4) code.
        self.grid = Variable(grid, requires_grad=False)

        x_offset = nn.functional.grid_sample(x, self.grid)

        return x_offset
| [
"576194329@qq.com"
] | 576194329@qq.com |
9c7f732d96190c1f4b8f7ae7b0edeb7d246afb53 | 7f37e5485cf6140ffad3cd75e1549c3b31ba0758 | /afvink4/venv/Lib/site-packages/bio/SearchIO/HmmerIO/hmmer3_text.py | 17b8162977b2b7f6c4c9015b414eb769a68da519 | [] | no_license | sannepost2001/blok4 | dfd3e760fcec4b4e550d12fc16058234ddcf4999 | 28e3625ca8d60dac6684f5dec85f9757b64d5bdb | refs/heads/master | 2023-05-12T12:51:16.243769 | 2019-06-11T12:50:07 | 2019-06-11T12:50:07 | 185,570,784 | 0 | 0 | null | 2023-05-01T20:34:22 | 2019-05-08T09:05:18 | Python | UTF-8 | Python | false | false | 18,968 | py | # Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SearchIO parser for HMMER plain text output format."""
import re
from Bio._py3k import _as_bytes, _bytes_to_string
from Bio._utils import read_forward
from Bio.Alphabet import generic_protein
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
from ._base import _BaseHmmerTextIndexer
__all__ = ('Hmmer3TextParser', 'Hmmer3TextIndexer')


# precompile regex patterns for faster processing
# regex for program name capture (e.g. 'hmmsearch', 'phmmer')
_RE_PROGRAM = re.compile(r'^# (\w*hmm\w+) :: .*$')
# regex for version string capture from the preamble banner
_RE_VERSION = re.compile(r'# \w+ ([\w+\.]+) .*; http.*$')
# regex for option string capture ('# name: value' preamble lines)
_RE_OPT = re.compile(r'^# (.+):\s+(.+)$')
# regex for parsing query id and length from a 'Query:' line
_QRE_ID_LEN_PTN = r'^Query:\s*(.*)\s+\[\w=(\d+)\]'
_QRE_ID_LEN = re.compile(_QRE_ID_LEN_PTN)
# regex for hsp validation (score and e-value in the domain header)
_HRE_VALIDATE = re.compile(r'score:\s(-?\d+\.?\d+)\sbits.*value:\s(.*)')
# regexes for parsing hsp alignment blocks
_HRE_ANNOT_LINE = re.compile(r'^(\s+)(.+)\s(\w+)')
_HRE_ID_LINE = re.compile(r'^(\s+\S+\s+[0-9-]+ )(.+?)(\s+[0-9-]+)')
class Hmmer3TextParser(object):
"""Parser for the HMMER 3.0 text output."""
    def __init__(self, handle):
        """Initialize the parser with an open text handle.

        Reads the first line eagerly, since ``_parse_preamble`` expects
        ``self.line`` to already be populated.
        """
        self.handle = handle
        self.line = read_forward(self.handle)
        self._meta = self._parse_preamble()
def __iter__(self):
"""Iterate over query results."""
for qresult in self._parse_qresult():
yield qresult
def _read_until(self, bool_func):
"""Read the file handle until the given function returns True (PRIVATE)."""
while True:
if not self.line or bool_func(self.line):
return
else:
self.line = read_forward(self.handle)
    def _parse_preamble(self):
        """Parse HMMER preamble (lines beginning with '#') (PRIVATE).

        Returns a dict with 'program', 'version', 'target' and any other
        option name/value pairs found in the option table.
        """
        meta = {}
        # bool flag for storing state ~ whether we are parsing the option
        # lines or not
        has_opts = False
        while True:
            # no pound sign means we've left the preamble
            if not self.line.startswith('#'):
                break
            # dashes could either mean we are entering or leaving the options
            # section ~ so it's a switch for the has_opts flag
            elif '- - -' in self.line:
                if not has_opts:
                    # if flag is false, that means we're entering opts
                    # so switch the flag accordingly
                    has_opts = True
                else:
                    # if flag is true, that means we've reached the end of opts
                    # so we can break out of the function
                    break
            elif not has_opts:
                # before the option table: look for program name and version
                # try parsing program
                regx = re.search(_RE_PROGRAM, self.line)
                if regx:
                    meta['program'] = regx.group(1)
                # try parsing version
                regx = re.search(_RE_VERSION, self.line)
                if regx:
                    meta['version'] = regx.group(1)
            elif has_opts:
                # inside the option table: every line is 'name: value'
                regx = re.search(_RE_OPT, self.line)
                # if target in regx.group(1), then we store the key as target
                if 'target' in regx.group(1):
                    meta['target'] = regx.group(2).strip()
                else:
                    meta[regx.group(1)] = regx.group(2)

            self.line = read_forward(self.handle)

        return meta
    def _parse_qresult(self):
        """Parse a HMMER3 query block (PRIVATE).

        Generator yielding one QueryResult per 'Query: ... //' block in
        the file.
        """
        # Skip ahead to the first 'Query:' line.
        self._read_until(lambda line: line.startswith('Query:'))

        while self.line:

            regx = re.search(_QRE_ID_LEN, self.line)
            while not regx:
                self.line = read_forward(self.handle)
                regx = re.search(_QRE_ID_LEN, self.line)
            # get query id and length
            qid = regx.group(1).strip()
            # store qresult attributes
            qresult_attrs = {
                'seq_len': int(regx.group(2)),
                'program': self._meta.get('program'),
                'version': self._meta.get('version'),
                'target': self._meta.get('target'),
            }

            # get description and accession, if they exist
            qdesc = '<unknown description>'  # placeholder
            while not self.line.startswith('Scores for '):
                self.line = read_forward(self.handle)
                if self.line.startswith('Accession:'):
                    acc = self.line.strip().split(' ', 1)[1]
                    qresult_attrs['accession'] = acc.strip()
                elif self.line.startswith('Description:'):
                    qdesc = self.line.strip().split(' ', 1)[1].strip()
                    qresult_attrs['description'] = qdesc

            # parse the query hits
            while self.line and '//' not in self.line:
                hit_list = self._parse_hit(qid, qdesc)
                # read through the statistics summary
                # TODO: parse and store this information?
                if self.line.startswith('Internal pipeline'):
                    while self.line and '//' not in self.line:
                        self.line = read_forward(self.handle)

            # create qresult, set its attributes and yield
            # not initializing hit_list directly to handle empty hits
            # (i.e. need to set its query description manually)
            qresult = QueryResult(id=qid, hits=hit_list)
            for attr, value in qresult_attrs.items():
                setattr(qresult, attr, value)
            yield qresult
            self.line = read_forward(self.handle)

            # Skip line beginning with '# Alignment of', which are output
            # when running phmmer with the '-A' flag.
            if self.line.startswith('#'):
                self.line = self.handle.readline()

            # HMMER >= 3.1 outputs '[ok]' at the end of all results file,
            # which means we can break the main loop when we see the line
            if '[ok]' in self.line:
                break
    def _parse_hit(self, qid, qdesc):
        """Parse a HMMER3 hit block, beginning with the hit table (PRIVATE).

        Args:
            qid: id of the query the hits belong to.
            qdesc: description of that query.

        Returns the list of Hit objects built by ``_create_hits`` (empty
        when the output reports no hits).
        """
        # get to the end of the hit table delimiter and read one more line
        self._read_until(lambda line:
                         line.startswith(' ------- ------ -----'))
        self.line = read_forward(self.handle)
        # assume every hit is in inclusion threshold until the inclusion
        # threshold line is encountered
        is_included = True
        # parse the hit table
        hit_attr_list = []
        while True:
            if not self.line:
                return []
            elif self.line.startswith(' ------ inclusion'):
                # rows after this marker fall below the inclusion threshold
                is_included = False
                self.line = read_forward(self.handle)
            # if there are no hits, then there are no hsps
            # so we forward-read until 'Internal pipeline..'
            elif self.line.startswith(' [No hits detected that satisfy '
                                      'reporting'):
                while True:
                    self.line = read_forward(self.handle)
                    if self.line.startswith('Internal pipeline'):
                        assert len(hit_attr_list) == 0
                        return []
            elif self.line.startswith('Domain annotation for each '):
                # end of the hit table; HSP details follow
                hit_list = self._create_hits(hit_attr_list, qid, qdesc)
                return hit_list
            # entering hit results row
            # parse the columns into a list
            row = [x for x in self.line.strip().split(' ') if x]
            # join the description words if it's >1 word
            if len(row) > 10:
                row[9] = ' '.join(row[9:])
            # if there's no description, set it to an empty string
            elif len(row) < 10:
                row.append('')
            assert len(row) == 10
            # create the hit object
            hit_attrs = {
                'id': row[8],
                'query_id': qid,
                'evalue': float(row[0]),
                'bitscore': float(row[1]),
                'bias': float(row[2]),
                # row[3:6] is not parsed, since the info is available
                # at the HSP level
                'domain_exp_num': float(row[6]),
                'domain_obs_num': int(row[7]),
                'description': row[9],
                'is_included': is_included,
            }
            hit_attr_list.append(hit_attrs)
            self.line = read_forward(self.handle)
    def _create_hits(self, hit_attrs, qid, qdesc):
        """Parse a HMMER3 hsp block, beginning with the hsp table (PRIVATE).

        Args:
            hit_attrs: attribute dicts collected by ``_parse_hit``, one per
                hit, consumed (popped) in order as each '>>' block is read.
            qid: query id.
            qdesc: query description.

        Returns the list of Hit objects, each populated with its HSPs.
        """
        # read through until the beginning of the hsp block
        self._read_until(lambda line: line.startswith('Internal pipeline') or
                         line.startswith('>>'))
        # start parsing the hsp block
        hit_list = []
        while True:
            if self.line.startswith('Internal pipeline'):
                # by this time we should've emptied the hit attr list
                assert len(hit_attrs) == 0
                return hit_list
            assert self.line.startswith('>>')
            hid, hdesc = self.line[len('>> '):].split(' ', 1)
            hdesc = hdesc.strip()
            # read through the hsp table header and move one more line
            self._read_until(lambda line:
                             line.startswith(' --- ------ ----- --------') or
                             line.startswith(' [No individual domains'))
            self.line = read_forward(self.handle)
            # parse the hsp table for the current hit
            hsp_list = []
            while True:
                # break out of hsp parsing if there are no hits, it's the last hsp
                # or it's the start of a new hit
                if self.line.startswith(' [No targets detected that satisfy') or \
                        self.line.startswith(' [No individual domains') or \
                        self.line.startswith('Internal pipeline statistics summary:') or \
                        self.line.startswith(' Alignments for each domain:') or \
                        self.line.startswith('>>'):
                    hit_attr = hit_attrs.pop(0)
                    hit = Hit(hsp_list)
                    for attr, value in hit_attr.items():
                        if attr == "description":
                            # don't overwrite a description the Hit already
                            # derived from its HSPs with a truncated one
                            cur_val = getattr(hit, attr)
                            if cur_val and value and cur_val.startswith(value):
                                continue
                        setattr(hit, attr, value)
                    if not hit:
                        # hit has no HSPs: set the query description manually
                        hit.query_description = qdesc
                    hit_list.append(hit)
                    break
                parsed = [x for x in self.line.strip().split(' ') if x]
                assert len(parsed) == 16
                # parsed column order:
                # index, is_included, bitscore, bias, evalue_cond, evalue
                # hmmfrom, hmmto, query_ends, hit_ends, alifrom, alito,
                # envfrom, envto, acc_avg
                frag = HSPFragment(hid, qid)
                # set query and hit descriptions if they are defined / nonempty string
                if qdesc:
                    frag.query_description = qdesc
                if hdesc:
                    frag.hit_description = hdesc
                # HMMER3 alphabets are always protein alphabets
                frag.alphabet = generic_protein
                # depending on whether the program is hmmsearch, hmmscan, or phmmer
                # {hmm,ali}{from,to} can either be hit_{from,to} or query_{from,to}
                # for hmmscan, hit is the hmm profile, query is the sequence
                if self._meta.get('program') == 'hmmscan':
                    # adjust 'from' and 'to' coordinates to 0-based ones
                    frag.hit_start = int(parsed[6]) - 1
                    frag.hit_end = int(parsed[7])
                    frag.query_start = int(parsed[9]) - 1
                    frag.query_end = int(parsed[10])
                elif self._meta.get('program') in ['hmmsearch', 'phmmer']:
                    # adjust 'from' and 'to' coordinates to 0-based ones
                    frag.hit_start = int(parsed[9]) - 1
                    frag.hit_end = int(parsed[10])
                    frag.query_start = int(parsed[6]) - 1
                    frag.query_end = int(parsed[7])
                # strand is always 0, since HMMER now only handles protein
                frag.hit_strand = frag.query_strand = 0
                hsp = HSP([frag])
                hsp.domain_index = int(parsed[0])
                hsp.is_included = parsed[1] == '!'
                hsp.bitscore = float(parsed[2])
                hsp.bias = float(parsed[3])
                hsp.evalue_cond = float(parsed[4])
                hsp.evalue = float(parsed[5])
                if self._meta.get('program') == 'hmmscan':
                    # endtype columns are swapped relative to hmmsearch/phmmer
                    hsp.hit_endtype = parsed[8]
                    hsp.query_endtype = parsed[11]
                elif self._meta.get('program') in ['hmmsearch', 'phmmer']:
                    # endtype columns are swapped relative to hmmscan
                    hsp.hit_endtype = parsed[11]
                    hsp.query_endtype = parsed[8]
                # adjust 'from' and 'to' coordinates to 0-based ones
                hsp.env_start = int(parsed[12]) - 1
                hsp.env_end = int(parsed[13])
                hsp.env_endtype = parsed[14]
                hsp.acc_avg = float(parsed[15])
                hsp_list.append(hsp)
                self.line = read_forward(self.handle)
            # parse the hsp alignments
            if self.line.startswith(' Alignments for each domain:'):
                self._parse_aln_block(hid, hit.hsps)
    def _parse_aln_block(self, hid, hsp_list):
        """Parse a HMMER3 HSP alignment block (PRIVATE).

        Fills in the aligned hmm/sequence strings and the per-column
        annotation of each HSP in ``hsp_list``, in domain order.
        """
        self.line = read_forward(self.handle)
        dom_counter = 0
        while True:
            if self.line.startswith('>>') or \
                    self.line.startswith('Internal pipeline'):
                return hsp_list
            # domains must appear in order: '== domain 1', '== domain 2', ...
            assert self.line.startswith(' == domain %i' % (dom_counter + 1))
            # alias hsp to local var
            # but note that we're still changing the attrs of the actual
            # hsp inside the qresult as we're not creating a copy
            frag = hsp_list[dom_counter][0]
            # XXX: should we validate again here? regex is expensive..
            # regx = re.search(_HRE_VALIDATE, self.line)
            # assert hsp.bitscore == float(regx.group(1))
            # assert hsp.evalue_cond == float(regx.group(2))
            hmmseq = ''
            aliseq = ''
            annot = {}
            self.line = self.handle.readline()
            # parse all the alignment blocks in the hsp
            while True:
                regx = None
                # check for hit or query line
                # we don't check for the hit or query id specifically
                # to anticipate special cases where query id == hit id
                regx = re.search(_HRE_ID_LINE, self.line)
                if regx:
                    # the first hit/query self.line we encounter is the hmmseq
                    if len(hmmseq) == len(aliseq):
                        hmmseq += regx.group(2)
                    # and for subsequent self.lines, len(hmmseq) is either
                    # > or == len(aliseq)
                    elif len(hmmseq) > len(aliseq):
                        aliseq += regx.group(2)
                    assert len(hmmseq) >= len(aliseq)
                # check for start of new domain
                elif self.line.startswith(' == domain') or \
                        self.line.startswith('>>') or \
                        self.line.startswith('Internal pipeline'):
                    # flush the accumulated alignment into the fragment;
                    # which string is hit vs query depends on the program
                    frag.aln_annotation = annot
                    if self._meta.get('program') == 'hmmscan':
                        frag.hit = hmmseq
                        frag.query = aliseq
                    elif self._meta.get('program') in ['hmmsearch', 'phmmer']:
                        frag.hit = aliseq
                        frag.query = hmmseq
                    dom_counter += 1
                    hmmseq = ''
                    aliseq = ''
                    annot = {}
                    break
                # otherwise check if it's an annotation line and parse it
                # len(hmmseq) is only != len(aliseq) when the cursor is parsing
                # the similarity character. Since we're not parsing that, we
                # check for when the condition is False (i.e. when it's ==)
                elif len(hmmseq) == len(aliseq):
                    regx = re.search(_HRE_ANNOT_LINE, self.line)
                    if regx:
                        annot_name = regx.group(3)
                        if annot_name in annot:
                            annot[annot_name] += regx.group(2)
                        else:
                            annot[annot_name] = regx.group(2)
                self.line = self.handle.readline()
class Hmmer3TextIndexer(_BaseHmmerTextIndexer):
    """Indexer class for HMMER plain text output."""

    _parser = Hmmer3TextParser
    # byte markers delimiting one query record in the output
    qresult_start = _as_bytes('Query: ')
    qresult_end = _as_bytes('//')

    def __iter__(self):
        """Iterate over Hmmer3TextIndexer; yields query results' key, offsets, 0."""
        handle = self._handle
        handle.seek(0)
        start_offset = handle.tell()
        regex_id = re.compile(_as_bytes(_QRE_ID_LEN_PTN))
        while True:
            line = read_forward(handle)
            end_offset = handle.tell()
            if line.startswith(self.qresult_start):
                regx = re.search(regex_id, line)
                qresult_key = regx.group(1).strip()
                # qresult start offset is the offset of this line
                # (starts with the start mark)
                start_offset = end_offset - len(line)
            elif line.startswith(self.qresult_end):
                # third tuple member (length) is unused for this format
                yield _bytes_to_string(qresult_key), start_offset, 0
                start_offset = end_offset
            elif not line:
                break
# When executed as a script rather than imported as a module, run the doctests.
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
| [
"43266412+sannepost2001@users.noreply.github.com"
] | 43266412+sannepost2001@users.noreply.github.com |
0682e73e7996182b5c7e1fc9d9644fdff3829b00 | 11df0f91cb97d974a8097a74a907dadfdf63e5a3 | /plugins/jobs/setup.py | ba8f34b55e76d90d049415c52752673fce8b6d66 | [
"Apache-2.0"
] | permissive | kotfic/girder | 730b8234a51e8428952cf359cd5ddb3ccb992510 | 461faf52288c8fc4936f1e7a2ff08ee5a674f324 | refs/heads/master | 2021-01-15T21:07:53.923485 | 2018-10-24T15:03:53 | 2018-10-24T15:03:53 | 40,503,503 | 0 | 0 | Apache-2.0 | 2018-10-05T14:49:29 | 2015-08-10T20:05:16 | Python | UTF-8 | Python | false | false | 1,867 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from setuptools import setup, find_packages
# Package definition for the girder-jobs plugin; registers the plugin with
# girder via the 'girder.plugin' entry point.
setup(
    name='girder-jobs',
    version='3.0.0a1',
    description='A general purpose plugin for managing offline jobs.',
    author='Kitware, Inc.',
    author_email='kitware@kitware.com',
    url='http://girder.readthedocs.io/en/latest/plugins.html#jobs',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5'
    ],
    include_package_data=True,
    # ship only the package itself, not its test suite
    packages=find_packages(exclude=['plugin_tests']),
    zip_safe=False,
    install_requires=['girder>=3.0.0a1'],
    entry_points={
        'girder.plugin': [
            'jobs = girder_jobs:JobsPlugin'
        ]
    }
)
| [
"jonathan.beezley@kitware.com"
] | jonathan.beezley@kitware.com |
374d8fa07453bdb586f04c0da638a78ecfb7da08 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fractions_20200802110516.py | 772193c82c005ec68c8acc419df9558f065a5ee3 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator, denominator):
    """Return the decimal string for numerator/denominator.

    A repeating fractional part is wrapped in parentheses, e.g.
    frac(4, 333) == '0.(012)'.  Division by zero yields 'undefined' and a
    zero numerator yields '0', mirroring the original draft's intent.

    Completes the unfinished draft, which ended in a syntax error
    (`res +=` with no operand) and whose remainder loop never terminated.
    """
    if denominator == 0:
        return "undefined"
    if numerator == 0:
        return "0"
    # The sign is negative exactly when the operands' signs differ.
    res = "-" if (numerator < 0) != (denominator < 0) else ""
    numerator, denominator = abs(numerator), abs(denominator)
    whole, rem = divmod(numerator, denominator)
    res += str(whole)
    if rem == 0:
        return res
    res += "."
    # Long division: a repeated remainder marks the start of the cycle.
    seen = {}  # remainder -> position of the digit it produced in `digits`
    digits = []
    while rem != 0:
        if rem in seen:
            start = seen[rem]
            return (res + "".join(digits[:start]) +
                    "(" + "".join(digits[start:]) + ")")
        seen[rem] = len(digits)
        digit, rem = divmod(rem * 10, denominator)
        digits.append(str(digit))
    return res + "".join(digits)


print(frac(4, 333))
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
95acbaf7ca434dbb5ce338d2f69a29098e9a845c | 744096e063ffb4cdb017f60e6dfae410a51c789a | /keras/keras78_iris_cnn.py | 76a91517f44f06c521e9584fdf7f72c5fcb4fbf3 | [] | no_license | elf0508/Study-bit | 59ddab507b02c13a45913c05a4799ff946e63f95 | a773d7643cbb1c0008e7ea01c32615c9e6e3678c | refs/heads/master | 2022-12-31T11:53:44.344693 | 2020-10-16T09:04:01 | 2020-10-16T09:04:01 | 270,950,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | # 다중 분류
from sklearn.datasets import load_iris

# Multiclass classification of the iris dataset (4 features, 3 classes)
# using a small CNN; the 4 features are reshaped into a 2x2x1 "image".
iris = load_iris()
x = iris.data
y = iris.target
print(x.shape) # (150, 4)
print(y.shape) # (150, )

# x : scaler — standardize features, then reshape for Conv2D input
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x).reshape(150, 2, 2, 1)

# y : one hot encoding of the 3 class labels
from keras.utils.np_utils import to_categorical
y = to_categorical(y)
print(y.shape) # (150, 3)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size =0.8,random_state= 10)

#2. model: stacked same-padded 2x2 convolutions with light dropout,
# ending in a softmax over the 3 classes
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten

model = Sequential()
model.add(Conv2D(10, (2, 2), input_shape = (2, 2, 1 ), activation = 'relu', padding = 'same'))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50,(2, 2), activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50,(2, 2), activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Conv2D(50, (2, 2),activation = 'relu', padding = 'same'))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(3, activation = 'softmax'))

# callbacks (only EarlyStopping is actually passed to fit below)
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
# earlystopping
es = EarlyStopping(monitor = 'val_loss', patience = 50, verbose =1)
# Tensorboard
ts_board = TensorBoard(log_dir = 'graph', histogram_freq= 0,
                       write_graph = True, write_images=True)
# Checkpoint
modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
ckecpoint = ModelCheckpoint(filepath = modelpath, monitor = 'val_loss',
                            save_best_only= True)

#3. compile, fit
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
hist = model.fit(x_train, y_train, epochs =100, batch_size= 64,
                 validation_split = 0.2, verbose = 2,
                 callbacks = [es])

# evaluate on the held-out 20% test split
loss, acc = model.evaluate(x_test, y_test, batch_size = 64)
print('loss: ', loss )
print('acc: ', acc)

# graph: plot training vs validation loss and accuracy curves
import matplotlib.pyplot as plt
plt.figure(figsize = (10, 5))
# 1 — loss curves
plt.subplot(2, 1, 1)
plt.plot(hist.history['loss'], c= 'red', marker = '^', label = 'loss')
plt.plot(hist.history['val_loss'], c= 'cyan', marker = '^', label = 'val_loss')
plt.title('loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
# 2 — accuracy curves
plt.subplot(2, 1, 2)
plt.plot(hist.history['acc'], c= 'red', marker = '^', label = 'acc')
plt.plot(hist.history['val_acc'], c= 'cyan', marker = '^', label = 'val_acc')
plt.title('accuarcy')
plt.xlabel('epochs')
plt.ylabel('acc')
plt.legend()
plt.show()
"elf0508@naver.com"
] | elf0508@naver.com |
3a6f59ae3607d35583d9c3f4b8a8aede43a77042 | 1e4d852a59e6f16d70fb05e74f5b8d6e52bbc5d7 | /data_visualization/15/5/rw_visual.py | 074f07253417fcf0ddce0a00d6e446f6e87c938f | [] | no_license | 1000monkeys/probable-invention | 6cb32fae592f7752c77c295a4be2d500e0a55ec9 | adf42f00c32ab7eb165d78dde3703eba3037356d | refs/heads/master | 2021-06-29T17:08:52.810761 | 2020-10-08T12:45:20 | 2020-10-08T12:45:20 | 168,561,027 | 0 | 0 | null | 2020-09-16T12:38:46 | 2019-01-31T16:54:48 | Python | UTF-8 | Python | false | false | 753 | py | import matplotlib.pyplot as plt
from random_walk import RandomWalk

# Keep making new walks until the user declines.
while True:
    # Make a random walk, and plot the points.
    rw = RandomWalk(500000)
    rw.fill_walk()
    plt.figure(figsize=(10, 6))
    # Color the points along a blue gradient in walk order, so the
    # progression of the walk is visible.
    point_numbers = list(range(rw.num_points))
    plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolor='none', s=1)
    # Emphasize the first and last points.
    plt.scatter(0, 0, c='green', edgecolors='none', s=100)
    plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=100)
    # Hide the axes for a cleaner picture.
    plt.axes().get_xaxis().set_visible(False)
    plt.axes().get_yaxis().set_visible(False)
    plt.show()
    keep_running = input("Make another walk? (y/n): ")
    if keep_running == 'n':
        break
| [
"vos.kjell@gmail.com"
] | vos.kjell@gmail.com |
4291163c4d878c064857a56c6a82f69f91513bf4 | e22390ec9aa1a842626075113472f81076e1bf5f | /pullenti/ner/Token.py | a43336724fed84022a0f45a9486a9a8d01a9b5a1 | [] | no_license | pullenti/PullentiPython | ba9f450f3f49786732e80f34d0506d4a6d41afc3 | 815d550b99f113034c27f60d97493ce2f8e4cfcc | refs/heads/master | 2021-06-22T17:12:36.771479 | 2020-12-11T06:10:23 | 2020-12-11T06:10:23 | 161,268,453 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,959 | py | # Copyright (c) 2013, Pullenti. All rights reserved.
# Non-Commercial Freeware and Commercial Software.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project.
# The latest version of the code is available on the site www.pullenti.ru
import typing
from pullenti.unisharp.Utils import Utils
from pullenti.unisharp.Streams import Stream
from pullenti.morph.CharsInfo import CharsInfo
from pullenti.morph.MorphGender import MorphGender
from pullenti.morph.MorphNumber import MorphNumber
from pullenti.morph.LanguageHelper import LanguageHelper
from pullenti.ner.MorphCollection import MorphCollection
class Token:
    """Base class for all tokens.

    Derived classes are TextToken (a terminal wordform) and MetaToken
    (a contiguous fragment of other tokens).
    """

    def __init__(self, kit_ : 'AnalysisKit', begin : int, end : int) -> None:
        self.kit = None;
        self.__m_begin_char = 0
        self.__m_end_char = 0
        self.tag = None;
        self._m_previous = None;
        self._m_next = None;
        self.__m_morph = None;
        self.chars = None;
        self.__m_attrs = 0
        self.kit = kit_
        self.__m_begin_char = begin
        self.__m_end_char = end

    @property
    def begin_char(self) -> int:
        """Position of the token's first character in the text."""
        return self.__m_begin_char

    @property
    def end_char(self) -> int:
        """Position of the token's last character in the text."""
        return self.__m_end_char

    @property
    def length_char(self) -> int:
        """Length of the token in text characters."""
        return (self.end_char - self.begin_char) + 1

    @property
    def previous(self) -> 'Token':
        """Previous token in the token chain."""
        return self._m_previous

    @previous.setter
    def previous(self, value) -> 'Token':
        self._m_previous = value
        if (value is not None):
            value._m_next = self
        # invalidate the cached whitespace/newline flags
        self.__m_attrs = (0)
        return value

    @property
    def next0_(self) -> 'Token':
        """Next token in the token chain."""
        return self._m_next

    @next0_.setter
    def next0_(self, value) -> 'Token':
        self._m_next = value
        if (value is not None):
            value._m_previous = self
        # invalidate the cached whitespace/newline flags
        self.__m_attrs = (0)
        return value

    @property
    def morph(self) -> 'MorphCollection':
        """Morphological information (created lazily)."""
        if (self.__m_morph is None):
            self.__m_morph = MorphCollection()
        return self.__m_morph

    @morph.setter
    def morph(self, value) -> 'MorphCollection':
        self.__m_morph = value
        return value

    def __str__(self) -> str:
        return self.kit.sofa.text[self.begin_char:self.begin_char+(self.end_char + 1) - self.begin_char]

    def __get_attr(self, i : int) -> bool:
        # Lazily computes and caches the whitespace/newline flags in the
        # __m_attrs bit field.  Bit 0 marks the cache as initialised; bits
        # 1-4 are whitespace-before/after and newline-before/after.
        ch = '\x00'
        if ((((self.__m_attrs) & 1)) == 0):
            self.__m_attrs = (1)
            if (self._m_previous is None):
                self._set_attr(1, True)
                self._set_attr(3, True)
            else:
                # scan the gap between the previous token and this one
                j = self._m_previous.end_char + 1
                while j < self.begin_char:
                    ch = self.kit.sofa.text[j]
                    if (Utils.isWhitespace((ch))):
                        self._set_attr(1, True)
                    if ((ord(ch)) == 0xD or (ord(ch)) == 0xA or ch == '\f'):
                        self._set_attr(3, True)
                    j += 1
            if (self._m_next is None):
                self._set_attr(2, True)
                self._set_attr(4, True)
            else:
                # scan the gap between this token and the next one
                j = self.end_char + 1
                while j < self._m_next.begin_char:
                    ch = self.kit.sofa.text[j]
                    if (Utils.isWhitespace(ch)):
                        self._set_attr(2, True)
                    if ((ord(ch)) == 0xD or (ord(ch)) == 0xA or ch == '\f'):
                        self._set_attr(4, True)
                    j += 1
        return (((((self.__m_attrs) >> i)) & 1)) != 0

    def _set_attr(self, i : int, val : bool) -> None:
        # Set or clear bit *i* of the cached attribute bit field.
        if (val):
            self.__m_attrs |= ((1 << i))
        else:
            self.__m_attrs &= (~ ((1 << i)))

    @property
    def is_whitespace_before(self) -> bool:
        """Whether whitespace characters precede the token."""
        return self.__get_attr(1)

    @is_whitespace_before.setter
    def is_whitespace_before(self, value) -> bool:
        self._set_attr(1, value)
        return value

    @property
    def is_whitespace_after(self) -> bool:
        """Whether whitespace characters follow the token."""
        return self.__get_attr(2)

    @is_whitespace_after.setter
    def is_whitespace_after(self, value) -> bool:
        self._set_attr(2, value)
        return value

    @property
    def is_newline_before(self) -> bool:
        """The token starts on a new line.

        Always true for the first token."""
        return self.__get_attr(3)

    @is_newline_before.setter
    def is_newline_before(self, value) -> bool:
        self._set_attr(3, value)
        return value

    @property
    def is_newline_after(self) -> bool:
        """The token ends its line.

        Always true for the last token."""
        return self.__get_attr(4)

    @is_newline_after.setter
    def is_newline_after(self, value) -> bool:
        self._set_attr(4, value)
        return value

    @property
    def inner_bool(self) -> bool:
        # Used internally.
        return self.__get_attr(5)

    @inner_bool.setter
    def inner_bool(self, value) -> bool:
        self._set_attr(5, value)
        return value

    @property
    def not_noun_phrase(self) -> bool:
        # Used internally: marks that no noun phrase starts here, so
        # extraction is not attempted again at this position.
        return self.__get_attr(6)

    @not_noun_phrase.setter
    def not_noun_phrase(self, value) -> bool:
        self._set_attr(6, value)
        return value

    @property
    def whitespaces_before_count(self) -> int:
        """Number of whitespace units before: a newline counts 10, a tab 5."""
        if (self.previous is None):
            return 100
        if ((self.previous.end_char + 1) == self.begin_char):
            return 0
        return self.__calc_whitespaces(self.previous.end_char + 1, self.begin_char - 1)

    @property
    def newlines_before_count(self) -> int:
        """Number of line breaks before the token."""
        ch0 = chr(0)
        res = 0
        txt = self.kit.sofa.text
        for p in range(self.begin_char - 1, -1, -1):
            ch = txt[p]
            if ((ord(ch)) == 0xA):
                res += 1
            elif ((ord(ch)) == 0xD and (ord(ch0)) != 0xA):
                # lone CR (not part of a CR/LF pair) counts as one break
                res += 1
            elif (ch == '\f'):
                res += 10
            elif (not Utils.isWhitespace(ch)):
                break
            ch0 = ch
        return res

    @property
    def newlines_after_count(self) -> int:
        """Number of line breaks after the token."""
        ch0 = chr(0)
        res = 0
        txt = self.kit.sofa.text
        p = self.end_char + 1
        while p < len(txt):
            ch = txt[p]
            if ((ord(ch)) == 0xD):
                res += 1
            elif ((ord(ch)) == 0xA and (ord(ch0)) != 0xD):
                # lone LF (not part of a CR/LF pair) counts as one break
                res += 1
            elif (ch == '\f'):
                res += 10
            elif (not Utils.isWhitespace(ch)):
                break
            ch0 = ch
            p += 1
        return res

    @property
    def whitespaces_after_count(self) -> int:
        """Number of whitespace units after: a newline counts 10, a tab 5."""
        if (self.next0_ is None):
            return 100
        if ((self.end_char + 1) == self.next0_.begin_char):
            return 0
        return self.__calc_whitespaces(self.end_char + 1, self.next0_.begin_char - 1)

    def __calc_whitespaces(self, p0 : int, p1 : int) -> int:
        # Score the whitespace run [p0..p1]: newline (or CR/LF pair) = 10,
        # tab = 5, 07h/0Ch control chars = 100, anything else = 1.
        # Returns -1 for an invalid range.
        if ((p0 < 0) or p0 > p1 or p1 >= len(self.kit.sofa.text)):
            return -1
        res = 0
        i = p0
        while i <= p1:
            ch = self.kit.get_text_character(i)
            if (ch == '\r' or ch == '\n'):
                res += 10
                ch1 = self.kit.get_text_character(i + 1)
                if (ch != ch1 and ((ch1 == '\r' or ch1 == '\n'))):
                    # CR/LF (or LF/CR) pair scored as a single break
                    i += 1
            elif (ch == '\t'):
                res += 5
            elif (ch == '\u0007'):
                res += 100
            elif (ch == '\f'):
                res += 100
            else:
                res += 1
            i += 1
        return res

    @property
    def is_hiphen(self) -> bool:
        """Whether the token is a hyphen character."""
        ch = self.kit.sofa.text[self.begin_char]
        return LanguageHelper.is_hiphen(ch)

    @property
    def is_table_control_char(self) -> bool:
        """Whether this is a table control character (07h, 1Eh or 1Fh)."""
        ch = self.kit.sofa.text[self.begin_char]
        return (ord(ch)) == 7 or (ord(ch)) == 0x1F or (ord(ch)) == 0x1E

    @property
    def is_and(self) -> bool:
        """Whether this is the conjunction AND (in any language)."""
        return False

    @property
    def is_or(self) -> bool:
        """Whether this is the conjunction OR (in any language)."""
        return False

    @property
    def is_comma(self) -> bool:
        """Whether this is a comma."""
        return self.is_char(',')

    @property
    def is_comma_and(self) -> bool:
        """Whether this is a comma or the conjunction AND."""
        return self.is_comma or self.is_and

    def is_char(self, ch : 'char') -> bool:
        """Check that the token consists of one specific character.

        Args:
            ch('char'): the character to check
        """
        if (self.begin_char != self.end_char):
            return False
        return self.kit.sofa.text[self.begin_char] == ch

    def is_char_of(self, chars_ : str) -> bool:
        """Check that the token is a single character from the given string.

        Args:
            chars_(str): string of allowed characters
        """
        if (self.begin_char != self.end_char):
            return False
        return chars_.find(self.kit.sofa.text[self.begin_char]) >= 0

    def is_value(self, term : str, termua : str=None) -> bool:
        """Check the token against a specific word value.

        Args:
            term(str): the word (checked against TextToken.Term)
            termua(str): the word to check for the Ukrainian language

        Returns:
            bool: yes or no
        """
        return False

    @property
    def is_letters(self) -> bool:
        """Whether this is an alphabetic text token (TextToken)."""
        return False

    def get_referent(self) -> 'Referent':
        """Get the entity reference (non-null only for ReferentToken)."""
        return None

    def get_referents(self) -> typing.List['Referent']:
        """Get references to all entities hidden under this element.

        Some entities may cover others (for example, an address covers
        a city).
        """
        return None

    def get_normal_case_text(self, mc : 'MorphClass'=None, num : 'MorphNumber'=MorphNumber.UNDEFINED, gender : 'MorphGender'=MorphGender.UNDEFINED, keep_chars : bool=False) -> str:
        """Get the token's text in the nominative case.

        Args:
            mc(MorphClass): preferred part of speech
            num(MorphNumber): preferred grammatical number
            gender(MorphGender): preferred gender
            keep_chars(bool): keep character case (by default, all upper)

        Returns:
            str: the text string
        """
        return str(self)

    def get_source_text(self) -> str:
        """Get the fragment of the source text covered by this token.

        Returns:
            str: the source text fragment, or None for an invalid range
        """
        len0_ = (self.end_char + 1) - self.begin_char
        if ((len0_ < 1) or (self.begin_char < 0)):
            return None
        if ((self.begin_char + len0_) > len(self.kit.sofa.text)):
            return None
        return self.kit.sofa.text[self.begin_char:self.begin_char+len0_]

    def get_morph_class_in_dictionary(self) -> 'MorphClass':
        """Check that the word exists in the dictionary of its language.

        Returns:
            MorphClass: parts of speech; IsUndefined when not in the dictionary
        """
        return self.morph.class0_

    def _serialize(self, stream : Stream) -> None:
        # Write position, attribute flags, char info and morphology.
        from pullenti.ner.core.internal.SerializerHelper import SerializerHelper
        SerializerHelper.serialize_int(stream, self.begin_char)
        SerializerHelper.serialize_int(stream, self.end_char)
        SerializerHelper.serialize_int(stream, self.__m_attrs)
        SerializerHelper.serialize_int(stream, self.chars.value)
        if (self.__m_morph is None):
            self.__m_morph = MorphCollection()
        self.__m_morph._serialize(stream)

    def _deserialize(self, stream : Stream, kit_ : 'AnalysisKit', vers : int) -> None:
        # Read back the fields written by _serialize, in the same order.
        from pullenti.ner.core.internal.SerializerHelper import SerializerHelper
        self.kit = kit_
        self.__m_begin_char = SerializerHelper.deserialize_int(stream)
        self.__m_end_char = SerializerHelper.deserialize_int(stream)
        self.__m_attrs = (SerializerHelper.deserialize_int(stream))
        self.chars = CharsInfo._new2561(SerializerHelper.deserialize_int(stream))
        self.__m_morph = MorphCollection()
        self.__m_morph._deserialize(stream)
"alex@alexkuk.ru"
] | alex@alexkuk.ru |
2b0a3e54bd7df8f06edc4175709dcf19073b79f7 | 02945da6cb88389315e6237e7a0715b99a88fc1f | /example/shop/modifiers/__init__.py | 2069050ccce50d08b5d9241b829f26ffdbc5af81 | [
"BSD-3-Clause"
] | permissive | dinoperovic/django-salesman | eab524cebe0fecdf9003c23c9135e3b15a0c434c | 8105e84c0ccca00f483122b91a74175131808399 | refs/heads/master | 2023-08-31T23:47:22.743673 | 2023-08-17T14:21:11 | 2023-08-17T14:21:11 | 237,980,844 | 334 | 44 | BSD-3-Clause | 2023-05-05T14:20:32 | 2020-02-03T14:11:35 | Python | UTF-8 | Python | false | false | 60 | py | from .basic import * # noqa
from .complex import * # noqa
| [
"dino.perovic@gmail.com"
] | dino.perovic@gmail.com |
20a132c6febfa2ed47929dcbd79b1f4d5ab9b0ff | f89313b12b1c260d7275da58198efd08cf3ff45a | /baal/structures/constituency_tree.py | 3a325c71a8cae05c5c1b3824feac7f1a9b1e3f1a | [] | no_license | braingineer/baal | 2f528c5bddfb4962cfefeb4a37b68cba910741ce | 8a7504fb4bbd14832143dd1b013ac9d9e166bd67 | refs/heads/master | 2020-12-18T23:30:03.526423 | 2016-12-05T13:53:07 | 2016-12-05T13:53:07 | 59,702,468 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,695 | py | from baal.utils.general import cprint, bcolors, cformat, nonstr_join
from baal.utils import config
from copy import copy, deepcopy
import re, logging
try:
from nltk.tree import Tree as TKTree
except ImportError:
print("Warning: You don't have NLTK. One method won't work in ConstituencyTree")
logger = logging.getLogger("treedebugging")
import baal
#baal.utils.loggers.turn_on("trees", "debug")
class ConstituencyTree(object):
def __init__(self, symbol, children=None, parent="", semantic=None):
self.symbol = symbol
self.children = children or []
self.parent = parent
self.head = ""
self.spine_index = -1
self.complement = False
self.adjunct = False
self.lexical = False
self.depth = 0
self.direction = ""
self.semantictag = semantic
self.substitutions = []
self.adjuncts = []
self.saved_repr = None
@classmethod
def make(cls, bracketed_string=None, tree=None, correct_root=False):
"""
Instantiation check order: bracketed_string, lexical_item, root.
so leave bracketed_string and lexical_item empty if root.
lexical_item will only work if you have a lexicon
and even then, it expects the lexicon to yield bracketed strings
"""
if not tree and not bracketed_string:
raise TypeError("tree.instantiate takes either an existing tree"
+ " or a bracketed string structure to convert")
if bracketed_string:
new_tree, addressbook = from_string(bracketed_string)
elif tree:
#I don't think I'll ever use this, but just in case
new_tree, addressbook = copy(tree)
else:
raise TypeError("I don't know how you got here, but this is wrong")
if correct_root:
if len(new_tree.symbol) == 0:
assert len(new_tree.children) == 1
new_tree, addressbook = new_tree.children[0].clone()
#new_tree.correct_projections()
new_tree, addressbook = new_tree.clone()
return new_tree, addressbook
def correct_projections(self):
while (len(self.children) == 1 and not self.children[0].lexical and
self.children[0].symbol == self.symbol):
# if things suck and keep sucking.
self.children = self.children[0].children
for child in self.children:
child.correct_projections()
def to_nltk_tree(self):
def f(x):
if len(x.children) == 1 and x.children[0] is None:
return "SUB({})".format(x.symbol)
return [child.symbol if child.lexical else child.to_nltk_tree()
for child in x.children if child is not None]
return TKTree(self.symbol, f(self))
#if self.lexical:
# return TKTree(self.symbol, [self.])
    def clone(self, address=None, addressbook=None,prefab_children=None,
              child_offset=0):
        """ Copy the tree and make the addressbook along the way.

        :param address: address (list of child indices) of this node in the
            clone being built; defaults to [0] (the root).
        :param addressbook: dict mapping address tuples -> cloned nodes;
            threaded through the whole recursive clone and returned.
        :param prefab_children: already-cloned children to use instead of
            cloning ``self.children`` (used by insert_into/substitute_into).
        :param child_offset: shifts the child indices used for addresses
            (used when an insertion will prepend siblings).
        :return: (cloned_tree, addressbook)

        NOTE(review): semantictag, depth, substitutions, adjuncts and
        saved_repr are not copied onto the clone — confirm intentional.
        """
        #print("Cloning myself: {}".format(self))
        address = address or [0]
        addressbook = addressbook or {}
        if prefab_children is not None:
            new_children = prefab_children
        else:
            new_children = []
            for c_i, child in enumerate(self.children):
                new_child, addressbook = child.clone(address+[c_i+child_offset], addressbook)
                new_children.append(new_child)
        new_tree = ConstituencyTree(self.symbol, children=new_children, parent=self.parent)
        new_tree.head = self.head
        new_tree.spine_index = self.spine_index
        new_tree.complement = self.complement
        new_tree.adjunct = self.adjunct
        new_tree.lexical = self.lexical
        new_tree.direction = self.direction
        # Record the clone under its address so callers can look nodes up.
        addressbook[tuple(address)] = new_tree
        return new_tree, addressbook
def insert_into(self, op_tree, op_addr, recur_addr=[0], addrbook={}):
"""
Input: an insertion tree:
the root symbol matches a frontier interior symbol
(an ancestor of the further left/right lexical)
"""
# /// Check for base case
if len(op_addr) == 0:
if op_tree.direction == "left":
# adding to the left side of the node
other_children = []
for c_i, child in enumerate(op_tree.children):
new_child, addrbook = child.clone(recur_addr+[c_i], addrbook)
other_children.append(new_child)
new_child.parent = self.symbol
new_tree, addrbook = self.clone(recur_addr, addrbook, child_offset=len(op_tree.children))
new_tree.children = other_children + new_tree.children
if new_tree.spine_index >= 0:
new_tree.spine_index += len(op_tree.children)
else:
# Adding to the right side of the node
new_tree, addrbook = self.clone(recur_addr, addrbook)
other_children = []
b_i = len(self.children)
for c_i, child in enumerate(op_tree.children):
new_child, addrbook = child.clone(recur_addr+[b_i+c_i], addrbook)
new_child.parent = new_tree.symbol
other_children.append(new_child)
new_tree.children += other_children
return new_tree, addrbook
# /// Recursive case. Clone children, recurse on operation child
next_addr, new_op_addr = op_addr[0], op_addr[1:]
new_children = []
for c_i, child in enumerate(self.children):
if c_i == next_addr:
new_child, addrbook = child.insert_into(op_tree, new_op_addr, recur_addr+[c_i], addrbook)
new_child.parent = self.symbol
new_children.append(new_child)
else:
new_child, addrbook = child.clone(recur_addr+[c_i], addrbook)
new_children.append(new_child)
# /// Return ourself cloned
return self.clone(recur_addr, addrbook, new_children)
def substitute_into(self, op_tree, op_addr, recur_addr=[0], addrbook={}):
"""
Input: an substitution tree:
the root symbol matches a frontier symbol
(a frontier symbol just beyond left/right lexical)
"""
# /// Check for base case
if len(op_addr) == 0:
new_tree, addrbook = op_tree.clone(recur_addr, addrbook)
#new_tree.complement = False
#new_tree.is_argument = True
return new_tree, addrbook
# /// Recursive case. Clone children, recurse on operation child
next_addr, op_addr = op_addr[0], op_addr[1:]
new_children = []
for c_i, child in enumerate(self.children):
if c_i == next_addr:
new_child, addrbook = child.substitute_into(op_tree, op_addr,
recur_addr+[c_i], addrbook)
new_child.parent = self.symbol
new_children.append(new_child)
else:
new_child, addrbook = child.clone(recur_addr+[c_i], addrbook)
new_children.append(new_child)
# /// Return ourself cloned
return self.clone(recur_addr, addrbook, new_children)
def excise_substitutions(self):
# print "inside excise substitutions"
new_subtrees = []
for subst in self.substitutions:
# print "excsising %s" % subst
new_subst = subst
new_subtrees.append(new_subst)
ind = self.children.index(subst)
self.children[ind] = ConstituencyTree(subst.symbol)
self.children[ind].complement = True
self.children[ind].parent = self.symbol
self.substitutions = []
return new_subtrees
def excise_adjuncts(self):
new_subtrees = []
for adj_tree in self.adjuncts:
ind = self.children.index(adj_tree)
self.children[ind] = None
adj_wrapper = ConstituencyTree(symbol=self.symbol, children=[adj_tree])
adj_wrapper.adjunct = True
adj_wrapper.direction = "left" if ind < self.spine_index else "right"
new_subtrees.append(adj_wrapper)
self.adjuncts = []
return new_subtrees
def __getitem__(self, k):
if k>=len(self.children):
return None
return self.children[k]
def __str__(self):
if config.ChartSettings().verbose:
return self.verbose_string()
else:
return "(%s" % self.symbol + ("-> %s" % self.children if len(self.children) > 0 else ")")
return "From: %s; I am %s, with %s children: %s" % (self.parent, self.symbol, len(self.children), "\n".join([str(child) for child in self.children]))
    def verbose_string(self,depth=1, verboseness=float('inf'), spacing=1):
        """Colorized, indented pretty-print of the subtree.

        :param depth: current recursion depth; drives the indentation width.
        :param verboseness: remaining levels to render in full; at 0 the
            subtree degrades to a compact '|'-joined rendering instead.
        :param spacing: indentation multiplier per level.
        """
        # return self._simple_str()
        # ANSI color helpers built on the bcolors escape-code constants.
        p = lambda x: bcolors.OKBLUE+x+bcolors.ENDC
        b = lambda x: bcolors.BOLD+x+bcolors.ENDC
        g = lambda x: bcolors.OKGREEN+x+bcolors.ENDC
        r = lambda x: bcolors.FAIL+x+bcolors.ENDC
        # Label this node by its role: substitution site, insertion tree
        # (with direction arrow), lexical leaf, or plain interior symbol.
        if self.complement:
            this_symbol = "%s(%s)" % (p("Subst"), g(self.symbol))
        elif self.adjunct:
            direction = "[->]" if self.direction == "right" else "[<-]"
            this_symbol = "%s(%s)" % (p(direction+"Ins"), g(self.symbol))
        elif self.lexical:
            this_symbol = "%s(%s)" % (p("lex"), g(self.symbol))
        else:
            this_symbol = self.symbol
        d = depth
        # s(x) yields x*d spaces of indentation.
        s = lambda x: ("{:%d}" % (x*d)).format("")
        if verboseness > 0:
            if len(self.children) > 0:
                # "%s{" % d embeds the numeric depth before the brace.
                tree_print = "\n"+s(spacing)+b("%s{"%d)
                tree_print += "{:-<1}->\n".format(this_symbol)
                tree_print += s(spacing) + r("[")
                tree_print += " "+("\n "+s(spacing)).join([x.verbose_string(depth+1, verboseness-1) for x in self.children])
                tree_print += r("]") + "\n"
                tree_print += s(spacing)+b("}")
                return tree_print
            else:
                return this_symbol
        else:
            # Verboseness exhausted: compact form — lexical symbol (if any)
            # followed by '|'-joined children.
            this_symbol = self.symbol if self.lexical else ""
            # NOTE(review): mysep/childsep/spacers are assigned but unused.
            mysep,childsep, spacers = " ", "; ", " "
            if len(self.children) > 0:
                tree_print = this_symbol
                tree_print += ("|").join([x.verbose_string(depth+1, verboseness-1) for x in self.children])
                return tree_print
            else:
                return this_symbol
def __repr__(self):
if self.lexical:
return "lex/" + self.symbol
if self.adjunct:
sym = "<" if self.direction=="left" else ">"
sym = "ins/"+sym+"/"+self.symbol
elif self.complement:
sym = "sub/"+self.symbol
elif self.parent is None:
sym = "root/"+self.symbol
else:
sym = self.symbol
this_str = "(%s" % sym
for child in self.children:
if child is None:
continue
this_str += " %s" % repr(child)
this_str+=")"
self.saved_repr = this_str
return this_str
def save_str(self):
if self.lexical:
return self.symbol.lower()
if self.adjunct:
sym = (self.symbol, "*") if self.direction=="left" else ("*", self.symbol)
sym = "{}{}".format(*sym)
else:
sym = self.symbol
this_str = "(%s" % sym
for child in self.children:
if child is None:
continue
this_str += " %s" % child.save_str()
this_str+=")"
return this_str
def lexical_string(self):
if self.lexical:
return self.symbol.lower()
else:
return " ".join([child.lexical_string() for child in self.children])
def from_string(in_str):
    """Parse a bracketed tree string into an annotated ConstituencyTree.

    Modeled on NLTK's reader; assumes "(" and ")" delimit constituents,
    e.g. ``(S (NP (NNP John)) (VP (V runs)))``. The raw structure is then
    handed to clean_tree() for annotation.

    :param in_str: bracketed tree string
    :return: (tree, addressbook) as produced by clean_tree
    :raises AssertionError: if the brackets do not reduce to a single tree
    TODO: we want parent to be insertion, foot to be foot. fix.
    """
    tree_starting = lambda x: x[0] == "("
    tree_ending = lambda x: x[0] == ")"
    # Raw string literal: the previous non-raw pattern relied on "\(" etc.
    # passing through unchanged and raises invalid-escape warnings on
    # modern Pythons.
    token_re = re.compile(r"\(\s*([^\s\(\)]+)?|\)|([^\s\(\)]+)")
    # Stack of (label, children[, semantic]); the (None, []) sentinel at the
    # bottom collects the finished root.
    stack = [(None, [])]
    for match in token_re.finditer(in_str):
        token, semantic = match.group(), ""
        # Case: tree/subtree starting. prepare structure
        if tree_starting(token):
            stack.append((token[1:].lstrip(), [], semantic))
        # Case: tree/subtree is ending. make sure it's buttoned up
        elif tree_ending(token):
            label, children, semantic = stack.pop()
            stack[-1][1].append(ConstituencyTree(symbol=label,
                                                 children=children,
                                                 parent=stack[-1][0],
                                                 semantic=semantic))
        # Case: leaf node.
        else:
            if len(token) > 0:
                stack[-1][1].append(token)
    try:
        assert len(stack) == 1
        assert len(stack[0][1]) == 1
        assert stack[0][0] is None
    except AssertionError:
        print(stack)
        print(in_str)
        # BUGFIX: re-raise the original assertion instead of a fresh,
        # message-less AssertionError, preserving which check failed and
        # the full traceback.
        raise
    resulting_tree = stack[0][1][0]
    if isinstance(resulting_tree, list):
        resulting_tree = resulting_tree[0]
    assert isinstance(resulting_tree, ConstituencyTree)
    return clean_tree(resulting_tree, [0], {tuple([0]): resulting_tree})
def clean_tree(root_tree, address, addressbook):
    """
    Annotate and normalize a raw tree produced by from_string.

    Normalizes labels (empty root -> "ROOT"; '*' markers become adjunct
    direction; -/=/| suffixes are stripped), promotes raw string leaves to
    lexical ConstituencyTree nodes, records head word and spine_index on
    each interior node, and drops -NONE- (trace) subtrees. Every visited
    node is recorded in ``addressbook`` keyed by its address tuple.

    :param root_tree: tree (or subtree, when recursing) to clean in place
    :param address: list of child indices locating root_tree from the root
    :param addressbook: dict of address tuple -> node, updated in place
    :return: (root_tree, addressbook), or (None, addressbook) when the
        subtree turns out to be empty/invalid and should be discarded
    """
    logger = logging.getLogger('trees')
    logger.debug(root_tree)
    # Penn-Treebank bracket tags whose '-' is part of the tag, not a suffix.
    # NOTE(review): "-RSB-" appears twice and "-LSB-" is missing — likely a typo.
    tag_exceptions = set("-LRB- -RRB- -RSB- -RSB- -LCB- -RCB-".split(" "))
    if root_tree.symbol == "" and len(root_tree.children)==1:
        root_tree.symbol = "ROOT"
    # A '*' marks an insertion (adjunct) tree; its position encodes direction.
    if "*" in root_tree.symbol:
        root_tree.adjunct = True
        root_tree.direction = "right" if root_tree.symbol[0] == "*" else "left"
        root_tree.symbol = root_tree.symbol.replace("*", "")
    # Strip functional/coindexing suffixes from the label.
    if "-" in root_tree.symbol and root_tree.symbol not in tag_exceptions:
        root_tree.symbol = root_tree.symbol.split("-")[0]
    if "=" in root_tree.symbol:
        root_tree.symbol = root_tree.symbol.split("=")[0]
    if "|" in root_tree.symbol:
        root_tree.symbol = root_tree.symbol.split("|")[0]
    if root_tree.symbol == "":
        print(root_tree, addressbook)
        return None, addressbook
    logger.debug('starting child iter for %s' % root_tree.symbol)
    bad_offset = 0
    marked_bad = []
    #### NOTES ON BAD OFFSET
    # to fix when bad children happen (aka, -None-)
    # I want to remove their tree. but this is tricky because its coupled
    # with the spine_index
    # so, i keep track of the bad offset, set the spine index to the correct val using it
    # and then at the end, I update the list to be correct
    for c_i, child in enumerate(root_tree.children):
        # Address of this child after bad siblings have been removed.
        next_address = address+[c_i - bad_offset]
        if isinstance(child, ConstituencyTree):
            logger.debug("child is a constituency tree")
            logger.debug(child)
            # Trace/empty-element nodes are dropped entirely.
            if '-NONE-' in child.symbol:
                marked_bad.append(c_i)
                bad_offset += 1
                continue
            if len(child.children) > 0:
                ### SPECIAL CASE:
                ### basically, this can take out the None chidlren that happen in WSJ
                if ( len(child.children) == 1 and
                     isinstance(child.children[0], ConstituencyTree) and
                     "-NONE-" in child.children[0].symbol):
                    marked_bad.append(c_i)
                    bad_offset += 1
                    continue
                # Interior node
                logger.debug('diving into child')
                logger.debug('specifically: %s' % child)
                child, addressbook = clean_tree(child, next_address, addressbook)
                if child is None:
                    marked_bad.append(c_i)
                    bad_offset += 1
                    continue
                root_tree.children[c_i] = child
                # Heads percolate upward along the spine.
                if child.head is not None:
                    root_tree.head = child.head
                    root_tree.spine_index = c_i - bad_offset
                #else:
                #    raise AlgorithmicException, "Only making initial trees here"
            else:
                # Substitution point
                logger.debug("child was a complement")
                child.complement = True
        else:
            # Raw string leaf; '*' here means a trace token — drop it.
            if "*" in child:
                marked_bad.append(c_i)
                bad_offset += 1
                continue
            # Found the head
            child = child.lower()
            child = ConstituencyTree(symbol=child, parent=root_tree.symbol)
            child.lexical = True
            root_tree.children[c_i] = child
            head = child
            head.lexical = True
            root_tree.head = head.symbol
            root_tree.spine_index = c_i - bad_offset
        try:
            addressbook[tuple(next_address)] = child
        except TypeError as e:
            # NOTE(review): leftover interactive-debugging hook; should be
            # replaced with a real error (or removed) before production use.
            import pdb
            pdb.set_trace()
        child.parent = root_tree.symbol
    # Drop the children marked bad above, compacting the list.
    root_tree.children = [child for c_i, child in enumerate(root_tree.children) if c_i not in marked_bad]
    if len(root_tree.children) == 0:
        return None, addressbook
    if all([child.complement for child in root_tree.children]):
        logger.warning("Condition: this subtree has no lexical items. "+
                       "This condition should be indicating co-heads. FIX")
        root_tree.head = None
    # Debugging stuff
    try:
        logger.debug(root_tree.head)
        logger.debug(root_tree.head is None)
        logger.debug(type(root_tree.head))
        logger.debug(root_tree.symbol)
        logger.debug(addressbook)
        assert root_tree.head is None or len(root_tree.head) > 0, type(root_tree)
    except AttributeError as e:
        logger.debug(root_tree)
        raise e
    return root_tree, addressbook
| [
"brian.c.mcmahan@gmail.com"
] | brian.c.mcmahan@gmail.com |
af9477decdec1d2bfb03a8a5787df8343ad1b196 | b8e9dd6fd8f8b691cba5a3af2388467bcf6c90bb | /samples/openapi3/client/petstore/python-experimental/petstore_api/model/integer_min15.py | e6dcb3af72d7dc36592959b07c480cccf1a37f3f | [
"Apache-2.0"
] | permissive | FallenRiteMonk/openapi-generator | f8b98940219eecf14dc76dced4b0fbd394522aa3 | b6576d11733ecad6fa4a0a616e1a06d502a771b7 | refs/heads/master | 2023-03-16T05:23:36.501909 | 2022-09-02T01:46:56 | 2022-09-02T01:46:56 | 164,609,299 | 0 | 0 | Apache-2.0 | 2019-01-08T09:08:56 | 2019-01-08T09:08:56 | null | UTF-8 | Python | false | false | 864 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from petstore_api import schemas # noqa: F401
class IntegerMin15(
    schemas.Int64Schema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    class MetaOapg:
        # Validation constraint: accepted values must be >= 15 (inclusive).
        inclusive_minimum = 15
| [
"noreply@github.com"
] | FallenRiteMonk.noreply@github.com |
f7a92fcc0db6370db2bf509626dc91e8f3bf07f4 | de8336cbcaa51a5156346a0f4513adf2ebc29fd3 | /api/admin.py | d8a324370e1d999628566cabd4dbbb8476718b36 | [] | no_license | kranthi0987/djangosample | 8219b09a0d16591f274864b1fdc04ce46a31ce8a | bbae4ab38279d275353a2deb40ab9964fc6f7216 | refs/heads/master | 2020-08-23T16:23:37.896331 | 2019-10-27T15:23:40 | 2019-10-27T15:23:40 | 216,661,294 | 0 | 1 | null | 2020-07-21T05:33:21 | 2019-10-21T20:39:34 | Python | UTF-8 | Python | false | false | 159 | py | from django.contrib import admin
from .models import Songs, DummyData
# Register the app's models so they are manageable in the Django admin site.
admin.site.register(Songs)
admin.site.register(DummyData)
| [
"kranthi0987@gmail.com"
] | kranthi0987@gmail.com |
2c1ff6bd6165f7ddf080a6f5e104a08aecb7e88e | a4b9550d36b82b0ad5d24db8c75ab0d49a8a0904 | /Electrum/asgi.py | 8e4fdffa274eed668b4c2051a8150d7f27767792 | [] | no_license | jithinvijayan007/Electrum-Assignment- | e84323a82b152fc051a3e981e793c83e9cb174bf | eb82195ebcb48a19b97738c77a30fc3307ca0514 | refs/heads/master | 2023-01-11T07:44:47.029271 | 2020-11-18T13:24:44 | 2020-11-18T13:24:44 | 313,941,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for Electrum project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Electrum.settings')
# Module-level ASGI callable exposed for application servers.
application = get_asgi_application()
| [
"jithinvijayan007@gmail.com"
] | jithinvijayan007@gmail.com |
63b3332617f7b78be4ecf567876be04b1a66db94 | b018b734af4170d34d28c474f68777597dba29ec | /venv/bin/pyrsa-decrypt | 6297436bdc29e64724b2cb09382dc7feaf97b4c6 | [] | no_license | abdulkhan94/BigDataTechnology | ae0b7f8c03831f07b791bc5898c2bb18a4c3fec5 | 7be6d3a13e8fd42d9592d7287d694d507f9070b5 | refs/heads/master | 2023-02-13T04:07:49.070798 | 2021-01-11T01:34:51 | 2021-01-11T01:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/abdullahkhan/PycharmProjects/CloudKhan/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import decrypt
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" wrapper suffix from argv[0] before
    # delegating to the rsa CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(decrypt())
| [
"abdullahn@gmail.com"
] | abdullahn@gmail.com | |
cfe2831a6522b71a9b9499c6a825bf3ae2606a14 | 7102ec163136e65c4da47658e669fde07521aaf1 | /app/setup.py | 2ac412c5412da5c4f619cae9a324bfdeedb2fd39 | [] | no_license | ghuntley/nomad-with-nix | 170d5ab571d9ae92fc1c420cd29ec94042e1c243 | bc5ff4afea2ed89074479c6ed2c39db1c577e062 | refs/heads/master | 2023-02-10T07:21:57.801694 | 2021-01-07T20:06:58 | 2021-01-07T20:07:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | from setuptools import setup, find_packages
setup(
    name="app",
    py_modules=[
        "main",
    ],
    # Runtime dependencies: Postgres drivers plus the FastAPI/uvicorn stack.
    install_requires=[
        "asyncpg",
        "databases",
        "fastapi",
        "psycopg2",
        "uvicorn",
    ],
    entry_points={
        # Expose uvicorn's CLI as a console script from this distribution.
        "console_scripts": ["uvicorn=uvicorn:main"],
    },
)
| [
"asko.soukka@iki.fi"
] | asko.soukka@iki.fi |
981a793fd86c41e5b30e11570db8718e3d216f27 | dd6ed4e1fa17ff9dd59116632964e2fad438bc83 | /eventframe/nodes/participant_list/forms.py | 7c00adf40e454b4c701b0bc744c66c54638c7838 | [] | no_license | elvisds/eventframe | 8ba3b6911ffad1d80b3c56eecf36d40c7ca3d1cc | 5a65c3671d1dea3967efdea4bf163f11bde39879 | refs/heads/master | 2021-01-15T18:04:59.943728 | 2013-08-15T18:11:39 | 2013-08-15T18:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | # -*- coding: utf-8 -*-
import flask.ext.wtf as wtf
from eventframe.forms import DictField
from eventframe.nodes.content import ContentForm
__all__ = ['ParticipantListForm']
class ParticipantListForm(ContentForm):
    """Form for configuring a participant-list node: where attendee data
    is fetched from and how each directory entry is rendered."""
    # Remote service the participant list is pulled from.
    source = wtf.SelectField(u"Data Source", choices=[
        ('', ''), ('doattend', 'DoAttend')],
        description=u"Source from which the participant list will be retrieved.")
    sourceid = wtf.TextField(u"Event id",
        description=u"Id of this event at the selected data source.")
    api_key = wtf.TextField(u"API Key",
        description=u"API key to retrieve data from the selected data source.")
    participant_template = wtf.TextField("Participant template",
        validators=[wtf.Required()], default='participant.html',
        description=u"Template with which a participant’s directory entry will be rendered.")
    properties = DictField(u"Properties")
| [
"kiran@hasgeek.com"
] | kiran@hasgeek.com |
e527dc3bc132a68f454bd616f68298778961bf12 | 84b05857cbe74d190bdbee18d442d0c720b1b84d | /Coderbyte_algorithms/Hard/BracketCombinations/test_BracketCombinations.py | bea5f4e5ce46337ed54f65b42d3923344d653408 | [] | no_license | JakubKazimierski/PythonPortfolio | 1c8c7e7b0f1358fc42a2295b807d0afafd8e88a3 | 3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f | refs/heads/master | 2023-06-01T01:16:22.897097 | 2023-05-15T01:05:22 | 2023-05-15T01:05:22 | 311,473,524 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | '''
Unittests for BracketCombinations.py
January 2021 Jakub Kazimierski
'''
import unittest
import BracketCombinations
class test_BracketCombinations(unittest.TestCase):
    '''
    Unit tests for the BracketCombinations module.
    '''
    # region Unittests
    def test_ExpectedOutput(self):
        '''
        BracketCombinations(3) is expected to return 5.
        '''
        output = BracketCombinations.BracketCombinations(3)
        self.assertEqual(output, 5)
    # endregion
if __name__ == "__main__":
    '''
    Main method for test cases.
    '''
    # Discover and run all test cases in this module via unittest's CLI.
    unittest.main()
"j.m.kazimierski@gmail.com"
] | j.m.kazimierski@gmail.com |
9f7f06c7fa42160e59e486c69ff7a5a5312517c4 | f35018f5d22eaafb31412430b49799dd11f15519 | /chapter01/scopes_example.py | 506dfb91634c847bf6c8d335ea95eacc0edee047 | [] | no_license | bambit21/packtpub---learning-python | 7bbaa6a510cdf6c01c0fe300aada53f7e2b1522c | 53cc65ed5b55c2bbd54388de631809016cfcd43a | refs/heads/master | 2020-05-29T15:37:00.636527 | 2018-07-05T20:08:28 | 2018-07-05T20:08:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | #Local versus Global
def local():
    # Assignment creates a binding local to this function; the module-level
    # m is unaffected (no `global` declaration here).
    m = 7
    print(m)
# Module-level (global) binding of m.
m = 5
print(m)  # prints 5
local()  # prints 7 (the function-local m)
"wielki.wilq@gmail.com"
] | wielki.wilq@gmail.com |
a610e3e7093a75d9b1c33e04c9d6bf1e7b38c66b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnmanhood.py | 1926b16301f2acfd7ae219bd15376ab3abf80e7b | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,315 | py | ii = [('BentJDO2.py', 2), ('EmerRN.py', 1), ('CookGHP3.py', 3), ('KembFFF.py', 4), ('GodwWSL2.py', 5), ('FerrSDO3.py', 1), ('WilbRLW.py', 1), ('WilbRLW4.py', 2), ('CookGHP.py', 3), ('MartHSI2.py', 1), ('WilkJMC3.py', 2), ('WilbRLW5.py', 3), ('PettTHE.py', 1), ('PeckJNG.py', 1), ('BailJD2.py', 1), ('ChalTPW2.py', 2), ('ClarGE2.py', 4), ('CarlTFR.py', 7), ('LyttELD.py', 3), ('CoopJBT2.py', 2), ('TalfTAC.py', 2), ('RoscTTI3.py', 4), ('CookGHP2.py', 2), ('KiddJAE.py', 3), ('BailJD1.py', 2), ('CoolWHM.py', 2), ('CrokTPS.py', 2), ('ClarGE.py', 12), ('LandWPA.py', 1), ('GilmCRS.py', 4), ('DibdTRL2.py', 1), ('AinsWRR.py', 1), ('CrocDNL.py', 1), ('WadeJEB.py', 3), ('TalfTIT.py', 3), ('GodwWLN.py', 1), ('CoopJBT.py', 3), ('SoutRD2.py', 2), ('BachARE.py', 3), ('SoutRD.py', 2), ('MereHHB3.py', 1), ('HowiWRL2.py', 5), ('BailJD3.py', 1), ('WilkJMC.py', 2), ('MartHRW.py', 2), ('MackCNH.py', 1), ('DequTKM.py', 1), ('EdgeMHT.py', 1), ('BowrJMM.py', 1), ('LyttELD3.py', 1), ('HallFAC.py', 5), ('FerrSDO.py', 1), ('StorJCC.py', 3), ('BabbCRD.py', 1), ('MackCNH2.py', 1), ('BellCHM.py', 1), ('WilbRLW3.py', 2), ('RogeSIP.py', 2), ('DibdTRL.py', 10), ('NortSTC.py', 4), ('SadlMLP2.py', 1), ('BowrJMM3.py', 1), ('TaylIF.py', 2), ('DibdTBR.py', 2), ('ChalTPW.py', 3), ('KeigTSS.py', 2), ('BentJDO.py', 1), ('ClarGE4.py', 4)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
134c09442a208868e04335c154fd1be6a4f089c0 | a41e7ac731210a0cb9d198029962a086dc6b4311 | /python/helpers/pycharm/lettuce_runner.py | 2f64afc956d9bdca0e3a13d8bf65680fceb4428b | [] | no_license | gencer/intellij-community | 19e7d2eafd9da1e3ad9bddd4253a0cc91a1271e9 | dc9043c92d20ef479ea8c0a9114479c2cfd1f95f | refs/heads/master | 2020-12-13T21:53:16.304289 | 2014-11-03T10:23:07 | 2014-11-03T10:23:07 | 15,880,732 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,779 | py | # coding=utf-8
"""
BDD lettuce framework runner
TODO: Support other params (like tags) as well.
Supports only 2 params now: folder to search "features" for or file and "-s scenario_index"
"""
import argparse
import os
import _bdd_utils
__author__ = 'Ilya.Kazakevich'
from lettuce.exceptions import ReasonToFail
import lettuce
from lettuce import core
class _LettuceRunner(_bdd_utils.BddRunner):
    """
    Lettuce runner (BddRunner for lettuce): bridges lettuce's before/after
    hooks to the BddRunner reporting callbacks (_test_started, _test_passed,
    _feature_or_scenario, ...).
    """
    def __init__(self, base_dir, what_to_run, scenarios):
        """
        :param scenarios scenario numbers to run
        :type scenarios list
        :param base_dir base directory to run tests in
        :type base_dir: str
        :param what_to_run folder or file to run
        :type what_to_run str
        """
        super(_LettuceRunner, self).__init__(base_dir)
        # Lettuce accepts the scenario filter as a comma-separated string.
        self.__runner = lettuce.Runner(what_to_run, ",".join(scenarios))
    def _get_features_to_run(self):
        """Collect the Feature objects this run will execute, applying the
        scenario-number filter when one was given."""
        super(_LettuceRunner, self)._get_features_to_run()
        features = []
        if self.__runner.single_feature: # We need to run one and only one feature
            features = [core.Feature.from_file(self.__runner.single_feature)]
        else:
            # Find all features in dir
            for feature_file in self.__runner.loader.find_feature_files():
                feature = core.Feature.from_file(feature_file)
                assert isinstance(feature, core.Feature), feature
                # TODO: cut out due to https://github.com/gabrielfalcao/lettuce/issues/451 Fix when this issue fixed
                feature.scenarios = filter(lambda s: not s.outlines, feature.scenarios)
                if feature.scenarios:
                    features.append(feature)
        # Choose only selected scenarios
        if self.__runner.scenarios:
            for feature in features:
                filtered_feature_scenarios = []
                for index in [i - 1 for i in self.__runner.scenarios]: # decrease index by 1
                    if index < len(feature.scenarios):
                        filtered_feature_scenarios.append(feature.scenarios[index])
                feature.scenarios = filtered_feature_scenarios
        return features
    def _run_tests(self):
        """Install reporting hooks and hand control to lettuce."""
        super(_LettuceRunner, self)._run_tests()
        self.__install_hooks()
        self.__runner.run()
    def __step(self, is_started, step):
        """
        Reports step start / stop
        :type step core.Step
        :param step: step
        """
        test_name = step.sentence
        if is_started:
            self._test_started(test_name, step.described_at)
        elif step.passed:
            self._test_passed(test_name)
        elif step.failed:
            reason = step.why
            assert isinstance(reason, ReasonToFail), reason
            self._test_failed(test_name, message=reason.exception, details=reason.traceback)
        elif step.has_definition:
            # Defined but neither passed nor failed -> report as skipped.
            self._test_skipped(test_name, "In lettuce, we do know the reason", step.described_at)
        else:
            self._test_undefined(test_name, step.described_at)
    def __install_hooks(self):
        """
        Installs required hooks
        """
        # Install hooks
        lettuce.before.each_feature(
            lambda f: self._feature_or_scenario(True, f.name, f.described_at))
        lettuce.after.each_feature(
            lambda f: self._feature_or_scenario(False, f.name, f.described_at))
        lettuce.before.each_scenario(
            lambda s: self.__scenario(True, s))
        lettuce.after.each_scenario(
            lambda s: self.__scenario(False, s))
        lettuce.before.each_background(
            lambda b, *args: self._background(True, b.feature.described_at))
        lettuce.after.each_background(
            lambda b, *args: self._background(False, b.feature.described_at))
        lettuce.before.each_step(lambda s: self.__step(True, s))
        lettuce.after.each_step(lambda s: self.__step(False, s))
    def __scenario(self, is_started, scenario):
        """
        Reports scenario launched
        :type scenario core.Scenario
        :param scenario: scenario
        """
        if scenario.outlines:
            scenario.steps = [] # Clear to prevent running. TODO: Fix when this issue fixed
            scenario.background = None # TODO: undocumented
            return
        self._feature_or_scenario(is_started, scenario.name, scenario.described_at)
if __name__ == "__main__":
    # Run configuration (base dir, scenario numbers, targets) comes from
    # environment variables via _bdd_utils.get_what_to_run_by_env.
    (base_dir, scenarios, what_to_run) = _bdd_utils.get_what_to_run_by_env(os.environ)
    if len(what_to_run) > 1:
        raise Exception("Lettuce can't run more than one file now")
    _bdd_utils.fix_win_drive(what_to_run[0])
    _LettuceRunner(base_dir, what_to_run[0], scenarios).run()
"Ilya.Kazakevich@jetbrains.com"
] | Ilya.Kazakevich@jetbrains.com |
3afd486fd50e5acb41db7c6f19412760f26eb79e | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/757-tideGauge.py | b5cc5d83438152989fa3ddf082f55d7b833dd4b9 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
    """For each tide-gauge folder in dir_in, merge the slp/wnd_u/wnd_v
    predictor CSVs on their 'date' column and write one combined CSV
    (prefixed with the gauge index) into dir_out.

    NOTE(review): x/y hard-code a single gauge index (757); this file looks
    like one instance of a per-gauge batch of scripts — confirm before reuse.
    """
    os.chdir(dir_in)
    #get names
    tg_list_name = os.listdir()
    x = 757
    y = 758
    for tg in range(x, y):
        # Return to the root each iteration so the relative chdir below works.
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')
        #looping through each TG folder
        os.chdir(tg_name)
        #check for empty folders
        if len(os.listdir()) == 0:
            continue
        #defining the path for each predictor
        where = os.getcwd()
        csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
                    "wnd_u": os.path.join(where, 'wnd_u.csv'),\
                    'wnd_v' : os.path.join(where, 'wnd_v.csv')}
        first = True
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            #read predictor
            pred = pd.read_csv(csv_path[pr])
            #remove unwanted columns
            pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
            #sort based on date as merra files are scrambled
            pred.sort_values(by = 'date', inplace=True)
            #give predictor columns a name (prefix with the predictor key)
            pred_col = list(pred.columns)
            for pp in range(len(pred_col)):
                if pred_col[pp] == 'date':
                    continue
                pred_col[pp] = pr + str(pred_col[pp])
            pred.columns = pred_col
            #merge all predictors
            if first:
                pred_combined = pred
                first = False
            else:
                pred_combined = pd.merge(pred_combined, pred, on = 'date')
        #saving pred_combined
        os.chdir(dir_out)
        tg_name = str(tg)+"_"+tg_name;
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')
#run script
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.