hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa76e89131f556e4b895bbde1b48b9a5fe172a26 | 1,278 | py | Python | codeforces/binarySearch二分搜索/1400/1284B上升组合.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/binarySearch二分搜索/1400/1284B上升组合.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | codeforces/binarySearch二分搜索/1400/1284B上升组合.py | yofn/pyacm | e573f8fdeea77513711f00c42f128795cbba65a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1284/B
#记录每个序列是否有上升,如果没有min.max是多少..
#n=1e5,还得避免O(N**2)..
#上升情况分解
#情况1: sx+sy中sx或sy本身是上升的
#情况2: sx,sy都不上升,判断四个极值的情况(存在几个上升)
#DP..增量计数; f(n+1)=f(n)+X; X=??
# 如果s本身上升,X=(2n+1)
# 如果s本身不升,拿s的min/max去一个数据结构去检查(min/max各一个?)..(低于线性..binary search??)
# ..
def bs(k, li):
    """Return the number of elements of sorted list `li` that are <= k.

    Equivalent to bisect.bisect_right(li, k): the result is the first
    index whose element is strictly greater than k.  `li` must be
    non-empty and sorted ascending.
    """
    lo, hi = 0, len(li) - 1
    # Fast paths: every element is greater than k, or none is.
    if li[lo] > k:
        return 0
    if li[hi] <= k:
        return len(li)
    # Loop invariant: li[lo] <= k < li[hi].
    while hi - lo >= 2:
        mid = lo + ((hi - lo) >> 1)  # midpoint, biased toward lo
        if li[mid] > k:
            hi = mid
        else:
            lo = mid
    return hi
# Read the number of sequences, then each sequence as "<len> <v1> ... <vlen>".
n = int(input())
sll = [list(map(int,input().split())) for _ in range(n)]
# For every sequence that contains no internal ascent (no later element
# greater than an earlier one), record its min and max values.
minl = []
maxl = []
for sl in sll:
    nn = sl[0]  # declared length of this sequence
    m = sl[1]   # running "previous" value while scanning for an ascent
    ascent = False
    if nn==1:
        # A single-element sequence can never contain an ascent.
        minl.append(m)
        maxl.append(m)
        continue
    for i in range(2,nn+1):
        if sl[i] > m:
            ascent = True
            break
        else:
            m = sl[i]
    if not ascent: # count non-ascenting
        minl.append(min(sl[1:]))
        maxl.append(max(sl[1:]))
# A concatenation sx+sy is ascent-free only if both halves are ascent-free
# and max(sy) <= min(sx).  bs(m, maxs) counts maxima <= m, so the sum is
# the number of ascent-free pairs; subtract from the n*n total pairs.
maxs = sorted(maxl) #mins = sorted(minl)
cnt = sum([bs(m,maxs) for m in minl]) # m+maxs, counting non-ascending (m >= maxs)
print(n*n-cnt)
| 25.058824 | 84 | 0.517997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.442779 |
aa7b7c8babb6becd9e4e564c3c915d139ea9f52e | 50 | py | Python | tests/regressiontests/auth_decorators/__init__.py | mdornseif/huDjango | ce6f256b2e3e9fec4af749aebcae81a973e8874b | [
"BSD-2-Clause"
] | null | null | null | tests/regressiontests/auth_decorators/__init__.py | mdornseif/huDjango | ce6f256b2e3e9fec4af749aebcae81a973e8874b | [
"BSD-2-Clause"
] | 1 | 2017-02-16T16:07:03.000Z | 2017-02-17T09:49:20.000Z | tests/regressiontests/auth_decorators/__init__.py | hudora/huDjango | c99ce38517d706973df8e97df48274ab9392f52e | [
"BSD-2-Clause"
] | null | null | null | """Test hudjango.auth.decorator functionality."""
| 25 | 49 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.98 |
aa7c2fe26948a573c0ecf561cfcced69727cd5a6 | 3,672 | py | Python | examples/ttt_wm_vs_human.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 10 | 2019-11-01T01:09:57.000Z | 2022-02-17T09:15:12.000Z | examples/ttt_wm_vs_human.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 40 | 2019-08-06T18:01:31.000Z | 2021-07-15T12:38:56.000Z | examples/ttt_wm_vs_human.py | pearlfranz20/AL_Core | 6592079330c7ec3ca264b86f8414970ddab06c0e | [
"MIT"
] | 6 | 2019-08-15T01:45:19.000Z | 2021-06-01T19:54:29.000Z | from apprentice.agents import SoarTechAgent
from apprentice.working_memory import ExpertaWorkingMemory
from apprentice.working_memory.representation import Sai
# from apprentice.learners.when_learners import q_learner
from ttt_simple import ttt_engine, ttt_oracle
def get_user_demo():
    """Prompt the human for a tic-tac-toe move and wrap it as a Sai demo.

    Relies on module-level globals: `game` (the current board/oracle) and
    `Sai`.  Loops until the user enters a parseable "row,col" pair.
    """
    print()
    print("Current Player: " + game.current_player)
    print(game)
    print("Don't know what to do.")
    print("Please provide example of correct behavior.")
    print()
    while True:
        try:
            loc = input("Enter move as row and column integers "
                        "(e.g., 1,2):")
            loc = loc.split(',')
            row = int(loc[0])
            col = int(loc[1])
            player = game.current_player
            break
        except Exception:
            # Malformed input (non-integer, missing comma, ...): re-prompt.
            print("error with input, try again.")
    return Sai(None, "move", {"row": row, "col": col, "player": player})
if __name__ == "__main__":
    # Build the agent: an Experta knowledge engine as working memory,
    # wrapped by a SoarTech agent that learns from rewards and demos.
    # with experta knowledge engine
    wm1 = ExpertaWorkingMemory(ke=ttt_engine())
    a1 = SoarTechAgent(
        # wm=wm1, when=q_learner.QLearner(func=q_learner.Cobweb, q_init=0.0)
        feature_set=[], function_set=[],
        wm=wm1,
        epsilon=0.5,
        # when=q_learner.QLearner(func=q_learner.LinearFunc, q_init=0.0),
        negative_actions=True,
        action_penalty=0.0
    )
    new_game = True
    while new_game:
        # One game per iteration: the agent plays X, the human plays O.
        game = ttt_oracle()
        winner = False
        last_state = None
        last_sai = None
        user_demo = False
        while not winner:
            print()
            print("Current Player: " + game.current_player)
            print(game)
            state = game.as_dict()
            # pprint(state)
            if game.current_player == "X":
                # Reward the agent's previous action: 0 for an ordinary
                # move, 1 as a bonus when it came from a user demonstration.
                if (last_state is not None and last_sai is not None and not
                        user_demo):
                    a1.train(last_state, state, last_sai, 0.0, "", [""])
                elif (last_state is not None and last_sai is not None and
                        user_demo):
                    print('providing bonus reward for user demo!')
                    a1.train(last_state, state, last_sai, 1.0, "", [""])
                last_state = state
                sai = a1.request(state)
                if not isinstance(sai, Sai):
                    # The agent had no applicable action: ask the human.
                    sai = get_user_demo()
                    user_demo = True
                else:
                    user_demo = False
                last_sai = sai
                getattr(game, sai.action)(**sai.input)
                print("AI's move", sai)
            else:
                # Human's turn: keep prompting until the move parses and
                # is accepted by the game.
                while True:
                    try:
                        loc = input("Enter move as row and column integers "
                                    "(e.g., 1,2):")
                        loc = loc.split(',')
                        row = int(loc[0])
                        col = int(loc[1])
                        player = game.current_player
                        game.move(row, col, player)
                        break
                    except Exception:
                        print("error with input, try again.")
            winner = game.check_winner()
        # Terminal reward: +1 for an agent (X) win, -1 for a loss, 0 otherwise.
        if winner == "X":
            a1.train(last_state, None, last_sai, 1.0, "", [""])
        elif winner == "O":
            a1.train(last_state, None, last_sai, -1.0, "", [""])
        else:
            a1.train(last_state, None, last_sai, 0, "", [""])
        print("WINNER = ", winner)
        print(game)
        print()
        new_game = True
        # new = input("Play again? Press enter to continue or type 'no' to"
        #             " stop.")
        # new_game = new == ""
aa7c9e46ce390c33e4950020c5b8e38f18d9c7b1 | 2,905 | py | Python | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | pic_carver.py | volf52/black_hat_python | 063c241db473d3ef25782efc17f651c3aa66b4f8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
@author : 'Muhammad Arslan <rslnrkmt2552@gmail.com>'
"""
import re
import zlib
import cv2
from scapy.all import *
# Output directories and input capture file.
# NOTE(review): "pictues" is misspelled but used consistently below, so
# "fixing" it would change the output directory name at runtime.
pics = "pictues"
faces_dir = "faces"
pcap_file = "bhp.pcap"
def get_http_headers(http_payload):
    """Parse the header section of an HTTP payload into a dict.

    Returns a dict of header-name -> value, or None when the payload has
    no header/body separator or cannot be parsed.
    """
    try:
        # Header block ends at the blank line; +2 keeps the trailing \r\n
        # of the last header so the regex matches every header line.
        headers_raw = http_payload[:http_payload.index("\r\n\r\n") + 2]
        # Bug fix: the original pattern used (?P<'name>...), an invalid
        # group name that raised re.error (swallowed by the except).
        headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
    except Exception:
        return None
    # Bug fix: the original fell off the end and always returned None,
    # so the caller's `if headers is None: continue` skipped every session.
    return headers
def extract_images(headers, http_payload):
    """Pull a decoded image body out of an HTTP response payload.

    Returns (image, image_type) when the Content-Type is an image
    (decompressing gzip/deflate bodies when indicated), otherwise
    (None, None).  Any parsing failure also yields (None, None).
    """
    image = None
    image_type = None
    try:
        content_type = headers['Content-Type']
        if "image" in content_type:
            # e.g. "image/png" -> "png"
            image_type = content_type.split('/')[1]
            # Body starts just past the blank line separating the headers.
            image = http_payload[http_payload.index('\r\n\r\n') + 4:]
            try:
                encoding = headers.get("Content-Encoding")
                if encoding == 'gzip':
                    # 16 + MAX_WBITS tells zlib to expect a gzip wrapper.
                    image = zlib.decompress(image, 16 + zlib.MAX_WBITS)
                elif encoding == "deflate":
                    image = zlib.decompress(image)
            except:
                # Best-effort: keep the raw (possibly compressed) body.
                pass
    except:
        return None, None
    return image, image_type
def face_detect(path, filename):
    """Detect faces in the image at `path`, draw boxes, and save a copy.

    Returns True and writes an annotated image into `faces_dir` when at
    least one face is found; False otherwise.  Uses module globals
    `faces_dir` and `pcap_file` to build the output file name.
    """
    img = cv2.imread(path)
    # NOTE(review): cv2.cv.CV_HAAR_SCALE_IMAGE is the OpenCV 2.x constant;
    # on OpenCV 3+/4 the modern spelling is cv2.CASCADE_SCALE_IMAGE --
    # confirm the target OpenCV version.
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))
    if len(rects) == 0:
        return False
    # detectMultiScale returns (x, y, w, h); convert to (x1, y1, x2, y2).
    rects[:, 2:] += rects[:, :2]
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
    # Bug fix: the format string was "%s/$s-%s" ("$s" typo) -- with three
    # arguments and only two conversions it raised "not all arguments
    # converted during string formatting" on every detection.
    cv2.imwrite("%s/%s-%s" % (faces_dir, pcap_file, filename), img)
    return True
def http_assembler(pcap_file):
    """Reassemble HTTP streams from a pcap, carve images, detect faces.

    Returns (carved_images, faces_detected) counts.  Depends on scapy's
    rdpcap/TCP and the sibling helpers get_http_headers, extract_images
    and face_detect, plus the module-level `pics` output directory.
    """
    carved_images = 0
    faces_detected = 0
    a = rdpcap(pcap_file)
    sessions = a.sessions()
    for session in sessions:
        # Concatenate the TCP payloads of every port-80 packet in order.
        http_payload = ""
        for packet in sessions[session]:
            try:
                if packet[TCP].dport == 80 or packet[TCP].sport == 80:
                    http_payload += str(packet[TCP].payload)
            except:
                # Non-TCP packets raise on packet[TCP]; skip them.
                pass
        headers = get_http_headers(http_payload)
        if headers is None:
            continue
        # Bug fix: the original called a nonexistent extract_image();
        # the helper is named extract_images, so this line raised
        # NameError as soon as a session had parseable headers.
        image, image_type = extract_images(headers, http_payload)
        if image is not None and image_type is not None:
            file_name = "%s-pic_carver_%d.%s" % (pcap_file, carved_images, image_type)
            with open("%s/%s" % (pics, file_name), "wb") as fd:
                fd.write(image)
            carved_images += 1
            try:
                result = face_detect("%s/%s" % (pics, file_name), file_name)
                if result is True:
                    faces_detected += 1
            except:
                # Face detection is best-effort; a failure must not stop carving.
                pass
    return carved_images, faces_detected
# Entry point: carve images from the capture and report totals.
# NOTE: Python 2 print statements -- this script predates Python 3.
carved_images, faces_detected = http_assembler(pcap_file)
print "Extracted: %d images" % carved_images
print "Detected: %d faces" % faces_detected
| 25.9375 | 87 | 0.571084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.137349 |
aa7e5a273a38c1c336f9c6538edfafbe1859aa62 | 1,536 | py | Python | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | py/testdir_hosts/test_rf_311M_rows_hosts.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | [
"Apache-2.0"
] | null | null | null | import unittest, sys, time
sys.path.extend(['.','..','py'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
# Uses your username specific json: pytest_config-<username>.json
# copy pytest_config-simple.json and modify to your needs.
class Basic(unittest.TestCase):
    """Cloud RF smoke test: parse a 311M-row gzip CSV and run random forest."""

    def tearDown(self):
        # Fail the test if H2O logged errors during the run.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Builds an H2O cloud using the user's pytest_config-<username>.json.
        h2o_hosts.build_cloud_with_hosts()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_rf_311M_rows_hosts(self):
        # since we'll be waiting, pop a browser
        # h2b.browseTheCloud()
        importFolderPath = 'standard'
        csvFilename = 'new-poker-hand.full.311M.txt.gz'
        csvPathname = importFolderPath + "/" + csvFilename
        # Two trials: exercise parse + RF twice on the same dataset.
        for trials in range(2):
            parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
                timeoutSecs=500)
            print csvFilename, 'parse time:', parseResult['response']['time']
            print "Parse result['destination_key']:", parseResult['destination_key']
            inspect = h2o_cmd.runInspect(None,parseResult['destination_key'])
            print "\n" + csvFilename
            start = time.time()
            RFview = h2o_cmd.runRF(parseResult=parseResult, trees=5, depth=5,
                timeoutSecs=600, retryDelaySecs=10.0)
            print "RF end on ", csvFilename, 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main()
| 35.72093 | 108 | 0.647786 | 1,230 | 0.800781 | 0 | 0 | 150 | 0.097656 | 0 | 0 | 408 | 0.265625 |
aa7f45d29566c608c5cb4116ab2b18d2257d27b0 | 1,229 | py | Python | .ipynb_checkpoints/config-checkpoint.py | BillKiller/ECG_shandong | bb73f3c5eccfe68badf0e783ca1305783ceeb0fc | [
"Apache-2.0"
] | null | null | null | .ipynb_checkpoints/config-checkpoint.py | BillKiller/ECG_shandong | bb73f3c5eccfe68badf0e783ca1305783ceeb0fc | [
"Apache-2.0"
] | null | null | null | .ipynb_checkpoints/config-checkpoint.py | BillKiller/ECG_shandong | bb73f3c5eccfe68badf0e783ca1305783ceeb0fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@time: 2019/9/8 18:45
@ author: javis
'''
import os
class Config:
    """Hyper-parameters and paths for the ECG training/testing pipeline."""
    # for data_process.py
    #root = r'D:\ECG'
    root = r'data'
    train_dir = os.path.join(root, 'ecg_data/')
    # test_dir = os.path.join(root, 'ecg_data/testA')
    # train_label = os.path.join(root, 'hf_round1_label.txt')
    # test_label = os.path.join(root, 'hf_round1_subA.txt')
    # arrythmia = os.path.join(root, 'hf_round1_arrythmia.txt')
    train_data = os.path.join(root, 'ecg_data')

    # for train
    # name of the model to train
    model_name = 'resnet50'
    # epochs at which training advances to the next stage (lr is adjusted)
    stage_epoch = [32, 64,128]
    # batch size during training
    batch_size = 128
    # number of label classes
    num_classes = 18
    # maximum number of training epochs
    max_epoch = 128
    # target sampling length of the input signal
    target_point_num = 2048 * 5
    # directory where model checkpoints are saved
    ckpt = 'ckpt/'
    # directory where submission files are saved
    sub_dir = 'submit'
    # initial learning rate
    lr = 1e-3
    # k-fold tag used in file naming (empty = none) -- NOTE(review): its
    # exact use is not visible in this file; confirm against the trainer.
    kfold = ""
    # filename for the current epoch's weights
    current_w = 'current_w.pth'
    # filename for the best weights so far
    best_w = 'best_w.pth'
    # learning-rate decay factor: lr /= lr_decay
    lr_decay = 10

    # for test
    temp_dir=os.path.join(root,'temp')

    # SiT (transformer) hyper-parameters
    patch_size = 8
    dim = 256
    mlp_dim = 512
    dropout = 0.3
    head_num = 8
    depth = 12
    heads = 8
config = Config()
| 19.822581 | 63 | 0.598861 | 1,308 | 0.92569 | 0 | 0 | 0 | 0 | 0 | 0 | 776 | 0.549186 |
aa7ffb92cb8baa6e29735ecc7cc7fe43b3457d06 | 974 | py | Python | integration-tests/steps/test_update_stack.py | rootifera/sceptre | 0a1aeb8305c38dbd5c6c6cc6ec0cae4d468abdcf | [
"Apache-2.0"
] | 2 | 2021-05-16T09:43:29.000Z | 2022-03-15T10:21:54.000Z | integration-tests/steps/test_update_stack.py | joseroubert08/sceptre | 652b844ce69aecfe0ed31d918a6b49fd954a23bd | [
"Apache-2.0"
] | null | null | null | integration-tests/steps/test_update_stack.py | joseroubert08/sceptre | 652b844ce69aecfe0ed31d918a6b49fd954a23bd | [
"Apache-2.0"
] | null | null | null | from behave import *
import subprocess
import os
import boto3
@when("the stack config is changed")
def step_impl(context):
    """Edit config/test-env/a/vpc.yaml so it references the updated template."""
    # Get config file path (resolved relative to this steps/ directory).
    vpc_config_file = os.path.abspath(os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        "config",
        "test-env",
        "a",
        "vpc.yaml"
    ))
    with open(vpc_config_file, "r+") as f:
        config = f.read()
    # Swap the template reference so the next update actually changes the stack.
    config = config.replace("vpc.py", "updated_vpc.py")
    with open(vpc_config_file, "w") as f:
        f.write(config)
@when("we run update stack")
def step_impl(context):
    """Invoke the sceptre CLI to update the test-env/a vpc stack."""
    subprocess.call(["sceptre", "update-stack", "test-env/a", "vpc"])
@then("the stack is updated")
def step_impl(context):
    """Assert via CloudFormation that the stack reached UPDATE_COMPLETE."""
    client = boto3.client("cloudformation")
    response = client.describe_stacks(
        StackName="{0}-{1}-vpc".format(
            context.project_code, context.environment_path_a
        )
    )
    assert response["Stacks"][0]["StackStatus"] == "UPDATE_COMPLETE"
| 25.631579 | 69 | 0.63039 | 0 | 0 | 0 | 0 | 903 | 0.927105 | 0 | 0 | 263 | 0.270021 |
aa80d9ba0e36ab051a2ec4dfeed5a0569841b3bc | 840 | py | Python | test.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | test.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | test.py | ask-santosh/Document-Matching | 2b5a1be3e8e460029121e43b16fc676ed3874094 | [
"Apache-2.0"
] | null | null | null | import cv2
import matplotlib.pyplot as plt
import easyocr
reader = easyocr.Reader(['en'], gpu=False)
image = cv2.imread('results/JK_21_05/page_1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dilated = cv2.dilate(image, None, iterations=1)
eroded = cv2.erode(image, None, iterations=1)
res = reader.readtext(eroded)
cv2.imshow('s', eroded)
cv2.waitKey(0)
cv2.destroyAllWindows()
# for response in res:
# print(res)
for (bbox, text, prob) in res:
# unpack the bounding box
(tl, tr, br, bl) = bbox
tl = (int(tl[0]), int(tl[1]))
tr = (int(tr[0]), int(tr[1]))
br = (int(br[0]), int(br[1]))
bl = (int(bl[0]), int(bl[1]))
cv2.rectangle(eroded, tl, br, (0, 255, 0), 2)
cv2.putText(eroded, text, (tl[0], tl[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
cv2.imshow("Image", eroded)
cv2.waitKey(0) | 31.111111 | 97 | 0.646429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.12619 |
aa819751141039d158103e1e22f76244cbd9c7bf | 874 | py | Python | configs/_base_/models/x3d.py | ptoupas/mmaction2 | 1e1911295b63cffeba4c6f4809cb74d291c4505b | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/x3d.py | ptoupas/mmaction2 | 1e1911295b63cffeba4c6f4809cb74d291c4505b | [
"Apache-2.0"
] | null | null | null | configs/_base_/models/x3d.py | ptoupas/mmaction2 | 1e1911295b63cffeba4c6f4809cb74d291c4505b | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='Recognizer3D',
backbone=dict(type='X3D', frozen_stages = -1, gamma_w=1, gamma_b=2.25, gamma_d=2.2),
cls_head=dict(
type='X3DHead',
in_channels=432,
num_classes=400,
multi_class=False,
spatial_type='avg',
dropout_ratio=0.7,
fc1_bias=False),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
# model = dict(
# type='Recognizer3D',
# backbone=dict(type='X3D', frozen_stages = 0, gamma_w=2, gamma_b=2.25, gamma_d=5),
# cls_head=dict(
# type='X3DHead',
# in_channels=864,
# num_classes=7,
# spatial_type='avg',
# dropout_ratio=0.6,
# fc1_bias=False),
# # model training and testing settings
# train_cfg=None,
# test_cfg=dict(average_clips='prob'))
| 29.133333 | 88 | 0.606407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.576659 |
aa84562ff31e7d467b614463b77b32138e9f4492 | 372 | py | Python | amktools/util.py | jimbo1qaz/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | 2 | 2020-03-14T06:13:03.000Z | 2022-03-03T17:53:51.000Z | amktools/util.py | nyanpasu64/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | 14 | 2018-06-19T14:48:58.000Z | 2018-10-28T07:02:27.000Z | amktools/util.py | jimbo1qaz/amktools | 25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c | [
"BSD-3-Clause"
] | null | null | null | from typing import TypeVar, Optional
def ceildiv(n: int, d: int) -> int:
    """Integer ceiling division: the smallest integer >= n / d."""
    quotient, remainder = divmod(n, d)
    # Floor division plus one whenever the division was not exact.
    return quotient + (1 if remainder else 0)
T = TypeVar("T")


def coalesce(*args: Optional[T]) -> T:
    """Return the first argument that is not None.

    Raises TypeError when called with no arguments, or when every
    argument is None.
    """
    if not args:
        raise TypeError("coalesce expected >=1 argument, got 0")
    for candidate in args:
        if candidate is not None:
            return candidate
    raise TypeError("coalesce() called with all None")
| 20.666667 | 64 | 0.594086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.201613 |
aa8496130dc2cec7262e9b446640da8f5aad4fbe | 1,589 | py | Python | tello_detection_v2.py | m0dzi77a/jetson-nano-drone-surveillance | a3846e1e7b9d1656f5fada8fabc4b53e5264973e | [
"MIT"
] | null | null | null | tello_detection_v2.py | m0dzi77a/jetson-nano-drone-surveillance | a3846e1e7b9d1656f5fada8fabc4b53e5264973e | [
"MIT"
] | null | null | null | tello_detection_v2.py | m0dzi77a/jetson-nano-drone-surveillance | a3846e1e7b9d1656f5fada8fabc4b53e5264973e | [
"MIT"
] | null | null | null | from djitellopy import Tello
import cv2, math
import numpy as np
import jetson.inference
import jetson.utils
from threading import Thread
import time
# Load the detection network and connect to the Tello's video stream.
net = jetson.inference.detectNet("ssd-mobilenet-v1", threshold=0.5) #facenet is working; ssd-mobilenet-v1
drohne = Tello()
drohne.connect()
print(drohne.get_battery())
drohne.streamon()
frame_read = drohne.get_frame_read()
# Initial frame/key values; both are refreshed in the main display loop.
img = frame_read.frame
img = cv2.resize(img, (480, 360)) #360,240 480,360
key = cv2.waitKey(1) & 0xff
def move():
    """Background control loop mapping the last pressed key to a Tello command.

    Reads the module-global `key` (updated by the main display loop's
    cv2.waitKey) and the global `drohne`.  Exits the loop and lands the
    drone when ESC (27) is pressed.
    NOTE(review): the loop has no sleep, so it spins at full speed and may
    re-issue the same command many times per keypress -- confirm intended.
    """
    while True:
        if key == 27:
            break
        elif key == ord('t'):
            drohne.takeoff()
        elif key == ord('e'):
            drohne.rotate_clockwise(30)
        elif key == ord('r'):
            drohne.move_up(30)
        elif key == ord('w'):
            drohne.move_forward(30)
        elif key == ord('s'):
            drohne.move_back(30)
        elif key == ord('a'):
            drohne.move_left(30)
        elif key == ord('d'):
            drohne.move_right(30)
        elif key == ord('q'):
            drohne.rotate_counter_clockwise(30)
        elif key == ord('f'):
            drohne.move_down(30)
    drohne.land()
if __name__ == "__main__":
    # Run the keyboard control loop in a daemon thread so the main thread
    # can stream frames and run detection concurrently.
    t1 = Thread(target = move)
    t1.setDaemon(True)
    t1.start()
    while True:
        img = frame_read.frame
        #img = cv2.resize(img, (480, 360)) #360,240 480,360
        # Convert BGR -> RGBA float32 and upload to CUDA for detectNet,
        # then convert back to BGR uint8 for display.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2RGBA).astype(np.float32)
        img = jetson.utils.cudaFromNumpy(img)
        detections = net.Detect(img)
        img = jetson.utils.cudaToNumpy(img)
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB).astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        cv2.imshow("Drone Surveillance", img)
        # Updates the module-global `key` read by the move() thread.
        key = cv2.waitKey(1) & 0xff
        pass
| 24.828125 | 105 | 0.684707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.112649 |
aa84e85a9b53b2b65be434c51bb6eb739b861665 | 3,638 | py | Python | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | 1 | 2020-12-11T10:45:58.000Z | 2020-12-11T10:45:58.000Z | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | null | null | null | sql_judge/model/load_types.py | r4ulill0/gui_dbjudge | 7c421c6f2fe1281f95e624242338b3e020ee5f28 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QAbstractTableModel, QAbstractItemModel
from PyQt5.QtCore import Qt, QModelIndex, pyqtSlot
class LoadTypesProcess(QAbstractTableModel):
    """Editable table model holding CSV cell values, with a paired header model.

    Cells are stored row-major in `csv_values` (list of lists of strings).
    setData grows the table on demand so edits outside the current bounds
    are accepted rather than raising IndexError.
    """

    def __init__(self):
        super().__init__()
        self.csv_values = []  # row-major cell storage
        self.header_model = HeaderModel()

    def index(self, row, column, parent=QModelIndex()):
        return self.createIndex(row, column)

    def rowCount(self, parent=QModelIndex()):
        return len(self.csv_values)

    def columnCount(self, parent=QModelIndex()):
        # All rows share the first row's width; 0 when the table is empty.
        count = 0
        if len(self.csv_values):
            count = len(self.csv_values[0])
        return count

    def data(self, index, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return self.csv_values[index.row()][index.column()]

    def setData(self, index, value, role=Qt.EditRole):
        if index.isValid() and role == Qt.EditRole:
            # Grow columns/rows on demand so out-of-bounds edits succeed.
            if index.column() >= self.columnCount():
                self.insertColumns(index.column(), 1)
            if index.row() >= self.rowCount():
                self.insertRows(index.row(), 1)
            self.csv_values[index.row()][index.column()] = value
            self.dataChanged.emit(index, index)
            return True
        return False

    def flags(self, index):
        return QAbstractTableModel.flags(self, index) | Qt.ItemIsEditable

    def insertRows(self, position, rows, index=QModelIndex()):
        # NOTE(review): new rows are appended at the end regardless of
        # `position`; only setData's grow-at-the-end path relies on this --
        # confirm no caller inserts mid-table.
        self.beginInsertRows(index, position, position+rows-1)
        for _ in range(rows):
            new_row = []
            for _ in range(self.columnCount()):
                new_row.append("")
            self.csv_values.append(new_row)
        self.endInsertRows()
        return True

    def insertColumns(self, position, columns, index=QModelIndex()):
        # NOTE(review): like insertRows, columns are appended at the end.
        self.beginInsertColumns(index, position, position+columns-1)
        for row in self.csv_values:
            for _ in range(columns):
                row.append("")
        self.endInsertColumns()
        return True

    def removeRows(self, position, rows, index=QModelIndex()):
        self.beginRemoveRows(index, position, position+rows-1)
        # Pop from the highest index first so earlier pops don't shift targets.
        for row in reversed(range(position, position+rows)):
            self.csv_values.pop(row)
        self.endRemoveRows()
        return True  # Qt convention: report success, matching insertRows

    def removeColumns(self, position, columns, index=QModelIndex()):
        self.beginRemoveColumns(index, position, position+columns-1)
        for row in self.csv_values:
            for column in reversed(range(position, position+columns)):
                row.pop(column)
        # Bug fix: the original never called endRemoveColumns(), which Qt
        # requires after beginRemoveColumns(); attached views were left in
        # an inconsistent state after any column removal.
        self.endRemoveColumns()
        return True
class HeaderModel(QAbstractItemModel):
    """Single-row model holding the column header labels for LoadTypesProcess."""

    def __init__(self):
        super().__init__()
        self.values = []  # one label per column

    def index(self, row, column, parent=QModelIndex()):
        return self.createIndex(row, column)

    def columnCount(self, parent=QModelIndex()):
        return len(self.values)

    def rowCount(self, parent=QModelIndex()):
        # The model represents a single header row.
        return 1

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if role == Qt.DisplayRole:
            return self.values[section]

    def setHeaderData(self, section, orientation, value, role=Qt.EditRole):
        # NOTE(review): Qt expects headerDataChanged to be emitted (and a
        # bool returned) after a successful edit -- confirm whether any
        # attached view relies on that signal.
        if role == Qt.EditRole:
            self.values[section] = value

    def removeColumn(self, column, index=QModelIndex()):
        self.beginRemoveColumns(index, column, column)
        self.values.pop(column)
        self.endRemoveColumns()

    def insertColumns(self, column, amount, index=QModelIndex()):
        self.beginInsertColumns(index, column, column+amount-1)
        for idx in range(amount):
            # New columns get numeric labels derived from the growing count.
            self.values.append(str(self.columnCount()+idx))
        self.endInsertColumns()
| 33.685185 | 75 | 0.631116 | 3,516 | 0.966465 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.0011 |
aa8608fd488af6aaed7a72ebd77b25da200b3ddd | 3,752 | py | Python | examples/example1.py | michael-riha/gstreamer-101-python | be281f91ebdd260ce23f747cc32e1ae2133410e9 | [
"MIT"
] | 4 | 2020-06-09T14:21:26.000Z | 2021-07-31T19:30:19.000Z | examples/example1.py | michael-riha/gstreamer-101-python | be281f91ebdd260ce23f747cc32e1ae2133410e9 | [
"MIT"
] | null | null | null | examples/example1.py | michael-riha/gstreamer-101-python | be281f91ebdd260ce23f747cc32e1ae2133410e9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# mix of:
# https://www.programcreek.com/python/example/88577/gi.repository.Gst.Pipeline
# https://github.com/GStreamer/gst-python/blob/master/examples/helloworld.py
# http://lifestyletransfer.com/how-to-launch-gstreamer-pipeline-in-python/
import sys
import collections
from pprint import pprint
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib
import pdb
'''
gst-launch-1.0 \
videotestsrc is-live=true ! \
queue ! videoconvert ! x264enc byte-stream=true ! \
h264parse config-interval=1 ! queue ! matroskamux ! queue leaky=2 ! \
tcpserversink port=7001 host=0.0.0.0 recover-policy=keyframe sync-method=latest-keyframe sync=false
'''
def main(args):
    """Build and run a GStreamer pipeline serving a test video stream over TCP.

    Python equivalent of the gst-launch-1.0 command in the module docstring:
    videotestsrc -> x264enc -> h264parse -> matroskamux -> tcpserversink.
    """
    # GObject.threads_init() is deprecated but still appears in many tutorials.
    #GObject.threads_init()
    Gst.init(None)
    # Declarative pipeline description: element type plus its properties.
    # (namedtuple docs: https://pymotw.com/2/collections/namedtuple.html)
    Element = collections.namedtuple('Element', ['type', 'attributes'])
    elements = [
        Element('videotestsrc', { "is-live": True}),
        Element('queue', {}),
        Element('videoconvert', {}),
        Element('x264enc', {"byte-stream": True}),
        Element('h264parse', {"config-interval":1}),
        Element('queue', {}),
        Element('matroskamux', {}),
        Element('queue', {"leaky": 2}),
        Element('tcpserversink', {"port": 7001, "host": "0.0.0.0", "recover-policy": "keyframe", "sync-method":"latest-keyframe", "sync": False}),
    ]
    pipeline = Gst.Pipeline()
    # Watch the pipeline bus so bus_call() receives EOS/error/warning messages.
    message_bus = pipeline.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect('message', bus_call, None)
    elements_created= dict()
    # Instantiate each element, apply its properties, and add it to the
    # pipeline.  Names are made unique by suffixing the list index.
    # (enumerate: https://stackoverflow.com/questions/25150502/python-loop-index-of-key-value-for-loop-when-using-items)
    for index, item in enumerate(elements):
        name = item.type+str(index)
        elements_created[name] = Gst.ElementFactory.make(item.type, name)
        for key, value in item.attributes.items():
            #pdb.set_trace()
            elements_created[name].set_property(key, value)
        pipeline.add(elements_created[name])
    # Link each element to its successor, in declaration order.
    length = len(elements)
    i = 0
    while i < length-1:
        pprint(elements[i].type+str(i))
        current_name_in_created= elements[i].type+str(i)
        next_name_in_created= elements[i+1].type+str(i+1)
        print(current_name_in_created+"->"+next_name_in_created)
        elements_created[current_name_in_created].link(elements_created[next_name_in_created])
        i += 1
    pprint(elements_created)
    #pdb.set_trace()
    # Start playback and listen for events.
    pipeline.set_state(Gst.State.PLAYING)
    # Create an event loop and feed GStreamer bus messages to it.
    loop = GLib.MainLoop()
    try:
        loop.run()
    except:
        loop.quit()
    # Cleanup: stop the pipeline before exiting.
    print("cleaning up")
    pipeline.set_state(Gst.State.NULL)
    sys.exit()
# http://lifestyletransfer.com/how-to-launch-gstreamer-pipeline-in-python/
def bus_call(bus: Gst.Bus, message: Gst.Message, loop: GLib.MainLoop):
    """Bus watch callback: handle EOS/error/warning messages from the pipeline.

    Quits the main loop on end-of-stream or error; warnings are only logged.
    Returns True so the watch stays installed.
    NOTE(review): main() connects this callback with user data None, so
    `loop` is None there and loop.quit() would raise AttributeError on
    EOS/error -- confirm and pass the real GLib.MainLoop instead.
    """
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write("Error: %s: %s\n" % (err, debug))
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        # Warnings are reported but do not stop the pipeline.
        err, debug = message.parse_warning()
        sys.stderr.write("Warning: %s: %s\n" % (err, debug))
    return True
if __name__ == '__main__':
    # main() calls sys.exit() itself, so its return value is not used here.
    #sys.exit(main(sys.argv))
    # Swallow Ctrl-C so the script exits quietly instead of with a traceback.
    # (https://stackoverflow.com/questions/4205317/capture-keyboardinterrupt-in-python-without-try-except)
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        # do nothing here
        pass
| 32.344828 | 142 | 0.691365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,674 | 0.446162 |
aa8996944e86f8abe0a588ebb23f714fffd14e70 | 3,464 | py | Python | tests/server1_test.py | kalebswartz7/sirepo | 8d1f2b3914cf9622eaae6b0bf32e23e38e4e5972 | [
"Apache-2.0"
] | null | null | null | tests/server1_test.py | kalebswartz7/sirepo | 8d1f2b3914cf9622eaae6b0bf32e23e38e4e5972 | [
"Apache-2.0"
] | null | null | null | tests/server1_test.py | kalebswartz7/sirepo | 8d1f2b3914cf9622eaae6b0bf32e23e38e4e5972 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
u"""Test simulationSerial
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('srwl_bl')
#: Used for a sanity check on serial numbers
_MIN_SERIAL = 10000000
def test_1_serial_stomp():
    """Verify simulationSerial optimistic-concurrency control.

    Every save must bump the serial; saving a stale copy must be rejected
    with an 'invalidSerial' error; saving the fresh copy must succeed.
    """
    from pykern.pkdebug import pkdp, pkdpretty
    from pykern.pkunit import pkfail, pkok
    from sirepo import sr_unit
    import copy

    fc = sr_unit.flask_client()

    sim_type = 'srw'
    # Locate the example simulation shipped with the SRW app.
    data = fc.sr_post('listSimulations', {'simulationType': sim_type})
    for youngs in data:
        if youngs['name'] == "Young's Double Slit Experiment":
            break
    else:
        pkfail("{}: Young's not found", pkdpretty(data))
    data = fc.sr_get(
        'simulationData',
        {
            'simulation_type': sim_type,
            'pretty': '0',
            'simulation_id': youngs['simulationId'],
        },
    )
    prev_serial = data['models']['simulation']['simulationSerial']
    prev_data = copy.deepcopy(data)
    # Sanity check that the serial looks like a real serial number.
    pkok(
        prev_serial > _MIN_SERIAL,
        '{}: serial must be greater than {}',
        prev_serial,
        _MIN_SERIAL,
    )
    # A normal save must increment the serial.
    data['models']['beamline'][4]['position'] = '61'
    curr_data = fc.sr_post('saveSimulationData', data)
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    pkok(
        prev_serial < curr_serial,
        '{}: serial not incremented, still < {}',
        prev_serial,
        curr_serial,
    )
    # Saving the stale copy (old serial) must be rejected.
    prev_data['models']['beamline'][4]['position'] = '60.5'
    failure = fc.sr_post('saveSimulationData', prev_data)
    pkok(
        failure['error'] == 'invalidSerial',
        '{}: unexpected status, expected serial failure',
        failure,
    )
    # Saving the fresh copy must succeed and bump the serial again.
    curr_data['models']['beamline'][4]['position'] = '60.5'
    curr_serial = curr_data['models']['simulation']['simulationSerial']
    new_data = fc.sr_post('saveSimulationData', curr_data)
    new_serial = new_data['models']['simulation']['simulationSerial']
    pkok(
        curr_serial < new_serial,
        '{}: serial not incremented, still < {}',
        new_serial,
        curr_serial,
    )
def test_oauth():
    """Exercise the GitHub OAuth login/logout routes with dummy credentials."""
    from pykern import pkconfig
    # Config must be reset before the sirepo.server import below reads it.
    pkconfig.reset_state_for_testing({
        'SIREPO_SERVER_OAUTH_LOGIN': '1',
        'SIREPO_OAUTH_GITHUB_KEY': 'n/a',
        'SIREPO_OAUTH_GITHUB_SECRET': 'n/a',
        'SIREPO_OAUTH_GITHUB_CALLBACK_URI': 'n/a',
    })

    from pykern.pkunit import pkfail, pkok
    from sirepo import server
    from sirepo import sr_unit
    import re

    sim_type = 'srw'
    fc = sr_unit.flask_client()
    fc.sr_post('listSimulations', {'simulationType': sim_type})
    text = fc.sr_get(
        'oauthLogin',
        {
            'simulation_type': sim_type,
            'oauth_type': 'github',
        },
        raw_response=True,
    ).data
    # Pull the OAuth state token out of the login redirect page.
    state = re.search(r'state=(.*?)"', text).group(1)
    #TODO(pjm): causes a forbidden error due to missing variables, need to mock-up an oauth test type
    text = fc.get('/oauth-authorized/github')
    text = fc.sr_get(
        'oauthLogout',
        {
            'simulation_type': sim_type,
        },
        raw_response=True,
    ).data
    # Logout must redirect back to the app root for this sim type.
    pkok(
        text.find('Redirecting') > 0,
        'missing redirect',
    )
    pkok(
        text.find('"/{}"'.format(sim_type)) > 0,
        'missing redirect target',
    )
| 29.862069 | 101 | 0.610277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,352 | 0.3903 |
aa8a28180d9f5e87c7bd636c0ff1fa82ab7cd53e | 1,306 | py | Python | kg_nodeexporter/tests/test_builder.py | RangelReale/kg_nodeexporter | e8e2635940d83f05b674414489ea70519d881fa4 | [
"MIT"
] | null | null | null | kg_nodeexporter/tests/test_builder.py | RangelReale/kg_nodeexporter | e8e2635940d83f05b674414489ea70519d881fa4 | [
"MIT"
] | null | null | null | kg_nodeexporter/tests/test_builder.py | RangelReale/kg_nodeexporter | e8e2635940d83f05b674414489ea70519d881fa4 | [
"MIT"
] | null | null | null | import unittest
from kubragen import KubraGen
from kubragen.jsonpatch import FilterJSONPatches_Apply, ObjectFilter, FilterJSONPatch
from kubragen.provider import Provider_Generic
from kg_nodeexporter import NodeExporterBuilder, NodeExporterOptions
class TestBuilder(unittest.TestCase):
def setUp(self):
self.kg = KubraGen(provider=Provider_Generic())
def test_empty(self):
nodeexporter_config = NodeExporterBuilder(kubragen=self.kg)
self.assertEqual(nodeexporter_config.object_name('daemonset'), 'node-exporter')
def test_basedata(self):
nodeexporter_config = NodeExporterBuilder(kubragen=self.kg, options=NodeExporterOptions({
'namespace': 'myns',
'basename': 'mynodeexporter',
}))
self.assertEqual(nodeexporter_config.object_name('daemonset'), 'mynodeexporter')
FilterJSONPatches_Apply(items=nodeexporter_config.build(nodeexporter_config.BUILD_SERVICE), jsonpatches=[
FilterJSONPatch(filters=ObjectFilter(names=[nodeexporter_config.BUILDITEM_DAEMONSET]), patches=[
{'op': 'check', 'path': '/metadata/name', 'cmp': 'equals', 'value': 'mynodeexporter'},
{'op': 'check', 'path': '/metadata/namespace', 'cmp': 'equals', 'value': 'myns'},
]),
])
| 42.129032 | 113 | 0.701378 | 1,053 | 0.806279 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.175345 |
aa8a95508ab94b965d527ae1816936f597cbd54c | 1,349 | py | Python | docs/ASH/notebooks/object-segmentation-on-azure-stack/score.py | RichardZhaoW/AML-Kubernetes | dd699c484c0811bc2b7a21f80f19e0c40832acdc | [
"MIT"
] | 176 | 2019-07-03T00:20:15.000Z | 2022-03-14T07:51:22.000Z | docs/ASH/notebooks/object-segmentation-on-azure-stack/score.py | RichardZhaoW/AML-Kubernetes | dd699c484c0811bc2b7a21f80f19e0c40832acdc | [
"MIT"
] | 121 | 2019-06-24T20:47:27.000Z | 2022-03-28T02:16:18.000Z | docs/ASH/notebooks/object-segmentation-on-azure-stack/score.py | RichardZhaoW/AML-Kubernetes | dd699c484c0811bc2b7a21f80f19e0c40832acdc | [
"MIT"
] | 144 | 2019-06-18T18:48:43.000Z | 2022-03-31T12:14:46.000Z | import os
import json
import time
import torch
# Called when the deployed service starts
def init():
global model
global device
# Get the path where the deployed model can be found.
model_filename = 'obj_segmentation.pkl'
model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'], model_filename)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = torch.load(model_path, map_location=device)
# Handle requests to the service
def run(data):
try:
start_at = time.time()
inputs = json.loads(data)
img_data_list = inputs["instances"]
img_tensor_list = [torch.tensor(item) for item in img_data_list]
model.eval()
with torch.no_grad():
predictions = model([item.to(device) for item in img_tensor_list])
pred_data_list = [{
"masks": prediction['masks'][0, 0].mul(255).byte().cpu().numpy().tolist(),
"boxes": prediction['boxes'].numpy().tolist(),
"labels": prediction['labels'].numpy().tolist(),
"scores": prediction['scores'].numpy().tolist(),
} for prediction in predictions]
return {"predictions": pred_data_list,
"elapsed_time": time.time() - start_at}
except Exception as e:
error = str(e)
return error
| 31.372093 | 87 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.204596 |
aa8aecad2be10769e57043eef6e4e9a70e4c119c | 5,023 | py | Python | acos_client/v21/slb/virtual_port.py | jjmanzer/acos-client | a35af927afa0d07ea6c7c172c1ae2ebe8ec6d10c | [
"Apache-2.0"
] | null | null | null | acos_client/v21/slb/virtual_port.py | jjmanzer/acos-client | a35af927afa0d07ea6c7c172c1ae2ebe8ec6d10c | [
"Apache-2.0"
] | null | null | null | acos_client/v21/slb/virtual_port.py | jjmanzer/acos-client | a35af927afa0d07ea6c7c172c1ae2ebe8ec6d10c | [
"Apache-2.0"
] | null | null | null | # Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client.v21 import base
class VirtualPort(base.BaseV21):
# Protocols
TCP = 2
UDP = 3
HTTP = 11
HTTPS = 12
OTHERS = 4
RTSP = 5
FTP = 6
MMS = 7
SIP = 8
FAST_HTTP = 9
GENERIC_PROXY = 10
SSL_PROXY = 13
SMTP = 14
SIP_TCP = 15
SIPS = 16
DIAMETER = 17
DNS_UDP = 18
TFTP = 19
DNS_TCP = 20
RADIUS = 21
MYSQL = 22
MSSQL = 23
FIX = 24
SMPP_TCP = 25
SPDY = 26
SPDYS = 27
FTP_PROXY = 28
# The keys as specified in the ACOS JSON message.
CLIENT_SSL_TMPL_KEY = "client_ssl_template"
SERVER_SSL_TMPL_KEY = "server_ssl_template"
# The keys as sent from a10-neutron-lbaas
# They match what we use in v4 so we transform here
CLIENT_SSL_ANL_KEY = "template_client_ssl"
SERVER_SSL_ANL_KEY = "template_server_ssl"
def _set(self, action, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, status=1,
autosnat=False,
ipinip=False,
source_nat=None,
**kwargs):
params = {
"name": virtual_server_name,
"vport": self.minimal_dict({
"name": name,
"service_group": service_group_name,
"protocol": protocol,
"port": int(port),
"source_ip_persistence_template": s_pers_name,
"cookie_persistence_template": c_pers_name,
"status": status
})
}
client_ssl_template = kwargs.get(self.CLIENT_SSL_TMPL_KEY)
server_ssl_template = kwargs.get(self.SERVER_SSL_TMPL_KEY)
if client_ssl_template:
params['vport'][self.CLIENT_SSL_ANL_KEY] = client_ssl_template
if server_ssl_template:
params['vport'][self.SERVER_SSL_ANL_KEY] = server_ssl_template
if autosnat:
params['vport']['source_nat_auto'] = int(autosnat)
if ipinip:
params['vport']['ip_in_ip'] = int(ipinip)
if source_nat and len(source_nat) > 0:
params['vport']['source_nat'] = source_nat
self._post(action, params, **kwargs)
def get(self, virtual_server_name, name, protocol, port, **kwargs):
# There is no slb.virtual_server.vport.search.
# Instead, we get the virtual server and get the desired vport.
results = self._post('slb.virtual_server.search', {'name': virtual_server_name}, **kwargs)
vports = results.get("virtual_server").get("vport_list", [])
port_filter = lambda x: x.get("name") == name
filtered_vports = [vport for vport in vports if port_filter(vport)]
if len(filtered_vports) > 0:
return filtered_vports[0]
def create(self, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, status=1,
autosnat=False,
ipinip=False,
source_nat_pool=None,
**kwargs):
self._set('slb.virtual_server.vport.create', virtual_server_name,
name, protocol, port, service_group_name,
s_pers_name, c_pers_name, status,
autosnat=autosnat, ipinip=ipinip, source_nat=source_nat_pool,
**kwargs)
def update(self, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, status=1,
autosnat=False,
ipinip=False,
source_nat_pool=None,
**kwargs):
self._set('slb.virtual_server.vport.update', virtual_server_name,
name, protocol, port, service_group_name,
s_pers_name, c_pers_name, status,
autosnat=autosnat, ipinip=ipinip, source_nat=source_nat_pool,
**kwargs)
def delete(self, virtual_server_name, name, protocol, port, **kwargs):
params = {
"name": virtual_server_name,
"vport": {
"name": name,
"protocol": protocol,
"port": int(port)
}
}
self._post("slb.virtual_server.vport.delete", params, **kwargs)
| 34.641379 | 98 | 0.601433 | 4,285 | 0.853076 | 0 | 0 | 0 | 0 | 0 | 0 | 1,348 | 0.268366 |
aa8b120c78b48885a14d17efcfc8523380e3b89e | 388 | py | Python | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | base/struct_data.py | cateatfish108/AutoTest | 8697aadd4c60c6a7cb435f784fc5c588805067bf | [
"MIT"
] | null | null | null | #coding:utf-8
# 数据库结构体
class DataBase:
url = ""
port = 3306
username = ""
password = ""
database = ""
charset = ""
# 测试用例信息结构体
class CaseInfo:
path = ""
case_list = []
# 测试用例结构体
class Case:
url = ""
db_table = ""
case_id = ""
method = ""
data = {}
check_item = {}
status = ""
db_key = {}
check_result = "" | 12.933333 | 21 | 0.474227 | 338 | 0.782407 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.252315 |
aa8b89cc9bb68c461a7f8b894c654bdcdd501e2a | 2,000 | py | Python | pox/info/debug_deadlock.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 416 | 2015-01-05T18:16:36.000Z | 2022-03-28T21:44:26.000Z | pox/info/debug_deadlock.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 140 | 2015-01-18T23:32:34.000Z | 2022-03-17T05:40:24.000Z | pox/info/debug_deadlock.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 344 | 2015-01-08T06:44:23.000Z | 2022-03-26T04:06:27.000Z | # Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primitive help for debugging deadlocks.
Prints stack info for all threads.
(Might be more useful if it only printed stack frames that
were not changing, sort of like recoco_spy.)
This was initially factored out from a pox.py modification by
Colin or Andi.
"""
import sys
import time
import inspect
import traceback
import threading
from pox.core import core
import os
base_path = __file__
base_path = os.path.split(base_path)[0]
base_path = os.path.split(base_path)[0]
base_path += os.path.sep
def fmt_tb (tb):
f = tb.filename
if f.startswith(base_path):
f = f[len(base_path):]
l = "%s:%i" % (f, tb.lineno)
code = tb.code_context
if code: code = code[0].strip()
if not code: code = "<Unknown>"
return "%20s: %s" % (l,code)
def _trace_thread_proc ():
try:
while core.running:
frames = sys._current_frames()
for key in frames:
frame = frames[key]
print(fmt_tb(inspect.getframeinfo(frame)))
outer_frames = inspect.getouterframes(frame)
for i in range(0, len(outer_frames)):
print(" " + fmt_tb(inspect.getframeinfo(outer_frames[i][0])))
time.sleep(5)
except:
traceback.print_exc()
def launch ():
_trace_thread = threading.Thread(target=_trace_thread_proc)
_trace_thread.daemon = True
# Start it up a bit in the future so that it doesn't print all over
# init messages.
core.callDelayed(3, _trace_thread.start)
| 28.169014 | 74 | 0.7135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 945 | 0.4725 |
aa8c0c3cea083104acd36512910c56a34a5f2037 | 850 | py | Python | src/test/chirc/tests/fixtures.py | Schrotty/sIRC | c2edf179651794ddc75ea26a69429933ab04809d | [
"MIT"
] | 2 | 2019-03-26T06:22:50.000Z | 2019-03-26T11:16:25.000Z | test/tests/chirc/tests/fixtures.py | MU001999/npcp | af0f8c33ef518d01580a95dcaea3951b1fa7087c | [
"MIT"
] | 1 | 2016-05-02T19:52:25.000Z | 2016-05-03T20:37:16.000Z | test/tests/chirc/tests/fixtures.py | MU001999/npcp | af0f8c33ef518d01580a95dcaea3951b1fa7087c | [
"MIT"
] | null | null | null | channels1 = { "#test1": ("@user1", "user2", "user3"),
"#test2": ("@user4", "user5", "user6"),
"#test3": ("@user7", "user8", "user9")
}
channels2 = { "#test1": ("@user1", "user2", "user3"),
"#test2": ("@user4", "user5", "user6"),
"#test3": ("@user7", "user8", "user9"),
None: ("user10" , "user11")
}
channels3 = { "#test1": ("@user1", "user2", "user3"),
"#test2": ("@user2",),
"#test3": ("@user3", "@user4", "user5", "user6"),
"#test4": ("@user7", "+user8", "+user9", "user1", "user2"),
"#test5": ("@user1", "@user5"),
None: ("user10" , "user11")
}
channels4 = { None: ("user1", "user2", "user3", "user4", "user5") } | 42.5 | 73 | 0.372941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 401 | 0.471765 |
aa8c8dfa05d8f4d2b3026058517396daf687dbef | 20,989 | py | Python | src/annalist_root/annalist/models/entityfinder.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 18 | 2015-02-20T23:09:13.000Z | 2020-11-13T06:06:43.000Z | src/annalist_root/annalist/models/entityfinder.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 30 | 2015-01-03T09:56:28.000Z | 2021-06-10T20:58:55.000Z | src/annalist_root/annalist/models/entityfinder.py | gklyne/annalist | 82e7ef2d56a400325e7618fa9e590072ee8a71d3 | [
"MIT"
] | 5 | 2015-02-02T09:01:23.000Z | 2018-06-14T20:05:28.000Z | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module contains (and isolates) logic used to find entities based on entity type,
list selection criteria and search terms.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from pyparsing import Word, QuotedString, Literal, Group, Empty, StringEnd, ParseException
from pyparsing import alphas, alphanums
from utils.py3porting import is_string, to_unicode
from annalist import layout
from annalist.util import valid_id, extract_entity_id
from annalist.models.recordtype import RecordType
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitytypeinfo import EntityTypeInfo
# -------------------------------------------------------------------
# Auxilliary functions
# -------------------------------------------------------------------
def order_entity_key(entity):
"""
Function returns sort key for ordering entities by type and entity id
Use with `sorted`, thus:
sorted(entities, order_entity_key)
"""
type_id = entity.get_type_id()
entity_id = entity.get_id()
key = ( 0 if type_id.startswith('_') else 1, type_id,
0 if entity_id.startswith('_') else 1, entity_id
)
return key
# -------------------------------------------------------------------
# EntityFinder
# -------------------------------------------------------------------
class EntityFinder(object):
"""
Logic for enumerating entities matching a supplied type, selector and/or search string.
"""
def __init__(self, coll, selector=None):
"""
Initialize entity finder for collection and selector.
"""
super(EntityFinder, self).__init__()
self._coll = coll
self._site = coll.get_site()
self._selector = EntitySelector(selector, FieldComparison(coll))
# self._subtypes = None
return
def get_collection_type_ids(self, altscope):
"""
Returns iterator over possible type ids in current collection.
Each type is returned as a candidate type identifier string
"""
return self._coll.cache_get_all_type_ids(altscope=altscope)
def get_collection_subtype_ids(self, supertype_id, altscope):
"""
Returns a iterator of type ids for all subtypes of the supplied type
accessible in the indicated scope from the current collection, including
the identified type itself.
"""
if not valid_id(supertype_id):
log.warning("EntityFinder.get_collection_subtype_ids: invalid type_id %s"%(supertype_id,))
return
supertype_info = EntityTypeInfo(self._coll, supertype_id)
supertype_uri = supertype_info.get_type_uri()
if supertype_uri is not None:
for try_subtype_id in self.get_collection_type_ids(altscope):
try_subtype = self._coll.cache_get_type(try_subtype_id)
if try_subtype:
try_subtype_uri = try_subtype.get_uri()
if ( ( supertype_uri == try_subtype_uri ) or
( supertype_uri in self._coll.cache_get_supertype_uris(try_subtype_uri) ) ):
yield try_subtype_id
else:
log.warning("EntityFinder.get_collection_subtype_ids: no type_uri for %s"%(supertype_id,))
def get_type_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection matching the supplied type.
'altscope' is used to determine the extent of data to be included in the listing:
a value of 'all' means that site-wide entyities are icnluded in the listing.
Otherwise only collection entities are included.
"""
#@@
# log.info("get_type_entities: type_id %s, user_permissions %r"%(type_id,user_permissions))
#@@
entitytypeinfo = EntityTypeInfo(self._coll, type_id)
for e in entitytypeinfo.enum_entities_with_implied_values(
user_permissions, altscope=altscope
):
if e.get_id() != layout.INITIAL_VALUES_ID:
#@@
# log.info(" yield: %s"%(e.get_id(),))
#@@
yield e
return
def get_subtype_entities(self, type_id, user_permissions, altscope):
"""
Iterate over entities from collection that are of the indicated type
or any of its subtypes.
'altscope' is used to determine the extent of data to be included in the listing:
a value of 'all' means that site-wide entities are included in the listing.
Otherwise only collection entities are included.
"""
for subtype_id in self.get_collection_subtype_ids(type_id, "all"):
subtype_info = EntityTypeInfo(self._coll, subtype_id)
es = subtype_info.enum_entities_with_implied_values(
user_permissions, altscope=altscope
)
#@@
# es = list(es) #@@ Force strict eval
# log.info("get_subtype_entities: %r"%([e.get_id() for e in es],))
#@@
for e in es:
if e.get_id() != layout.INITIAL_VALUES_ID:
yield e
return
def get_all_types_entities(self, types, user_permissions, altscope):
"""
Iterate over all entities of all types from a supplied type iterator
"""
#@@
# log.info("@@@@ get_all_types_entities")
#@@
for t in types:
for e in self.get_type_entities(t, user_permissions, altscope):
#@@
# log.info("get_all_types_entities: type %s/%s"%(t,e.get_id()))
#@@
yield e
return
def get_base_entities(self, type_id=None, user_permissions=None, altscope=None):
"""
Iterate over base entities from collection, matching the supplied type id if supplied.
If a type_id is supplied, site data values are included.
"""
entities = None
if type_id:
entities = self.get_subtype_entities(type_id, user_permissions, altscope)
# return self.get_type_entities(type_id, user_permissions, scope)
else:
entities = self.get_all_types_entities(
self.get_collection_type_ids(altscope="all"), user_permissions, altscope
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_base_entities: %r"%([(e.get_type_id(), e.get_id()) for e in entities],))
#@@
return entities
def search_entities(self, entities, search):
"""
Iterate over entities from supplied iterator containing supplied search term.
"""
for e in entities:
if self.entity_contains(e, search):
yield e
return
def get_entities(self,
user_permissions=None, type_id=None, altscope=None, context=None, search=None
):
"""
Iterates over entities of the specified type, matching search term and visible to
supplied user permissions.
"""
entities = self._selector.filter(
self.get_base_entities(type_id, user_permissions, altscope), context=context
)
if search:
entities = self.search_entities(entities, search)
return entities
def get_entities_sorted(self,
user_permissions=None, type_id=None, altscope=None, context={}, search=None
):
"""
Get sorted list of entities of the specified type, matching search term and
visible to supplied user permissions.
"""
entities = self.get_entities(
user_permissions, type_id=type_id, altscope=altscope,
context=context, search=search
)
#@@
# entities = list(entities) #@@ Force strict eval
# log.info("get_entities_sorted: %r"%([e.get_id() for e in entities],))
#@@
return sorted(entities, key=order_entity_key)
@classmethod
def entity_contains(cls, e, search):
"""
Returns True if entity contains/matches search term, else False.
Search term None (or blank) matches all entities.
>>> e1 = { 'p:a': '1', 'p:b': '2', 'p:c': '3', 'annal:property_uri': 'annal:member' }
>>> EntityFinder.entity_contains(e1, "1")
True
>>> EntityFinder.entity_contains(e1, "3")
True
>>> EntityFinder.entity_contains(e1, "nothere")
False
>>> EntityFinder.entity_contains(e1, "annal:member")
True
>>> e2 = { 'list': ['l1', 'l2', 'l3'] \
, 'dict': {'p:a': 'd1', 'p:b': 'd2', 'p:c': 'd3'} \
}
>>> EntityFinder.entity_contains(e2, "l1")
True
>>> EntityFinder.entity_contains(e2, "d3")
True
>>> EntityFinder.entity_contains(e2, "nothere")
False
"""
if search:
# Entity is not a dict, so scan entity keys for search
for key in e:
val = e[key]
if cls.value_contains(val, search):
return True
return False
return True
@classmethod
def value_contains(cls, val, search):
"""
Helper function tests for search term in dictionary, list or string values.
Other values are not searched.
"""
if isinstance(val, dict):
for k in val:
if cls.value_contains(val[k], search):
return True
elif isinstance(val, list):
for e in val:
if cls.value_contains(e, search):
return True
elif is_string(val):
return search in val
return False
# -------------------------------------------------------------------
# EntitySelector
# -------------------------------------------------------------------
class EntitySelector(object):
"""
This class implements a selector filter. It is initialized with a selector
expression, and may be invoked as a filter applied to an entity generator,
or as a predicate applied to a single entity.
>>> e = { 'p:a': '1', 'p:b': '2', 'p:c': '3', '@type': ["http://example.com/type", "foo:bar"] }
>>> c = { 'view': { 'v:a': '1', 'v:b': ['2', '3'] } }
>>> f1 = "'1' == [p:a]"
>>> f2 = "[p:a]=='2'"
>>> f3 = ""
>>> f4 = "'http://example.com/type' in [@type]"
>>> f5 = "'foo:bar' in [@type]"
>>> f6 = "'bar:foo' in [@type]"
>>> f7 = "[p:a] in view[v:a]"
>>> f8 = "[p:b] in view[v:b]"
>>> f9 = "[p:a] in view[v:b]"
>>> f10 = "[annal:field_entity_type] in view[annal:view_entity_type]"
>>> f11 = "foo:bar in [@type]"
>>> f12 = "bar:foo in [@type]"
>>> EntitySelector(f1).select_entity(e, c)
True
>>> EntitySelector(f2).select_entity(e, c)
False
>>> EntitySelector(f3).select_entity(e, c)
True
>>> EntitySelector(f4).select_entity(e, c)
True
>>> EntitySelector(f5).select_entity(e, c)
True
>>> EntitySelector(f6).select_entity(e, c)
False
>>> EntitySelector(f7).select_entity(e, c)
True
>>> EntitySelector(f8).select_entity(e, c)
True
>>> EntitySelector(f9).select_entity(e, c)
False
>>> EntitySelector(f10).select_entity(e, c)
True
>>> EntitySelector(f11).select_entity(e, c)
True
>>> EntitySelector(f12).select_entity(e, c)
False
"""
def __init__(self, selector, fieldcomp=None):
self._fieldcomp = fieldcomp
# Returns None if no filter is applied, otherwise a predcicate function
self._selector = self.compile_selector_filter(selector)
return
def filter(self, entities, context=None):
"""
Iterate over selection of entities from supplied iterator, using the
selection specification supplied to the constructor of the current object.
entities is an iterator over entities from which selection is made
context is a dictionary of context values that may be referenced by
the selector in choosing entities to be returned.
If no filtering is applied, the supplied iterator is returned as-is.
"""
if self._selector:
entities = self._filter(entities, context)
return entities
def _filter(self, entities, context):
"""
Internal helper applies selector to entity iterator, returning a new iterator.
"""
for e in entities:
if self._selector(e, context):
yield e
return
def select_entity(self, entity, context={}):
"""
Apply selector to an entity, and returns True if the entity is selected
"""
if self._selector:
return self._selector(entity, context)
return True
@classmethod #@@ @staticmethod, no cls?
def parse_selector(cls, selector):
"""
Parse a selector and return list of tokens
Selector formats:
ALL (or blank) match any entity
<val1> == <val2> values are same
<val1> in <val2> second value is list containing 1st value,
or values are same, or val1 is None.
<val1> <name> <val2> invoke comparison method from supplied
FieldComparison object
<val1> and <val2> may be:
[<field-id>] refers to field in entity under test
<name>[<field-id>] refers to field of context value, or None if the
indicated context value or field is not defined.
"<string>" literal string value. Quotes within are escaped.
<field_id> values are URIs or CURIEs, using characters defined by RFC3986,
except "[" and "]"
RFC3986:
unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
reserved = gen-delims / sub-delims
gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
/ "*" / "+" / "," / ";" / "="
Parser uses pyparsing combinators (cf. http://pyparsing.wikispaces.com).
"""
def get_value(val_list):
if len(val_list) == 1:
return { 'type': 'literal', 'name': None, 'field_id': None, 'value': val_list[0] }
elif val_list[0] == '[':
return { 'type': 'entity', 'name': None, 'field_id': val_list[1], 'value': None }
elif val_list[1] == '[':
return { 'type': 'context', 'name': val_list[0], 'field_id': val_list[2], 'value': None }
else:
return { 'type': 'unknown', 'name': None, 'field_id': None, 'value': None }
p_name = Word(alphas+"_", alphanums+"_")
p_id = Word(alphas+"_@", alphanums+"_-.~:/?#@!$&'()*+,;=)")
p_val = ( Group( Literal("[") + p_id + Literal("]") )
| Group( p_name + Literal("[") + p_id + Literal("]") )
| Group( QuotedString('"', "\\") )
| Group( QuotedString("'", "\\") )
| Group( p_id )
)
p_comp = ( Literal("==") | Literal("in") | p_name )
p_selector = ( p_val + p_comp + p_val + StringEnd() )
try:
resultlist = p_selector.parseString(selector).asList()
except ParseException:
return None
resultdict = {}
if resultlist:
resultdict['val1'] = get_value(resultlist[0])
resultdict['comp'] = resultlist[1]
resultdict['val2'] = get_value(resultlist[2])
return resultdict
def compile_selector_filter(self, selector):
"""
Return filter for for testing entities matching a supplied selector.
Returns None if no selection is performed; i.e. all possible entities are selected.
Selector formats: see `parse_selector` above.
This function returns a filter function compiled from the supplied selector.
"""
def get_entity(field_id):
"Get field from entity tested by filter"
def get_entity_f(e, c):
return e.get(field_id, None)
return get_entity_f
#
def get_context(name, field_id):
"Get field from named value in current display context"
def get_context_f(e, c):
if name in c and c[name]:
return c[name].get(field_id, None)
return None
return get_context_f
#
def get_literal(value):
"Get literal value specified directly in selector string"
def get_literal_f(e, c):
return value
return get_literal_f
#
def get_val_f(selval):
if selval['type'] == "entity":
return get_entity(selval['field_id'])
elif selval['type'] == "context":
return get_context(selval['name'], selval['field_id'])
elif selval['type'] == "literal":
return get_literal(selval['value'])
else:
msg = "Unrecognized value type from selector (%s)"%selval['type']
raise ValueError(msg)
assert False, "Unrecognized value type from selector"
#
def match_eq(v1f, v2f):
def match_eq_f(e, c):
return v1f(e, c) == v2f(e, c)
return match_eq_f
#
def match_in(v1f, v2f):
def match_in_f(e, c):
v1 = v1f(e, c)
if not v1: return True
v2 = v2f(e, c)
if isinstance(v2, list):
return v1 in v2
return v1 == v2
return match_in_f
#
def match_subtype(v1f, v2f):
def match_subtype_f(e, c):
return self._fieldcomp.subtype(v1f(e, c), v2f(e, c))
return match_subtype_f
#
if selector in {None, "", "ALL"}:
return None
sel = self.parse_selector(selector)
if not sel:
msg = "Unrecognized selector syntax (%s)"%selector
raise ValueError(msg)
v1f = get_val_f(sel['val1'])
v2f = get_val_f(sel['val2'])
if sel['comp'] == "==":
return match_eq(v1f, v2f)
if sel['comp'] == "in":
return match_in(v1f, v2f)
if sel['comp'] == "subtype":
return match_subtype(v1f, v2f)
# Drop through: raise error
msg = "Unrecognized entity selector (%s)"%selector
raise ValueError(msg)
# -------------------------------------------------------------------
# FieldComparison
# -------------------------------------------------------------------
class FieldComparison(object):
"""
Logic for comparing fields using additional context information not available
directly to 'EntitySelector'
"""
def __init__(self, coll):
super(FieldComparison, self).__init__()
self._coll = coll
self._site = coll.get_site()
return
def get_uri_type_info(self, type_uri):
"""
Return typeinfo corresponding to the supplied type URI
"""
t = self._coll.get_uri_type(type_uri)
return t and EntityTypeInfo(self._coll, t.get_id())
def subtype(self, type1_uri, type2_uri):
"""
Returns True if the first type is a subtype of the second type, where both
types are supplied as type URIs. Returns True if both URIs are the same.
If type1_uri is not specified, assume no restriction.
If type2_uri is not specified, assume it does not satisfy the restriction.
"""
# log.info("FieldComparison.subtype(%s, %s)"%(type1_uri, type2_uri))
if not type2_uri or (type1_uri == type2_uri):
return True
if not type1_uri:
return False
type1_info = self.get_uri_type_info(type1_uri)
type1_supertype_uris = (type1_info and type1_info.get_all_type_uris()) or []
# log.info("FieldComparison.subtype: type1_uris (supertypes) %r"%(type1_uris,))
return type2_uri in type1_supertype_uris
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
| 38.441392 | 112 | 0.554529 | 18,885 | 0.899757 | 4,125 | 0.196532 | 4,784 | 0.227929 | 0 | 0 | 10,260 | 0.488827 |
aa8d4e3a21127f714c7d16f0c3d1dca3b4a21610 | 3,029 | py | Python | screengrab.py | denosawr/fairdyne-ai | cd275ecbf12d239fd2705090a7632174e6f2a8a7 | [
"MIT"
] | null | null | null | screengrab.py | denosawr/fairdyne-ai | cd275ecbf12d239fd2705090a7632174e6f2a8a7 | [
"MIT"
] | null | null | null | screengrab.py | denosawr/fairdyne-ai | cd275ecbf12d239fd2705090a7632174e6f2a8a7 | [
"MIT"
] | null | null | null | from mss import mss
from PIL import Image
def screengrab(monitor=0, output="screenshot.png"):
""" Uses MSS to capture a screenshot quickly. """
sct = mss()
monitors = sct.enum_display_monitors()
scale = 1
game_x = 300*scale
game_y = 300*scale
mon_x = monitors[monitor]["left"]
mon_y = monitors[monitor]["top"]
size_x = monitors[monitor]["width"]
size_y = monitors[monitor]["height"]
x = int(mon_x + (size_x - game_x)/2)
y = int(mon_y + 220*scale)
mon = {'top': y, 'left': x, 'width': game_x, 'height': game_y}
sct.to_png(data=sct.get_pixels(mon), output=output)
def findheart(image="screenshot.png"):
""" Finds the heart. """
image_data = Image.open(image)
width = image_data.size[0]
heart = list()
arrow_blue = (0, 255, 0)
arrow_yellow = (255, 223, 25)
for count, i in enumerate(image_data.getdata()):
if i == (0, 0, 0):
continue
elif i == arrow_blue or i == arrow_yellow:
x = count % width
y = int(count/width)
heart.append([x, y])
if not heart:
return
sh = len(heart)
cx = int(sum([x[0] for x in heart])/sh)
cy = int(sum([y[1] for y in heart])/sh)
return cx, cy
def monitors():
m = mss()
return(m.enum_display_monitors())
def getsize(image="screenshot.png"):
""" Returns the size of the image. """
image_data = Image.open(image)
return image_data.size[0], image_data.size[1]
def findarrows(image="screenshot.png"):
""" Finds arrows in the specified image, by finding the closest pixel of a certain color. """
try:
image_data = Image.open(image)
width = image_data.size[0]
height = image_data.size[1]
matches = list()
heart = list()
for count, i in enumerate(image_data.getdata()):
if i == (0, 0, 0):
continue
elif i == (47, 208, 255):
count += 1
x = count % width
y = int(count/width)
matches.append([x, y])
elif i == (0, 255, 0):
x = count % width
y = int(count/width)
heart.append([x, y])
sh = len(heart)
cx = int(sum([x[0] for x in heart])/sh)
cy = int(sum([y[1] for y in heart])/sh)
max_match = (0, 0, width)
x_dist = width//40
x_s = width//2 - x_dist
x_b = width//2 + x_dist
y_dist = height//40
y_s = height//2 - y_dist
y_b = height//2 + y_dist
for i in matches:
if y_s < i[1] < y_b:
if (abs(cx-i[0])) < max_match[2]:
max_match = (i[0], i[1], abs(cx-i[0]))
elif x_s < i[0] < x_b:
if (abs(cy-i[1])) < max_match[2]:
max_match = (i[0], i[1], abs(cy-i[1]))
return max_match
except ZeroDivisionError:
return None
if __name__ == "__main__":
print(findarrows(image="screenshot.png"))
| 28.308411 | 97 | 0.528557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.114229 |
aa8f41fdb7d2b4f91adbdaae406e59a5680747b1 | 3,829 | py | Python | camper/handlers/users/edit.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 13 | 2016-03-13T02:33:39.000Z | 2021-04-01T13:09:12.000Z | camper/handlers/users/edit.py | comlounge/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 122 | 2016-03-10T09:28:09.000Z | 2021-09-07T23:49:05.000Z | camper/handlers/users/edit.py | mrtopf/camper | 7016539f92202bbea608c6d53ce19097d4ad931d | [
"MIT"
] | 5 | 2017-01-11T22:00:57.000Z | 2020-04-26T14:03:32.000Z | #encoding=utf8
from starflyer import Handler, redirect, asjson
from camper import BaseForm, db, BaseHandler
from camper import logged_in, is_admin
from wtforms import *
from sfext.babel import T
from camper.handlers.forms import *
import werkzeug.exceptions
from bson import ObjectId
from camper.handlers.images import AssetUploadView
class ProfileImageAssetUploadView(AssetUploadView):
    """Upload handler that stores the "medium_user" profile-image variant."""
    # Overrides the base class variant so profile uploads render at the
    # medium user-image size.
    variant = "medium_user"
class EditForm(BaseForm):
    """Form for editing a user's profile."""
    # id of the user being edited; validate_username excludes this id when
    # checking for username collisions.
    user_id = HiddenField()
    fullname = TextField(T(u"Fullname"))
    username = TextField(T(u"url name (username)"), [validators.Length(min=3, max=50), validators.Required(), validators.Regexp('^[a-zA-Z0-9_]+$')], description=T("this is the url path of your profile page, should only contain letters and numbers"))
    bio = TextAreaField(T(u"About me"))
    organisation = TextField(T(u"Organization"), [validators.Length(max=100)], description = T("your school, company, institution (max. 100 characters)"))
    twitter = TextField(T(u"Twitter"), [validators.Length(max=100)], description = T("your twitter username"))
    facebook = TextField(T(u"Facebook"), [validators.Length(max=255)], description = T("path to your facebook profile (without domain)"))
    image = UploadField(T(u"Profile Image (optional)"))
    # TODO: maybe change email, too?
    def validate_email(form, field):
        # Reject email addresses that are already registered to any user.
        if form.app.module_map.userbase.users.find({'email' : field.data}).count() > 0:
            raise ValidationError(form.handler._('this email address is already taken'))
    def validate_username(form, field):
        # Reject usernames taken by any *other* user (the edited user's own
        # record is excluded via its ObjectId).
        if form.app.module_map.userbase.users.find({'username' : field.data, '_id' : {'$ne': ObjectId(form.data['user_id'])}}).count() > 0:
            raise ValidationError(form.handler._('this url path is already taken'))
class ProfileEditView(BaseHandler):
    """Show and process the profile edit form.

    GET renders the form; POST validates and saves it, then redirects to
    the user's profile page.
    """
    template = "users/edit.html"

    @logged_in()
    def get(self):
        """render the view"""
        form = EditForm(self.request.form, obj = self.user, config = self.config, app = self.app, handler = self)
        if self.user.image:
            # Best effort: resolve the medium-sized variant of the stored
            # profile image; fall back to no image if the asset is missing
            # or broken. A bare ``except:`` here also swallowed SystemExit
            # and KeyboardInterrupt — narrowed to Exception.
            try:
                asset = self.app.module_map.uploader.get(self.user.image)
                image = self.url_for("asset", asset_id = asset.variants['medium_user']._id)
            except Exception:
                image = None
        else:
            image = None
        if self.request.method=="POST":
            if form.validate():
                self.user.update(form.data)
                self.user.save()
                self.flash(self._("Your profile has been updated"), category="info")
                url = self.url_for("profile", username = self.user.username)
                return redirect(url)
            else:
                self.flash(self._("There have been errors in the form"), category="danger")
        return self.render(form = form, user = self.user, image = image)

    # POST is handled by the same method (form processing branch above).
    post = get
class ProfileImageDeleteView(BaseHandler):
    """delete the profile image"""
    @asjson()
    def json(self, d):
        # Helper that serializes *d* as a JSON response via the decorator.
        return d
    @logged_in()
    def delete(self):
        """delete the profile image and return to the profile page"""
        asset_id = self.user.image
        if asset_id is not None:
            # Remove the stored asset, then clear the reference on the user.
            asset = self.app.module_map.uploader.remove(asset_id)
            self.user.image = None
            self.user.save()
        self.flash(self._("Your profile image has been deleted"), category="info")
        # Respond with a redirect for HTML clients, JSON for AJAX callers.
        fmt = self.request.form.get("fmt", "html")
        if fmt=="html":
            url = self.url_for("profile", username = self.user.username)
            return redirect(url)
        else:
            return self.json({"status": "ok"})
| 41.619565 | 254 | 0.630713 | 3,484 | 0.909898 | 0 | 0 | 1,685 | 0.440063 | 0 | 0 | 939 | 0.245234 |
aa91f44f3777df08b95fda1ce748bac56394b3f3 | 4,210 | py | Python | scripts/citation_extractor/citation_extractor.py | elainehoml/Savu | e4772704606f71d6803d832084e10faa585e7358 | [
"Apache-2.0"
] | 39 | 2015-03-30T14:03:42.000Z | 2022-03-16T16:50:33.000Z | scripts/citation_extractor/citation_extractor.py | elainehoml/Savu | e4772704606f71d6803d832084e10faa585e7358 | [
"Apache-2.0"
] | 670 | 2015-02-11T11:08:09.000Z | 2022-03-21T09:27:57.000Z | scripts/citation_extractor/citation_extractor.py | elainehoml/Savu | e4772704606f71d6803d832084e10faa585e7358 | [
"Apache-2.0"
] | 54 | 2015-02-13T14:09:52.000Z | 2022-01-24T13:57:09.000Z | import argparse
import h5py
import sys
import os
from savu.version import __version__
class NXcitation(object):
    """One NXcite citation record decoded from UTF-8 byte strings."""

    def __init__(self, description, doi, endnote, bibtex):
        # All four fields arrive as UTF-8 encoded bytes from the HDF5 file.
        self.description = description.decode('UTF-8')
        self.doi = doi.decode('UTF-8')
        self.endnote = endnote.decode('UTF-8')
        self.bibtex = bibtex.decode('UTF-8')

    def get_bibtex_ref(self):
        """Return the bibtex citation key, or '' when no bibtex is stored."""
        if not self.bibtex:
            return ""
        entry_head = self.bibtex.split(',')[0]
        return entry_head.split('{')[1]

    def _endnote_field(self, tag):
        """Return the first endnote line tagged *tag* (stripped), else None."""
        for line in self.endnote.split('\n'):
            if line.startswith(tag):
                return line.replace(tag, "").strip()

    def get_first_author(self):
        """Return the first author (%A line) from the endnote record."""
        return self._endnote_field("%A")

    def get_date(self):
        """Return the publication date (%D line) from the endnote record."""
        return self._endnote_field("%D")

    def get_description_with_author(self):
        """Return the description annotated with a LaTeX ref, author, date."""
        return "%s \\ref{%s}(%s, %s)" % (self.description,
                                         self.get_bibtex_ref(),
                                         self.get_first_author(),
                                         self.get_date())
class NXcitation_manager(object):
    """Accumulates NXcitation objects and renders them as combined text."""

    def __init__(self):
        self.NXcite_list = []

    def add_citation(self, citation):
        """Append one citation to the managed list."""
        self.NXcite_list.append(citation)

    def get_full_endnote(self):
        """Return all endnote records separated by blank lines."""
        return "\n\n".join(entry.endnote for entry in self.NXcite_list)

    def get_full_bibtex(self):
        """Return all bibtex records, one per line."""
        return "\n".join(entry.bibtex for entry in self.NXcite_list)

    def get_description_with_citations(self):
        """Return every annotated description joined into one passage."""
        return ". ".join(entry.get_description_with_author()
                         for entry in self.NXcite_list)

    def __str__(self):
        return ("\nDESCRIPTION\n%s\n\nBIBTEX\n%s\n\nENDNOTE\n%s"
                % (self.get_description_with_citations(),
                   self.get_full_bibtex(),
                   self.get_full_endnote()))
class NXciteVisitor(object):
    """h5py visitor that collects every NXcite group found in a file."""

    def __init__(self):
        self.citation_manager = NXcitation_manager()

    def _visit_NXcite(self, name, obj):
        # h5py visititems callback: collect groups whose NX_class is NXcite.
        if "NX_class" not in list(obj.attrs.keys()):
            return
        if obj.attrs["NX_class"] not in ["NXcite"]:
            return
        self.citation_manager.add_citation(
            NXcitation(obj['description'][0],
                       obj['doi'][0],
                       obj['endnote'][0],
                       obj['bibtex'][0]))

    def get_citation_manager(self, nx_file, entry):
        """Walk *entry* within *nx_file* and return the populated manager."""
        nx_file[entry].visititems(self._visit_NXcite)
        return self.citation_manager
def __check_input_params(args):
    """ Check for required input arguments.

    Exits with code 1 when the argument count is wrong and code 2 when
    the input file does not exist; otherwise returns None.
    """
    if len(args) != 2:
        print("Input and output filename need to be specified")
        print("Exiting with error code 1 - incorrect number of inputs")
        sys.exit(1)
    in_path = args[0]
    if not os.path.exists(in_path):
        print(("Input file '%s' does not exist" % in_path))
        print("Exiting with error code 2 - Input file missing")
        sys.exit(2)
def __option_parser(doc=True):
    """ Option parser for command line arguments.

    :param doc: when True (default) return the parser object itself (used
        for documentation builds); when False, parse sys.argv and return
        the resulting namespace.
    """
    version = "%(prog)s " + __version__
    parser = argparse.ArgumentParser()
    parser.add_argument('in_file', help='Input data file.')
    parser.add_argument('out_file', help='Output file to extract citation \
information to.')
    parser.add_argument('--version', action='version', version=version)
    return parser if doc==True else parser.parse_args()
def main(in_file=None, quiet=False):
    """Extract citation information from a Savu output file to a text file.

    :param in_file: optional input path. When given (the direct call from
        tomo_recon.py) the output goes to <input dir>/run_log/citations.txt;
        when omitted, input and output paths are read from the command line.
    :param quiet: suppress the completion message when True.
    """
    # when calling directly from tomo_recon.py
    if in_file:
        log_folder = os.path.join(os.path.dirname(in_file), "run_log")
        out_file = os.path.join(log_folder, "citations.txt")
    else:
        args = __option_parser(doc=False)
        in_file = args.in_file
        out_file = args.out_file

    # Previously the HDF5 file was opened but never closed; the context
    # manager guarantees it is released once the visitor has copied the
    # citation text out of it.
    with h5py.File(in_file, 'r') as infile:
        citation_manager = NXciteVisitor().get_citation_manager(infile, "/")

    if citation_manager is not None:
        with open(out_file, 'w') as outfile:
            outfile.write(str(citation_manager))

    if not quiet:
        print("Extraction complete")
# Command-line entry point.
if __name__ == '__main__':
    main()
| 32.890625 | 75 | 0.592399 | 2,489 | 0.591211 | 0 | 0 | 0 | 0 | 0 | 0 | 745 | 0.17696 |
aa93c24856d615e9328af105c675a5a9cd2f9c75 | 25,124 | py | Python | src/predict_ball_pos/src/predict_ball_position.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | src/predict_ball_pos/src/predict_ball_position.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | src/predict_ball_pos/src/predict_ball_position.py | diddytpq/Predict-Tennisball-LandingPoint | 0ae4a9ff45fd4dd82b4b4e3cc2533e7fd5d1506a | [
"MIT"
] | null | null | null | #! /home/drcl_yang/anaconda3/envs/py36/bin/python
from pathlib import Path
import sys
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix()) # add code to path
path = str(FILE.parents[0])
import numpy as np
from sympy import Symbol, solve
import time
import roslib
import rospy
from std_msgs.msg import String, Float64, Float64MultiArray
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.augmentations import letterbox
roslib.load_manifest('ball_trajectory')
# ball_tracking setup
# MOG2 background subtractor (history=100, varThreshold=16, no shadows) and
# the morphology kernels used to clean up its foreground mask.
fgbg = cv2.createBackgroundSubtractorMOG2(100, 16, False)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_dilation_2 = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
kernel_erosion_1 = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
# yolov5 setup
conf_thres = 0.25     # detection confidence threshold
iou_thres = 0.45      # NMS IoU threshold
classes = None        # filter by class: --class 0, or --class 0 2 3
agnostic_nms = False  # class-agnostic NMS
max_det = 2           # maximum detections per image
# NOTE: the next three assignments previously ended with a trailing comma
# (copied from a function signature), which silently made each of them a
# one-element tuple instead of a scalar.
hide_labels = False   # hide labels
hide_conf = False     # hide confidences
line_thickness = 3    # bounding box thickness (pixels)
set_logging()
device = select_device(0)
weights = path + '/weights/best.pt'
img_size = 640
model = attempt_load(weights, map_location=device)  # load FP32 model
stride = int(model.stride.max())  # model stride
imgsz = check_img_size(img_size, s=stride)  # check image size
names = model.module.names if hasattr(model, 'module') else model.names  # get class names
# draw graph setup
# Blank white canvases for the 2D trajectory plots.
point_image = np.zeros([640,640,3], np.uint8) + 255
trajectroy_image = np.zeros([640,640,3], np.uint8) + 255
tennis_court_img = cv2.imread(path + "/images/tennis_court.png")
tennis_court_img = cv2.resize(tennis_court_img,(0,0), fx=2, fy=2, interpolation = cv2.INTER_AREA)
# Per-rally state accumulators, reset after the ball disappears.
real_ball_trajectory_list = []
estimation_ball_trajectory_list = []
esti_ball_landing_point_list = []
save_flag = 0
disappear_cnt = 0
time_list = []
ball_val_list = []
real_ball_val_list = []
esti_ball_val_list = []
# Debug accumulators used by commented-out logging in Image_converter.main.
a = []
b = []
#kalman filter setup
# NOTE(review): despite the label above, this random color is used for
# drawing the ball boxes/points — confirm the kalman filter was removed.
color = tuple(np.random.randint(low=75, high = 255, size = 3).tolist())
class Image_converter:
    """ROS node that tracks a tennis ball in stereo camera frames.

    Subscribes to four Gazebo camera topics, detects the ball (background
    subtraction) and the robot (YOLOv5), triangulates the ball's 3D
    position from the left/right views, estimates its velocity and landing
    point, publishes the landing point, and draws debug visualizations.
    """
    def __init__(self):
        self.bridge = CvBridge()
        self.landingpoint = [0, 0]
        rospy.init_node('Image_converter', anonymous=True)
        #send topic to landing point check.py
        self.pub = rospy.Publisher('/esti_landing_point',Float64MultiArray, queue_size = 10)
        self.array2data = Float64MultiArray()
        # The right_1 camera callback drives the whole processing pipeline.
        rospy.Subscriber("/camera_right_0_ir/camera_right_0/color/image_raw",Image,self.callback_right_0)
        rospy.Subscriber("/camera_left_0_ir/camera_left_0/color/image_raw",Image,self.callback_left_0)
        rospy.Subscriber("/camera_left_top_ir/camera_left_top_ir/color/image_raw", Image, self.callback_left_top_ir)
        rospy.Subscriber("/camera_right_1_ir/camera_right_1/color/image_raw",Image,self.main)
    def callback_left_top_ir(self, data):
        """Cache the latest top-view frame as a BGR image."""
        try:
            self.t0 = time.time()
            self.left_top_data_0 = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
    def callback_left_0(self, data):
        """Cache the latest left-camera frame as a BGR image."""
        try:
            self.t0 = time.time()
            self.left_data_0 = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
    def callback_right_0(self, data):
        """Cache the latest right-camera frame as a BGR image."""
        try:
            self.right_data_0 = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
    def ball_tracking(self, image):
        """Detect moving-blob ball candidates; fills self.ball_cand_box."""
        self.ball_cand_box = []
        image_ori = image.copy()
        self.blur = cv2.GaussianBlur(image_ori, (13, 13), 0)
        # Background subtraction with a small fixed learning rate.
        self.fgmask_1 = fgbg.apply(self.blur, None, 0.01)
        #self.fgmask_erode = cv2.erode(self.fgmask_1, kernel_erosion_1, iterations = 1) # switch from an opening operation to plain erosion
        self.fgmask_dila = cv2.dilate(self.fgmask_1,kernel_dilation_2,iterations = 1)
        nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(self.fgmask_dila, connectivity = 8)
        for i in range(len(stats)):
            x, y, w, h, area = stats[i]
            # Reject blobs too large to be the ball.
            if area > 3000 : # or area < 500 or aspect > 1.2 or aspect < 0.97 :
                continue
            cv2.rectangle(image_ori, (x, y), (x + w, y + h), (255,0,0), 3)
            x0, y0, x1, y1 = x, y, x+w, y+h
            self.ball_cand_box.append([x0, y0, x1, y1 ])
        return image_ori
    def robot_tracking(self, image):
        """Run YOLOv5 on *image*; fills self.robot_box, returns annotated frame."""
        self.robot_box = []
        image_ori = image.copy()
        img = letterbox(image_ori, imgsz, stride=stride)[0]
        img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)
        img_in = torch.from_numpy(img).to(device)
        img_in = img_in.float()
        img_in /= 255.0
        if img_in.ndimension() == 3:
            img_in = img_in.unsqueeze(0)
        pred = model(img_in, augment=False)[0]
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
        for i, det in enumerate(pred): # detections per image
            im0 = image_ori.copy()
            if len(det):
                det[:, :4] = scale_coords(img_in.shape[2:], det[:, :4], im0.shape).round()
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum() # detections per class
                    s = f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
                for *xyxy, conf, cls in reversed(det):
                    c = int(cls) # integer class
                    label = names[c] #None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                    plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=3)
                    x0, y0, x1, y1 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
                    self.robot_box.append([x0, y0, x1, y1])
        return im0
    def check_iou(self, robot_box, ball_cand_box):
        """Drop ball candidates overlapping a robot box; sets self.ball_box."""
        no_ball_box = []
        centroid_ball = []
        if len(robot_box) < 1:
            self.ball_box = ball_cand_box
            return 0
        for i in range(len(robot_box)):
            for j in range(len(ball_cand_box)):
                if self.iou(robot_box[i], ball_cand_box[j]):
                    no_ball_box.append(ball_cand_box[j])
        for i in no_ball_box:
            del ball_cand_box[ball_cand_box.index(i)]
        self.ball_box = ball_cand_box
    def iou(self,box_0, box_1):
        """Return True when the two [x0, y0, x1, y1] boxes overlap at all."""
        b0x_0, b0y_0, b0x_1 ,b0y_1 = box_0
        b1x_0, b1y_0, b1x_1 ,b1y_1 = box_1
        min_x = np.argmin([b0x_0,b1x_0])
        min_y = np.argmin([b0y_0,b1y_0])
        # Four cases depending on which box starts first on each axis.
        if min_x == 0 and min_y == 0:
            if ((b0x_0 <= b1x_0 <= b0x_1) or (b0x_0 <= b1x_1 <= b0x_1)) and ((b0y_0 <= b1y_0 <= b0y_1) or (b0y_0 <= b1y_1 <= b0y_1)):
                return True
        if min_x == 0 and min_y == 1:
            if ((b0x_0 <= b1x_0 <= b0x_1) or (b0x_0 <= b1x_1 <= b0x_1)) and ((b1y_0 <= b0y_0 <= b1y_1) or (b1y_0 <= b0y_1 <= b1y_1)):
                return True
        if min_x == 1 and min_y == 0:
            if ((b1x_0 <= b0x_0 <= b1x_1) or (b1x_0 <= b0x_1 <= b1x_1)) and ((b0y_0 <= b1y_0 <= b0y_1) or (b0y_0 <= b1y_1 <= b0y_1)):
                return True
        if min_x == 1 and min_y == 1:
            if ((b1x_0 <= b0x_0 <= b1x_1) or (b1x_0 <= b0x_1 <= b1x_1) ) and ((b1y_0 <= b0y_0 <= b1y_1) or (b1y_0 <= b0y_1 <= b1y_1) ):
                return True
        return False
    def get_depth_height(self, L_pos, R_pos):
        """Triangulate ball distance and height from the two camera views.

        Returns (D_L, D_R, height_L, height_R); also stores self.theta_L /
        self.theta_R for later position math. The constants (principal
        point 320/160, focal length 343.159, 45-degree mount 0.785398 rad,
        12.8 m baseline) are presumably the Gazebo camera rig parameters —
        TODO confirm against the simulation config.
        """
        cx = 320
        cy = 160
        focal_length = 343.159
        x_L, y_L = L_pos[0] - cx, L_pos[1] - cy
        x_R, y_R = R_pos[0] - cx, R_pos[1] - cy
        c_L = np.sqrt(focal_length ** 2 + x_L ** 2 + y_L ** 2)
        a_L = np.sqrt(focal_length ** 2 + x_L ** 2)
        if x_L < 0:
            th_L = 0.785398 + np.arccos(focal_length / a_L)
        else :
            th_L = 0.785398 - np.arccos(focal_length / a_L)
        b_L = a_L * np.cos(th_L)
        c_R = np.sqrt(focal_length ** 2 + x_R ** 2 + y_R ** 2)
        a_R = np.sqrt(focal_length ** 2 + x_R ** 2)
        if x_R > 0:
            th_R = 0.785398 + np.arccos(focal_length / a_R)
        else :
            th_R = 0.785398 - np.arccos(focal_length / a_R)
        b_R = a_R * np.cos(th_R)
        self.theta_L = np.arccos(b_L/c_L)
        self.theta_R = np.arccos(b_R/c_R)
        # Law of sines across the camera baseline triangle.
        D_L = 12.8 * np.sin(self.theta_R) / np.sin(3.14 - (self.theta_L + self.theta_R))
        D_R = 12.8 * np.sin(self.theta_L) / np.sin(3.14 - (self.theta_L + self.theta_R))
        height_L = abs(D_L * np.sin(np.arcsin(y_L/c_L)))
        height_R = abs(D_R * np.sin(np.arcsin(y_R/c_R)))
        #height_L = abs(D_L * np.sin(np.arctan(y_L/a_L)))
        #height_R = abs(D_R * np.sin(np.arctan(y_R/a_R)))
        # +/- 1 m offset for the camera mounting height.
        if y_L < 0:
            height_L += 1
        else:
            height_L -= 1
        if y_R < 0:
            height_R += 1
        else:
            height_R -= 1
        return D_L, D_R, height_L, height_R
    def cal_ball_position(self, ball_height_list, ball_distance_list):
        """Convert camera distances/heights into a court-frame [x, y, z]."""
        height = sum(ball_height_list) / 2 - 1
        # Distances summing below the baseline are geometrically invalid.
        if sum(ball_distance_list) < 13:
            return [np.nan, np.nan, np.nan]
        ball2net_length_x_L = ball_distance_list[0] * np.sin(self.theta_L)
        ball_position_y_L = ball_distance_list[0] * np.cos(self.theta_L)
        ball_plate_angle_L = np.arcsin(height / ball2net_length_x_L)
        ball_position_x_L = ball2net_length_x_L * np.cos(ball_plate_angle_L)
        ball2net_length_x_R = ball_distance_list[1] * np.sin(self.theta_R)
        ball_position_y_R = ball_distance_list[1] * np.cos(self.theta_R)
        ball_plate_angle_R = np.arcsin(height / ball2net_length_x_R)
        ball_position_x_R = ball2net_length_x_R * np.cos(ball_plate_angle_R)
        """print("theta_L, theta_R : ", np.rad2deg(self.theta_L), np.rad2deg(self.theta_R))
        print("ball_plate_angle_L, ball_plate_angle_R : ", np.rad2deg(ball_plate_angle_L), np.rad2deg(ball_plate_angle_R))
        print([-ball_position_x_L, ball_position_y_L - 6.4, height + 1])
        print([-ball_position_x_R, 6.4 - ball_position_y_R, height + 1])"""
        # Take the y estimate from whichever camera sees the ball at a
        # steeper angle.
        if self.theta_L > self.theta_R:
            ball_position_y = ball_position_y_L - 6.4
        else :
            ball_position_y = 6.4 - ball_position_y_R
        return [-ball_position_x_L, ball_position_y, height + 1]
    def draw_point_court(self, real_point_list, camera_predict_point_list, draw_landing_point = False):
        """Plot real (red), estimated (green) and landing (yellow) points
        on the top-down court image (23.77 m x 10.97 m court)."""
        real_pix_point_list = []
        predict_pix_point_list = []
        if np.isnan(camera_predict_point_list[0]):
            return 0
        x_pred = camera_predict_point_list[0]
        y_pred = camera_predict_point_list[1]
        print()
        y_pix_length, x_pix_length = tennis_court_img.shape[0], tennis_court_img.shape[1]
        x_meter2pix = 23.77 / x_pix_length
        y_meter2pix = 10.97 / y_pix_length
        real_pix_point_list.append(int(np.round((11.885 + real_point_list[0]) / x_meter2pix)))
        real_pix_point_list.append(int(np.round((5.485 - real_point_list[1]) / y_meter2pix)))
        predict_pix_point_list.append(int(np.round((11.885 + x_pred) / x_meter2pix)))
        predict_pix_point_list.append(int(np.round((5.485 - y_pred) / y_meter2pix)))
        real_pix_point_xy = real_pix_point_list[0:2]
        predict_pix_point = predict_pix_point_list[0:2]
        cv2.circle(tennis_court_img,real_pix_point_xy, 4, [0, 0, 255], -1)
        cv2.circle(tennis_court_img,predict_pix_point, 4, [0, 255, 0], -1)
        if draw_landing_point and (np.isnan(self.esti_ball_landing_point[0]) == False) and self.esti_ball_landing_point[0] > 0:
            landing_point_list = []
            landing_point_list.append(int(np.round((11.885 + self.esti_ball_landing_point[0]) / x_meter2pix)))
            landing_point_list.append(int(np.round((5.485 - self.esti_ball_landing_point[1]) / y_meter2pix)))
            landing_point = landing_point_list[0:2]
            print("landing_point = ",self.esti_ball_landing_point)
            cv2.circle(tennis_court_img,landing_point, 4, [0, 255, 255], -1)
    def check_ball_seq(self, disappear_cnt):
        """Track ball visibility; reset rally state after 5 missed frames,
        otherwise append the current real/estimated positions. Returns the
        updated disappear counter."""
        global save_flag
        if np.isnan(self.ball_camera_list[0]):
            disappear_cnt += 1
            if disappear_cnt == 5 :
                if save_flag == 0 :
                    #print(esti_ball_landing_point_list)
                    save_flag = 1
                    #print("real_ball_trajectory_list = np.array(", real_ball_trajectory_list ,")")
                    #print("estimation_ball_trajectory_list = np.array(", estimation_ball_trajectory_list,")")
                disappear_cnt = 0
                real_ball_trajectory_list.clear()
                estimation_ball_trajectory_list.clear()
                esti_ball_val_list.clear()
                esti_ball_landing_point_list.clear()
                time_list.clear()
        else:
            disappear_cnt = 0
            time_list.append(time.time())
            real_ball_trajectory_list.append(self.real_ball_pos_list)
            estimation_ball_trajectory_list.append([np.round(self.ball_camera_list[0],3), np.round(self.ball_camera_list[1],3), np.round(self.ball_camera_list[2],3)])
            save_flag = 0
        return disappear_cnt
    def cal_ball_val(self):
        """Finite-difference velocity of the last two estimated and real
        positions; NaNs when fewer than two samples exist."""
        if len(time_list) > 1 :
            v0, v1 = np.array(estimation_ball_trajectory_list[-2]), np.array(estimation_ball_trajectory_list[-1])
            dt = time_list[-1] - time_list[-2]
            real_v0, real_v1 = np.array(real_ball_trajectory_list[-2]), np.array(real_ball_trajectory_list[-1])
            return (v1 - v0)/dt , (real_v1 - real_v0)/dt
        else:
            return [np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan]
    def get_ball_status(self):
        """Read the ground-truth ball pose/velocity from Gazebo."""
        self.g_get_state = rospy.ServiceProxy("/gazebo/get_model_state", GetModelState)
        self.ball_state = self.g_get_state(model_name = 'ball_left')
        self.ball_pose = Pose()
        self.ball_pose.position.x = float(self.ball_state.pose.position.x)
        self.ball_pose.position.y = float(self.ball_state.pose.position.y)
        self.ball_pose.position.z = float(self.ball_state.pose.position.z)
        self.ball_vel = Twist()
        self.ball_vel.linear.x = float(self.ball_state.twist.linear.x)
        self.ball_vel.linear.y = float(self.ball_state.twist.linear.y)
        self.ball_vel.linear.z = float(self.ball_state.twist.linear.z)
        self.ball_vel.angular.x = float(self.ball_state.twist.angular.x)
        self.ball_vel.angular.y = float(self.ball_state.twist.angular.y)
        self.ball_vel.angular.z = float(self.ball_state.twist.angular.z)
    def cal_landing_point(self, pos):
        """Project the ball to z=0 under gravity plus quadratic drag.

        Uses ball mass 0.057 kg, radius 0.033 m, drag coefficient 0.507 and
        air density 1.2041 — presumably standard tennis-ball constants;
        TODO confirm the drag model against the simulation physics.
        """
        t_list = []
        vel = self.check_vel_noise()
        x0, y0, z0 = pos[0], pos[1], pos[2]
        vx, vy, vz = vel[0], vel[1], vel[2]
        # Solve a t^2 + b t + c = 0 for the time of impact and keep the
        # positive root.
        a = -((0.5 * 0.507 * 1.2041 * np.pi * (0.033 ** 2) * vz ** 2 ) / 0.057 + 9.8 / 2 )
        b = vz
        c = z0
        t_list.append((-b + np.sqrt(b ** 2 - 4 * a * c))/(2 * a))
        t_list.append((-b - np.sqrt(b ** 2 - 4 * a * c))/(2 * a))
        t = max(t_list)
        x = np.array(x0 + vx * t - (0.5 * 0.507 * 1.2041 * np.pi * (0.033 ** 2) * vx ** 2 ) * (t ** 2) / 0.057,float)
        y = np.array(y0 + vy * t - (0.5 * 0.507 * 1.2041 * np.pi * (0.033 ** 2) * vy ** 2 ) * (t ** 2) / 0.057,float)
        z = np.array(z0 + vz * t - ((0.5 * 0.507 * 1.2041 * np.pi * (0.033 ** 2) * vz ** 2 ) / 0.057 + 9.8 / 2) * (t ** 2),float)
        return [np.round(x,3), np.round(y,3), np.round(z,3)]
    def check_vel_noise(self):
        """Smooth outliers in the estimated y-velocity; returns the latest
        (possibly corrected) velocity sample."""
        y_vel_list = np.array(esti_ball_val_list)[:,1]
        if len(y_vel_list) > 3 :
            vel_mean = np.mean(y_vel_list)
            # A jump of more than 2 m/s from the running mean is treated as
            # noise and replaced by the mean of the earlier samples.
            if abs(abs(vel_mean) - abs(y_vel_list[-1])) > 2:
                vel_mean = np.mean(y_vel_list[:-1])
                esti_ball_val_list[-1][1] = vel_mean
            return esti_ball_val_list[-1]
        else:
            return esti_ball_val_list[-1]
    def main(self, data):
        """Per-frame pipeline, triggered by the right_1 camera topic:
        detect ball and robot, triangulate, estimate velocity and landing
        point, publish it, and update the debug windows."""
        global point_image
        global color
        global tennis_court_img
        global real_ball_trajectory_list
        global estimation_ball_trajectory_list
        global esti_ball_landing_point_list
        global save_flag
        global time_list
        global disappear_cnt
        (rows,cols,channels) = self.left_data_0.shape
        self.ball_box = []
        self.ball_height_list = [[0], [0]]
        self.ball_centroid_list = [[0, 0],[0, 0]]
        self.ball_distance_list = [[0],[0]]
        self.ball_depth_list = [[0],[0]]
        self.esti_ball_val = [np.nan, np.nan, np.nan]
        self.esti_ball_landing_point = [np.nan, np.nan, np.nan]
        self.get_ball_status()
        self.ball_camera_list = [np.nan, np.nan, np.nan]
        if cols > 60 and rows > 60 :
            t1 = time.time()
            self.real_ball_pos_list = [np.round(self.ball_pose.position.x,3), np.round(self.ball_pose.position.y,3), np.round(self.ball_pose.position.z,3)]
            self.left_top_frame = cv2.resize(self.left_top_data_0,(640,640),interpolation = cv2.INTER_AREA)
            # Stack left over right, then put the top view alongside.
            self.left_frame = cv2.vconcat([self.left_data_0,self.right_data_0])
            self.main_frame = cv2.hconcat([self.left_frame, self.left_top_frame])
            ball_detect_img = self.main_frame.copy()
            robot_detect_img = self.main_frame.copy()
            robot_detect_img = self.robot_tracking(self.left_frame.copy()) #get robot bbox
            self.ball_tracking(self.left_frame.copy()) #get ball cand bbox list
            if self.ball_cand_box:
                self.check_iou(self.robot_box, self.ball_cand_box) # get ball bbox list
            if self.ball_box: #draw ball bbox and trajectory and predict ball pos
                for i in range(len(self.ball_box)):
                    x0, y0, x1, y1 = self.ball_box[i]
                    ball_x_pos, ball_y_pos = int((x0 + x1)/2), int((y0 +y1)/2)
                    cv2.rectangle(ball_detect_img, (x0, y0), (x1, y1), color, 3)
                    cv2.circle(point_image,(ball_x_pos, ball_y_pos), 4, color, -1)
                    #predict ball pos
                    #ball_depth = self.get_depth(x0, y0, x1, y1)
                    # Route the detection to the left (top half) or right
                    # (bottom half) camera slot of the stacked frame.
                    if ball_x_pos < 640:
                        if ball_y_pos < 320:
                            self.ball_centroid_list[0] = [ball_x_pos, ball_y_pos]
                        else:
                            self.ball_centroid_list[1] = [ball_x_pos, ball_y_pos - 320]
                self.ball_distance_list[0], self.ball_distance_list[1], self.ball_height_list[0], self.ball_height_list[1] = self.get_depth_height(self.ball_centroid_list[0], self.ball_centroid_list[1])
                # Only triangulate when both cameras produced a centroid.
                if min(self.ball_centroid_list) > [0, 0]:
                    self.ball_camera_list = self.cal_ball_position(self.ball_height_list, self.ball_distance_list)
                    if np.isnan(self.ball_camera_list[0]) == False:
                        self.ball_camera_list[0] = self.ball_camera_list[0] + 0.3
                    """print("------------------------------------------------------------------")
                    print("real_distance : ", np.round(np.sqrt(self.real_ball_pos_list[0] **2 + (self.real_ball_pos_list[1] - (-6.4)) ** 2 + (self.real_ball_pos_list[2] - 1) ** 2), 3),
                                            np.round(np.sqrt(self.real_ball_pos_list[0] **2 + (self.real_ball_pos_list[1] - (6.4)) ** 2 + (self.real_ball_pos_list[2] - 1) ** 2), 3))
                    print("distance : ", np.round(self.ball_distance_list[0], 3), np.round(self.ball_distance_list[1], 3))
                    print("real_ball_pos = [{}, {}, {}]".format(self.real_ball_pos_list[0], self.real_ball_pos_list[1], self.real_ball_pos_list[2]))
                    print("camera_preadict_pos = " ,[np.round(self.ball_camera_list[0],3), np.round(self.ball_camera_list[1],3), np.round(self.ball_camera_list[2],3)])
                    """
                    #a.append([np.round(np.sqrt(self.real_ball_pos_list[0] **2 + (self.real_ball_pos_list[1] - (-6.4)) ** 2 + (self.real_ball_pos_list[2] - 1) ** 2), 3),
                    #        np.round(np.sqrt(self.real_ball_pos_list[0] **2 + (self.real_ball_pos_list[1] - (6.4)) ** 2 + (self.real_ball_pos_list[2] - 1) ** 2), 3)])
                    #b.append([np.round(self.ball_distance_list[0], 3), np.round(self.ball_distance_list[1], 3)])
                    #print("real_distance = np.array(",a,")")
                    #print("distance = np.array(",b,")")
            disappear_cnt = self.check_ball_seq(disappear_cnt)
            self.esti_ball_val, self.real_ball_val = self.cal_ball_val()
            if np.isnan(self.ball_camera_list[0]) == False and np.isnan(self.esti_ball_val[0]) == False:
                #print("ball_val = " ,[np.round(self.ball_vel.linear.x,3), np.round(self.ball_vel.linear.y,3), np.round(self.ball_vel.linear.z,3)])
                #print("real_ball_val = " ,[self.real_ball_val[0], self.real_ball_val[1], self.real_ball_val[2]])
                #print("esti_ball_val = " ,[self.esti_ball_val[0], self.esti_ball_val[1], self.esti_ball_val[2]])
                #ball_val_list.append([np.round(self.ball_vel.linear.x,3), np.round(self.ball_vel.linear.y,3), np.round(self.ball_vel.linear.z,3)])
                #real_ball_val_list.append([self.real_ball_val[0], self.real_ball_val[1], self.real_ball_val[2]])
                esti_ball_val_list.append([self.esti_ball_val[0], self.esti_ball_val[1], self.esti_ball_val[2]])
                """
                print("ball_val_list = np.array(", ball_val_list , ')')
                print("real_ball_val_list = np.array(", real_ball_val_list , ')')
                print("esti_ball_val_list = np.array(", esti_ball_val_list , ')')"""
                self.esti_ball_landing_point = self.cal_landing_point(self.ball_camera_list)
                esti_ball_landing_point_list.append(self.esti_ball_landing_point[:2])
                if self.esti_ball_landing_point:
                    #print("-----------------------")
                    # Only publish landing points on the far side (x > 0).
                    if self.esti_ball_landing_point[0] > 0:
                        print("send meg : ", self.esti_ball_landing_point)
                        self.array2data.data = self.esti_ball_landing_point
                        self.pub.publish(self.array2data)
                    #print("esti_ball_landing_point : ",self.esti_ball_landing_point)
            self.draw_point_court(self.real_ball_pos_list, self.ball_camera_list, draw_landing_point = True)
            #trajectroy_image = cv2.hconcat([point_image[:320,:640,:],point_image[320:,:640,:]])
            t2 = time.time()
            #cv2.imshow("left_frame", self.left_frame)
            #cv2.imshow("main_depth_0", self.main_depth_frame)
            #cv2.imshow("image_robot_tracking", robot_detect_img)
            cv2.imshow("ball_detect_img", ball_detect_img)
            cv2.imshow("tennis_court", tennis_court_img)
            #cv2.imshow("trajectroy_image", trajectroy_image)
            print("FPS :",1/(t2-t1))
            key = cv2.waitKey(1)
            if key == 27 :
                cv2.destroyAllWindows()
            # 'c' clears the court overlay by reloading the background image.
            if key == ord("c") :
                tennis_court_img = cv2.imread(path + "/images/tennis_court.png")
                tennis_court_img = cv2.resize(tennis_court_img,(0,0), fx=2, fy=2, interpolation = cv2.INTER_AREA)
def main(args):
    """Start the converter node and spin until interrupted."""
    # srv_delete_model = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
    # res = srv_delete_model("ball_left")
    converter = Image_converter()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv) | 35.942775 | 202 | 0.589715 | 22,123 | 0.879502 | 0 | 0 | 0 | 0 | 0 | 0 | 4,791 | 0.190467 |
aa95b0a5becffa05a70a8eaa0b843668b5455d7e | 14,697 | py | Python | methylize/genome_browser.py | FoxoTech/methylize | 383d7beebfd62d858c4fb7248e027f8faa2862dc | [
"MIT"
] | 2 | 2021-12-27T22:46:36.000Z | 2022-03-08T17:13:29.000Z | methylize/genome_browser.py | FoxoTech/methylize | 383d7beebfd62d858c4fb7248e027f8faa2862dc | [
"MIT"
] | 4 | 2021-07-15T18:43:56.000Z | 2022-03-09T21:29:55.000Z | methylize/genome_browser.py | FoxoTech/methylize | 383d7beebfd62d858c4fb7248e027f8faa2862dc | [
"MIT"
] | 1 | 2022-03-07T06:02:41.000Z | 2022-03-07T06:02:41.000Z | import time
import pymysql # for pulling UCSC data
import pandas as pd
from pathlib import Path
import logging
# app
from .progress_bar import * # tqdm, context-friendly
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
logging.getLogger('numexpr').setLevel(logging.WARNING)
# these login stats for the public database should not change.
HOST = 'genome-mysql.soe.ucsc.edu'  # UCSC public MySQL mirror
USER = 'genome'  # read-only public account (empty password)
DB = 'hg38'  # default human genome build
# cpg related table schema: http://genome.ucsc.edu/cgi-bin/hgTables?db=hg38&hgta_group=regulation&hgta_track=cpgIslandExt&hgta_table=cpgIslandExt&hgta_doSchema=describe+table+schema
# Gene tables accepted by fetch_genes' `ref` argument.
possible_tables = [
    'refGene', # cruzdb used this in examples -- 88,819 genes
    'knownGene', # 232,184 -- genes and pseudo genes too (use TranscriptType == 'coding_protein')
    'ncbiRefSeq', # 173,733 genes -- won't have matching descriptions; no kgXref shared key.
    # 'wgEncodeGencodeBasicV38', # 177k genes -- doesn't work
]
# Maps gene-table column names onto the DMR-region column they align with.
table_mapper = {
    'txStart': 'chromStart', # knownGene transcription start, refGene start, ncbiRefSeq start
    'txEnd': 'chromStart',
}
# Module-level MySQL connection handle; created lazily on first use.
conn = None
def fetch_genes(dmr_regions_file=None, tol=250, ref=None, tissue=None, sql=None,
save=True, verbose=False, use_cached=True, no_sync=False, genome_build=None,
host=HOST, user=USER, password='', db=DB):
"""find genes that are adjacent to significantly different CpG regions provided.
Summary:
fetch_genes() annotates the DMR region output file, using the UCSC Genome Browser database as a reference
as to what genes are nearby. This is an exploratory tool, as there are many versions of the human genome
that map genes to slightly different locations.
        fetch_genes() is an EXPLORATORY tool and makes a number of simplifications:
* the DMR regions file saves one CpG probe name and location, even though clusters of probes may map to
that nearby area.
* it measures the distance from the start position of the one representative probe per region to any nearby
genes, using the `tol`erance parameter as the cutoff. Tolerance is the max number of base pairs of separation
between the probe sequence start and the gene sequence start for it to be considered as a match.
* The default `tol`erance is 250, but that is arbitrary. Increase it to expand the search area, or decrease it
to be more conservative. Remember that Illumina CpG probe sequences are 50 base pairs long, so 100 is nearly
overlapping. 300 or 500 would also be reasonable.
* "Adjacent" in the linear sequence may not necessarily mean that the CpG island is FUNCTIONALLY coupled to the
regulatory or coding region of the nearby protein. DNA superstructure can position regulatory elements near to
a coding region that are far upstream or downstream from the mapped position, and there is no easy way to identify
"adjacent" in this sense.
* Changing the `tol`erance, or the reference database will result major differences in the output, and thus
one's interpretation of the same data.
* Before interpreting these "associations" you should also consider filtering candidate genes by
specific cell types where they are expressed. You should know the tissue from which your samples originated.
And filter candidate genes to exclude those that are only expressed in your tissue during development,
if your samples are from adults, and vice versa.
Arguments:
dmr_regions_file:
pass in the output file DataFrame or FILEPATH from DMR function.
Omit if you specify the `sql` kwarg instead.
ref: default is `refGene`
use one of possible_tables for lookup:
- 'refGene' -- 88,819 genes -- default table used in comb-b and cruzdb packages.
- 'knownGene' -- 232,184 genes -- pseudo genes too (the "WHere TranscriptType == 'coding_protein'" clause would work, but these fields are missing from the data returned.)
- 'ncbiRefSeq' -- 173,733 genes -- this table won't have gene descriptions, because it cannot be joined with the 'kgXref' (no shared key).
        Additionally, 'gtexGeneV8' is used for tissue-expression levels. Pseudogenes are omitted using the "WHERE score > 0" clause in the SQL.
tol: default 250
        +/- this many base pairs constitutes a gene "related" to a CpG region provided.
tissue: str
if specified, adds additional columns to output with the expression levels for identified genes
in any/all tissue(s) that match the keyword. (e.g. if your methylation samples are whole blood,
specify `tissue=blood`) For all 54 tissues, use `tissue=all`
genome_build: (None, NEW, OLD)
Only the default human genome build, hg38, is currently supported. Even though many other builds are available
in the UCSC database, most tables do not join together in the same way.
use_cached:
If True, the first time it downloads a dataset from UCSC Genome Browser, it will save to disk
and use that local copy thereafter. To force it to use the online copy, set to False.
no_sync:
methylize ships with a copy of the relevant UCSC gene browser tables, and will auto-update these
every month. If you want to run this function without accessing this database, you can avoid updating
using the `no_sync=True` kwarg.
host, user, password, db:
        Internal database connections for UCSC server. You would only need to mess with these if the server domain changes
from the current hardcoded value {HOST}. Necessary for tables to be updated and for `tissue` annotation.
sql:
a DEBUG mode that bypasses the function and directly queries the database for any information the user wants.
Be sure to specify the complete SQL statement, including the ref-table (e.g. refGene or ncbiRefSeq).
.. note::
This method flushes cache periodically. After 30 days, it deletes cached reference gene tables and re-downloads.
"""
if verbose:
logging.basicConfig(level=logging.INFO)
if isinstance(dmr_regions_file, pd.DataFrame):
regions = dmr_regions_file
reqd_regions = set(['name', 'chromStart'])
if set(regions.columns) & reqd_regions != reqd_regions:
raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
LOGGER.info(f"Loaded {regions.shape[0]} CpG regions.")
elif not sql and dmr_regions_file is None:
raise Exception("Either provide a path to the DMR stats file or a sql query.")
elif not sql:
regions = pd.read_csv(dmr_regions_file) #.sort_values('z_p')
reqd_regions = set(['name', 'chromStart'])
if set(regions.columns) & reqd_regions != reqd_regions:
raise KeyError(f"Your file of CpG regions must have these columns, at a minimum: {reqd_regions}")
LOGGER.info(f"Loaded {regions.shape[0]} CpG regions from {dmr_regions_file}.")
if not ref:
ref = possible_tables[0] # refGene
global conn # allows function to reuse the same connection
if conn is None and no_sync is False:
conn = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.DictCursor)
if sql:
with conn.cursor() as cur:
cur.execute(sql)
return list(cur.fetchall())
# these will be packed into the output CSV saved, but a nested dataframe is returned.
matches = {i:[] for i in regions.name} # cpg name --> [gene names]
distances = {i:[] for i in regions.name}
descriptions = {i:[] for i in regions.name}
# fetch WHOLE table needed, unless using cache
package_path = Path(__file__).parent
cache_file = Path(package_path, 'data', f"{ref}.pkl")
cache_available = cache_file.exists()
# don't use cache if over 1 month old:
if use_cached and cache_available and no_sync is False:
last_download = cache_file.stat().st_ctime
if time.time() - last_download > 2629746:
LOGGER.info(f"Cached genome table is over 1 month old; re-downloading from UCSC.")
cache_file.unlink()
cache_available = False
if use_cached and cache_available:
genes = pd.read_pickle(cache_file)
LOGGER.info(f"""Using cached `{ref}`: {Path(package_path, 'data', f"{ref}.pkl")} with ({len(genes)}) genes""")
elif no_sync is False: # download it
LOGGER.info(f"Downloading {ref}")
# chrom, txStart, txEnd; all 3 tables have name, but knownGene lacks a name2.
if ref == 'knownGene':
sql = f"""SELECT name as name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.kgID = {ref}.name;"""
else:
sql = f"""SELECT name, name2, txStart, txEnd, description FROM {ref} LEFT JOIN kgXref ON kgXref.refseq = {ref}.name;"""
with conn.cursor() as cur:
cur.execute(sql)
genes = list(cur.fetchall())
if use_cached:
import pickle
with open(Path(package_path, 'data', f"{ref}.pkl"),'wb') as f:
pickle.dump(genes, f)
LOGGER.info(f"Cached {Path(package_path, 'data', f'{ref}.pkl')} on first use, with {len(genes)} genes")
else:
LOGGER.info(f"Using {ref} with {len(genes)} genes")
# compare two dataframes and calc diff.
# need to loop here: but prob some matrix way of doing this faster
done = 0
for gene in tqdm(genes, total=len(genes), desc="Mapping genes"):
closeby = regions[ abs(regions.chromStart - gene['txStart']) < tol ]
if len(closeby) > 0:
for idx,item in closeby.iterrows():
matches[item['name']].append(gene['name2'])
dist = item['chromStart'] - gene['txStart']
distances[item['name']].append(dist)
desc = gene['description'].decode('utf8') if gene['description'] != None else ''
descriptions[item['name']].append(desc)
done += 1
#if done % 1000 == 0:
# LOGGER.info(f"[{done} matches]")
# also, remove duplicate gene matches for the same region (it happens a lot)
matches = {k: ','.join(set(v)) for k,v in matches.items()}
distances = {k: ','.join(set([str(j) for j in v])) for k,v in distances.items()}
descriptions = {k: ' | '.join(set(v)) for k,v in descriptions.items()}
# tidying up some of the deduping
def _tidy(desc):
if desc.startswith('|'):
desc = desc.lstrip('|')
if desc.endswith('|'):
desc = desc.rstrip('|')
return desc
descriptions = {k: _tidy(desc) for k,desc in descriptions.items()}
regions['genes'] = regions['name'].map(matches)
regions['distances'] = regions['name'].map(distances)
regions['descriptions'] = regions['name'].map(descriptions)
# add column(s) for gene tissue expression
if tissue != None:
# tissue == 'all'
tissues = fetch_genes(sql="select * from hgFixed.gtexTissueV8;")
sorted_tissues = [i['name'] for i in tissues]
gene_names = [i.split(',') for i in list(regions['genes']) if i != '']
N_regions_with_multiple_genes = len([i for i in gene_names if len(i) > 1])
if N_regions_with_multiple_genes > 0:
LOGGER.warning(f"{N_regions_with_multiple_genes} of the {len(gene_names)} regions have multiple genes matching in the same region, and output won't show tissue expression levels.")
gene_names = tuple([item for sublist in gene_names for item in sublist])
gtex = fetch_genes(sql=f"select name, expScores from gtexGeneV8 WHERE name in {gene_names} and score > 0;")
if len(gtex) > 0:
# convert to a lookup dict of gene name: list of tissue scores
gtex = {item['name']: [float(i) for i in item['expScores'].decode().split(',') if i != ''] for item in gtex}
# add tissue names
if len(tissues) != len(list(gtex.values())[0]):
LOGGER.error(f"GTEx tissue names and expression levels mismatch.")
else:
for gene, expScores in gtex.items():
labeled_scores = dict(zip(sorted_tissues, expScores))
gtex[gene] = labeled_scores
# to merge, create a new dataframe with matching genes names as index.
tissue_df = pd.DataFrame.from_dict(data=gtex, orient='index')
if tissue != 'all':
matchable = dict(zip([k.lower() for k in list(tissue_df.columns)], list(tissue_df.columns)))
keep_columns = [col_name for item,col_name in matchable.items() if tissue.lower() in item]
if keep_columns == []:
LOGGER.warning(f"No GTEx tissue types matched: {tissue}; returning all tissues instead.")
else:
tissue_df = tissue_df[keep_columns]
# this merge will ONLY WORK if there is just one gene listed in the gene column
regions = regions.merge(tissue_df, how='left', left_on='genes', right_index=True)
#finaly, add column to file and save
if save:
dmr_regions_stem = str(dmr_regions_file).replace('.csv','')
outfile = f"{dmr_regions_stem}_genes.csv"
regions.to_csv(Path(outfile))
LOGGER.info(f"Wrote {outfile}")
return regions
"""
tissue='all' (for big table) or tissue='blood' for one extra column
TODO -- incorporate the GTEx tables (expression by tissue) if user specifies one of 54 tissue types covered.
gtexGeneV8 x gtexTissue
"hgFixed.gtexTissue lists each of the 53 tissues in alphabetical order, corresponding to the comma separated expression values in gtexGene."
works: tissue_lookup = m.fetch_genes('', sql="select * from hgFixed.gtexTissueV8;")
then match tissue keyword kwarg against 'description' field and use 'name' for colname
note that expScores is a list of 54 numbers (expression levels).
chrom chromStart chromEnd name score strand geneId geneType expCount expScores
{'chrom': 'chr1',
'chromEnd': 29806,
'chromStart': 14969,
'expCount': 53,
'expScores': b'6.886,6.083,4.729,5.91,6.371,6.007,8.768,4.202,4.455,4.64,10'
b'.097,10.619,6.108,5.037,5.018,4.808,4.543,4.495,5.576,4.57,8'
b'.275,4.707,2.55,9.091,9.885,8.17,7.392,7.735,5.353,7.124,8.6'
b'17,3.426,2.375,7.669,3.826,7.094,6.365,3.263,10.723,10.507,4'
b'.843,9.193,13.25,11.635,11.771,8.641,10.448,6.522,9.313,10.3'
b'04,9.987,9.067,6.12,',
'geneId': 'ENSG00000227232.4',
'geneType': 'unprocessed_pseudogene',
'name': 'WASH7P',
'score': 427,
'strand': '-'},
"""
| 55.044944 | 192 | 0.669048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,302 | 0.632918 |
aa95d6856b327f32c668b348733f9e3447d221f2 | 104 | py | Python | node_modules/python-shell/test/python/echo_json.py | brenocg29/TP1RedesInteligentes | 3b73b3567089f9eb2e475ec8402113bf8803bb59 | [
"Apache-2.0"
] | 22 | 2016-08-23T11:27:37.000Z | 2022-03-01T04:15:20.000Z | node_modules/python-shell/test/python/echo_json.py | brenocg29/TP1RedesInteligentes | 3b73b3567089f9eb2e475ec8402113bf8803bb59 | [
"Apache-2.0"
] | 68 | 2015-06-25T17:13:22.000Z | 2017-05-08T16:01:47.000Z | node_modules/python-shell/test/python/echo_json.py | brenocg29/TP1RedesInteligentes | 3b73b3567089f9eb2e475ec8402113bf8803bb59 | [
"Apache-2.0"
] | 10 | 2017-05-06T19:09:41.000Z | 2021-10-16T17:55:02.000Z | import sys, json
# simple JSON echo script
# Reads one JSON document per line from stdin and echoes it back re-serialized.
# NOTE(review): Python 2 syntax — `print` is used as a statement below, so this
# file is a SyntaxError under Python 3; confirm the intended interpreter.
for line in sys.stdin:
  print json.dumps(json.loads(line))
| 17.333333 | 36 | 0.740385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.240385 |
aa97298a7164be0094cd007d6b0ccc9b510f720f | 329 | py | Python | test.py | sxhfut/Kashgari | efc9510ed53f5bb78183e66d96d57a55cc290a91 | [
"MIT"
] | 1 | 2019-01-26T15:18:07.000Z | 2019-01-26T15:18:07.000Z | test.py | sxhfut/Kashgari | efc9510ed53f5bb78183e66d96d57a55cc290a91 | [
"MIT"
] | null | null | null | test.py | sxhfut/Kashgari | efc9510ed53f5bb78183e66d96d57a55cc290a91 | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: test.py.py
@time: 2019-01-25 14:43
"""
import unittest
from tests import *
from kashgari.utils.logger import init_logger
# Configure logging once at import time, before any tests run.
init_logger()
# When executed directly, discover and run the test cases brought into this
# module's namespace by the wildcard `from tests import *` above.
if __name__ == '__main__':
    unittest.main()
| 15.666667 | 45 | 0.717325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.583587 |
aa9a7b51d2199fe8ca1968592f12f16f55f4da67 | 536 | py | Python | crossasr/text.py | mhilmiasyrofi/CrossASRv2 | 202b9a7caadf5f8d6f115f776526960af35a73a3 | [
"MIT"
] | 3 | 2021-05-12T02:48:06.000Z | 2021-12-21T14:45:56.000Z | crossasr/text.py | mhilmiasyrofi/CrossASRv2 | 202b9a7caadf5f8d6f115f776526960af35a73a3 | [
"MIT"
] | null | null | null | crossasr/text.py | mhilmiasyrofi/CrossASRv2 | 202b9a7caadf5f8d6f115f776526960af35a73a3 | [
"MIT"
] | 1 | 2021-06-14T11:15:35.000Z | 2021-06-14T11:15:35.000Z | import functools
@functools.total_ordering
class Text:
    """A numbered piece of text.

    Instances compare equal when both ``id`` and ``text`` match, and order
    lexicographically by the ``(id, text)`` tuple (``functools.total_ordering``
    derives the remaining comparison operators from ``__eq__`` and ``__lt__``).
    """

    def __init__(self, id: int, text: str):
        self.id = id
        self.text = text

    def __eq__(self, other):
        # Return NotImplemented (not raise) for foreign types so comparisons
        # against non-Text objects fall back to Python's default handling.
        if not isinstance(other, Text):
            return NotImplemented
        return self.id == other.id and self.text == other.text

    def __lt__(self, other):
        if not isinstance(other, Text):
            return NotImplemented
        return (self.id, self.text) < (other.id, other.text)

    def __hash__(self):
        # Defining __eq__ alone would set __hash__ to None, making Text
        # unhashable (unusable in sets/dict keys); hash the same tuple
        # that equality is based on.
        return hash((self.id, self.text))

    def __repr__(self):
        return f"Text(id={self.id!r}, text={self.text!r})"

    def getId(self):
        return self.id

    def setId(self, id: int):
        self.id = id

    def getText(self):
        return self.text

    def setText(self, text: str):
        self.text = text
| 20.615385 | 62 | 0.587687 | 491 | 0.916045 | 0 | 0 | 517 | 0.964552 | 0 | 0 | 0 | 0 |
aa9c166f4f3a357bb68cf49d28d3ae8d2761ad49 | 1,661 | py | Python | models/Forest.py | guitassinari/random-forest | 7e679f21da8f39c36bf3fcd3d02f066bad6f0305 | [
"MIT"
] | null | null | null | models/Forest.py | guitassinari/random-forest | 7e679f21da8f39c36bf3fcd3d02f066bad6f0305 | [
"MIT"
] | 1 | 2019-05-30T12:16:07.000Z | 2019-05-30T12:16:07.000Z | models/Forest.py | guitassinari/machine-learning | 7e679f21da8f39c36bf3fcd3d02f066bad6f0305 | [
"MIT"
] | 1 | 2019-05-10T20:22:23.000Z | 2019-05-10T20:22:23.000Z | from models.DecisionTree import DecisionTree
class Forest:
    """A random forest: an ensemble of decision trees that vote by majority."""

    def __init__(self, hyper_parameters, training_set):
        """Initialize the forest and train its trees.

        :param hyper_parameters: dict of hyper-parameters; must contain "n_trees"
        :param training_set: training dataset (must support size() and resample())
        """
        self.number_of_trees = hyper_parameters["n_trees"]
        self.trees = []
        self.training_set = training_set
        # Each tree trains on a stratified bootstrap sample of ~2/3 of the set.
        sample_size = round(2*self.training_set.size() / 3)
        # Build all number_of_trees decision trees.
        for i in range(self.number_of_trees):
            # resampling via stratified bootstrap
            tree_training_set = self.training_set.resample(sample_size)
            tree = DecisionTree(hyper_parameters, tree_training_set)
            self.trees.append(tree)

    def predict(self, example):
        """Predict the class of *example* by majority vote over all trees.

        Ties are broken in favor of the class that occurs first in the
        prediction list (same behavior as the previous count() loop).

        :param example: an Example instance to classify
        :return: the class predicted by the most trees
        """
        from collections import Counter  # local import: module deps unchanged
        predictions = self.__trees_predictions_for(example)
        # Counter.most_common replaces the previous O(n^2) list.count() loop;
        # for equal counts it is stable on first-insertion order, so the
        # tie-break winner is unchanged.
        return Counter(predictions).most_common(1)[0][0]

    def __trees_predictions_for(self, example):
        # One prediction per tree, in tree order.
        return [tree.predict(example) for tree in self.trees]
| 38.627907 | 92 | 0.660446 | 1,624 | 0.971292 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.354067 |
aa9c683934f9013e24a201545cdec92668339683 | 912 | py | Python | ansibler/exceptions/ansibler.py | ProfessorManhattan/ansibler | ca3cbd8af974b59a70e6c46b4ee7f97f68158031 | [
"MIT"
] | null | null | null | ansibler/exceptions/ansibler.py | ProfessorManhattan/ansibler | ca3cbd8af974b59a70e6c46b4ee7f97f68158031 | [
"MIT"
] | null | null | null | ansibler/exceptions/ansibler.py | ProfessorManhattan/ansibler | ca3cbd8af974b59a70e6c46b4ee7f97f68158031 | [
"MIT"
] | null | null | null | class BaseAnsiblerException(Exception):
message = "Error"
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args)
self.__class__.message = kwargs.get("message", self.message)
def __str__(self) -> str:
return self.__class__.message
class CommandNotFound(BaseAnsiblerException):
    """Raised when a command cannot be found."""
    message = "Command not found"
class RolesParseError(BaseAnsiblerException):
    """Raised when the default roles cannot be parsed."""
    message = "Could not parse default roles"
class MetaYMLError(BaseAnsiblerException):
    """Raised when meta/main.yml is invalid."""
    message = "Invalid meta/main.yml"
class RoleMetadataError(BaseAnsiblerException):
    """Raised on a role metadata error."""
    message = "Role metadata error"
class MoleculeTestsNotFound(BaseAnsiblerException):
    """Raised when no molecule tests can be found."""
    # Fixed typo in the default message: "foound" -> "found".
    message = "Molecule tests not found"
class MoleculeTestParseError(BaseAnsiblerException):
    """Raised when a molecule test file cannot be parsed."""
    message = "Could not parse molecule test file"
class NoPackageJsonError(BaseAnsiblerException):
    """Raised when no package.json is present."""
    message = "No package.json"
| 24 | 68 | 0.737939 | 890 | 0.975877 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.208333 |
aaa22c2733b8a63c15dd9acde1ab8ad0c3984613 | 2,358 | py | Python | gestao_contato/tests.py | rbiassusi/gesta_contatos | cb0c391f99843cd637627ed3c62c9afddc7e4047 | [
"MIT"
] | null | null | null | gestao_contato/tests.py | rbiassusi/gesta_contatos | cb0c391f99843cd637627ed3c62c9afddc7e4047 | [
"MIT"
] | null | null | null | gestao_contato/tests.py | rbiassusi/gesta_contatos | cb0c391f99843cd637627ed3c62c9afddc7e4047 | [
"MIT"
] | 1 | 2019-01-07T00:48:42.000Z | 2019-01-07T00:48:42.000Z | # -*- coding: utf-8 -*-
from django.test import TestCase, Client
from models import Contato
import json
# NOTE(review): this class shadows the imported django.test.TestCase name;
# a distinct name (e.g. ContatoApiTestCase) would be clearer.
class TestCase(TestCase):
    """
    Runs the test by issuing requests to the API and evaluating the responses.
    """
    def setUp(self):
        # Fresh Django test client, recreated before each test method.
        self.c = Client()

    def test_contato_api(self):
        """
        API test:
        1- Create a contact and check the returned status_code
        2- Create another contact and check no unexpected status_code comes back
        3- GET the list and check the API response matches the number of contacts created (2)
        4- keep the ID of the first contact created
        5- PUT an update to the contact's 'nome' (name) value
        6- Check the name was really changed
        7- DELETE that contact id
        8- Check the remaining contact count is correct (1)
        """
        # 1- create the first contact; expect HTTP 201 Created.
        response = self.c.post("/api/v1/contato/", json.dumps({"nome": "Rodrigo Teste 1",
                                                               "canal": "email", "valor": "rodrigobiassusi@gmail.com"}), content_type="application/json")
        self.assertEqual(response.status_code, 201)
        # 2- create a second contact; must not come back as HTTP 400.
        response = self.c.post(
            "/api/v1/contato/", json.dumps({"nome": "Rodrigo Teste 2", "canal": "celular", "valor": "21995241837"}), content_type="application/json")
        self.assertNotEqual(response.status_code, 400)
        # 3- list all contacts and check both are present, in creation order.
        response = self.c.get("/api/v1/contato/")
        objects = response.json()["objects"]
        self.assertEqual(len(objects), 2)
        self.assertEqual(objects[0]["nome"], "Rodrigo Teste 1")
        self.assertEqual(objects[1]["nome"], "Rodrigo Teste 2")
        # 4- keep the first contact's id.
        # NOTE(review): id_1 is never used afterwards — the PUT/DELETE below
        # hard-code id 1; TODO use id_1 in the URLs instead.
        id_1 = objects[0]["id"]
        # 5- rename the first contact; expect HTTP 204 No Content.
        response = self.c.put("/api/v1/contato/1/",
                              json.dumps({"nome": "Rodrigo Teste 3"}), content_type="application/json")
        self.assertEqual(response.status_code, 204)
        # 6- verify the rename took effect.
        response = self.c.get("/api/v1/contato/")
        objects = response.json()["objects"]
        self.assertNotEqual(objects[0]["nome"], "Rodrigo Teste 1")
        self.assertEqual(objects[0]["nome"], "Rodrigo Teste 3")
        # 7/8- delete it and verify exactly one contact remains.
        self.c.delete("/api/v1/contato/1/")
        response = self.c.get("/api/v1/contato/")
        objects = response.json()["objects"]
        self.assertNotEqual(len(objects), 2)
        self.assertEqual(len(objects), 1)
| 40.655172 | 153 | 0.607718 | 2,252 | 0.954642 | 0 | 0 | 0 | 0 | 0 | 0 | 1,118 | 0.47393 |
aaa2a22421918ddfde6678f1b564035567b5ee57 | 2,073 | py | Python | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | bouncy.py | kary1806/bouncy | afbd8a2e030cd51c0c8b84062ce7aa2b51f549df | [
"MIT"
] | null | null | null | from itertools import count, tee
class Bouncy:
    """Bouncy-number evaluator.

    A number is "bouncy" when its decimal digits are neither entirely
    non-decreasing nor entirely non-increasing.
    """

    def __init__(self, porcentage):
        """Search upward from 1 and print the first n for which exactly
        ``porcentage`` percent of the integers 1..n are bouncy.

        :type porcentage: int -- target percentage of bouncy numbers
        """
        target = porcentage / 100
        bouncy_so_far = 0.0
        for candidate in count(1):
            bouncy_so_far += float(self.is_rebound(candidate))
            if bouncy_so_far / candidate == target:
                print(candidate)
                break

    def pairs(self, iterable):
        """Yield consecutive overlapping pairs from *iterable*.

        :type iterable: any iterable
        """
        # tee() gives two independent iterators over the same input;
        # advancing the second by one makes zip() produce (x[i], x[i+1]).
        first, second = tee(iterable)
        next(second, None)
        return zip(first, second)

    def digits(self, number):
        """Return the decimal digits of *number* as a list of ints.

        :type number: int
        """
        return [int(ch) for ch in str(number)]

    def increase(self, number):
        """Return True when the digits of *number* never decrease left-to-right.

        :type number: int
        """
        return all(left <= right for left, right in self.pairs(self.digits(number)))

    def decrease(self, number):
        """Return True when the digits of *number* never increase left-to-right.

        :type number: int
        """
        return all(left >= right for left, right in self.pairs(self.digits(number)))

    def is_rebound(self, number):
        """Return True when *number* is bouncy (neither increasing nor decreasing).

        :type number: int
        """
        return not (self.increase(number) or self.decrease(number))

    def sum_number(self, iterable):
        """Yield the running (cumulative) total of *iterable*.

        :type iterable: iterable of numbers
        """
        running = 0
        for value in iterable:
            running += value
            yield running
test = Bouncy(99)
| 28.39726 | 102 | 0.554752 | 2,017 | 0.972986 | 223 | 0.107574 | 0 | 0 | 0 | 0 | 873 | 0.421129 |
aaa40f7d32c94661f35c79a2fb1ee27a71d6e4e9 | 909 | py | Python | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 1 | 2019-01-29T13:37:45.000Z | 2019-01-29T13:37:45.000Z | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 1 | 2018-08-27T21:02:24.000Z | 2018-08-27T21:02:24.000Z | pipeline.py | ankitshah009/BioASQ-Rabbit | 1d3073fbcdebb58b91788b6c2ab0ad9380cb2498 | [
"Apache-2.0"
] | 8 | 2018-03-26T17:36:39.000Z | 2019-02-28T14:23:25.000Z | #!/usr/bin/env python
import sys
from deiis.rabbit import Message, MessageBus
from deiis.model import Serializer, DataSet, Question
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'Usage: python pipeline.py <data.json>'
exit(1)
# filename = 'data/training.json'
filename = sys.argv[1]
print 'Processing ' + filename
fp = open(filename, 'r')
dataset = Serializer.parse(fp, DataSet)
fp.close()
# The list of services to send the questions to.
pipeline = ['mmr.core', 'tiler.concat', 'results']
count=0
bus = MessageBus()
for index in range(0,10):
question = dataset.questions[index]
# for question in dataset.questions:
message = Message(body=question, route=pipeline)
bus.publish('expand.none', message)
count = count + 1
print 'Sent {} questions for ranking.'.format(count)
print 'Done.'
| 28.40625 | 56 | 0.641364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.316832 |
aaa4c207cfec938248e0931caed06acd8c3825c0 | 1,100 | py | Python | 106 Construct Binary Tree from Preorder and Inorder Traversal.py | gavinfish/LeetCode | 7b5461a7c3cd1b19ddb320a5fc761240551cf75a | [
"MIT"
] | 1 | 2019-09-02T14:30:23.000Z | 2019-09-02T14:30:23.000Z | 106 Construct Binary Tree from Preorder and Inorder Traversal.py | qicst23/LeetCode-pythonSolu | 35064d52f9344494330261cd59da2e8d33f8bfdb | [
"MIT"
] | null | null | null | 106 Construct Binary Tree from Preorder and Inorder Traversal.py | qicst23/LeetCode-pythonSolu | 35064d52f9344494330261cd59da2e8d33f8bfdb | [
"MIT"
] | 3 | 2018-04-09T20:48:43.000Z | 2019-09-02T14:30:37.000Z | """
Given preorder and inorder traversal of a tree, construct the binary tree.
Note:
You may assume that duplicates do not exist in the tree.
"""
__author__ = 'Danyang'
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, preorder, inorder):
"""
Recursive algorithm. Pre-order, in-order, post-order traversal relationship
pre-order: [root, left_subtree, right_subtree]
in-order: [left_subtree, root, right_subtree]
recursive algorithm
:param preorder: a list of integers
:param inorder: a list of integers
:return: TreeNode, root
"""
if not preorder:
return None
root = TreeNode(preorder[0])
root_index = inorder.index(root.val)
root.left = self.buildTree(preorder[1:root_index+1], inorder[0:root_index])
root.right = self.buildTree(preorder[root_index+1:], inorder[root_index+1:])
return root
| 26.829268 | 85 | 0.600909 | 911 | 0.828182 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.5 |
aaa51a521d3a0300cbf24d8410ea883d91cf4af5 | 259 | py | Python | Python/python_study_4/page13/script.py | zharmedia386/Progate-Course-Repo | 0dec6bd2d5594b1624251a74f6ebcf8266c449ba | [
"MIT"
] | null | null | null | Python/python_study_4/page13/script.py | zharmedia386/Progate-Course-Repo | 0dec6bd2d5594b1624251a74f6ebcf8266c449ba | [
"MIT"
] | null | null | null | Python/python_study_4/page13/script.py | zharmedia386/Progate-Course-Repo | 0dec6bd2d5594b1624251a74f6ebcf8266c449ba | [
"MIT"
] | null | null | null | from menu_item import MenuItem
# Move the code above to menu_item.py
# Import the MenuItem class from menu_item.py
menu_item1 = MenuItem('Sandwich', 5)
print(menu_item1.info())
result = menu_item1.get_total_price(4)
print('Your total is $' + str(result))
| 21.583333 | 45 | 0.756757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.420849 |
aaa58dde0227337d66c75dfc5e8aed3813f137aa | 398 | py | Python | ipysimulate/tools.py | JoelForamitti/ipysimulate | 06753f9ef48c82b96b1b2c736accee3dcbbf1a22 | [
"BSD-3-Clause"
] | 5 | 2021-06-03T06:38:38.000Z | 2021-12-27T17:33:06.000Z | ipysimulate/tools.py | JoelForamitti/ipysimulate | 06753f9ef48c82b96b1b2c736accee3dcbbf1a22 | [
"BSD-3-Clause"
] | null | null | null | ipysimulate/tools.py | JoelForamitti/ipysimulate | 06753f9ef48c82b96b1b2c736accee3dcbbf1a22 | [
"BSD-3-Clause"
] | null | null | null | def make_list(element, keep_none=False):
""" Turns element into a list of itself
if it is not of type list or tuple. """
if element is None and not keep_none:
element = [] # Convert none to empty list
if not isinstance(element, (list, tuple, set)):
element = [element]
elif isinstance(element, (tuple, set)):
element = list(element)
return element | 33.166667 | 51 | 0.640704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.278894 |
aaa591a53fa83f8defb9c29838d54ab4f867449f | 392 | py | Python | 2048/View.py | nsiegner/AI-ML-learning | 885c7a1e519a601aafe646419b8d4b6f683f590b | [
"Apache-2.0"
] | 1 | 2021-07-04T08:18:21.000Z | 2021-07-04T08:18:21.000Z | 2048/View.py | nsiegner/AI-ML-learning | 885c7a1e519a601aafe646419b8d4b6f683f590b | [
"Apache-2.0"
] | 2 | 2021-07-04T13:39:51.000Z | 2021-07-04T13:41:25.000Z | 2048/View.py | nsiegner/AI-ML-learning | 885c7a1e519a601aafe646419b8d4b6f683f590b | [
"Apache-2.0"
] | null | null | null | import tkinter as tk
class View():
def __init__(self):
window = tk.Tk()
self.frame = tk.Frame(master=window, width=200, height=200)
self.frame.pack()
def show_grid(self, grid):
for i in range(4):
for j in range(4):
label = tk.Label(master=self.frame, text=grid[i][j])
label.place(x=(50*j)+20, y=(50*i)+20)
| 28 | 68 | 0.540816 | 369 | 0.941327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aaa6b62d2482defbe3a9a248af3f048c7de3d0b9 | 310 | py | Python | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | other tests/test_6_create_record_label.py | pavelwearevolt/Cross_Edit_TestsAutomatization | 953691244d86c5832fe2a2705841711939a353a5 | [
"Apache-2.0"
] | null | null | null | __author__ = 'pavelkosicin'
from model.label import Label
def test_create_record_label(app):
app.label.create_recording_artist(Label(name="rl_#1", asap="WB86-8RH31.50UTS-J",
note="Mens autem qui est in festinabat non facere bonum, voluntas in malo reperit."))
| 38.75 | 129 | 0.664516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.383871 |
aaa912e4125bf98280e59e7693dee4c4d3bd7d22 | 583 | py | Python | tensorsketch/evaluate.py | udellgroup/tensorsketch | 7d40a46232809cb1dcd306d1ca79e6a3d017e43e | [
"MIT"
] | 6 | 2019-11-05T09:04:40.000Z | 2021-11-01T13:05:43.000Z | tensorsketch/evaluate.py | udellgroup/tensorsketch | 7d40a46232809cb1dcd306d1ca79e6a3d017e43e | [
"MIT"
] | null | null | null | tensorsketch/evaluate.py | udellgroup/tensorsketch | 7d40a46232809cb1dcd306d1ca79e6a3d017e43e | [
"MIT"
] | null | null | null | import numpy as np
def eval_rerr(X, X_hat, X0=None):
"""
:param X: tensor, X0 or X0+noise
:param X_hat: output for apporoximation
:param X0: true signal, tensor
:return: the relative error = ||X- X_hat||_F/ ||X_0||_F
"""
if X0 is not None:
error = X0 - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
np.linalg.norm(X0.reshape(np.size(X0), 1), 'fro')
error = X - X_hat
return np.linalg.norm(error.reshape(np.size(error), 1), 'fro') / \
np.linalg.norm(X0.reshape(np.size(X), 1), 'fro') | 38.866667 | 74 | 0.590051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.35506 |
aaaa0a2f7d99affa2c085f7c1bef27b274ad7031 | 2,208 | py | Python | mne/datasets/visual_92_categories/visual_92_categories.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | mne/datasets/visual_92_categories/visual_92_categories.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 28 | 2020-05-07T00:58:34.000Z | 2020-08-29T23:02:17.000Z | mne/datasets/visual_92_categories/visual_92_categories.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 4 | 2021-09-08T14:35:26.000Z | 2022-02-25T22:34:52.000Z | # License: BSD Style.
from ...utils import verbose
from ..utils import _data_path, _data_path_doc, _get_version, _version_doc
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None):
"""
Get path to local copy of visual_92_categories dataset.
.. note:: The dataset contains four fif-files, the trigger files and the T1
mri image. This dataset is rather big in size (more than 5 GB).
Parameters
----------
path : None | str
Location of where to look for the visual_92_categories data storing
location. If None, the environment variable or config parameter
MNE_DATASETS_VISUAL_92_CATEGORIES_PATH is used. If it doesn't exist,
the "mne-python/examples" directory is used. If the
visual_92_categories dataset is not found under the given path (e.g.,
as "mne-python/examples/MNE-visual_92_categories-data"), the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_VISUAL_92_CATEGORIES_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
path : list of str
Local path to the given data file. This path is contained inside a list
of length one, for compatibility.
Notes
-----
The visual_92_categories dataset is documented in the following publication
Radoslaw M. Cichy, Dimitrios Pantazis, Aude Oliva (2014) Resolving
human object recognition in space and time. doi: 10.1038/NN.3635
"""
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='visual_92_categories',
download=download)
data_path.__doc__ = _data_path_doc.format(
name='visual_92_categories', conf='MNE_DATASETS_VISUAL_92_CATEGORIES_PATH')
def get_version():
"""Get dataset version."""
return _get_version('visual_92_categories')
get_version.__doc__ = _version_doc.format(name='visual_92_categories')
| 36.8 | 79 | 0.695199 | 0 | 0 | 0 | 0 | 1,780 | 0.806159 | 0 | 0 | 1,658 | 0.750906 |
aaaa178c8c04cddbd491ee6f45b2a2ede27a0ba8 | 1,131 | py | Python | src/commons/helpers.py | thierrydecker/nfpy | bda460feb07719e66dc25c763172fc380559e022 | [
"Apache-2.0"
] | null | null | null | src/commons/helpers.py | thierrydecker/nfpy | bda460feb07719e66dc25c763172fc380559e022 | [
"Apache-2.0"
] | null | null | null | src/commons/helpers.py | thierrydecker/nfpy | bda460feb07719e66dc25c763172fc380559e022 | [
"Apache-2.0"
] | null | null | null | """helpers module
"""
import json
import pcap
import yaml
def get_adapters_names():
"""Finds all adapters on the system
:return: A list of the network adapters available on the system
"""
return pcap.findalldevs()
def config_loader_yaml(config_name):
"""Loads a .yml configuration file
:param config_name: The path name of the yml configuration file
:return: A dictionary of the configuration
"""
with open(config_name, 'r') as f:
config_yml = f.read()
return yaml.load(config_yml)
def log_message(queue, level, module_name, class_name, function_name, message):
    """Send a log record to a log worker process.

    The record is enqueued as a 5-tuple:
    (level, module_name, class_name, function_name, message).

    :param queue: A queue (e.g. multiprocessing.Queue) to send the record to
    :param level: A string identifying the level of the message
        (either DEBUG, INFO, WARNING, ERROR or CRITICAL)
    :param module_name: A string identifying the source module of the message
    :param class_name: A string identifying the source class of the message
    :param function_name: A string identifying the source function of the message
    :param message: A string representing the message
    :return: None
    """
    queue.put((level, module_name, class_name, function_name, message))
| 28.275 | 109 | 0.713528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.643678 |
aaaa51e39689a6d3e1aac6a8577a83f2d366cb9b | 428 | py | Python | evaluarnumprimo.py | neriphy/numeros_primos | c1d57e671ae68becb8c4805a5564eebbd5bd9209 | [
"MIT"
] | null | null | null | evaluarnumprimo.py | neriphy/numeros_primos | c1d57e671ae68becb8c4805a5564eebbd5bd9209 | [
"MIT"
] | null | null | null | evaluarnumprimo.py | neriphy/numeros_primos | c1d57e671ae68becb8c4805a5564eebbd5bd9209 | [
"MIT"
] | null | null | null | #Evaludador de numero primo
#Created by @neriphy
def es_primo(numero):
    """Return True when `numero` is a prime number (integers < 2 are not prime)."""
    if numero < 2:
        # Fix: the original loop never ran for 0 and 1 and reported them prime
        return False
    divisor = 2
    # Only divisors up to sqrt(numero) need to be tested
    while divisor * divisor <= numero:
        if numero % divisor == 0:
            return False
        divisor += 1
    return True


if __name__ == "__main__":
    # Fix: input() returns a string in Python 3, so the original
    # "numero - 1" raised TypeError; convert explicitly.
    numero = int(input("Ingrese el numero a evaluar: "))
    if es_primo(numero):
        print(numero, "es un numero primo")
    else:
        print(numero, "no es un numero primo")
aaabc289d3f067812b7a261387d3381bf400a556 | 2,364 | py | Python | src/ssp/ml/transformer/text_preprocessor.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 10 | 2020-03-12T11:51:46.000Z | 2022-03-24T04:56:05.000Z | src/ssp/ml/transformer/text_preprocessor.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 12 | 2020-04-23T07:28:14.000Z | 2022-03-12T00:20:24.000Z | src/ssp/ml/transformer/text_preprocessor.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 1 | 2020-04-20T14:48:38.000Z | 2020-04-20T14:48:38.000Z | #!/usr/bin/env python
__author__ = "Mageswaran Dhandapani"
__copyright__ = "Copyright 2020, The Spark Structured Playground Project"
__credits__ = []
__license__ = "Apache License"
__version__ = "2.0"
__maintainer__ = "Mageswaran Dhandapani"
__email__ = "mageswaran1989@gmail.com"
__status__ = "Education Purpose"
import re
import pandas as pd
import swifter
from pyspark.sql.types import StringType
from sklearn.base import BaseEstimator, TransformerMixin
import spacy
from tqdm import tqdm
from pyspark.sql.functions import udf
from ssp.utils.eda import get_stop_words
STOPWORDS = get_stop_words()
nlp = spacy.load('en_core_web_sm')
def remove_stop_words(text):
    """Lemmatize `text`, dropping stopwords, @-mentions and tokens without letters."""
    def _keep(token):
        # Drop stopwords and mentions (plain "@" or preceded by the U+2066
        # isolate mark), and keep only tokens containing at least one letter
        # (added after checking vectorizer terms/vocabs).
        if token.text in STOPWORDS:
            return False
        if token.text.startswith(("\u2066@", "@")):
            return False
        return re.search('[a-zA-Z]', token.text) is not None

    lemmas = [token.lemma_.strip() for token in nlp(text) if _keep(token)]
    return " ".join(lemmas)
def preprocess(text):
    """Normalize a text snippet: strip URLs, stopwords, punctuation; lowercase the rest."""
    # Remove http/https links (added after visualizing in wordcloud plot)
    cleaned = re.sub("http[s]?:\/\/\S+", "", text.strip())
    # Stopword removal / lemmatization: the general strategy for ML algos
    cleaned = remove_stop_words(text=cleaned)
    # Drop punctuation, then lowercase and flatten newlines to spaces
    cleaned = re.sub('[^a-zA-Z0-9\s]', '', cleaned)
    cleaned = cleaned.lower().replace("\n", " ")
    return cleaned.strip()
preprocess_udf = udf(preprocess, StringType())
class TextPreProcessor(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer that applies `preprocess` to text data.

    Accepts a pandas DataFrame (result written to `output_col`),
    a list of strings, or a single string.
    """

    def __init__(self, input_col=None, output_col=None):
        self._input_col = input_col
        self._output_col = output_col

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn here.
        return self

    def transform(self, X, y=None):
        if isinstance(X, str):
            return preprocess(X)
        if isinstance(X, list):
            return [preprocess(item) for item in tqdm(X)]
        if isinstance(X, pd.DataFrame) and self._output_col:
            X[self._output_col] = X[self._input_col].swifter.apply(preprocess)
        # NOTE(review): a DataFrame without an output_col falls through unchanged.
        # Lematization ? for ML models
        # Tweets with more than 5 mentions/hashtag then consider it to be spam/useless, check with length
        return X
aaad7708222b1bb4cca970a38baa955e6d67bbd1 | 737 | py | Python | multi_lan_ner.py | jpotwor/multi_lan_ner | 93494fc8e440e85d7111d16e388fdce78cb04bd7 | [
"MIT"
] | null | null | null | multi_lan_ner.py | jpotwor/multi_lan_ner | 93494fc8e440e85d7111d16e388fdce78cb04bd7 | [
"MIT"
] | null | null | null | multi_lan_ner.py | jpotwor/multi_lan_ner | 93494fc8e440e85d7111d16e388fdce78cb04bd7 | [
"MIT"
] | null | null | null | import spacy
def find_entities(input_phrase, language):
    """Run spaCy named-entity recognition on `input_phrase`.

    Supported language codes: en, pl, fr, de, it.  Any other code raises
    FileNotFoundError (the model must be downloaded separately).
    """
    models = {
        'en': 'en_core_web_sm',
        'pl': 'pl_core_news_sm',
        'fr': 'fr_core_news_sm',
        'de': 'de_core_news_sm',
        'it': 'it_core_news_sm',
    }
    if language not in models:
        raise FileNotFoundError('model %s not found, please download' % language)
    nlp = spacy.load(models[language])
    analyzed = nlp(input_phrase)
    return [
        {'text': ent.text, 'start_pos': ent.start_char, 'end_pos': ent.end_char, 'type': ent.label_}
        for ent in analyzed.ents
    ]
if __name__ == "__main__":
print(find_entities("As I had only one hour to write this on my old Dell computer, I am aware there is space for improvement.", 'en')) | 32.043478 | 138 | 0.651289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.397558 |
aaad78389190fcfac231fdc6ed3013707ef9bc63 | 488 | py | Python | Daily-Coding-Problem/Problem4/Problem4.py | grisreyesrios/Solutions--Daily-Coding-Problems | beb977e6666800b2158e43a649ed197ad2f79d0a | [
"MIT"
] | 1 | 2019-02-14T00:35:26.000Z | 2019-02-14T00:35:26.000Z | Daily-Coding-Problem/Problem4/Problem4.py | grisreyesrios/Solutions--Daily-Coding-Problems | beb977e6666800b2158e43a649ed197ad2f79d0a | [
"MIT"
] | null | null | null | Daily-Coding-Problem/Problem4/Problem4.py | grisreyesrios/Solutions--Daily-Coding-Problems | beb977e6666800b2158e43a649ed197ad2f79d0a | [
"MIT"
] | 1 | 2021-10-18T00:51:51.000Z | 2021-10-18T00:51:51.000Z | # Python programming that returns the weight of the maximum weight path in a triangle
def triangle_max_weight(arrs, level=0, index=0):
if level == len(arrs) - 1:
return arrs[level][index]
else:
return arrs[level][index] + max(
triangle_max_weight(arrs, level + 1, index), triangle_max_weight(arrs, level + 1, index + 1)
)
if __name__ == "__main__": # Driver function
arrs1 =[[1], [2, 3], [1, 5, 1]]
print(triangle_max_weight(arrs1))
| 34.857143 | 104 | 0.637295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.229508 |
aaadabaa6eb1195381301ba6975765da7236103f | 1,114 | py | Python | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | src/users/models/componentsschemasmicrosoft_graph_workbooktablesortallof1.py | peombwa/Sample-Graph-Python-Client | 3396f531fbe6bb40a740767c4e31aee95a3b932e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphWorkbooktablesortallof1(Model):
    """workbookTableSort.

    :param fields:
    :type fields: list[~users.models.MicrosoftgraphworkbookSortField]
    :param match_case:
    :type match_case: bool
    :param method:
    :type method: str
    """

    # Maps Python attribute names to wire-format keys and msrest type strings.
    _attribute_map = {
        'fields': {'key': 'fields', 'type': '[MicrosoftgraphworkbookSortField]'},
        'match_case': {'key': 'matchCase', 'type': 'bool'},
        'method': {'key': 'method', 'type': 'str'},
    }

    def __init__(self, fields=None, match_case=None, method=None):
        # Python-3 style super() call; behavior identical to the explicit form.
        super().__init__()
        self.fields = fields
        self.match_case = match_case
        self.method = method
| 33.757576 | 86 | 0.587074 | 757 | 0.679533 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.585278 |
aab014923354c5dbc253ff0489aad994d6e58895 | 4,687 | py | Python | python/FPgrowth/updateConfidence.py | gingi99/research_dr | 584f66738f345706e3cba1ae9cc1f417d6a0e72e | [
"MIT"
] | 1 | 2016-09-08T12:16:01.000Z | 2016-09-08T12:16:01.000Z | python/FPgrowth/updateConfidence.py | gingi99/research_dr | 584f66738f345706e3cba1ae9cc1f417d6a0e72e | [
"MIT"
] | null | null | null | python/FPgrowth/updateConfidence.py | gingi99/research_dr | 584f66738f345706e3cba1ae9cc1f417d6a0e72e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Usage : python ~~.py
import sys
import os
import pickle
import collections
import pandas as pd
import numpy as np
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule:
    """A decision rule: antecedent values, a consequent, and quality measures."""

    def __init__(self):
        self.value = []        # antecedent attribute values
        self.consequent = []   # decision class of the rule
        self.strength = 0.0
        self.support = []      # indices of training records matching rule and consequent
        self.support_v = 0.0   # fraction of records in self.support
        self.conf = 0.0        # confidence of the rule

    def setValue(self, values):
        self.value = values

    def setConsequent(self, consequents):
        self.consequent = consequents

    def setStrength(self, strength):
        self.strength = strength

    def setSupport(self, supports):
        self.support = supports

    def setSupportV(self, support_v):
        self.support_v = support_v

    def setConf(self, confidence):
        self.conf = confidence

    def getValue(self):
        return self.value

    def getConsequent(self):
        return self.consequent

    def getStrength(self):
        return self.strength

    def getSupport(self):
        return self.support

    def getSupportV(self):
        return self.support_v

    def getSupportD(self):
        # NOTE: self.support is a list, so this is list repetition.
        return self.support * len(self.value)

    def getConf(self):
        return self.conf

    def output(self):
        # Dump every field, one "name:value" line each.
        for label, val in (("value", self.value),
                           ("consequent", self.consequent),
                           ("strength", self.strength),
                           ("support", self.support),
                           ("support_v", self.support_v),
                           ("conf", self.conf)):
            print(label + ":" + str(val))
# ======================================================
# rules load and save
# ======================================================
def loadPickleRules(fullpath_filename):
    """Deserialize and return the rule set stored in the given pickle file."""
    with open(fullpath_filename, mode='rb') as pickled:
        return pickle.load(pickled)
def savePickleRules(rules, fullpath_filename):
    """Serialize `rules` to the given path using the highest pickle protocol."""
    with open(fullpath_filename, mode='wb') as sink:
        pickle.dump(rules, sink, protocol=pickle.HIGHEST_PROTOCOL)
# ========================================
# rules をロードしてconfidence と ruleを満たす対象を出す
# ========================================
def updateConfidenceSupport(FILENAME, iter1, iter2, min_sup):
    """Recompute confidence and support for a saved rule set, then save in place.

    Loads the pickled rules for (min_sup, iter1, iter2), recomputes each rule's
    confidence, matching-record indices and support fraction against the
    corresponding training split, and writes the updated rules back to the
    same pickle file.

    :param FILENAME: dataset directory name under DIR_UCI
    :param iter1: first split index (used in the file names)
    :param iter2: second split index (used in the file names)
    :param min_sup: minimum support value (used in the rules file name)
    """
    # Load the pickled rules for this (min_sup, iter1, iter2) combination
    fullpath_rulename = DIR_UCI+'/'+FILENAME+'/FPGrowth/rules/rules-'+str(min_sup)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = loadPickleRules(fullpath_rulename)
    # Load the training split: one space-separated record per line
    fullpath_train = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
    data = []
    with open(fullpath_train) as inputfile:
        for line in inputfile:
            data.append(line.strip().split(' '))
    # Recompute confidence, support indices and support fraction per rule.
    # ("bunshi"/"bunbo" are Japanese for numerator/denominator.)
    for rule in rules:
        bunshi = [rule.getConsequent() in record and all(x in record for x in rule.getValue()) for record in data]
        bunbo = [all(x in record for x in rule.getValue()) for record in data]
        # NOTE(review): raises ZeroDivisionError if no record matches the
        # rule's antecedent -- confirm that cannot happen for saved rules
        confidence = sum(bunshi) / sum(bunbo)
        rule.setConf(confidence)
        support = [i for i, x in enumerate(bunshi) if x]
        rule.setSupport(support)
        support_v = len(support) / len(data)
        rule.setSupportV(support_v)
    # Overwrite the original pickle with the updated rules
    savePickleRules(rules, fullpath_rulename)
# ========================================
# multi に実行する
# ========================================
def multi_main(proc, FILENAME, FUN, **kargs):
    """Run FUN over all (iter1, iter2, min_sup) combinations on a process pool."""
    pool = Pool(proc)
    # Argument tuples for updateConfidenceSupport (used by FPGrowth_LERS)
    if FUN == updateConfidenceSupport:
        multiargs = [(FILENAME, iter1, iter2, min_sup)
                     for iter1, iter2, min_sup
                     in product(range(1, 2), range(1, 11), kargs['min_sup_range'])]
        print(multiargs)
        pool.starmap(FUN, multiargs)
    else:
        print("I dont' know the function.")
# ======================================================
# main
# ======================================================
if __name__ == "__main__":
    # Prepare the dataset to process
    FILENAME = "adult_cleansing2"
    #FILENAME = "default_cleansing"
    #FILENAME = "german_credit_categorical"
    # Set the number of classes
    #classes = ['D1', 'D2']
    # support range
    min_sup_range = [0.05, 0.10, 0.15, 0.20, 0.25]
    # Run in parallel and evaluate on all the data
    proc = 32
    freeze_support()
    FUN = updateConfidenceSupport
    multi_main(proc, FILENAME, FUN, min_sup_range = min_sup_range)
| 31.668919 | 119 | 0.584809 | 1,314 | 0.274723 | 0 | 0 | 0 | 0 | 0 | 0 | 1,217 | 0.254443 |
aab07d8e601e02e0aaa27e2764b313638874d9dd | 591 | py | Python | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | 39 | 2017-07-31T22:54:01.000Z | 2017-08-31T00:19:03.000Z | 24/03/0.py | pylangstudy/201708 | 126b1af96a1d1f57522d5a1d435b58597bea2e57 | [
"CC0-1.0"
] | null | null | null | #!python3.6
import difflib
from pprint import pprint
import sys
# Two sample texts (Zen of Python excerpts); keepends=True keeps the trailing
# newlines so difflib compares complete lines.
text1 = ''' 1. Beautiful is better than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
'''.splitlines(keepends=True)
text2 = ''' 1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
'''.splitlines(keepends=True)
# Differ.compare yields every line prefixed with '  ', '- ', '+ ' or '? '
d = difflib.Differ();
result = list(d.compare(text1, text2))
print('-----')
pprint(result)
print('-----')
# writelines() emits the diff to stdout and returns None, so this final
# print() deliberately shows "None" after the diff output.
print(sys.stdout.writelines(result))
| 25.695652 | 46 | 0.707276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.57022 |
aab0d967698a31c9d52d9f317c79aa9d34e38826 | 5,703 | py | Python | domestic/views/ukef.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 10 | 2020-04-30T12:04:35.000Z | 2021-07-21T12:48:55.000Z | domestic/views/ukef.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 1,461 | 2020-01-23T18:20:26.000Z | 2022-03-31T08:05:56.000Z | domestic/views/ukef.py | uktrade/great-cms | f13fa335ddcb925bc33a5fa096fe73ef7bdd351a | [
"MIT"
] | 3 | 2020-04-07T20:11:36.000Z | 2020-10-16T16:22:59.000Z | from directory_forms_api_client.actions import PardotAction
from directory_forms_api_client.helpers import Sender
from django.conf import settings
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from formtools.wizard.views import NamedUrlSessionWizardView
from contact.views import BaseNotifyFormView
from core import mixins
from core.datastructures import NotifySettings
from domestic.forms import (
CompanyDetailsForm,
HelpForm,
PersonalDetailsForm,
UKEFContactForm,
)
class UKEFHomeView(TemplateView):
    """UKEF landing page; supplies static bullet lists to the template."""

    template_name = 'domestic/ukef/home_page.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        context.update({
            'trade_finance_bullets': [
                'working capital support',
                'bond support',
                'credit insurance',
            ],
            'project_finance_bullets': [
                'UKEF buyer credit guarantees',
                'direct lending',
                'credit and bond insurance',
            ],
        })
        return context
class ContactView(BaseNotifyFormView):
    """UKEF contact form; notifies agent and user via directory-forms-api."""

    template_name = 'domestic/ukef/contact_form.html'
    form_class = UKEFContactForm
    success_url = reverse_lazy('domestic:uk-export-contact-success')
    notify_settings = NotifySettings(
        agent_template=settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID,
        agent_email=settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS,
        user_template=settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID,
    )

    def form_valid(self, form):
        # Remember the address so the success page can display it.
        self.request.session['user_email'] = form.cleaned_data['email']
        return super().form_valid(form)
class SuccessPageView(TemplateView):
    """Confirmation page; bounces back to the form if no email is in session."""

    template_name = 'domestic/ukef/contact_form_success.html'

    def get(self, *args, **kwargs):
        user_email = self.request.session.get('user_email')
        if user_email:
            return super().get(*args, **kwargs)
        # No submission recorded in this session: send the user to the form.
        return HttpResponseRedirect(reverse_lazy('domestic:uk-export-contact'))

    def get_context_data(self, **kwargs):
        kwargs['user_email'] = self.request.session.get('user_email')
        return super().get_context_data(**kwargs)
@method_decorator(never_cache, name='dispatch')
class GetFinanceLeadGenerationFormView(
    mixins.PrepopulateFormMixin,
    mixins.PreventCaptchaRevalidationMixin,
    NamedUrlSessionWizardView,
):
    """Multi-step UKEF lead-generation wizard (personal -> company -> help).

    On completion, submits the merged form data to Pardot and redirects to
    the success page.
    """

    success_url = reverse_lazy(
        'domestic:uk-export-finance-lead-generation-form-success',
    )
    # Wizard step names; also used as URL slugs by NamedUrlSessionWizardView.
    PERSONAL_DETAILS = 'your-details'
    COMPANY_DETAILS = 'company-details'
    HELP = 'help'

    form_list = (
        (PERSONAL_DETAILS, PersonalDetailsForm),
        (COMPANY_DETAILS, CompanyDetailsForm),
        (HELP, HelpForm),
    )
    templates = {
        PERSONAL_DETAILS: 'domestic/finance/lead_generation_form/step-personal.html',
        COMPANY_DETAILS: 'domestic/finance/lead_generation_form/step-company.html',
        HELP: 'domestic/finance/lead_generation_form/step-help.html',
    }

    def get_form_kwargs(self, *args, **kwargs):
        # skipping `PrepopulateFormMixin.get_form_kwargs`
        # (deliberately starts MRO lookup past that mixin)
        return super(mixins.PrepopulateFormMixin, self).get_form_kwargs(*args, **kwargs)

    def get_form_initial(self, step):
        """Prefill step forms from the authenticated user's profile/company."""
        initial = super().get_form_initial(step)
        if self.request.user.is_authenticated:
            if step == self.PERSONAL_DETAILS and self.request.user.company:
                initial.update(
                    {
                        'email': self.request.user.email,
                        'phone': getattr(self.request.user.company, 'mobile_number', ''),
                        'firstname': self.guess_given_name,
                        'lastname': self.guess_family_name,
                    }
                )
            elif step == self.COMPANY_DETAILS and self.request.user.company:
                company = self.request.user.company
                _sectors = getattr(company, 'sectors', [])
                # first listed sector, if any, is used as the industry
                _industry = _sectors[0] if _sectors else None
                initial.update(
                    {
                        'not_companies_house': False,
                        'company_number': getattr(company, 'number', ''),
                        'trading_name': getattr(company, 'name', ''),
                        'address_line_one': getattr(company, 'address_line_1', ''),
                        'address_line_two': getattr(company, 'address_line_2', ''),
                        'address_town_city': getattr(company, 'locality', ''),
                        'address_post_code': getattr(company, 'postal_code', ''),
                        'industry': _industry,
                    }
                )
        return initial

    def get_template_names(self):
        # One template per wizard step.
        return [self.templates[self.steps.current]]

    def done(self, form_list, **kwargs):
        """Submit the merged wizard data to Pardot and redirect to success_url."""
        form_data = self.serialize_form_list(form_list)
        sender = Sender(email_address=form_data['email'], country_code=None)
        action = PardotAction(
            pardot_url=settings.UKEF_FORM_SUBMIT_TRACKER_URL,
            form_url=reverse('domestic:uk-export-finance-lead-generation-form', kwargs={'step': self.PERSONAL_DETAILS}),
            sender=sender,
        )
        response = action.save(form_data)
        response.raise_for_status()
        return redirect(self.success_url)

    @staticmethod
    def serialize_form_list(form_list):
        # Merge every step form's cleaned_data into a single dict.
        data = {}
        for form in form_list:
            data.update(form.cleaned_data)
        return data
| 37.519737 | 120 | 0.650009 | 4,921 | 0.862879 | 0 | 0 | 3,327 | 0.583377 | 0 | 0 | 1,062 | 0.186218 |
aab1082f2e51d53d812f98ef5dcddda1de448fca | 25,138 | py | Python | sbol_utilities/excel_to_sbol.py | ArchitJain1201/SBOL-utilities | 398c885eb9139e0833141ef45e87181253193724 | [
"MIT"
] | 3 | 2021-12-24T09:23:39.000Z | 2022-02-08T21:01:48.000Z | sbol_utilities/excel_to_sbol.py | ArchitJain1201/SBOL-utilities | 398c885eb9139e0833141ef45e87181253193724 | [
"MIT"
] | 57 | 2021-04-26T14:36:54.000Z | 2022-03-22T14:02:37.000Z | sbol_utilities/excel_to_sbol.py | ArchitJain1201/SBOL-utilities | 398c885eb9139e0833141ef45e87181253193724 | [
"MIT"
] | 13 | 2021-11-07T15:12:52.000Z | 2022-03-21T13:09:02.000Z | import unicodedata
import warnings
import logging
import re
import argparse
import sbol3
import openpyxl
import tyto
from .helper_functions import toplevel_named, strip_sbol2_version, is_plasmid, url_to_identity, strip_filetype_suffix
from .workarounds import type_to_standard_extension
BASIC_PARTS_COLLECTION = 'BasicParts'
COMPOSITE_PARTS_COLLECTION = 'CompositeParts'
LINEAR_PRODUCTS_COLLECTION = 'LinearDNAProducts'
FINAL_PRODUCTS_COLLECTION = 'FinalProducts'
def expand_configuration(values: dict) -> dict:
    """
    Build a complete sheet-configuration dictionary.

    :param values: Dictionary of overrides for defaults (may be None)
    :return: configuration with all defaults filled in
    :raises ValueError: if an override key is not a known setting
    """
    defaults = {
        'basic_sheet': 'Basic Parts',
        'basic_parts_name': 'B1',
        'basic_parts_description': 'A11',
        'basic_first_row': 20,
        'basic_name_col': 0,
        'basic_role_col': 1,
        'basic_notes_col': 2,
        'basic_description_col': 4,
        'basic_source_prefix_col': 5,
        'basic_source_id_col': 6,
        'basic_final_col': 9,
        'basic_circular_col': 10,
        'basic_length_col': 11,
        'basic_sequence_col': 12,
        'composite_sheet': 'Composite Parts',
        'composite_parts_name': 'B1',
        'composite_parts_description': 'A11',
        'composite_first_row': 24,
        'composite_name_col': 0,
        'composite_notes_col': 1,
        'composite_description_col': 2,
        'composite_final_col': 3,
        'composite_strain_col': 4,
        'composite_context_col': 5,
        'composite_constraints_col': 6,
        'composite_first_part_col': 7,
        'sources_sheet': 'data_source',
        'sources_first_row': 2,
        'source_name_col': 1,
        'source_uri_col': 2,
        'source_literal_col': 6
    }
    overrides = values or {}
    # Reject any override that is not a known setting before merging
    unknown = [k for k in overrides if k not in defaults]
    if unknown:
        raise ValueError(f'Sheet configuration has no setting "{unknown[0]}"')
    defaults.update(overrides)
    return defaults
def read_metadata(wb: openpyxl.Workbook, doc: sbol3.Document, config: dict):
    """
    Extract workbook metadata and build the SBOL collections parts will go in
    :param wb: Excel workbook to extract material from
    :param doc: SBOL document to build collections in
    :param config: dictionary of sheet parsing configuration variables
    :return: Tuple of SBOL collections for basic, composite, linear, and final
        parts, plus the source-prefix table
    """
    # Pull names/descriptions from the configured metadata cells
    basic_sheet = wb[config['basic_sheet']]
    bp_name = basic_sheet[config['basic_parts_name']].value
    bp_description = basic_sheet[config['basic_parts_description']].value
    composite_sheet = wb[config['composite_sheet']]
    if config['composite_parts_name']:
        cp_name = composite_sheet[config['composite_parts_name']].value
        cp_description = composite_sheet[config['composite_parts_description']].value
    else:
        # No composite metadata cells configured: reuse the basic-parts metadata
        cp_name, cp_description = bp_name, bp_description
    # Create one collection per category of part and add each to the document
    basic_parts = sbol3.Collection(BASIC_PARTS_COLLECTION, name=bp_name, description=bp_description)
    composite_parts = sbol3.Collection(COMPOSITE_PARTS_COLLECTION, name=cp_name, description=cp_description)
    linear_products = sbol3.Collection(LINEAR_PRODUCTS_COLLECTION, name='Linear DNA Products',
                                       description='Linear DNA constructs to be fabricated')
    final_products = sbol3.Collection(FINAL_PRODUCTS_COLLECTION, name='Final Products',
                                      description='Final products desired for actual fabrication')
    for collection in (basic_parts, composite_parts, linear_products, final_products):
        doc.add(collection)
    # Collect the source table: maps source names to URIs for literal sources
    source_table = {row[config['source_name_col']].value: row[config['source_uri_col']].value
                    for row in wb[config['sources_sheet']].iter_rows(min_row=config['sources_first_row'])
                    if row[config['source_literal_col']].value}
    # return the set of created collections
    return basic_parts, composite_parts, linear_products, final_products, source_table
def row_to_basic_part(doc: sbol3.Document, row, basic_parts: sbol3.Collection, linear_products: sbol3.Collection,
                      final_products: sbol3.Collection, config: dict, source_table: dict):
    """
    Read a row for a basic part and turn it into SBOL Component
    :param doc: Document to add parts to
    :param row: Excel row to be processed
    :param basic_parts: collection of parts to add to
    :param linear_products: collection of linear parts to add to
    :param final_products: collection of final parts to add to
    :param config: dictionary of sheet parsing configuration variables
    :param source_table: dictionary mapping source names to namespaces
    :return: None
    :raises ValueError: if the sequence length does not match the length cell
    """
    # Parse material from sheet row
    name = row[config['basic_name_col']].value
    if name is None:
        return  # skip lines without names
    else:
        name = name.strip()  # make sure we're discarding whitespace
    raw_role = row[config['basic_role_col']].value
    try:  # look up with tyto; if fail, leave blank or add to description
        role = (tyto.SO.get_uri_by_term(raw_role) if raw_role else None)
    except LookupError:
        logging.warning(f'Role "{raw_role}" could not be found in Sequence Ontology')
        role = None
    design_notes = (row[config['basic_notes_col']].value if row[config['basic_notes_col']].value else "")
    description = (row[config['basic_description_col']].value if row[config['basic_description_col']].value else "")
    source_prefix = row[config['basic_source_prefix_col']].value
    source_id = row[config['basic_source_id_col']].value
    final_product = row[config['basic_final_col']].value  # boolean
    circular = row[config['basic_circular_col']].value  # boolean
    length = row[config['basic_length_col']].value
    raw_sequence = row[config['basic_sequence_col']].value
    # Normalize the sequence: NFKD-fold, uppercase, and drop all whitespace
    sequence = (None if raw_sequence is None else "".join(unicodedata.normalize("NFKD", raw_sequence).upper().split()))
    # Fix: the original check evaluated len(None) when a part had no sequence
    # but a non-zero (or blank) length cell, raising TypeError instead of the
    # intended ValueError.  Treat a missing sequence or blank length cell as 0.
    actual_length = 0 if sequence is None else len(sequence)
    if actual_length != (length or 0):
        raise ValueError(f'Part "{name}" has mismatched sequence length: check for bad characters and extra whitespace')
    # identity comes from source if set to a literal table, from display_id if not set
    identity = None
    display_id = None
    was_derived_from = None
    namespace = sbol3.get_namespace()
    if source_id and source_prefix:
        source_prefix = source_prefix.strip()
        if source_prefix in source_table:
            if source_table[source_prefix]:
                display_id = sbol3.string_to_display_id(source_id.strip())
                identity = f'{source_table[source_prefix]}/{display_id}'
                namespace = source_table[source_prefix]
            else:  # when there is no prefix, use the bare value (in SBOL3 format)
                raw_url = source_id.strip()
                identity = url_to_identity(strip_filetype_suffix(strip_sbol2_version(raw_url)))
                was_derived_from = raw_url
                namespace = identity.rsplit('/',1)[0]  # TODO: use a helper function
        else:
            logging.info(f'Part "{name}" ignoring non-literal source: {source_prefix}')
    elif source_id:
        logging.warning(f'Part "{name}" has source ID specified but not prefix: {source_id}')
    elif source_prefix:
        logging.warning(f'Part "{name}" has source prefix specified but not ID: {source_prefix}')
    if not identity:
        display_id = sbol3.string_to_display_id(name)
    # build a component from the material
    logging.debug(f'Creating basic part "{name}"')
    component = sbol3.Component(identity or display_id, sbol3.SBO_DNA, name=name, namespace=namespace,
                                description=f'{design_notes}\n{description}'.strip())
    if was_derived_from:
        component.derived_from.append(was_derived_from)
    doc.add(component)
    if role:
        component.roles.append(role)
    if circular:
        component.types.append(sbol3.SO_CIRCULAR)
    if sequence:
        sbol_seq = sbol3.Sequence(f'{component.identity}_sequence', namespace=namespace,
                                  encoding=sbol3.IUPAC_DNA_ENCODING, elements=sequence)
        doc.add(sbol_seq)
        component.sequences.append(sbol_seq.identity)
    # add the component to the appropriate collections
    basic_parts.members.append(component.identity)
    if final_product:
        linear_products.members.append(component.identity)
        final_products.members.append(component.identity)
##########################################
# Functions for parsing sub-components
# form of a sub-component:
# X: identifies a component or set thereof
# RC(X): X is reversed
reverse_complement_pattern = re.compile('RC\(.+\)')


def strip_RC(name):
    """Return `name` stripped of whitespace and of an enclosing RC(...) marker, if any."""
    sanitized = name.strip()
    wrapped = reverse_complement_pattern.match(sanitized)
    if wrapped and len(wrapped.group()) == len(sanitized):
        # Whole string is RC(X): drop the leading 'RC(' and trailing ')'
        return sanitized[3:-1]
    return sanitized
def is_RC(name):
    """Return True when `name` carries an RC(...) reverse-complement marker."""
    sanitized = name.strip()
    # strip_RC removes the marker, so a shorter result means one was present
    return len(strip_RC(sanitized)) < len(sanitized)
def part_names(specification):
    """Split a specification cell (minus any RC marker) into trimmed part names."""
    stripped = strip_RC(str(specification))
    return [token.strip() for token in stripped.split(',')]
# list all the parts in the row that aren't fully resolved
def unresolved_subparts(doc: sbol3.Document, row, config):
    """Return the part names in `row`'s specifications with no matching part in `doc`."""
    return [name for spec in part_specifications(row, config) for name in part_names(spec) if not partname_to_part(doc,name)]
# get the part specifications until they stop
def part_specifications(row, config):
    """Yield the non-empty part-specification cell values of `row`, left to right."""
    # NOTE(review): this skips empty cells rather than stopping at the first
    # one, despite the comment above -- confirm which behavior is intended
    return (cell.value for cell in row[config['composite_first_part_col']:] if cell.value)
def partname_to_part(doc: sbol3.Document, name_or_display_id: str):
    """Look up a part by its displayID or its name, searching first by displayID, then by name

    :param doc: SBOL document to search
    :param name_or_display_id: string to look up
    :return: object if found, None if not
    """
    found = doc.find(name_or_display_id)
    if found:
        return found
    return toplevel_named(doc, name_or_display_id)
###############################################################
# Functions for making composites, combinatorials, and libraries
def make_composite_component(display_id,part_lists,reverse_complements):
    """Build a fixed composite Component from fully-resolved parts.

    :param display_id: displayId for the new composite Component
    :param part_lists: list of single-element lists, each holding one part;
        a multi-element list here is an error
    :param reverse_complements: parallel list of booleans; True gives the
        corresponding part reverse-complement orientation
    :return: sbol3.Component for the composite, parts ordered by meets-constraints
    :raises ValueError: if any part list does not have exactly one element
    """
    # Make the composite as an engineered region
    composite_part = sbol3.Component(display_id, sbol3.SBO_DNA)
    composite_part.roles.append(sbol3.SO_ENGINEERED_REGION)
    # for each part, make a SubComponent and link them together in sequence
    last_sub = None
    for part_list,rc in zip(part_lists,reverse_complements):
        if not len(part_list)==1:
            raise ValueError(f'Part list should have precisely one element, but is {part_list}')
        sub = sbol3.SubComponent(part_list[0])
        sub.orientation = (sbol3.SBOL_REVERSE_COMPLEMENT if rc else sbol3.SBOL_INLINE)
        composite_part.features.append(sub)
        # adjacent parts are chained with "meets" constraints to fix their order
        if last_sub: composite_part.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS,last_sub,sub))
        last_sub = sub
    # return the completed part
    return composite_part
# Parses constraint text of the form "Part X <relation> Part Y"
constraint_pattern = re.compile(r'Part (\d+) (.+) Part (\d+)')
# Maps the human-readable relation text used in the sheet to the
# corresponding SBOL3 constraint restriction.
constraint_dict = {'same as': sbol3.SBOL_VERIFY_IDENTICAL,
                   'different from': sbol3.SBOL_DIFFERENT_FROM,
                   'same orientation as': sbol3.SBOL_SAME_ORIENTATION_AS,
                   # Fix: this previously mapped to SBOL_SAME_ORIENTATION_AS,
                   # silently inverting "different orientation" constraints
                   'different orientation from': sbol3.SBOL_DIFFERENT_ORIENTATION_AS}
def make_constraint(constraint, part_list):
    """Parse a constraint string like "Part 1 same as Part 2" into an SBOL Constraint.

    :param constraint: text of form "Part X <relation> Part Y", where relation
        is a key of constraint_dict and X, Y are 1-based part numbers
    :param part_list: features that the part numbers index into
    :return: sbol3.Constraint relating the two referenced features
    :raises ValueError: on unparseable text, unknown relation, self-constraint,
        or out-of-range part numbers
    """
    m = constraint_pattern.match(constraint)
    if not m:
        raise ValueError(f'Constraint "{constraint}" does not match pattern "Part X relation Part Y"')
    try:
        restriction = constraint_dict[m.group(2)]
    except KeyError:
        raise ValueError(f'Do not recognize constraint relation in "{constraint}"')
    x = int(m.group(1))
    y = int(m.group(3))
    # Fix: was "x is y", an object-identity comparison that is only
    # incidentally correct for small cached ints; compare by value instead
    if x == y:
        raise ValueError(f'A part cannot constrain itself: {constraint}')
    for n in [x,y]:
        if not (0 < n <= len(part_list)):
            raise ValueError(f'Part number "{str(n)}" is not between 1 and {len(part_list)}')
    return sbol3.Constraint(restriction, part_list[x-1], part_list[y-1])
def make_combinatorial_derivation(document, display_id,part_lists,reverse_complements,constraints):
    """Build a CombinatorialDerivation (and its template) for a composite with variables.

    :param document: SBOL document the template is added to (the derivation
        itself is returned, not added)
    :param display_id: displayId for the derivation; template gets "_template"
    :param part_lists: one list per position; a single Component is a fixed
        element, anything else (multiple values, or a CombinatorialDerivation)
        becomes a template variable
    :param reverse_complements: parallel list of booleans for orientation
    :param constraints: comma-separated "Part X relation Part Y" strings, or None
    :return: sbol3.CombinatorialDerivation with strategy "enumerate"
    :raises ValueError: via make_constraint, or for library elements that are
        neither Component nor CombinatorialDerivation
    """
    # Make the combinatorial derivation and its template
    template = sbol3.Component(display_id + "_template", sbol3.SBO_DNA)
    document.add(template)
    cd = sbol3.CombinatorialDerivation(display_id, template)
    cd.strategy = sbol3.SBOL_ENUMERATE
    # for each part, make a SubComponent or LocalSubComponent in the template and link them together in sequence
    template_part_list = []
    for part_list,rc in zip(part_lists,reverse_complements):
        # it's a variable if there are multiple values or if there's a single value that's a combinatorial derivation
        if len(part_list)>1 or not isinstance(part_list[0],sbol3.Component):
            sub = sbol3.LocalSubComponent({sbol3.SBO_DNA}) # make a template variable
            sub.name = "Part "+str(len(template_part_list)+1)
            template.features.append(sub)
            var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=sub)
            cd.variable_features.append(var)
            # add all of the parts as variables
            for part in part_list:
                if isinstance(part,sbol3.Component): var.variants.append(part)
                elif isinstance(part,sbol3.CombinatorialDerivation): var.variant_derivations.append(part)
                else: raise ValueError("Don't know how to make library element for "+part.name+", a "+str(part))
        else: # otherwise it's a fixed element of the template
            sub = sbol3.SubComponent(part_list[0])
            template.features.append(sub)
        # in either case, orient and order the template elements
        sub.orientation = (sbol3.SBOL_REVERSE_COMPLEMENT if rc else sbol3.SBOL_INLINE)
        if template_part_list: template.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS,template_part_list[-1],sub))
        template_part_list.append(sub)
    # next, add all of the constraints to the template
    #template.constraints = (make_constraint(c.strip(),template_part_list) for c in (constraints.split(',') if constraints else [])) # impacted by pySBOL3 appending
    c_list = (make_constraint(c.strip(),template_part_list) for c in (constraints.split(',') if constraints else []))
    for c in c_list: template.constraints.append(c)
    # return the completed part
    return cd
def make_composite_part(document, row, composite_parts, linear_products, final_products, config):
    """
    Create a composite part from a row in the composites sheet
    :param document: Document to add parts to
    :param row: Excel row to be processed
    :param composite_parts: collection of parts to add to
    :param linear_products: collection of linear parts to add to
    :param final_products: collection of final parts to add to
    :param config: dictionary of sheet parsing configuration variables
    """
    # Parse material from sheet row
    name = row[config['composite_name_col']].value
    if name is None:
        return  # skip lines without names
    else:
        name = name.strip()  # make sure we're discarding whitespace
    display_id = sbol3.string_to_display_id(name)
    design_notes = (row[config['composite_notes_col']].value if row[config['composite_notes_col']].value else "")
    description = \
        (row[config['composite_description_col']].value if row[config['composite_description_col']].value else "")
    final_product = row[config['composite_final_col']].value  # boolean
    transformed_strain = row[config['composite_strain_col']].value if config['composite_strain_col'] else None
    backbone_or_locus_raw = row[config['composite_context_col']].value if config['composite_context_col'] else None
    backbone_or_locus = part_names(backbone_or_locus_raw) if backbone_or_locus_raw else []
    constraints = row[config['composite_constraints_col']].value if config['composite_constraints_col'] else None
    reverse_complements = [is_RC(spec) for spec in part_specifications(row,config)]
    part_lists = \
        [[partname_to_part(document, name) for name in part_names(spec)] for spec in part_specifications(row, config)]
    # combinatorial if any slot has several options or contains a sub-library
    combinatorial = any(x for x in part_lists if len(x) > 1 or isinstance(x[0], sbol3.CombinatorialDerivation))
    # Build the composite
    logging.debug(f'Creating {"library" if combinatorial else "composite part"} "{name}"')
    linear_dna_display_id = (f'{display_id}_ins' if backbone_or_locus else display_id)
    if combinatorial:
        composite_part = make_combinatorial_derivation(document, linear_dna_display_id, part_lists, reverse_complements,
                                                       constraints)
    else:
        composite_part = make_composite_component(linear_dna_display_id, part_lists, reverse_complements)
    composite_part.name = (f'{name} insert' if backbone_or_locus else name)
    composite_part.description = f'{design_notes}\n{description}'.strip()
    # add the component to the appropriate collections
    document.add(composite_part)
    composite_parts.members.append(composite_part.identity)
    if final_product:
        linear_products.members.append(composite_part.identity)
    ###############
    # Consider strain and locus information
    if transformed_strain:
        warnings.warn("Not yet handling strain information: "+transformed_strain)
    if backbone_or_locus:
        # TODO: handle integration locuses as well as plasmid backbones
        backbones = [partname_to_part(document,name) for name in backbone_or_locus]
        if any(b is None for b in backbones):
            raise ValueError(f'Could not find specified backbone(s) "{backbone_or_locus}"')
        if any(not is_plasmid(b) for b in backbones):
            raise ValueError(f'Specified backbones "{backbone_or_locus}" are not all plasmids')
        if combinatorial:
            logging.debug(f"Embedding library '{composite_part.name}' in plasmid backbone(s) '{backbone_or_locus}'")
            plasmid = sbol3.Component(f'{display_id}_template', sbol3.SBO_DNA)
            document.add(plasmid)
            part_sub = sbol3.LocalSubComponent([sbol3.SBO_DNA], name="Inserted Construct")
            plasmid.features.append(part_sub)
            plasmid_cd = sbol3.CombinatorialDerivation(display_id, plasmid, name=name)
            document.add(plasmid_cd)
            part_var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=part_sub)
            plasmid_cd.variable_features.append(part_var)
            part_var.variant_derivations.append(composite_part)
            if final_product:
                final_products.members.append(plasmid_cd)
        else:
            if len(backbones) == 1:
                logging.debug(f'Embedding part "{composite_part.name}" in plasmid backbone "{backbone_or_locus}"')
                plasmid = sbol3.Component(display_id, sbol3.SBO_DNA, name=name)
                document.add(plasmid)
                part_sub = sbol3.SubComponent(composite_part)
                plasmid.features.append(part_sub)
                if final_product:
                    # was `final_products.members += {plasmid}`: appending via a set
                    # literal is inconsistent with every other collection update here
                    final_products.members.append(plasmid)
            else:
                logging.debug(f'Embedding part "{composite_part.name}" in plasmid library "{backbone_or_locus}"')
                plasmid = sbol3.Component(f'{display_id}_template', sbol3.SBO_DNA)
                document.add(plasmid)
                part_sub = sbol3.SubComponent(composite_part)
                plasmid.features.append(part_sub)
                plasmid_cd = sbol3.CombinatorialDerivation(display_id, plasmid, name=name)
                document.add(plasmid_cd)
                if final_product:
                    final_products.members.append(plasmid_cd)
        # single backbone -> fixed SubComponent; several -> a "Vector" variable
        if len(backbones) == 1:
            backbone_sub = sbol3.SubComponent(backbones[0])
            plasmid.features.append(backbone_sub)
        else:
            backbone_sub = sbol3.LocalSubComponent([sbol3.SBO_DNA])
            backbone_sub.name = "Vector"
            plasmid.features.append(backbone_sub)
            backbone_var = sbol3.VariableFeature(cardinality=sbol3.SBOL_ONE, variable=backbone_sub)
            plasmid_cd.variable_features.append(backbone_var)
            backbone_var.variants += backbones
        # both MEETS constraints together express the circular insert/vector joint
        plasmid.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS, part_sub, backbone_sub))
        plasmid.constraints.append(sbol3.Constraint(sbol3.SBOL_MEETS, backbone_sub, part_sub))
def excel_to_sbol(wb: openpyxl.Workbook, config: dict = None) -> sbol3.Document:
    """
    Take an open Excel file, return an SBOL document
    :param wb: openpyxl pointer to an Excel file
    :param config: dictionary of sheet parsing configuration variables
    :return: Document containing all SBOL extracted from Excel sheet
    """
    config = expand_configuration(config)
    doc = sbol3.Document()
    logging.info('Reading metadata for collections')
    basic_parts, composite_parts, linear_products, final_products, source_table = read_metadata(wb, doc, config)
    # Basic parts: one pass over the basic sheet, one part per row.
    logging.info('Reading basic parts')
    basic_rows = wb[config['basic_sheet']].iter_rows(min_row=config['basic_first_row'])
    for basic_row in basic_rows:
        row_to_basic_part(doc, basic_row, basic_parts, linear_products, final_products, config, source_table)
    logging.info(f'Created {len(basic_parts.members)} basic parts')
    # Composites may reference each other, so resolve them in dependency order:
    # repeatedly build every row whose subparts are all known, until none remain.
    logging.info('Reading composite parts and libraries')
    # first collect all rows with names
    name_col = config['composite_name_col']
    composite_rows = wb[config['composite_sheet']].iter_rows(min_row=config['composite_first_row'])
    pending_parts = [r for r in composite_rows if r[name_col].value]
    while pending_parts:
        ready = [r for r in pending_parts if not unresolved_subparts(doc, r, config)]
        if not ready:
            # Nothing progressed this round: report every unresolved reference.
            raise ValueError("Could not resolve subparts" + ''.join(
                (f"\n in '{row[config['composite_name_col']].value}':" +
                 ''.join(f" '{x}'" for x in unresolved_subparts(doc, row, config)))
                for row in pending_parts))
        for r in ready:
            make_composite_part(doc, r, composite_parts, linear_products, final_products, config)
        pending_parts = [p for p in pending_parts if p not in ready]  # subtract parts from stable list
    logging.info(f'Created {len(composite_parts.members)} composite parts or libraries')
    logging.info(f'Count {len(basic_parts.members)} basic parts, {len(composite_parts.members)} composites/libraries')
    report = doc.validate()
    logging.info(f'Validation of document found {len(report.errors)} errors and {len(report.warnings)} warnings')
    return doc
def main():
    """
    Main wrapper: read from input file, invoke excel_to_sbol, then write to output file
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('excel_file', help="Excel file used as input")
    parser.add_argument('-n', '--namespace', dest='namespace',
                        help="Namespace for Components in output file")
    parser.add_argument('-l', '--local', dest='local', default=None,
                        help="Local path for Components in output file")
    parser.add_argument('-o', '--output', dest='output_file', default='out',
                        help="Name of SBOL file to be written")
    parser.add_argument('-t', '--file-type', dest='file_type', default=sbol3.SORTED_NTRIPLES,
                        help="Name of SBOL file to output to (excluding type)")
    parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=0,
                        help="Print running explanation of conversion process")
    args = parser.parse_args()
    # Map repeat count of -v to a log level: 0 -> WARN, 1 -> INFO, 2+ -> DEBUG.
    if args.verbose >= 2:
        log_level = logging.DEBUG
    elif args.verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.WARN
    logging.getLogger().setLevel(level=log_level)
    # Ensure the output filename carries the extension implied by the file type.
    extension = type_to_standard_extension[args.file_type]
    if args.output_file.endswith(extension):
        outfile_name = args.output_file
    else:
        outfile_name = args.output_file + extension
    sbol3.set_namespace(args.namespace)
    # TODO: unkludge after resolution of https://github.com/SynBioDex/pySBOL3/issues/288
    if args.local:
        sbol3.set_namespace(f"{args.namespace}/{args.local}")
    # Read file, convert, and write resulting document
    logging.info('Accessing Excel file ' + args.excel_file)
    sbol_document = excel_to_sbol(openpyxl.load_workbook(args.excel_file, data_only=True))
    sbol_document.write(outfile_name, args.file_type)
    logging.info('SBOL file written to ' + outfile_name)
# Command-line entry point: run the converter when executed as a script.
if __name__ == '__main__':
    main()
| 49.876984 | 164 | 0.691423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,000 | 0.358024 |
aab1e49d7e867714a47a33900ab3c0e741463041 | 5,921 | py | Python | jsonschema/tests/test_jsonschema_test_suite.py | prdpklyn/greens-jsonschema | 36b9d726733102a2a869e0db8fb351942a159f5c | [
"MIT"
] | null | null | null | jsonschema/tests/test_jsonschema_test_suite.py | prdpklyn/greens-jsonschema | 36b9d726733102a2a869e0db8fb351942a159f5c | [
"MIT"
] | null | null | null | jsonschema/tests/test_jsonschema_test_suite.py | prdpklyn/greens-jsonschema | 36b9d726733102a2a869e0db8fb351942a159f5c | [
"MIT"
] | null | null | null | """
Test runner for the JSON Schema official test suite
Tests comprehensive correctness of each draft's validator.
See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
"""
import sys
from jsonschema import (
Draft3Validator,
Draft4Validator,
Draft6Validator,
Draft7Validator,
draft3_format_checker,
draft4_format_checker,
draft6_format_checker,
draft7_format_checker,
)
from jsonschema.tests._suite import Suite
from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create
# Shared handle to the bundled official JSON-Schema-Test-Suite, plus one
# accessor per supported draft; each DRAFT* yields that draft's test cases.
SUITE = Suite()
DRAFT3 = SUITE.version(name="draft3")
DRAFT4 = SUITE.version(name="draft4")
DRAFT6 = SUITE.version(name="draft6")
DRAFT7 = SUITE.version(name="draft7")
def skip_tests_containing_descriptions(**kwargs):
    """Build a skip predicate from {subject: {description substring: reason}}.

    The returned callable inspects a suite test: if a mapping is registered
    for the test's subject and one of its keys occurs in the test's
    description, the matching reason string is returned (the test is
    skipped); otherwise None is returned (the test runs).
    """
    def skipper(test):
        for description, reason in kwargs.get(test.subject, {}).items():
            if description in test.description:
                return reason
        return None
    return skipper
def missing_format(checker):
    """Build a skip predicate that skips format tests the checker cannot run.

    The returned callable yields a reason string when the test's schema names
    a "format" that *checker* has no registered checker for, and None when
    the schema is boolean, has no "format" keyword, or the format is known.
    """
    def missing_format(test):
        schema = test.schema
        if schema is True or schema is False:
            return None
        if "format" not in schema:
            return None
        fmt = schema["format"]
        if fmt in checker.checkers:
            return None
        return "Format checker {0!r} not found.".format(fmt)
    return missing_format
# On narrow CPython builds (sys.maxunicode == 0xFFFF) characters outside the
# Basic Multilingual Plane are stored as surrogate pairs, which breaks
# length-based tests over supplementary characters; skip those tests there.
is_narrow_build = sys.maxunicode == 2 ** 16 - 1
if is_narrow_build:  # pragma: no cover
    narrow_unicode_build = skip_tests_containing_descriptions(
        maxLength={
            "supplementary Unicode":
                "Not running surrogate Unicode case, this Python is narrow.",
        },
        minLength={
            "supplementary Unicode":
                "Not running surrogate Unicode case, this Python is narrow.",
        },
    )
else:
    # Wide build: the hook never skips anything (returns None implicitly).
    def narrow_unicode_build(test):  # pragma: no cover
        return
# Generated unittest TestCase classes, one per draft. Each bundles the draft's
# required tests plus selected optional test sets; `skip` returns a reason
# string (test skipped) or None (test runs).
TestDraft3 = DRAFT3.to_unittest_testcase(
    DRAFT3.tests(),
    DRAFT3.optional_tests_of(name="format"),
    DRAFT3.optional_tests_of(name="bignum"),
    DRAFT3.optional_tests_of(name="zeroTerminatedFloats"),
    Validator=Draft3Validator,
    format_checker=draft3_format_checker,
    skip=lambda test: (
        narrow_unicode_build(test) or
        missing_format(draft3_format_checker)(test) or
        skip_tests_containing_descriptions(
            format={
                "case-insensitive T and Z": "Upstream bug in strict_rfc3339",
            },
        )(test)
    ),
)
TestDraft4 = DRAFT4.to_unittest_testcase(
    DRAFT4.tests(),
    DRAFT4.optional_tests_of(name="format"),
    DRAFT4.optional_tests_of(name="bignum"),
    DRAFT4.optional_tests_of(name="zeroTerminatedFloats"),
    Validator=Draft4Validator,
    format_checker=draft4_format_checker,
    skip=lambda test: (
        narrow_unicode_build(test) or
        missing_format(draft4_format_checker)(test) or
        skip_tests_containing_descriptions(
            format={
                "case-insensitive T and Z": "Upstream bug in strict_rfc3339",
            },
            ref={
                "valid tree": "An actual bug, this needs fixing.",
            },
            refRemote={
                "number is valid": "An actual bug, this needs fixing.",
                "string is invalid": "An actual bug, this needs fixing.",
            },
        )(test)
    ),
)
TestDraft6 = DRAFT6.to_unittest_testcase(
    DRAFT6.tests(),
    DRAFT6.optional_tests_of(name="format"),
    DRAFT6.optional_tests_of(name="bignum"),
    DRAFT6.optional_tests_of(name="zeroTerminatedFloats"),
    Validator=Draft6Validator,
    format_checker=draft6_format_checker,
    skip=lambda test: (
        narrow_unicode_build(test) or
        missing_format(draft6_format_checker)(test) or
        skip_tests_containing_descriptions(
            format={
                "case-insensitive T and Z": "Upstream bug in strict_rfc3339",
            },
            ref={
                "valid tree": "An actual bug, this needs fixing.",
            },
            refRemote={
                "number is valid": "An actual bug, this needs fixing.",
                "string is invalid": "An actual bug, this needs fixing.",
            },
        )(test)
    ),
)
# Draft 7 moved format tests out of the optional set, hence format_tests().
TestDraft7 = DRAFT7.to_unittest_testcase(
    DRAFT7.tests(),
    DRAFT7.format_tests(),
    DRAFT7.optional_tests_of(name="bignum"),
    DRAFT7.optional_tests_of(name="zeroTerminatedFloats"),
    Validator=Draft7Validator,
    format_checker=draft7_format_checker,
    skip=lambda test: (
        narrow_unicode_build(test) or
        missing_format(draft7_format_checker)(test) or
        skip_tests_containing_descriptions(
            format={
                "case-insensitive T and Z": "Upstream bug in strict_rfc3339",
            },
            ref={
                "valid tree": "An actual bug, this needs fixing.",
            },
            refRemote={
                "number is valid": "An actual bug, this needs fixing.",
                "string is invalid": "An actual bug, this needs fixing.",
            },
        )(test)
    ),
)
# Regression coverage for the deprecated default_types API: re-run the "type"
# tests with validators created against _DEPRECATED_DEFAULT_TYPES.
TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase(
    DRAFT3.tests_of(name="type"),
    name="TestDraft3LegacyTypeCheck",
    skip=skip_tests_containing_descriptions(
        type={
            "any": "Interestingly this couldn't really be done w/the old API.",
        },
    ),
    Validator=create(
        meta_schema=Draft3Validator.META_SCHEMA,
        validators=Draft3Validator.VALIDATORS,
        default_types=_DEPRECATED_DEFAULT_TYPES,
    ),
)
TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase(
    DRAFT4.tests_of(name="type"),
    name="TestDraft4LegacyTypeCheck",
    Validator=create(
        meta_schema=Draft4Validator.META_SCHEMA,
        validators=Draft4Validator.VALIDATORS,
        default_types=_DEPRECATED_DEFAULT_TYPES,
    ),
)
| 30.209184 | 79 | 0.636717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,451 | 0.24506 |
aab254d87e54d35e023480f5b36f3f53979b98fb | 8,635 | py | Python | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 2 | 2019-04-09T17:28:34.000Z | 2019-06-05T10:05:11.000Z | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 11 | 2019-04-19T23:03:38.000Z | 2019-11-22T17:59:07.000Z | signalworks/tracking/multitrack.py | lxkain/tracking | 00ed9a0b31c4880687a42df3bf9651e68e0c4360 | [
"MIT"
] | 3 | 2019-05-01T16:02:32.000Z | 2019-06-25T18:05:39.000Z | import copy
import json
import os
from collections import UserDict
from signalworks.tracking import Event, Partition, TimeValue, Value, Wave
class MultiTrack(UserDict):
    """
    A dictionary containing time-synchronous tracks of equal duration and fs
    """
    def __init__(self, mapping=None):
        # None default avoids a shared mutable default; an empty UserDict
        # stands in for an empty mapping.
        if mapping is None:
            mapping = UserDict()
        UserDict.__init__(self, mapping)
        if __debug__:  # long assert - TODO: do this on mapping, and then assign
            self.check()
    def check(self):
        # Verify the class invariant: every stored track shares a single fs
        # and a single duration (compared against the first track).
        # NOTE(review): the f-strings below have unbalanced quotes around
        # {key} ("('{key})" and "('{key}'')") — likely message typos; confirm.
        if len(self) > 1:
            for i, (key, track) in enumerate(self.items()):
                if track.fs != self.fs:
                    raise AssertionError(
                        f"all fs' must be equal, track #{i} ('{key}) does not match track #1"
                    )
                if track.duration != next(iter(self.values())).duration:
                    raise AssertionError(
                        f"all durations must be equal, track #{i} ('{key}'') does not match track #1"
                    )
    def get_fs(self):
        # fs of the first stored track; 0 when the container is empty.
        if len(self):
            return next(iter(self.values())).fs
        else:
            return 0  # or raise?
    def set_fs(self, fs):
        raise Exception("Cannot change fs, try resample()")
    fs = property(get_fs, set_fs, doc="sampling frequency")
    def get_duration(self):
        # Duration of the first stored track; 0 when empty.
        if len(self):
            if __debug__:  # long assert - TODO: do this on mapping, and then assign
                self.check()
            return next(iter(self.values())).duration
        else:
            return 0
    def set_duration(self, duration):
        # NOTE(review): "conents" typo lives in the runtime message; kept as-is.
        raise Exception("The duration cannot be set, it is derived from its conents")
    duration = property(
        get_duration, set_duration, doc="duration, as defined by its content"
    )
    def __eq__(self, other):
        # excluding wav from comparison as long as wav writing/reading is erroneous
        if (set(self.keys()) - {"wav"}) != (set(other.keys()) - {"wav"}):
            return False
        for k in self.keys():
            if k != "wav" and self[k] != other[k]:
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def __setitem__(self, key, value):
        # Enforce the invariant on insertion: a new track must match the
        # container's duration and fs (only checked once non-empty).
        if len(self):
            if value.duration != self.duration:
                raise AssertionError("duration does not match")
            if value.fs != self.fs:
                raise AssertionError("fs does not match")
        UserDict.__setitem__(self, key, value)
    def __str__(self):
        # One "key: track" line per stored track.
        s = ""
        for key, track in self.items():
            s += "%s: %s\n" % (key, track)
        return s
    def __add__(self, other):
        # Concatenate two multitracks key-by-key; deep-copy guards `mt + mt`.
        if self is other:
            other = copy.deepcopy(other)
        obj = type(self)()
        for k in self:  # .iterkeys():
            obj[k] = self[k] + other[k]
        return obj
    def resample(self, fs):
        # Return a new MultiTrack with every track resampled to fs.
        multiTrack = type(self)()
        for key, track in self.items():
            multiTrack[key] = track.resample(fs)
        return multiTrack
    def crossfade(self, other, length):
        """
        append multiTrack to self, using a crossfade of a specified length in samples
        """
        assert type(self) == type(other)
        assert self.keys() == other.keys()
        assert self.fs == other.fs
        assert isinstance(length, int)
        assert length > 0
        assert other.duration >= length
        assert self.duration >= length
        multiTrack = type(self)()
        for key, _ in self.items():
            multiTrack[key] = self[key].crossfade(other[key], length)
        return multiTrack
    def select(self, a, b, keys=None):
        # NOTE(review): the triple-quoted string below follows the asserts, so
        # it is a no-op expression rather than the function's docstring.
        assert a >= 0
        assert a < b  # or a <= b?
        assert b <= self.duration
        """return a new multitrack object with all track views from time a to b"""
        if keys is None:
            keys = self.keys()
        multiTrack = type(self)()
        for key in keys:
            multiTrack[key] = self[key].select(a, b)
        return multiTrack
    # TODO: should this be deprecated in favor of / should this call - the more general time_warp function?
    def scale_duration(self, factor):
        # Stretch or compress every track's timeline by `factor`, in place.
        if factor != 1:
            for t in self.values():
                if isinstance(t, Partition):
                    t.time *= (
                        factor
                    )  # last time parameter IS duration, so no worries about duration
                elif isinstance(t, TimeValue) or isinstance(t, Event):
                    # Order matters: grow duration before times when expanding,
                    # shrink times before duration when contracting, so the
                    # times never exceed the duration in between.
                    if factor > 1:  # make room for expanded times
                        t.duration = int(t.duration * factor)
                        t.time *= factor
                    else:
                        t.time *= factor
                        t.duration = int(t.duration * factor)
                else:
                    raise NotImplementedError  # wave?
    def time_warp(self, x, y):
        """in-place"""
        for track in iter(self.values()):
            track.time_warp(x, y)
    # File extension used by read()/write() when the caller omits one.
    default_suffix = ".mtt"
    @classmethod
    def read(cls, name):
        """Loads info about stored tracks from name, adding extension if missing,
        and loads tracks by calling read(<name without extension>) for them.
        """
        name_wo_ext = os.path.splitext(name)[
            0
        ]  # TODO: upgrade all path stuff to pathlib
        if name == name_wo_ext:
            name += cls.default_suffix
        with open(name, "rb") as mtt_file:
            track_infos = json.load(mtt_file)
        self = cls()
        for track_type_name, track_info_list in track_infos:
            # Resolve the concrete Track class from its saved class name.
            track_type = globals()[track_type_name]
            track_info: UserDict = UserDict(track_info_list)
            track = track_type.read(name_wo_ext, **track_info)
            self[track_info["track_name"]] = track
        return self
    @classmethod
    def read_edf(cls, path):
        # NOTE(review): everything below the raise is unreachable placeholder
        # code retained for the eventual EDF implementation.
        raise NotImplementedError
        # TODO: adapt
        # the following is copied from elsewhere and won't work as is
        import pyedflib
        with pyedflib.EdfReader(str(path)) as f:
            labels = f.getSignalLabels()
            for label in labels:
                index = labels.index(label)
                wav = Wave(f.readSignal(index), f.getSampleFrequency(index))
                wav.label = label
                wav.path = f.with_name(f.stem + "-" + label + ".wav")
                wav.min = f.getPhysicalMinimum(index)
                wav.max = f.getPhysicalMaximum(index)
                wav.unit = f.getPhysicalDimension(index)
                # self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
    @classmethod
    def read_xdf(cls, path):
        # NOTE(review): everything below the raise is unreachable placeholder
        # code retained for the eventual XDF implementation.
        raise NotImplementedError
        import openxdf
        # TODO: below is a place holder and needs to be finalize
        xdf = openxdf.OpenXDF(path)
        signals = openxdf.Signal(xdf, path.with_suffix(".nkamp"))
        # TODO: automate this, why are the xdf.header names different from signals.list_channels?
        for label in ["ECG", "Chin"]:
            # logger.info(f'reading {label} channel')
            sig = signals.read_file(label)[label]
            wav = Wave(sig.ravel(), 200)
            wav.label = label
            # wav.path = file.with_name(file.stem + '-' + label + '.wav')
            wav.min = -3200
            wav.max = 3200
            wav.unit = "1"
            # self.add_view(wav, panel_index=panel_index, y_min=wav.min, y_max=wav.max)
    def write(self, name):
        """Saves info about stored tracks to name, adding extension if missing,
        and calls write(<name without extension>) for the contained tracks.
        Note!: not saving wav as long as wav writing/reading is erroneous
        """
        name_wo_ext = os.path.splitext(name)[0]
        if name == name_wo_ext:
            name += self.default_suffix
        track_infos = []  # list of dicts storing track info
        for track_name, track in sorted(self.items()):
            if track_name == "wav":
                continue
            track_info = {
                "track_name": track_name,
                "fs": int(track.get_fs()),
                "duration": int(track.get_duration()),
            }
            # Value tracks additionally record the Python type of their payload.
            if type(track) == Value:
                track_info.update({"value_type": type(track.get_value()).__name__})
            track.write(name_wo_ext, **track_info)
            track_infos.append((type(track).__name__, sorted(track_info.items())))
        with open(name, "wt") as mtt_file:
            json.dump(track_infos, mtt_file)
| 36.901709 | 107 | 0.555067 | 8,490 | 0.983208 | 0 | 0 | 2,428 | 0.281181 | 0 | 0 | 2,115 | 0.244933 |
aab47503ee8d0e3164856b1141204d18fa2f42fa | 41 | py | Python | fixture/__init__.py | hippa777/python_training | 568c12e1a21c3c7eb40a1af25a9db83690a1b26d | [
"Apache-2.0"
] | null | null | null | fixture/__init__.py | hippa777/python_training | 568c12e1a21c3c7eb40a1af25a9db83690a1b26d | [
"Apache-2.0"
] | null | null | null | fixture/__init__.py | hippa777/python_training | 568c12e1a21c3c7eb40a1af25a9db83690a1b26d | [
"Apache-2.0"
] | null | null | null | from .contact_helper import ContactHelper | 41 | 41 | 0.902439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aab4c92f391c8df0afca6e908ce237106398f88f | 1,676 | py | Python | blur.py | yasue32/afm-denoise | 5342578fada8a6ce68a507afbbbd82f367760366 | [
"MIT"
] | 1 | 2022-03-10T09:06:52.000Z | 2022-03-10T09:06:52.000Z | blur.py | yasue32/afm-denoise | 5342578fada8a6ce68a507afbbbd82f367760366 | [
"MIT"
] | null | null | null | blur.py | yasue32/afm-denoise | 5342578fada8a6ce68a507afbbbd82f367760366 | [
"MIT"
] | null | null | null | import cv2
import matplotlib.pyplot as plt
import glob
import os
filepath ="afm_dataset4/20211126/"
files = [line.rstrip() for line in open((filepath+"sep_trainlist.txt"))]
files = glob.glob("orig_img/20211112/*")
def variance_of_laplacian(image):
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
return cv2.Laplacian(image, cv2.CV_64F).var()
gt_fm = []
input_fm = []
for i, file in enumerate(files):
image = cv2.imread(file)
# image = cv2.imread(filepath + file)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(image)
input_fm.append(fm)
# file_gt = "/".join(file.split("/")[:-1] + ["gt.png"])
# image = cv2.imread(filepath + file_gt)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# fm = variance_of_laplacian(image)
# gt_fm.append(fm)
# if (i+1)%25==0:
if fm < 500:
text = "Blurry"
elif fm>2000:
text = "Noisy"
else:
text = "Not blurry"
# show the image
os.makedirs("blur/"+file[:-9], exist_ok=True)
cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
cv2.imwrite("blur/"+file, image)
# fig = plt.figure()
# plt.imshow(image)
# fig.savefig("blur/"+file)
print("iter", i)
# print("gt:", sum(gt_fm)/len(gt_fm))
print("input:", sum(input_fm)/len(input_fm))
fig = plt.figure()
plt.scatter(list(range(len(input_fm))), input_fm)
# plt.scatter(list(range(len(gt_fm))), gt_fm)
fig.savefig("img_1126.png")
# print("gt:", sum(gt_fm)/len(gt_fm))
# print("input:", sum(input_fm)/len(input_fm))
# print(len(gt_fm)) | 29.403509 | 110 | 0.648568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.464797 |
aab5358c186091fffe57d4f1b00d5fec32b9e7f8 | 139 | py | Python | python/component/base/utils.py | ModerateFish/component | 13e778648e7ff3132e30543b0aa9a436ee286f99 | [
"Apache-2.0"
] | null | null | null | python/component/base/utils.py | ModerateFish/component | 13e778648e7ff3132e30543b0aa9a436ee286f99 | [
"Apache-2.0"
] | null | null | null | python/component/base/utils.py | ModerateFish/component | 13e778648e7ff3132e30543b0aa9a436ee286f99 | [
"Apache-2.0"
] | null | null | null | import os
def check_path(path):
    """Create directory *path* (with parents) when it is a non-blank string
    naming a path that does not exist yet; otherwise do nothing."""
    if path and path.strip() and not os.path.exists(path):
        os.makedirs(path)
pass | 19.857143 | 60 | 0.647482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aab57c61fdbffbd48b08ceac3432b1c6895bbeba | 1,027 | py | Python | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | python/solutii/ingrid_stoleru/Cursor.py | broascaiulian/labs | 068c7f440c7a29cb6a3e1dbb8e4bb7dfaff5a050 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""Solutia problemei Cursor"""
# Maps a direction word (Romanian) to its [dx, dy] step vector.
# NOTE(review): the keys carry surrounding spaces (" stanga "); any lookup
# must normalize for this padding or a str.split() token will never match.
DIRECTIONS = {" stanga ": [-1, 0], " dreapta ": [1, 0],
              " jos ": [0, -1], " sus ": [0, 1]}
def distanta(string, pozitie):
    """Apply one movement command to *pozitie* in place.

    *string* has the form "<direction> <amount>", e.g. "Dreapta 10".
    Bug fix: the keys of DIRECTIONS are padded with spaces while the tokens
    produced by str.split() are not, so the original membership test could
    never succeed and the position never moved; normalize the keys before
    the lookup. Unknown directions are still silently ignored.
    """
    directie, valoare = string.split()
    directie = directie.lower()
    moves = {key.strip().lower(): vec for key, vec in DIRECTIONS.items()}
    if directie in moves:
        vec = moves[directie]
        pozitie[0] = pozitie[0] + vec[0] * int(valoare)
        pozitie[1] = pozitie[1] + vec[1] * int(valoare)
def main():
    """Read movement commands from the "Cursor_Date" file, apply them to a
    cursor starting at the origin, and print the final position followed by
    its Euclidean distance from the origin. (Python 2 script.)"""
    try:
        fisier = open("Cursor_Date", "r")
        mesaje = fisier.read()
        fisier.close()
    except IOError:
        # Input file missing/unreadable: report (in Romanian) and abort.
        print "Nu am putut obține coordonatele."
        return
    pozitie = [0, 0]
    # One movement command per non-empty line.
    for linie in mesaje.splitlines():
        if linie:
            distanta(linie, pozitie)
    print pozitie
    rezultat = (pozitie[0]**2 + pozitie[1]**2) ** 0.5
    print rezultat
# Script entry point: run the computation when executed directly.
if __name__ == "__main__":
    main()
| 26.333333 | 57 | 0.542356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.216926 |
aab5dd1420b09051fc9fe578384a7add1adbe417 | 1,932 | py | Python | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | 6 | 2021-03-19T01:10:18.000Z | 2022-03-11T23:49:18.000Z | hoods/models.py | badruu/neighborhood | 85d30f7451f921c533dc4463aad76ed2d39f8023 | [
"MIT"
] | null | null | null | from django.db import models
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.validators import MaxValueValidator, MinValueValidator
class Hoods(models.Model):
name = models.CharField(max_length = 100)
location = models.CharField(max_length = 100)
image = models.ImageField(upload_to = 'images/', default = 'default.jpg')
description = models.TextField(max_length = 300, default = 'No description')
population = models.IntegerField(default = '0')
admin = models.ForeignKey(User, on_delete = models.CASCADE)
timestamp = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
def create_hood(self):
self.save()
def delete_hood(self):
self.delete()
def find_neighbourhood(hoods_id):
neighbourhood = Hoods.objects.get(id = hoods_id)
return neighbourhood
def update_hood(self, item, value):
self.update(item = value)
def update_occupants(self, value):
self.update(population = value)
class Business(models.Model):
name = models.CharField(max_length = 100)
user = models.ForeignKey(User, on_delete = models.CASCADE)
hood_id = models.ForeignKey(Hoods, on_delete = models.CASCADE)
email_address = models.EmailField(max_length=254)
timestamp = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
def find_business(business_id):
business = Business.objects.get(id = business_id)
return business
def update_business(self, item, value):
self.update(item = value)
@classmethod
def search_business(cls, name):
businesses = cls.objects.filter(name__icontains=name).all()
return businesses | 31.16129 | 80 | 0.699793 | 1,702 | 0.880952 | 0 | 0 | 142 | 0.073499 | 0 | 0 | 41 | 0.021222 |
aab613077debdcab1c40bda4b0c2298ea8cef417 | 5,710 | py | Python | src/clf_comparison.py | UBC-MDS/DSCI522_group17 | 7be4df0d09258a8b021f61e0d7a35022f49a2fdd | [
"MIT"
] | 1 | 2020-12-07T19:52:28.000Z | 2020-12-07T19:52:28.000Z | src/clf_comparison.py | UBC-MDS/DSCI522_group17 | 7be4df0d09258a8b021f61e0d7a35022f49a2fdd | [
"MIT"
] | 14 | 2020-11-18T10:59:07.000Z | 2020-12-14T23:49:56.000Z | src/clf_comparison.py | UBC-MDS/DSCI522_group17 | 7be4df0d09258a8b021f61e0d7a35022f49a2fdd | [
"MIT"
] | 3 | 2020-11-18T10:04:37.000Z | 2020-11-20T08:31:14.000Z | # Author: Pan Fan, Chun Chieh Chang, Sakshi Jain
# Date: 2020/11/27
"""Compare the performance of different classifiers and train the best model given cross_validate results.
Usage: src/clf_comparison.py <input_file> <input_file1> <output_file> <output_file1>
Options:
<input_file> Path (including filename and file extension) to transformed train file
<input_file1> Path (including filename and file extension) to transformed test file
<output_file> Path (including filename and file extension) to cross validate result file
<output_file1> Path (including filename and file extension) to store untuned model predictions
"""
#import packages
from docopt import docopt
import pandas as pd
import sys
import os
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import (
cross_validate,
GridSearchCV,
RandomizedSearchCV
)
from joblib import dump, load
from sklearn.metrics import f1_score, make_scorer
# Silence library warnings unless the user explicitly enabled them via -W.
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("ignore")
# Parse the module docstring's usage spec at import time; docopt exits with
# the usage message when the command-line arguments do not match.
opt = docopt(__doc__)
def main(input_file, input_file1, output_file, output_file1):
    """Cross-validate several classifiers, persist each fitted model, pick the best
    by mean CV test score, then tune Random Forest / Logistic Regression.

    Parameters
    ----------
    input_file : str
        Path to the transformed training CSV (must contain a 'quality_level' column).
    input_file1 : str
        Path to the transformed test CSV (same schema as the training file).
    output_file : str
        Path where the untuned cross-validation results table is written.
    output_file1 : str
        Path where the best untuned model's test score is written.

    Side effects: writes .joblib model dumps and tuned-result CSVs into a
    hard-coded 'results/' directory in addition to the two output paths.
    """
    # read train_df.csv
    train = pd.read_csv(input_file)
    test = pd.read_csv(input_file1)
    # create split the train_df
    X_train, y_train = train.drop(columns=["quality_level"]), train["quality_level"]
    X_test, y_test = test.drop(columns=["quality_level"]), test["quality_level"]
    # check if target folder exists
    try:
        os.makedirs(os.path.dirname(output_file))
    except FileExistsError:
        pass
    # define classifiers
    # class_weight='balanced' compensates for class imbalance; fixed random_state
    # keeps runs reproducible.
    classifiers = {
        "Logistic_Regression": LogisticRegression(random_state = 123, class_weight = 'balanced'),
        "Random_Forest": RandomForestClassifier(random_state = 123, class_weight = 'balanced'),
        "DummyClassifier": DummyClassifier(random_state = 123),
        "SVC" : SVC(random_state = 123, class_weight = 'balanced'),
        "K_Nearest_Neighbors": KNeighborsClassifier()
    }
    # Weighted F1 restricted to the 'Excellent' label — the class of interest.
    f1 = make_scorer(f1_score, average = 'weighted', labels = ['Excellent'])
    def score_with_metrics(models, scoring=f1):
        """
        Return cross-validation scores for given models as a dataframe.
        Also fits each model on the full training set and dumps it to
        'results/<name>.joblib' as a side effect.
        Parameters
        ----------
        models : dict
            a dictionary with names and scikit-learn models
        scoring : list/dict/string
            scoring parameter values for cross-validation
        Returns
        ----------
        pd.DataFrame
            One column per model holding the mean CV metrics.
        """
        results_df = {}
        for (name, model) in models.items():
            clf = model
            scores = cross_validate(
                clf, X_train, y_train, return_train_score=True, scoring=scoring
            )
            df = pd.DataFrame(scores)
            results_df[name] = df.mean()
            clf.fit(X_train, y_train)
            # save the model
            dump(clf, 'results/'+name+'.joblib')
        return pd.DataFrame(results_df)
    res = score_with_metrics(classifiers)
    res = res.transpose()
    # Model with the highest mean CV test score wins the untuned comparison.
    best_model = res.idxmax()['test_score']
    best_clf = classifiers[best_model]
    best_clf.fit(X_train, y_train)
    pred = best_clf.predict(X_test)
    test_scores = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
    best_score = pd.DataFrame({'Model': [best_model], 'Test_Score':[test_scores]})
    res.to_csv(output_file, index = True)
    best_score.to_csv(output_file1, index = False)
    # perform hyperparameter tuning on two of the best models
    param_RF = {'n_estimators':[int(i) for i in np.linspace(start = 100, stop = 1000, num = 10).tolist()],
                'max_depth':[int(i) for i in np.linspace(start = 10, stop = 1000, num = 100).tolist()]}
    param_log = {
        "C": [0.0001, 0.001, 0.01, 0.1, 1.0, 10, 100, 1000]}
    # Randomized search for RF (large grid), exhaustive grid search for LogReg (small grid).
    rf_search = RandomizedSearchCV(classifiers['Random_Forest'],
                                   param_RF, cv = 5,
                                   n_jobs = -1,
                                   scoring = f1,
                                   n_iter = 20, random_state = 123)
    log_search = GridSearchCV(classifiers['Logistic_Regression'],
                              param_log, cv = 5,
                              n_jobs = -1,
                              scoring = f1
                              )
    rf_search.fit(X_train, y_train)
    log_search.fit(X_train, y_train)
    rf_best = rf_search.best_estimator_
    log_best = log_search.best_estimator_
    tuned_results = {}
    rf_score = cross_validate(rf_best, X_train, y_train, return_train_score=True, scoring=f1)
    log_score = cross_validate(log_best, X_train, y_train, return_train_score=True, scoring=f1)
    tuned_results['Random Forest'] = pd.DataFrame(rf_score).mean()
    tuned_results['Logistic Regression'] = pd.DataFrame(log_score).mean()
    tuned_results = pd.DataFrame(tuned_results).transpose()
    tuned_results.to_csv('results/tuned_cv_results.csv', index = True)
    # NOTE(review): Random Forest is assumed to be the better tuned model here
    # without comparing it against the tuned Logistic Regression — confirm intent.
    rf_best.fit(X_train, y_train)
    dump(rf_best, 'results/Bestrfmodel.joblib')
    pred = rf_best.predict(X_test)
    best_f1 = f1_score(y_test, pred, average = 'weighted', labels = ['Excellent'])
    best_tuned_model_test = pd.DataFrame({'Model': ['Random Forest'], 'Test_Score':[best_f1]})
    best_tuned_model_test.to_csv('results/best_tuned_model.csv', index = False)
if __name__ == "__main__":
    # Command-line arguments are parsed by docopt at module import (opt above).
    main(opt["<input_file>"], opt["<input_file1>"], opt["<output_file>"], opt["<output_file1>"])
| 37.565789 | 106 | 0.648511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,758 | 0.307881 |
aab7ca71f6747c996ae533f8dc5d23355cbe2498 | 1,294 | py | Python | hebbmodel/fc.py | aimir-lab/hebbian-learning-cnn | ddaf3a66c1c374960dc680f671e64f3f20387590 | [
"MIT"
] | 18 | 2019-09-13T10:19:11.000Z | 2021-11-13T22:05:06.000Z | hebbmodel/fc.py | GabrieleLagani/HebbianLearningThesis | 0f98f7a3e380e55c9fca6340f4fb0cc5f24917d8 | [
"MIT"
] | null | null | null | hebbmodel/fc.py | GabrieleLagani/HebbianLearningThesis | 0f98f7a3e380e55c9fca6340f4fb0cc5f24917d8 | [
"MIT"
] | 5 | 2019-11-24T08:16:14.000Z | 2021-02-15T11:41:18.000Z | import torch.nn as nn
import params as P
import hebbmodel.hebb as H
class Net(nn.Module):
    """Single-layer Hebbian classifier: one non-competitive Hebbian map acting
    as a fully connected layer that outputs the class scores."""

    # Layer names
    FC = 'fc'
    CLASS_SCORES = FC  # symbolic name of the layer providing the class scores as output

    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()
        # Shape of the tensors that we expect to receive as input; a non-3D
        # shape is collapsed to (channels, 1, 1).
        self.input_shape = input_shape if len(input_shape) == 3 else (input_shape[0], 1, 1)
        # Hebbian "convolution" whose kernel spans the entire input, which makes
        # it equivalent to a fully connected layer — one kernel per class.
        self.fc = H.HebbianMap2d(
            in_channels=self.input_shape[0],
            out_size=P.NUM_CLASSES,
            kernel_size=(self.input_shape[1], self.input_shape[2]),
            competitive=False,
            eta=0.1,
        )

    def forward(self, x):
        # Reshape the batch to the expected input shape, run the Hebbian FC
        # layer, and expose the class scores under the symbolic layer name.
        scores = self.fc(x.view(-1, *self.input_shape)).view(-1, P.NUM_CLASSES)
        return {self.FC: scores}

    def set_teacher_signal(self, y):
        # Forward the supervision signal for supervised Hebbian learning.
        self.fc.set_teacher_signal(y)
| 30.093023 | 117 | 0.718702 | 1,223 | 0.945131 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.425039 |
aab8ee0d00657fec39263780f21c6f66db24843f | 275 | py | Python | pure_sklearn/feature_extraction/__init__.py | ashetty1-m/pure-predict | 05a0f105fb43532af1a0713dc34b26574d51b563 | [
"Apache-2.0"
] | 62 | 2020-02-14T15:54:12.000Z | 2021-11-23T14:12:32.000Z | pure_sklearn/feature_extraction/__init__.py | ashetty1-m/pure-predict | 05a0f105fb43532af1a0713dc34b26574d51b563 | [
"Apache-2.0"
] | 9 | 2020-04-05T16:19:33.000Z | 2022-02-08T14:54:56.000Z | pure_sklearn/feature_extraction/__init__.py | ashetty1-m/pure-predict | 05a0f105fb43532af1a0713dc34b26574d51b563 | [
"Apache-2.0"
] | 5 | 2021-02-26T14:04:17.000Z | 2022-02-10T23:06:16.000Z | """
The :mod:`pure_sklearn.feature_extraction` module deals with feature extraction
from raw data. It currently includes methods to extract features from text.
"""
from ._dict_vectorizer import DictVectorizerPure
from . import text
__all__ = ["DictVectorizerPure", "text"]
| 27.5 | 79 | 0.789091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.687273 |
aab991f211f7427de19a3a6c9a2b406d03220528 | 3,887 | py | Python | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | plugin.video.mrstealth.serialu.net/uppod.py | mrstealth/kodi-isengard | 2f37ba5320c1618fbe635f5683e7329a63195c16 | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Uppod decoder
#-------------------------------------------------------------------------------
import urllib2
import cookielib
def decode(param):
    """Decode an Uppod player hash: undo a fixed character-substitution cipher,
    then base64-decode the result manually (Python 2 code — uses unichr).

    Returns the decoded string, or '' on any error (the bare except below
    deliberately swallows all failures).
    """
    try:
        #-- define variables
        loc_3 = [0,0,0,0]   # 4 base64 digit values per group
        loc_4 = [0,0,0]     # 3 decoded bytes per group
        loc_2 = ''          # accumulated output string
        #-- define hash parameters for decoding
        dec = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
        hash1 = ["0", "5", "u", "w", "6", "n", "H", "o", "B", "p", "N", "M", "D", "R", "z", "G", "V", "e", "i", "3", "m", "W", "U", "7", "g", "="]
        hash2 = ["c", "T", "I", "4", "Q", "Z", "v", "Y", "y", "X", "k", "b", "8", "a", "J", "d", "1", "x", "L", "t", "l", "2", "f", "s", "9", "h"]
        #-- decode
        # Swap each hash1[i] character with hash2[i] via a '___' placeholder
        # (three-step swap so the two replacements don't clobber each other).
        for i in range(0, len(hash1)):
            re1 = hash1[i]
            re2 = hash2[i]
            param = param.replace(re1, '___')
            param = param.replace(re2, re1)
            param = param.replace('___', re2)
        # Manual base64 decode: consume 4 characters, emit up to 3 bytes.
        i = 0
        while i < len(param):
            j = 0
            while j < 4 and i+j < len(param):
                loc_3[j] = dec.find(param[i+j])
                j = j + 1
            loc_4[0] = (loc_3[0] << 2) + ((loc_3[1] & 48) >> 4);
            loc_4[1] = ((loc_3[1] & 15) << 4) + ((loc_3[2] & 60) >> 2);
            loc_4[2] = ((loc_3[2] & 3) << 6) + loc_3[3];
            j = 0
            while j < 3:
                # Stop at padding ('=' maps to 64) or a zero byte.
                if loc_3[j + 1] == 64 or loc_4[j] == 0:
                    break
                loc_2 += unichr(loc_4[j])
                j = j + 1
            i = i + 4;
    except:
        # NOTE(review): bare except hides all errors (including typos) and
        # returns '' — intentional best-effort behavior, kept as-is.
        loc_2 = ''
    return loc_2
def decodeSourceURL(uhash):
print "*** Got uppod uhash: %s" % uhash
return decode(uhash)
def getDecodedHashFromSourceURL(url, referer):
    """Fetch the given URL with a cookie jar and browser-like headers and
    return the raw response body (Python 2, urllib2/cookielib).

    NOTE(review): the 'referer' parameter is unused — the Referer header is
    hard-coded below; confirm whether callers expect it to be honored.
    NOTE(review): 'gzip, deflate' is advertised but the response is returned
    undecoded — presumably the server replies uncompressed; verify.
    """
    print "*** Decoded source URL: %s" % url
    # NOTE: set cookie
    cj = cookielib.MozillaCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    # Accept text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
    # Accept-Encoding gzip, deflate
    # Accept-Language de-de,de;q=0.8,en-us;q=0.5,en;q=0.3
    # Connection keep-alive
    # Cookie SERIALU=cd640e59142f39cc54ed65461dd60e10; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTM4MDU1NzM0NTY2NTQ0OTk0NTMz%22%7D%2C%22C44994%22%3A%7B%22page%22%3A3%2C%22time%22%3A1380557356398%7D%7D; amcu_n=2; advmaker_pop=1
    # DNT 1
    # Host serialu.net
    # Referer http://serialu.net/media/stil-nov/uppod.swf
    # User-Agent Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0
    request = urllib2.Request(url, None)
    request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
    request.add_header('Accept-Encoding', 'gzip, deflate')
    request.add_header('Accept-Language', 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3')
    request.add_header('Connection', 'keep-alive')
    # request.add_header('Cookie', 'SERIALU=cd640e59142f39cc54ed65461dd60e10; MarketGidStorage=%7B%220%22%3A%7B%22svspr%22%3A%22%22%2C%22svsds%22%3A3%2C%22TejndEEDj%22%3A%22MTM4MDU1NzM0NTY2NTQ0OTk0NTMz%22%7D%2C%22C44994%22%3A%7B%22page%22%3A3%2C%22time%22%3A1380557356398%7D%7D; amcu_n=2; advmaker_pop=1')
    request.add_header('DNT', 1)
    request.add_header('Host', 'serialu.net')
    request.add_header('Referer', 'http://serialu.net/media/stil-nov/uppod.swf')
    request.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) Gecko/20100101 Firefox/24.0')
    return urllib2.urlopen(request).read()
| 44.170455 | 306 | 0.516337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.478272 |
aabb5248f82bb100273f378ab5c8a89f7d30d6fb | 11,796 | py | Python | Eir/DTMC/spatialModel/randomMovement/randMoveSIRDV.py | mjacob1002/Eir | ab9cb4e353796ba3ab79b1673adc251d434717cf | [
"MIT"
] | 35 | 2021-06-11T02:33:05.000Z | 2021-12-11T06:24:17.000Z | Eir/DTMC/spatialModel/randomMovement/randMoveSIRDV.py | mjacob1002/Eir | ab9cb4e353796ba3ab79b1673adc251d434717cf | [
"MIT"
] | 2 | 2021-05-18T09:24:37.000Z | 2021-06-02T13:27:41.000Z | Eir/DTMC/spatialModel/randomMovement/randMoveSIRDV.py | mjacob1002/Eir | ab9cb4e353796ba3ab79b1673adc251d434717cf | [
"MIT"
] | 8 | 2021-07-03T12:15:30.000Z | 2021-10-31T20:20:29.000Z | import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
from Eir.DTMC.spatialModel.randomMovement.randMoveSIRD import RandMoveSIRD
from Eir.utility import Person1 as Person
class RandMoveSIRDV(RandMoveSIRD):
    """
    An SIRDV model that follows the Random Movement Model. When the individuals in the simulation move,
    they move according to a randomly generated angle and a randomly generated distance.

    Parameters:
    ----------

    S0: int
        The starting number of susceptible individuals in the simulation.

    I0: int
        The starting number of infectious individuals in the simulation.

    R0: int
        The starting number of recovered individuals in the simulation.

    V0: int
        The starting number of vaccinated individuals in the simulation.

    gamma: float
        The recovery probability of an individual going from I -> R.

    mu: float
        The probability someone dies given that they do not recover in that same time step.

    eta: float
        The probability that someone goes from S->V, given that the person didn't go from S->E in that same timestep.

    planeSize : float
        The length of each side of the square plane in which the individuals are confined to. For example,
        if planeSize=50, then the region which people in the simulation are confined to is the square with
        vertices (0,0), (50,0), (50,50), and (0,50).

    move_r: float
        The mean of the movement radius of each person in the simulation. Will be used as mean along with
        sigma_R as the standard deviation to pull from a normal distribution movement radii each time
        _move(day) function is called.

    sigma_R: float
        The standard deviation of the movement radius of each person in the simulation. Will be used along with
        move_R as the mean to pull from a normal distribution movement radii each time _move(day) function is
        called.

    spread_r: float
        The mean of the spreading radius of each person in the simulation. Will be used along with sigma_r
        as the standard deviation to pull from an normal distribution spreading radii for each individaul person
        when the RandMoveSIS object is initialized.

    sigma_r: float
        The standard deviation of the spreading radius of each person in the simulation.
        Will be used along with spread_r as the mean to pull from an normal distribution spreading radii
        for each individaul person when the RandMoveSIS object is initialized.

    days: int
        The number of days that was simulated.

    w0: float optional
        The probability of infection if the distance between an infectious person and susceptible person is 0. Default is 1.0.

    alpha: float optional
        A constant used in the _infect() method. The greater the constant, the greater the infection probability. Default is 2.0.

    Attributes
    ----------

    S: ndarray
        A numpy array that stores the number of people in the susceptible state on each given day of the simulation.

    I: ndarray
        A numpy array that stores the number of people in the infected state on each given day of the simulation.

    R: ndarray
        A numpy array that stores the number of people in the recovered state on each given day of the simulation.

    D: ndarray
        A numpy array that stores the number of people in the dead state on each given day of the simulation.

    V: ndarray
        A numpy array that stores the number of people in the vaccinated staet on each given day of the simulation.

    popsize: int
        The total size of the population in the simulation. Given by S0 + I0 + R0 + V0.

    Scollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is SUSCEPTIBLE. Has a total of popsize Person objects,
        with numbers [0, popsize).

    Icollect: list
         Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is INFECTED. Has a total of popsize Person objects,
        with numbers [0, popsize).

    Rcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is RECOVERED. Has a total of popsize Person objects,
        with numbers [0, popsize).

    Dcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is DEAD. Has a total of popsize Person objects,
        with numbers [0, popsize).

    Vcollect: list
        Used to keep track of the states each Person object is in. If the copy of a Person object has
        isIncluded == True, then the person is VACCINATED. Has a total of popsize Person objects,
        with numbers [0, popsize).


    details: Simul_Details
        An object that can be returned to give a more in-depth look into the simulation. With this object,
        one can see transmission chains, state changes, the movement history of each individaul, the state
        history of each person, and more.
    """
    def __init__(self, S0, I0, R0, V0, gamma, mu, eta, planeSize, move_r:float, sigma_R:float, spread_r:float, sigma_r: float, days:int, w0=1.0, alpha=2.0, timeDelay=-4):
        # Validate user input before constructing any state.
        self.intCheck([S0, I0, R0, V0, days])
        self.floatCheck(gamma, mu, eta, planeSize, move_r, sigma_R, spread_r, sigma_r, w0, alpha, timeDelay)
        self.negValCheck(S0, I0, R0, V0, gamma, mu, eta, planeSize, move_r, sigma_R, spread_r, sigma_r, days, w0, alpha)
        self.probValCheck([gamma, mu, eta, w0])
        # Vaccination only starts after this day (default -4 => from day 1).
        self.timeDelay = timeDelay
        # NOTE(review): R0 is passed to the parent as 0 rather than forwarding
        # the R0 argument — the parent therefore sizes popsize and the S/I/R
        # arrays without the recovered/vaccinated compartments. Confirm intent.
        super(RandMoveSIRDV, self).__init__(S0=S0, I0=I0, R0=0, gamma=gamma, mu=mu, planeSize=planeSize, move_r=move_r, sigma_R=sigma_R, spread_r=spread_r, sigma_r=sigma_r,
        days=days)
        self.eta = eta
        self.Dcollect = []
        # Re-initialize the state-collection lists built by the parent.
        self.Scollect, self.Icollect, self.Rcollect, self.Vcollect = [], [], [], []
        # NOTE(review): the arrays below are sized S0+I0, while the docstring
        # says popsize = S0+I0+R0+V0; if self.popsize exceeds S0+I0 the loop
        # below would raise IndexError, and the R/V branches would otherwise be
        # unreachable. Verify against the parent class's popsize definition.
        spreading_r = np.random.normal(spread_r, sigma_r, S0+I0)
        # generate the random x, y locations with every position within the plane being equally likely
        loc_x = np.random.random(S0+I0) * planeSize
        loc_y = np.random.random(S0+I0) * planeSize
        # create the special objects:
        for i in range(self.popsize):
            # create the person object
            # for this model, the people will move with random radius R each timestep
            # therefore, the R component can be made 0, as that is only relevant for the
            # periodic mobility model
            p1 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
            p2 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
            p3 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
            p4 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
            p5 = Person(loc_x[i], loc_y[i], 0, spreading_r[i])
            self.details.addLocation(0, (loc_x[i], loc_y[i]))
            # if the person is in the susceptible objects created
            if i < S0:
                p1.isIncluded = True
                self.details.addStateChange(i, "S", 0)
            elif S0 <= i < S0+I0:
                p2.isIncluded = True
                self.details.addStateChange(i, "I", 0)
            elif i < S0 +I0 + R0:
                p3.isIncluded=True
                self.details.addStateChange(i, "R", 0)
            else:
                p4.isIncluded=True
                self.details.addStateChange(i, "V", 0)
            # append them to the data structure
            # (p5 is the person's DEAD-state placeholder; nobody starts dead)
            self.Scollect.append(p1)
            self.Icollect.append(p2)
            self.Rcollect.append(p3)
            self.Vcollect.append(p4)
            self.Dcollect.append(p5)
            self.details.addLocation(0, (p1.x, p1.y))
        # Daily counts for the two compartments the parent doesn't track.
        self.D = np.zeros(days+1)
        self.V = np.zeros(days+1)
        self.V[0] = V0

    def _StoV(self):
        # S -> V transition: each susceptible is vaccinated with probability eta.
        return self._changeHelp(self.Scollect, self.eta)

    def run(self, getDetails=True):
        """
        Run the actual simulation.

        Parameters
        ----------

        getDetails: bool optional
            If getDetails=True, then run will return a Simul_Details object which will allow the user to
            examine details of the simulation that aren't immediately obvious.

        Returns
        -------

        Simul_Details:
            Allows the user to take a deeper look into the dynamics of the simulation by examining transmission
            chains. User can also examine transmission history and state changes of individuals in the object
            by utilizing the Simul_Details object.
        """
        # for all the days in the simulation
        for i in range(1, self.days+1):
            #print("Day ", i)
            #print("Location: (", self.Scollect[0].x, ",", self.Scollect[0].y, ").")
            # run the state changes
            StoI = self._StoI(i)
            # Vaccination only happens once the rollout delay has passed.
            StoV = set()
            if i > self.timeDelay:
                StoV = self._StoV()
            ItoR = self._ItoR()
            ItoD = self._ItoD()
            # change the indices of the transfers
            self._stateChanger(StoI, self.Icollect, "I", i)
            self._stateChanger(ItoR, self.Rcollect, "R", i)
            self._stateChanger(ItoD, self.Dcollect, "D", i)
            self._stateChanger(StoV, self.Vcollect, "V", i)
            # make everyone move randomly, don't move dead people
            self._move(i, [self.Scollect, self.Icollect, self.Rcollect, self.Vcollect])
            # change the values in the arrays
            self.S[i] = self.S[i-1] - len(StoI) - len(StoV)
            self.I[i] = self.I[i-1] + len(StoI) - len(ItoR) - len(ItoD)
            self.R[i] = self.R[i-1] + len(ItoR)
            self.V[i] = self.V[i-1] + len(StoV)
            self.D[i] = self.D[i-1] + len(ItoD)
        if getDetails:
            return self.details

    def toDataFrame(self):
        """
        Gives user access to pandas dataframe with amount of people in each state on each day.

        Returns
        -------

        pd.DataFrame
            DataFrame object containing the number of susceptibles and number of infecteds on each day.

        """
        # create the linspaced numpy array
        t = np.linspace(0, self.days, self.days + 1)
        # create a 2D array with the days and susceptible and infected arrays
        # do it over axis one so that it creates columns days, susceptible, infected
        arr = np.stack([t, self.S, self.I, self.R, self.V, self.D], axis=1)
        df = pd.DataFrame(arr, columns=["Days", "Susceptible", "Infected", "Recovered", "Vaccinated", "Dead"])
        return df

    def plot(self):
        """Plot each compartment's daily counts on a shared-x five-panel figure."""
        t = np.linspace(0, self.days, self.days+1)
        fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')
        ax1.plot(t, self.S, label="Susceptible", color='r')
        ax1.set_title("Random Movement SIRDV")
        ax1.set_ylabel("# Susceptibles")
        ax2.plot(t, self.I, label="Infected", color='g')
        ax2.set_ylabel("# Active Cases")
        ax3.plot(t, self.R, label="Recovered", color='c')
        ax3.set_ylabel("# Recovered")
        ax4.plot(t, self.V, label="Vaccinated", color='b')
        ax4.set_ylabel("# Vaccinated")
        ax5.set_xlabel("Days")
        ax5.set_ylabel("# Dead")
        ax5.plot(t, self.D, label="Dead")
        ax1.legend()
        ax2.legend()
        ax3.legend()
        ax4.legend()
        ax5.legend()
        plt.show()
| 44.014925 | 172 | 0.622753 | 11,579 | 0.981604 | 0 | 0 | 0 | 0 | 0 | 0 | 7,165 | 0.607409 |
aabc2c45a2f070f9b91c1f8410ef7d7691faf98d | 183 | py | Python | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | localtalk/application.py | mattcollie/LocalTalk | d17765243cd23d09024544a763a18226be16c50c | [
"MIT"
] | null | null | null | from localtalk import create_app, create_server
# Build the Flask-style app and the chat server.
app = create_app()
server = create_server()
# server.start()
# NOTE(review): the server object is created but never started (start() is
# commented out) — confirm whether it should run alongside the app.
if __name__ == '__main__':
    app.run(debug=True, host='localhost')
| 15.25 | 47 | 0.715847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.202186 |
aabc5d5e01aac743c814d779d94ae7e8069ace0d | 3,087 | py | Python | generate_numpy_data.py | kamleshpawar17/FeTS2021 | 95035ee500721e1c3f9c4b7aed3c105f0d499274 | [
"MIT"
] | 1 | 2022-02-22T00:38:33.000Z | 2022-02-22T00:38:33.000Z | generate_numpy_data.py | kamleshpawar17/FeTS2021 | 95035ee500721e1c3f9c4b7aed3c105f0d499274 | [
"MIT"
] | null | null | null | generate_numpy_data.py | kamleshpawar17/FeTS2021 | 95035ee500721e1c3f9c4b7aed3c105f0d499274 | [
"MIT"
] | null | null | null | import glob
import os
import numpy as np
import nibabel as nb
import argparse
def get_dir_list(train_path):
    """Return the parent directory of every path matching a glob pattern.

    Parameters
    ----------
    train_path : str
        Glob pattern (e.g. './data/nifti/train/*/*seg.nii.gz') locating one
        reference file per case directory.

    Returns
    -------
    list of str
        Directory component of each matched path, in glob order (may contain
        duplicates when several matches share a directory).
    """
    # The original enumerate() index was unused; a comprehension is clearer.
    return [os.path.split(fname)[0] for fname in glob.glob(train_path)]
def ParseData(list_data):
    '''
    Creates a list of all the slices.

    For every case directory, loads the segmentation volume and records one
    [dir_name, orientation, slice_index] entry for each sagittal/coronal/axial
    slice whose segmentation is non-empty (i.e. contains annotated voxels).

    Parameters: list_data - list of case directories, each containing a
    '*seg.nii.gz' file. Returns: list of [str, str, int] slice descriptors.
    '''
    data_instance = []
    for dir_name in list_data:
        fname = glob.glob(os.path.join(dir_name, '*seg.nii.gz'))
        f = nb.load(fname[0])
        img = f.get_fdata().astype('float32')
        h, w, d = f.shape # sag, cor, ax
        # Keep only slices with at least one labeled voxel in each orientation.
        for slc in range(h):
            if np.sum(img[slc, :, :]) != 0:
                data_instance.append([dir_name, 'sag', slc])
        for slc in range(w):
            if np.sum(img[:, slc, :]) != 0:
                data_instance.append([dir_name, 'cor', slc])
        for slc in range(d):
            if np.sum(img[:, :, slc]) != 0:
                data_instance.append([dir_name, 'ax', slc])
    print('Number of images: ', len(data_instance))
    return data_instance
def get_slice(dir_name, orient, slc, cont, isNorm=True):
    '''
    takes the directory name, orientation, slice number and reads a slice, zero pad/crop and normalize

    NOTE(review): despite the description above, no zero padding, cropping or
    normalization happens here — the 'isNorm' parameter is accepted (callers
    pass isNorm=False for segmentations) but never used. Confirm whether
    normalization was meant to be implemented.

    Parameters: dir_name - case directory; orient - 'sag'/'cor'/anything else
    (treated as axial); slc - slice index; cont - glob pattern selecting the
    contrast file (e.g. '*flair.nii.gz'). Returns a float32 array of shape
    (1, H, W) — the slice with a leading channel axis.
    '''
    # ---- get slice for given contrast image ---- #
    fname = glob.glob(os.path.join(dir_name, cont))
    f = nb.load(fname[0])
    img = np.squeeze(f.get_fdata()).astype('float32')
    # Index the requested orientation axis; default branch is axial.
    if orient == 'sag':
        x = img[slc, :, :]
    elif orient == 'cor':
        x = img[:, slc, :]
    else:
        x = img[:, :, slc]
    return np.expand_dims(x, 0)
def get_batchsize_one(dir_name, orient, slc):
    '''
    Build one training sample for the given slice: the four MR contrasts plus
    the segmentation, stacked along the channel axis.

    Parameters: dir_name - case directory; orient - 'sag'/'cor'/'ax';
    slc - slice index. Returns an array of shape (5, H, W) with channels in
    the order (flair, t1, t2, t1ce, seg).
    '''
    # ---- get images ---- #
    # The original local names were misleading (x_t1 actually held the FLAIR
    # image, etc.); they are renamed here to match the file each one loads.
    # The channel order in the concatenation below is unchanged.
    x_flair = get_slice(dir_name, orient, slc, '*flair.nii.gz')
    x_t1 = get_slice(dir_name, orient, slc, '*t1.nii.gz')
    x_t2 = get_slice(dir_name, orient, slc, '*t2.nii.gz')
    x_t1ce = get_slice(dir_name, orient, slc, '*t1ce.nii.gz')
    # Segmentation stays unnormalized; label 4 is remapped to 3 so the label
    # set is contiguous {0, 1, 2, 3}.
    x_seg = get_slice(dir_name, orient, slc, '*seg.nii.gz', isNorm=False).astype('int')
    x_seg[x_seg == 4] = 3
    # (flair, t1, t2, t1ce, seg)
    return np.concatenate((x_flair, x_t1, x_t2, x_t1ce, x_seg), 0)
def generate_data(src_path, dst_path):
    """Convert every annotated slice matched by src_path into a .npy sample
    saved under dst_path, named by its running index."""
    instances = ParseData(get_dir_list(src_path))
    total = len(instances)
    for idx, (dir_name, orient, slc) in enumerate(instances):
        print(idx, ' of ', total)
        sample = get_batchsize_one(dir_name, orient, slc)
        np.save(os.path.join(dst_path, str(idx) + '.npy'), sample)
# ---- Arguments ---- #
# NOTE: arguments are parsed at import time, not inside the __main__ guard.
ap = argparse.ArgumentParser()
ap.add_argument("-sp", "--src_path", type=str, default='./data/nifti/train/*/*seg.nii.gz')
ap.add_argument("-dp", "--dst_path", type=str, default='./data/np/train/')
args = vars(ap.parse_args())

if __name__ == '__main__':
    '''
    Script to convert nifti images to numpy array for faster loading
    '''
    src_path = args['src_path']
    dst_path = args['dst_path']
    generate_data(src_path, dst_path)
| 34.3 | 103 | 0.597344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.232264 |
aabc8bf96be40a3c1147bd07e49acc07038b9620 | 220 | py | Python | podcast/tests/urls.py | richardcornish/django-applepodcast | 50732acfbe1ca258e5afb44c117a6ac5fa0c1219 | [
"BSD-3-Clause"
] | 7 | 2017-11-18T13:02:13.000Z | 2021-07-31T21:55:24.000Z | podcast/tests/urls.py | dmitriydef/django-applepodcast | 50732acfbe1ca258e5afb44c117a6ac5fa0c1219 | [
"BSD-3-Clause"
] | 24 | 2017-07-17T21:53:58.000Z | 2018-02-16T07:13:39.000Z | podcast/tests/urls.py | dmitriydef/django-applepodcast | 50732acfbe1ca258e5afb44c117a6ac5fa0c1219 | [
"BSD-3-Clause"
] | 4 | 2017-09-21T12:43:54.000Z | 2020-07-19T21:56:30.000Z | try:
from django.urls import include, re_path
except ImportError:
from django.conf.urls import include, url as re_path
urlpatterns = [
re_path(r'^podcast/', include('podcast.urls', namespace='podcast')),
]
| 22 | 72 | 0.713636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.159091 |
aabcc6bd93dd565fe8912f5540d060fac3483c64 | 2,246 | py | Python | odoo_actions/odoo_client/common.py | catalyst-cloud/adjutant-odoo | 6d1e473710e1757b92b4344d65d5bd106677fe36 | [
"Apache-2.0"
] | 1 | 2020-05-01T18:28:39.000Z | 2020-05-01T18:28:39.000Z | odoo_actions/odoo_client/common.py | catalyst-cloud/adjutant-odoo | 6d1e473710e1757b92b4344d65d5bd106677fe36 | [
"Apache-2.0"
] | null | null | null | odoo_actions/odoo_client/common.py | catalyst-cloud/adjutant-odoo | 6d1e473710e1757b92b4344d65d5bd106677fe36 | [
"Apache-2.0"
] | 2 | 2018-09-20T05:01:34.000Z | 2020-10-17T04:31:47.000Z | from collections import Iterable
class BaseManager(object):
    """Thin convenience wrapper around an Odoo resource environment.

    Subclasses must assign ``self.resource_env`` in ``__init__``.
    NOTE(review): the module imports ``Iterable`` from ``collections``; on
    Python >= 3.10 that alias is gone and it must come from ``collections.abc``.
    """

    # Optional list of field names fetched by read-style lookups.
    fields = None

    class Meta:
        abstract = True

    def _is_iterable(self, ids):
        # Normalize a scalar (or a string, which is iterable but denotes a
        # single id) into a one-element list; real iterables pass through.
        if isinstance(ids, str) or not isinstance(ids, Iterable):
            return [ids]
        return ids

    def get(self, ids, read=False):
        """Get one or more Resources by id.

        ``ids`` may be a single id or a list of ids; the result is always a
        list, for consistency:

            <resource>.get(<id>)             -> [<object_of_id>]
            <resource>.get([<id>])           -> [<object_of_id>]
            <resource>.get([<id_1>, <id_2>]) -> [<obj_1>, <obj_2>]

        With ``read=True`` the records are read (restricted to ``self.fields``)
        instead of browsed.
        """
        normalized = self._is_iterable(ids)
        if read:
            return self.resource_env.read(normalized, fields=self.fields)
        return self.resource_env.browse(normalized)

    def list(self, filters, get=True, read=False):
        """Get a list of Resources matching ``filters``.

        ``filters`` is a list of search triples, e.g. ``[('field', '=', value)]``.
        When ``get`` is False only the matching ids are returned.
        """
        matching_ids = self.resource_env.search(filters)
        return self.get(matching_ids, read) if get else matching_ids

    def create(self, **fields):
        """Create a Resource from keyword arguments.

        Allows slightly nicer syntax than having to pass in a dict.
        """
        return self.resource_env.create(fields)

    def load(self, fields, rows):
        """Load Resources in bulk.

        ``fields`` is the list of field names to import (list of str);
        ``rows`` is the row data (list of list of str).
        """
        return self.resource_env.load(fields=fields, data=rows)

    def delete(self, ids):
        """Delete one or more Resources by id (scalar or list accepted).

        Returns True if deleted or not present.
        """
        return self.resource_env.unlink(self._is_iterable(ids))
| 29.168831 | 66 | 0.580142 | 2,210 | 0.983972 | 0 | 0 | 0 | 0 | 0 | 0 | 1,243 | 0.553428 |
aabdff6b46e83b814599086ebf3ca4b5caeb3757 | 2,070 | py | Python | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | biokeypy/moduleForShowingJudges.py | zacandcheese/biokeypy | d421e8be0b407fd1df395c79ffde409ca80066e2 | [
"MIT"
] | null | null | null | #moduleForShowingJudges
#cmd /K "$(FULL_CURRENT_PATH)"
#cd ~/Documents/GitHub/Keyboard-Biometric-Project/Project_Tuples
#sudo python -m pip install statistics
#python analyzeData.py
"""
Author: Zachary Nowak and Matthew Nowak
Date: 3/09/2018
Program Description: This code can record the
Press Time and Flight Time of a tuple as a user
types a passage and it saves a matrix to a file.
"""
__version__ = '1.0'
__author__ = 'Zachary Nowak'
"""STANDARD LIBRARY IMPORTS"""
import json
import platform
import os
"""LOCAL LIBRARY IMPORTS"""
import moduleForSavingTimelines as ST
import moduleForRecordingWithGUI as GUI
import moduleForCreatingPasswordSentence as PS
import moduleForDeconstructingTimelines as DT
import moduleForAuthenticatingUsers as AU
import moduleForFindingTuples as FT
import moduleForGettingSentence as GS
import moduleForPlotting as P
"""FOLDER IMPORTS"""
infile = "data/451.txt"# passage for training people.
#tupleList = FT.allPeople()
# Key tuples (trigraphs) whose timing features are analyzed.
tupleList = ["his", "the","ing"]
location = ""
# NOTE(review): 'name' and 'passage' are only assigned inside the Windows
# branch (and 'passage' only on the "n"/"y" paths) — on other platforms, or
# after choosing "z"/"c", the GUI.start_recording(passage) call below raises
# NameError. Confirm whether "z"/"c" were meant to exit early.
if(platform.system() == "Windows"):#WINDOWS
    name = input("What is your name: ")
    # Keep prompting until a recognized mode letter is entered.
    while(not(location in ["y","n","z","c"])):
        location = input("Is this training data?(y/n) ")
    if(location == "n"):
        # Application mode: type a short fixed passage for verification.
        location = "Applying/"
        passage = ("The thing likes learning his history.There the thing sings.This is what the thing sings.").split(".")
    elif(location == "z"):
        # Plot mode: render timing plots for the chosen tuples.
        os.chdir("judgeslib")
        P.plot(tupleList)
    elif(location == "c"):
        # Clear mode: wipe stored timelines.
        os.chdir("judgeslib")
        DT.clearAll()
    else:
        # Training mode: type the full training passage from the data file.
        location = "Database/"
        passages = open(infile,"r").read().split(".")
        passage2 = passages[1].split(",")
        passage = passages + passage2
        passage.remove(passages[1])
"""TYPE THE PASSAGE AND RECORD THE TIME LINE"""
pressTimeLine,pressCharTimeLine,releaseTimeLine,releaseCharTimeLine = GUI.start_recording(passage)
# NOTE(review): after the "z"/"c" branches the working directory is already
# 'judgeslib', so this chdir would target 'judgeslib/judgeslib' — verify.
os.chdir("judgeslib/")
ST.saveTimeLine(pressTimeLine,pressCharTimeLine,name,location)
DT.userSummary(name,location)
if(location == "Applying/"):
    #AU.newData(tupleList)
    print("Now to verify")
    AU.verify(tupleList,name)
#IMPLIMENT MATPLOTLIB
#IMPLIMENT CLEAR FEATURE
| 27.236842 | 114 | 0.746377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.461836 |
aabebbb877a1c20f697d7ae81aa297513ca02e1b | 55 | py | Python | dcstats/__init__.py | aplested/DC_Pyps | da33fc7d0e7365044e368488d1c7cbbae7473cc7 | [
"MIT"
] | 1 | 2021-03-25T18:09:25.000Z | 2021-03-25T18:09:25.000Z | dcstats/__init__.py | aplested/DC_Pyps | da33fc7d0e7365044e368488d1c7cbbae7473cc7 | [
"MIT"
] | null | null | null | dcstats/__init__.py | aplested/DC_Pyps | da33fc7d0e7365044e368488d1c7cbbae7473cc7 | [
"MIT"
] | null | null | null | from dcstats import *
from _version import __version__
| 18.333333 | 32 | 0.836364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aabf2126b9910565d37c2a5e085fe433495cd4f3 | 1,081 | py | Python | 206_reverse_linked_list.py | wasim92007/leetcode | 6f5add68ec35aec445b32668129990c66549c584 | [
"MIT"
] | null | null | null | 206_reverse_linked_list.py | wasim92007/leetcode | 6f5add68ec35aec445b32668129990c66549c584 | [
"MIT"
] | null | null | null | 206_reverse_linked_list.py | wasim92007/leetcode | 6f5add68ec35aec445b32668129990c66549c584 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
## By passing the case if empty linked list
if not head:
return head
else:
## We will be using two nodes method with prev and curr
## intialized to None and head respectively
prev, curr = None, head
## We will be traversing throught the linked list
while curr:
## Let us temporalily save the rest of the linked list
## right to the curr node in rest_ll
rest_ll = curr.next
## Make the curr point to the pre
curr.next = prev
## Prev point to the curr
prev = curr
## Update curr to point to the rest of the ll
curr = rest_ll
return prev
| 34.870968 | 74 | 0.513414 | 929 | 0.859389 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.489362 |
aabfd5bbe9382d35bafe6c720396d8fc59846fad | 973 | py | Python | python/test_func.py | cuihua-more/code_strudy | fd4e190b7a3640869105db4e09b5d0101bed18e9 | [
"Apache-2.0"
] | null | null | null | python/test_func.py | cuihua-more/code_strudy | fd4e190b7a3640869105db4e09b5d0101bed18e9 | [
"Apache-2.0"
] | null | null | null | python/test_func.py | cuihua-more/code_strudy | fd4e190b7a3640869105db4e09b5d0101bed18e9 | [
"Apache-2.0"
] | null | null | null | import unittest
def get_formatted_name(first, last, middle = ""):
    """Return a neatly formatted, title-cased full name.

    The middle name is included only when it is non-empty.
    """
    parts = [first, middle, last] if middle else [first, last]
    return " ".join(parts).title()
class NamesTestCase(unittest.TestCase):  # Test class; inheriting from unittest.TestCase enables automatic test discovery
    """Tests for the get_formatted_name function."""
    def test_first_last_name(self):  # Concrete test method; all methods starting with "test" run automatically
        """Can it correctly handle names like 'Jains Jpolin'?"""
        formatted_name = get_formatted_name("jains", "jpolin")  # exercise the function under test
        self.assertEqual(formatted_name, "Jains Jpolin")  # assert: does the result match the expectation?
    def test_first_last_middle_name(self):  # must start with "test"
        """Can it correctly handle names like 'Wolfgang Amadeus Mozart'?"""
        formatted_name = get_formatted_name("wolfgang", "mozart", "amadeus")
        self.assertEqual(formatted_name, "Wolfgang Amadeus Mozart")
if __name__ == "__main__":  # __name__ is "__main__" only when this file is run as a script
    unittest.main()  # run the test cases
aac0f353687286013f76de3fe4744864c20ace98 | 2,350 | py | Python | api.py | bart02/RaspTomskBot | 331df3acd0ae1ffaadaa778130733c4749035d2b | [
"MIT"
] | null | null | null | api.py | bart02/RaspTomskBot | 331df3acd0ae1ffaadaa778130733c4749035d2b | [
"MIT"
] | 2 | 2019-04-06T12:00:53.000Z | 2020-07-03T12:49:34.000Z | api.py | bart02/RaspTomskBot | 331df3acd0ae1ffaadaa778130733c4749035d2b | [
"MIT"
] | null | null | null | import requests as r
from collections import defaultdict
class session():
    """Stateful JSON-RPC client for the Tomsk public-transport timetable API.

    A server-side session is opened on construction; its ``sid`` is attached
    to every later call and transparently renewed when the server reports it
    has expired.
    """
    # JSON-RPC 2.0 envelope shared by all requests; method/params are merged in.
    req = {"jsonrpc": "2.0", "id": 1}
    sid = None
    def __init__(self, server='http://raspisanie.admin.tomsk.ru/api/rpc.php'):
        self.server = server
        # Open a server-side session and remember its id for later calls.
        self.sid = self.request("startSession")['sid']
    def request(self, method, **params):
        """POST a JSON-RPC call and return its 'result' field.

        When the server answers with error code -33100 (stale session) the
        session is re-opened and the call retried; any other error is raised.
        """
        if self.sid:
            # Attach the session id (plus an empty ok_id the API expects).
            params['sid'] = self.sid
            params['ok_id'] = ''
        body = dict(self.req, **{"method": method, "params": params})
        ans = r.post(self.server, json=body).json()
        if 'result' in ans:
            return ans['result']
        elif 'error' in ans:
            if ans['error']['code'] == -33100:  # new session required
                print('New session')
                # Re-run __init__ to get a fresh sid, then retry this call.
                self.__init__(self.server)
                return self.request(method, **params)
            else:
                raise Exception(ans['error'])
        else:
            raise Exception(ans)
    def search_stop(self, query):
        """Find stops by (partial) name, merging stops that share a title.

        Same-named stops collapse into one record whose ``st_id`` becomes a
        list of all matching ids; latitude/longitude fields are dropped.
        """
        result = self.request('getStopsByName', str=query)
        stops = defaultdict(list)
        for e in result:
            e['st_id'] = [e['st_id']]
            stops[e['st_title']].append(e)
        for stop, obj in stops.items():
            info = obj[0]
            for e in obj[1:]:
                info['st_id'].append(e['st_id'][0])
            info.pop('st_lat')
            info.pop('st_long')
            stops[stop] = info
        return list(stops.values())
    def get_stop_arrivals(self, stop_id):
        """Return raw arrival forecasts for a single stop id."""
        return self.request('getStopArrive', st_id=stop_id)
    def get_stops_arrivals(self, stops_id):
        """Aggregate arrival forecasts over several stop ids.

        Result is keyed by (route number, race type); each value carries the
        terminal-station names and a list of arrival times, where ``inv``
        presumably flags low-floor/accessible vehicles — TODO confirm.
        """
        m = {}
        for stop_id in stops_id:
            for bus in self.get_stop_arrivals(stop_id):
                if not (bus['mr_num'], bus['rl_racetype']) in m:
                    m[(bus['mr_num'], bus['rl_racetype'])] = {'to': bus['laststation_title'],
                                                              'to_eng': bus['laststation_title_en'],
                                                              'units': [{'time': bus['tc_arrivetime'], 'inv': bool(int(bus['u_inv']))}]}
                else:
                    m[(bus['mr_num'], bus['rl_racetype'])]['units'].append({'time': bus['tc_arrivetime'], 'inv': bool(int(bus['u_inv']))})
        return m
| 35.606061 | 139 | 0.488936 | 2,285 | 0.97234 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.197872 |
aac10d4f658b5c83786e200cf1103c6f1cea1eed | 785 | py | Python | donor/models.py | noRubidium/VampirePty | 69d9d42c0c5eddc3b363270287e468064d8b3d6c | [
"MIT"
] | null | null | null | donor/models.py | noRubidium/VampirePty | 69d9d42c0c5eddc3b363270287e468064d8b3d6c | [
"MIT"
] | null | null | null | donor/models.py | noRubidium/VampirePty | 69d9d42c0c5eddc3b363270287e468064d8b3d6c | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from hospital.models import Hospital
# Create your models here.
class Donor(models.Model):
    """A registered blood donor, linked to the hospital that registered them."""
    name = models.CharField(max_length = 200)
    username = models.CharField(max_length = 200)
    # NOTE(review): password is stored as plain text here; it should be hashed
    # (e.g. via django.contrib.auth) — confirm before any production use.
    password = models.CharField(max_length = 200)
    gender = models.CharField(max_length = 1)
    blood_type = models.CharField(max_length=3)
    # Hospital that registered / vouches for this donor.
    linking_agent = models.ForeignKey(Hospital, on_delete = models.CASCADE)
    DOB = models.DateField()
    address = models.CharField(max_length = 200)
    phone = models.CharField(max_length = 200)
    # Date the donor's details were last verified.
    last_verified = models.DateField()
    # Coordinates at 2 decimal places (max 5 digits) — coarse precision.
    latitude = models.DecimalField(decimal_places = 2, max_digits = 5)
    longitude = models.DecimalField(decimal_places = 2, max_digits = 5)
| 39.25 | 75 | 0.742675 | 649 | 0.826752 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.033121 |
aac268001a0c60ebe8f24fe5cd1af73800034677 | 763 | py | Python | tests/e2e/example/flowapi/test_handler.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | tests/e2e/example/flowapi/test_handler.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | 15 | 2020-12-05T13:52:13.000Z | 2020-12-19T10:14:40.000Z | tests/e2e/example/flowapi/test_handler.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | from unittest import TestCase
from lf3py.test.helper import data_provider
from tests.helper.example.flowapi import perform_api
class TestHandler(TestCase):
    """End-to-end test for the example flowapi handler via perform_api."""
    @data_provider([
        # Each tuple is (API-Gateway-style event, expected handler response).
        (
            {
                'path': '/models',
                'httpMethod': 'GET',
                'headers': {},
                'queryStringParameters': {},
            },
            {
                'statusCode': 200,
                'headers': {'Content-Type': 'application/json'},
                'body': {
                    'models': [
                        {'id': 1234},
                    ],
                },
            },
        ),
    ])
    def test_index(self, event: dict, expected: dict):
        # perform_api drives the handler with the given event and returns
        # the serialized response to compare against.
        self.assertEqual(perform_api(event), expected)
| 25.433333 | 64 | 0.441678 | 631 | 0.826999 | 0 | 0 | 598 | 0.783748 | 0 | 0 | 135 | 0.176933 |
aac281ff0acf085ecad07a736ee96b4b5d3fb62e | 7,651 | py | Python | hard-gists/3a2a081e4f3089920fd8aecefecbe280/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/3a2a081e4f3089920fd8aecefecbe280/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/3a2a081e4f3089920fd8aecefecbe280/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | '''Trains a simple convnet on the MNIST dataset.
Does flat increment from T. Xiao "Error-Driven Incremental Learning in Deep Convolutional
Neural Network for Large-Scale Image Classification"
Starts with just 3 classes, trains for 12 epochs then
incrementally trains the rest of the classes by reusing
the trained weights.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
def build_data(classes,total_classes,X_train_all,y_train_all,X_test_all,y_test_all):
    """Select the given digit classes from MNIST and relabel them 0..len(classes)-1.

    Returns (X_train, Y_train, X_test, Y_test) where X arrays are scaled to
    [0, 1] and reshaped to (N, 1, img_rows, img_cols) (channels-first), and Y
    arrays are one-hot over `total_classes` columns.
    Relies on module-level `img_rows`/`img_cols` set in the __main__ block.
    """
    # Collect indices of all samples whose label is one of `classes`.
    train_ind = []
    test_ind = []
    for c in classes:
        train_ind.extend(list(np.where(y_train_all==c)[0]))
        test_ind.extend(list(np.where(y_test_all==c)[0]))
    X_train = X_train_all[train_ind,:,:]
    X_test = X_test_all[test_ind,:,:]
    y_train_true = y_train_all[train_ind]
    y_train = np.zeros(y_train_true.shape)
    y_test_true = y_test_all[test_ind]
    y_test = np.zeros(y_test_true.shape)
    # Remap the original digit labels to consecutive indices 0..k-1, in the
    # order they appear in `classes` (position in the softmax layer).
    for i,c in enumerate(classes):
        train_ind = list(np.where(y_train_true==c)[0])
        test_ind = list(np.where(y_test_true==c)[0])
        y_train[train_ind] = i
        y_test[test_ind] = i
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, total_classes)
    Y_test = np_utils.to_categorical(y_test, total_classes)
    return X_train, Y_train, X_test, Y_test
def build_model(old_model=None):
    """Build the conv-conv-pool-dense feature extractor (no output layer).

    When `old_model` is given, the trained weights of its two convolutional
    layers and its 128-unit dense layer are copied into the new network so
    incremental training can reuse them. The caller is expected to append the
    final Dense(num_classes) + softmax layers.
    Uses module-level `nb_filters`, `nb_conv`, `nb_pool`, `img_rows`, `img_cols`.
    """
    model = Sequential()
    if old_model is None:
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    else:
        # Layer index 0 of the old model is the first Convolution2D.
        weights = old_model.layers[0].get_weights()
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',weights=weights,
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    if old_model is None:
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    else:
        # Layer index 2 is the second Convolution2D (index 1 is the ReLU).
        weights = old_model.layers[2].get_weights()
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv,weights=weights))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    if old_model is None:
        model.add(Dense(128))
    else:
        # Layer index 7 is the 128-unit Dense layer after Flatten.
        weights = old_model.layers[7].get_weights()
        model.add(Dense(128,weights=weights))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    return model
if __name__ == '__main__':
    # Driver: first train a 3-class model, then incrementally extend it to all
    # 10 MNIST classes by reusing the learned weights (flat increment).
    MODEL_TRAINED = False
    # input image dimensions
    img_rows, img_cols = 28, 28
    # the data, shuffled and split between train and test sets
    (X_train_all, y_train_all), (X_test_all, y_test_all) = mnist.load_data()
    if not MODEL_TRAINED:
        batch_size = 256
        total_classes = 10
        nb_epoch = 12
        # number of convolutional filters to use
        nb_filters = 32
        # size of pooling area for max pooling
        nb_pool = 2
        # convolution kernel size
        nb_conv = 3
        # Initial subset of classes; their order fixes their softmax positions.
        classes = [9,1,6]
        X_train, Y_train, X_test, Y_test = build_data(classes,3,
            X_train_all,y_train_all,X_test_all,y_test_all)
        model1 = build_model()
        model1.add(Dense(len(classes)))
        model1.add(Activation('softmax'))
        model1.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
        model1.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
        # Save this model for later interrogation
        # NOTE(review): file handles from open(...).write(...) are never closed
        # explicitly; consider a `with` block.
        json_string = model1.to_json()
        open('model1_incremental_architecture.json', 'w').write(json_string)
        model1.save_weights('model1_incremental_weights.h5')
        score = model1.evaluate(X_test, Y_test, verbose=0)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # Now create a new model with all total_classes in the softmax layer. Copy over the weights to
        # this new network and initialize the new class connections randomly.
        model2 = build_model(old_model=model1)
        model2.add(Dense(total_classes))
        # Replace the corresponding weights of the new network with the previously trained class weights
        weights = model2.layers[-1].get_weights()
        old_weights = model1.layers[-2].get_weights() # Last dense layer is second to last layer
        # The old classes occupy the LAST len(classes) softmax slots.
        weights[0][:,-len(classes):] = old_weights[0]
        weights[1][-len(classes):] = old_weights[1]
        model2.layers[-1].set_weights(weights)
        model2.add(Activation('softmax'))
        model2.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
        new_classes = [7, 0, 3, 5, 2, 8, 4]
        # class_mapping[i] is the real digit for softmax position i.
        class_mapping = new_classes[:]
        class_mapping.extend(classes)
        X_train, Y_train, X_test, Y_test = build_data(new_classes,10,
            X_train_all,y_train_all,X_test_all,y_test_all)
        model2.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, validation_data=(X_test, Y_test))
        score = model2.evaluate(X_test, Y_test, verbose=0)
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # Save the incrementally trained model
        json_string = model2.to_json()
        open('model2_incremental_architecture.json', 'w').write(json_string)
        model2.save_weights('model2_incremental_weights.h5')
        X_test = X_test_all.reshape(X_test_all.shape[0], 1, img_rows, img_cols)
        X_test = X_test.astype('float32')
        X_test /= 255
        # Convert class vectors to binary class matrices
        # Note, that when a new image is presented to this network, the label of the image must be
        # fed into class_mapping to get the "real" label of the output
        y_test = np.array([class_mapping.index(c) for c in y_test_all])
        Y_test = np_utils.to_categorical(y_test, total_classes)
        score = model2.evaluate(X_test, Y_test, verbose=1)
        print('Total Test score:', score[0])
        print('Total Test accuracy:', score[1])
    else:
        # Load the incrementally trained model and test it
        model = model_from_json(open('model2_incremental_architecture.json').read())
        model.load_weights('model2_incremental_weights.h5')
        model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
        classes = [7, 0, 3, 5, 2, 8, 4, 9, 1, 6]
        X_train, Y_train, X_test, Y_test = build_data(classes,10,
            X_train_all,y_train_all,X_test_all,y_test_all)
        score = model.evaluate(X_test, Y_test, verbose=1)
        print('Total Test score:', score[0])
        print('Total Test accuracy:', score[1])
        score = model.evaluate(X_train, Y_train, verbose=1)
        print('Total Train score:', score[0])
        print('Total Train accuracy:', score[1])
| 40.268421 | 104 | 0.654947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,838 | 0.24023 |
aac2c6dc40769fdda9ba80e76f62c737e34017f8 | 717 | py | Python | loss_fn/hybrid_loss.py | alireza-nasiri/SoundCLR | 778a4c24b5f15f5ce563ebe71dd443d3e77eb4ef | [
"MIT"
] | 7 | 2021-03-03T18:53:59.000Z | 2022-03-03T03:15:36.000Z | loss_fn/hybrid_loss.py | alireza-nasiri/SoundCLR | 778a4c24b5f15f5ce563ebe71dd443d3e77eb4ef | [
"MIT"
] | 3 | 2021-04-12T13:05:01.000Z | 2021-06-22T02:23:03.000Z | loss_fn/hybrid_loss.py | alireza-nasiri/SoundCLR | 778a4c24b5f15f5ce563ebe71dd443d3e77eb4ef | [
"MIT"
] | 4 | 2021-03-17T02:23:59.000Z | 2021-11-23T14:08:27.000Z | import torch
import torch.nn as nn
from loss_fn import contrastive_loss
import config
class HybridLoss(nn.Module):
    """Weighted combination of supervised-contrastive and cross-entropy losses."""

    def __init__(self, alpha=0.5, temperature=0.07):
        super().__init__()
        # alpha weights the contrastive term; (1 - alpha) weights cross-entropy.
        self.contrastive_loss = contrastive_loss.SupConLoss(temperature)
        self.alpha = alpha

    def cross_entropy_one_hot(self, input, target):
        """Cross-entropy for logits *input* against a one-hot *target*."""
        _, labels = target.max(dim=1)
        criterion = nn.CrossEntropyLoss()
        return criterion(input, labels)

    def forward(self, y_proj, y_pred, label, label_vec):
        """Return (alpha * contrastive_loss, (1 - alpha) * cross_entropy)."""
        con_term = self.contrastive_loss(y_proj.unsqueeze(1), label.squeeze(1))
        ce_term = self.cross_entropy_one_hot(y_pred, label_vec)
        return con_term * self.alpha, ce_term * (1 - self.alpha)
| 29.875 | 80 | 0.739191 | 628 | 0.875872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aac45d3626a2953afb5b3ec68ea68c1867bfdeda | 2,815 | py | Python | keystone_tempest_plugin/services/identity/clients.py | ilay09/keystone | e45049dfd46ab7d3e4c6aa48a3046f622f4a3b1e | [
"Apache-2.0"
] | null | null | null | keystone_tempest_plugin/services/identity/clients.py | ilay09/keystone | e45049dfd46ab7d3e4c6aa48a3046f622f4a3b1e | [
"Apache-2.0"
] | 1 | 2019-08-18T09:25:49.000Z | 2019-08-18T09:25:49.000Z | keystone_tempest_plugin/services/identity/clients.py | ilay09/keystone | e45049dfd46ab7d3e4c6aa48a3046f622f4a3b1e | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from six.moves import http_client
from tempest import config
from tempest.lib.common import rest_client
CONF = config.CONF  # tempest global configuration object
# We only use the identity catalog type
SERVICE_TYPE = 'identity'
class Identity(rest_client.RestClient):
    """Tempest REST client for keystone."""
    # Used by the superclass to build the correct URL paths
    api_version = 'v3'
    def __init__(self, auth_provider):
        # Always talk to the identity service's admin endpoint in the
        # configured region.
        super(Identity, self).__init__(
            auth_provider,
            SERVICE_TYPE,
            CONF.identity.region,
            endpoint_type='adminURL')
class Federation(Identity):
    """Tempest REST client for keystone's Federated Identity API."""

    subpath_prefix = 'OS-FEDERATION'
    subpath_suffix = None

    def _build_path(self, entity_id=None):
        # Base path is "<prefix>/<suffix>"; append the entity id when given.
        path = '%s/%s' % (self.subpath_prefix, self.subpath_suffix)
        if entity_id:
            path = '%s/%s' % (path, entity_id)
        return path

    def _parse_body(self, body):
        # Response bodies are bytes on Python 3 and str on Python 2.
        raw = body if six.PY2 else body.decode('utf-8')
        return json.loads(raw)

    def _delete(self, entity_id, **kwargs):
        resp, body = super(Federation, self).delete(
            self._build_path(entity_id), **kwargs)
        self.expected_success(http_client.NO_CONTENT, resp.status)
        return rest_client.ResponseBody(resp, body)

    def _get(self, entity_id=None, **kwargs):
        resp, body = super(Federation, self).get(
            self._build_path(entity_id), **kwargs)
        self.expected_success(http_client.OK, resp.status)
        return rest_client.ResponseBody(resp, self._parse_body(body))

    def _patch(self, entity_id, body, **kwargs):
        resp, resp_body = super(Federation, self).patch(
            self._build_path(entity_id), body, **kwargs)
        self.expected_success(http_client.OK, resp.status)
        return rest_client.ResponseBody(resp, self._parse_body(resp_body))

    def _put(self, entity_id, body, **kwargs):
        resp, resp_body = super(Federation, self).put(
            self._build_path(entity_id), body, **kwargs)
        self.expected_success(http_client.CREATED, resp.status)
        return rest_client.ResponseBody(resp, self._parse_body(resp_body))
| 35.632911 | 75 | 0.690941 | 2,014 | 0.715453 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.29698 |
aac54b5cd7377439826c8dbbf5c7d47f77639abb | 606 | py | Python | history/migrations/0007_auto_20141026_2348.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 4 | 2017-10-02T17:44:14.000Z | 2020-02-14T17:13:57.000Z | history/migrations/0007_auto_20141026_2348.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 152 | 2015-01-04T00:08:44.000Z | 2022-01-13T00:43:03.000Z | history/migrations/0007_auto_20141026_2348.py | atish3/mig-website | 1bcf4c0b93078cccab6b4a25c93c29a2b5efa4be | [
"Apache-2.0"
] | 4 | 2015-04-16T04:27:05.000Z | 2021-03-21T20:45:24.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choice set of
    MeetingMinutes.meeting_type (default stays 'MM')."""
    dependencies = [
        ('history', '0006_committeemember_member'),
    ]
    operations = [
        migrations.AlterField(
            model_name='meetingminutes',
            name='meeting_type',
            field=models.CharField(default=b'MM', max_length=2, choices=[(b'NI', b'New Initiatives'), (b'MM', b'Main Meetings'), (b'OF', b'Officer Meetings'), (b'AD', b'Advisory Board Meetings'), (b'CM', b'Committee Meeting Minutes')]),
        ),
    ]
| 30.3 | 236 | 0.630363 | 497 | 0.820132 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.376238 |
aac5f2fa729ffc2397e788cc09dace7dcaea0b65 | 1,521 | py | Python | utils/web_socket_client.py | deezusdyse/Hand-controlled-breakout | 9dbab2f7edd2d98adf9a2a2e0d910d34a3819fcf | [
"MIT"
] | 78 | 2018-05-08T19:07:31.000Z | 2021-12-30T19:06:12.000Z | utils/web_socket_client.py | deezusdyse/Hand-controlled-breakout | 9dbab2f7edd2d98adf9a2a2e0d910d34a3819fcf | [
"MIT"
] | 5 | 2018-05-05T08:41:22.000Z | 2021-06-28T12:10:20.000Z | utils/web_socket_client.py | deezusdyse/Hand-controlled-breakout | 9dbab2f7edd2d98adf9a2a2e0d910d34a3819fcf | [
"MIT"
] | 29 | 2018-05-18T15:09:15.000Z | 2022-03-13T11:00:35.000Z | ## Author: Victor Dibia
## Web socket client which is used to send socket messages to a connected server.
import websocket
import time
import json
from websocket import WebSocketException, WebSocketConnectionClosedException
import sys
#import _thread as thread
import websocket
ws = websocket.WebSocket()  # module-level socket shared by all senders
retry_threshold = 5  # seconds to wait before a reconnect attempt
socketurl = ""  # last URL passed to socket_init(); used by reconnect_socket()
def send_message(message, source):
    """Send a detection event over the module-level websocket connection.

    Wraps *message* and *source* in a JSON payload and pushes it through the
    global ``ws`` socket. On connection problems, the socket is re-opened
    (after ``retry_threshold`` seconds) via ``reconnect_socket``; note the
    current message is NOT re-sent after reconnecting.
    """
    global ws
    payload = json.dumps(
        {'event': 'detect', 'data': message, "source": source})
    # print("sending message")
    try:
        ws.send(payload)
    except WebSocketConnectionClosedException:
        # BUGFIX: this handler must come before WebSocketException —
        # WebSocketConnectionClosedException is a subclass of it in
        # websocket-client, so the general handler would otherwise make
        # this branch unreachable.
        print("Error: Connection is closed. Retrying after ", retry_threshold)
        reconnect_socket()
    except WebSocketException:
        print(
            "Error: something went wrong with the socket. Retrying after ", retry_threshold)
        reconnect_socket()
    except BrokenPipeError:
        print("Error: Broken Pipe. Retrying after ", retry_threshold)
        reconnect_socket()
    except:
        # Anything unexpected is logged and re-raised (the bare except
        # re-raises, so KeyboardInterrupt etc. still propagate).
        print("Unexpected error:", sys.exc_info()[0])
        raise
def reconnect_socket():
    """Wait ``retry_threshold`` seconds, then re-open the socket via socket_init."""
    time.sleep(retry_threshold)
    print("Reconnecting websocket ......", socketurl)
    socket_init(socketurl)
# NOTE(review): this module-level rebind runs at import time and discards the
# WebSocket object created above; socket_init() later replaces it — confirm intended.
ws = None
def socket_init(url):
    """Create the module-level websocket and connect it to *url*.

    Remembers *url* in the module-level ``socketurl`` so reconnect_socket()
    can retry; on refusal it keeps retrying via reconnect_socket().
    """
    global ws, socketurl
    socketurl = url
    ws = websocket.WebSocket()
    try:
        ws.connect(url)
        print("Websocket connection successful")
    except ConnectionRefusedError:
        print("Websocket Connection refused")
        reconnect_socket()
| 23.765625 | 92 | 0.684418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 444 | 0.291913 |
aac63a5d8e54599f688823a384ac72b4a3f7a76c | 233 | py | Python | app/helpers/header_helpers.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | 3 | 2020-09-28T13:21:21.000Z | 2021-05-05T14:14:51.000Z | app/helpers/header_helpers.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | 402 | 2019-11-06T17:23:03.000Z | 2022-03-31T16:03:35.000Z | app/helpers/header_helpers.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | 10 | 2020-03-03T14:23:27.000Z | 2022-01-31T12:21:21.000Z | def get_span_and_trace(headers):
try:
trace, span = headers.get("X-Cloud-Trace-Context").split("/")
except (ValueError, AttributeError):
return None, None
span = span.split(";")[0]
return span, trace
| 25.888889 | 69 | 0.626609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.124464 |
aac6c00a86537ccce38fe26c02205e248a7305d8 | 8,085 | py | Python | scFates/tools/pseudotime.py | LouisFaure/scFates | e925b5316c77d923514ac14572eeb738d9f5dd2c | [
"BSD-3-Clause"
] | 4 | 2021-04-27T09:17:28.000Z | 2021-12-30T12:31:22.000Z | scFates/tools/pseudotime.py | LouisFaure/scFates | e925b5316c77d923514ac14572eeb738d9f5dd2c | [
"BSD-3-Clause"
] | 4 | 2021-04-27T09:17:26.000Z | 2021-11-26T13:45:18.000Z | scFates/tools/pseudotime.py | LouisFaure/scFates | e925b5316c77d923514ac14572eeb738d9f5dd2c | [
"BSD-3-Clause"
] | null | null | null | from anndata import AnnData
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from joblib import delayed
from tqdm import tqdm
import sys
import igraph
from .utils import ProgressParallel
from .. import logging as logg
from .. import settings
def pseudotime(adata: AnnData, n_jobs: int = 1, n_map: int = 1, copy: bool = False):
    """\
    Compute pseudotime.

    Projects cells onto the tree, and uses distance from the root as a pseudotime value.

    Parameters
    ----------
    adata
        Annotated data matrix.
    n_jobs
        Number of cpu processes to use in case of performing multiple mapping.
    n_map
        number of probabilistic mapping of cells onto the tree to use. If n_map=1 then likelihood cell mapping is used.
    copy
        Return a copy instead of writing to adata.
    Returns
    -------
    adata : anndata.AnnData
        if `copy=True` it returns or else add fields to `adata`:

        `.obs['edge']`
            assigned edge.
        `.obs['t']`
            assigned pseudotime value.
        `.obs['seg']`
            assigned segment of the tree.
        `.obs['milestone']`
            assigned region surrounding forks and tips.
        `.uns['pseudotime_list']`
            list of cell projection from all mappings.
    """
    # A root is required: pseudotime is the distance from it along the tree.
    if "root" not in adata.uns["graph"]:
        raise ValueError(
            "You need to run `tl.root` or `tl.roots` before projecting cells."
        )
    adata = adata.copy() if copy else adata
    graph = adata.uns["graph"]
    # Remember any pre-existing milestone names/colors so they can be
    # re-applied after recomputation (see the `while reassign` block below).
    reassign, recolor = False, False
    if "milestones" in adata.obs:
        if adata.obs.milestones.dtype.name == "category":
            tmp_mil = adata.obs.milestones.cat.categories.copy()
            reassign = True
        if "milestones_colors" in adata.uns:
            tmp_mil_col = adata.uns["milestones_colors"].copy()
            recolor = True
    logg.info("projecting cells onto the principal graph", reset=True)
    # Single mapping uses the most likely assignment; multiple mappings draw
    # probabilistic assignments in parallel.
    if n_map == 1:
        df_l = [map_cells(graph, multi=False)]
    else:
        df_l = ProgressParallel(
            n_jobs=n_jobs, total=n_map, file=sys.stdout, desc="    mappings"
        )(delayed(map_cells)(graph=graph, multi=True) for m in range(n_map))
    # formatting cell projection data
    # The first mapping becomes the reference stored in .obs.
    df_summary = df_l[0]
    df_summary["seg"] = df_summary["seg"].astype("category")
    df_summary["edge"] = df_summary["edge"].astype("category")
    # remove pre-existing palette to avoid errors with plotting
    if "seg_colors" in adata.uns:
        del adata.uns["seg_colors"]
    if set(df_summary.columns.tolist()).issubset(adata.obs.columns):
        adata.obs[df_summary.columns] = df_summary
    else:
        adata.obs = pd.concat([adata.obs, df_summary], axis=1)
    # list(map(lambda x: x.column))
    # todict=list(map(lambda x: dict(zip(["cells"]+["_"+s for s in x.columns.tolist()],
    #                                   [x.index.tolist()]+x.to_numpy().T.tolist())),df_l))
    # Keep every mapping, keyed by its index as a string.
    names = np.arange(len(df_l)).astype(str).tolist()
    # vals = todict
    dictionary = dict(zip(names, df_l))
    adata.uns["pseudotime_list"] = dictionary
    if n_map > 1:
        # Standard deviation of pseudotime across mappings = uncertainty.
        adata.obs["t_sd"] = (
            pd.concat(
                list(
                    map(
                        lambda x: pd.Series(x["t"]),
                        list(adata.uns["pseudotime_list"].values()),
                    )
                ),
                axis=1,
            )
            .apply(np.std, axis=1)
            .values
        )
    # Assign each cell to the milestone (segment endpoint) closest in
    # pseudotime: first half of a segment -> "from" node, second half -> "to".
    milestones = pd.Series(index=adata.obs_names)
    for seg in graph["pp_seg"].n:
        cell_seg = adata.obs.loc[adata.obs["seg"] == seg, "t"]
        if len(cell_seg) > 0:
            milestones[
                cell_seg.index[
                    (cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) < 0)
                ]
            ] = graph["pp_seg"].loc[int(seg), "from"]
            milestones[
                cell_seg.index[
                    (cell_seg - min(cell_seg) - (max(cell_seg - min(cell_seg)) / 2) > 0)
                ]
            ] = graph["pp_seg"].loc[int(seg), "to"]
    adata.obs["milestones"] = milestones
    adata.obs.milestones = (
        adata.obs.milestones.astype(int).astype("str").astype("category")
    )
    # Map milestone category name -> principal point id.
    adata.uns["graph"]["milestones"] = dict(
        zip(
            adata.obs.milestones.cat.categories,
            adata.obs.milestones.cat.categories.astype(int),
        )
    )
    # Restore the user's previous milestone names/colors when the number of
    # milestones is unchanged (single-pass loop used for early exit).
    while reassign:
        if "tmp_mil_col" not in locals():
            break
        if len(tmp_mil_col) != len(adata.obs.milestones.cat.categories):
            break
        rename_milestones(adata, tmp_mil)
        if recolor:
            adata.uns["milestones_colors"] = tmp_mil_col
        reassign = False
    logg.info("    finished", time=True, end=" " if settings.verbosity > 2 else "\n")
    logg.hint(
        "added\n"
        "    .obs['edge'] assigned edge.\n"
        "    .obs['t'] pseudotime value.\n"
        "    .obs['seg'] segment of the tree assigned.\n"
        "    .obs['milestones'] milestone assigned.\n"
        "    .uns['pseudotime_list'] list of cell projection from all mappings."
    )
    return adata if copy else None
def map_cells(graph, multi=False):
    """Project every cell onto an edge of the principal graph.

    For each cell, pick a principal point (argmax of soft assignment R, or a
    random draw weighted by R when `multi` is True), then interpolate the
    cell between that point and its best neighbor to obtain a pseudotime `t`,
    a segment `seg` and an `edge` label. Returns a DataFrame indexed by cell.
    """
    import igraph

    g = igraph.Graph.Adjacency((graph["B"] > 0).tolist(), mode="undirected")
    # Add edge weights and node labels.
    g.es["weight"] = graph["B"][graph["B"].nonzero()]
    if multi:
        # Probabilistic assignment: sample one principal point per cell
        # according to its soft-assignment row in R.
        rrm = (
            np.apply_along_axis(
                lambda x: np.random.choice(np.arange(len(x)), size=1, p=x),
                axis=1,
                arr=graph["R"],
            )
        ).T.flatten()
    else:
        # Deterministic assignment: most likely principal point per cell.
        rrm = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"])

    def map_on_edges(v):
        # Cells assigned to principal point v.
        vcells = np.argwhere(rrm == v)
        if vcells.shape[0] > 0:
            # Neighborhood of v (nv[0] is v itself, nv[1:] its neighbors).
            nv = np.array(g.neighborhood(v, order=1))
            nvd = np.array(g.shortest_paths(v, nv)[0])
            # Best neighbor for each cell (highest soft assignment).
            spi = np.apply_along_axis(np.argmax, axis=1, arr=graph["R"][vcells, nv[1:]])
            ndf = pd.DataFrame(
                {
                    "cell": vcells.flatten(),
                    "v0": v,
                    "v1": nv[1:][spi],
                    "d": nvd[1:][spi],
                }
            )
            # p0/p1: assignment probabilities to v0 and v1 respectively.
            p0 = graph["R"][vcells, v].flatten()
            p1 = np.array(
                list(
                    map(lambda x: graph["R"][vcells[x], ndf.v1[x]], range(len(vcells)))
                )
            ).flatten()
            # Random interpolation position along the edge.
            alpha = np.random.uniform(size=len(vcells))
            # NOTE(review): `f` is computed but never used below — presumably
            # a leftover from an alternative interpolation scheme; confirm.
            f = np.abs(
                (np.sqrt(alpha * p1 ** 2 + (1 - alpha) * p0 ** 2) - p0) / (p1 - p0)
            )
            # Pseudotime: linear interpolation between v0 and v1 times.
            ndf["t"] = (
                graph["pp_info"].loc[ndf.v0, "time"].values
                + (
                    graph["pp_info"].loc[ndf.v1, "time"].values
                    - graph["pp_info"].loc[ndf.v0, "time"].values
                )
                * alpha
            )
            ndf["seg"] = 0
            # Cells sitting on a fork inherit the segment of the neighbor,
            # otherwise the segment of the assigned principal point.
            isinfork = (graph["pp_info"].loc[ndf.v0, "PP"].isin(graph["forks"])).values
            ndf.loc[isinfork, "seg"] = (
                graph["pp_info"].loc[ndf.loc[isinfork, "v1"], "seg"].values
            )
            ndf.loc[~isinfork, "seg"] = (
                graph["pp_info"].loc[ndf.loc[~isinfork, "v0"], "seg"].values
            )
            return ndf
        else:
            return None

    # One frame per principal point; None entries (no cells) are dropped by concat.
    df = list(map(map_on_edges, range(graph["B"].shape[1])))
    df = pd.concat(df)
    df.sort_values("cell", inplace=True)
    df.index = graph["cells_fitted"]
    # Edge label "v0|v1" built from the two principal point ids.
    df["edge"] = df.apply(lambda x: str(int(x[1])) + "|" + str(int(x[2])), axis=1)
    df.drop(["cell", "v0", "v1", "d"], axis=1, inplace=True)
    return df
def rename_milestones(adata, new, copy: bool = False):
    """Replace milestone labels with the names in *new*, keeping node ids.

    *new* must list one name per existing milestone, in category order.
    Returns the (copied) AnnData when ``copy=True``, otherwise None.
    """
    adata = adata.copy() if copy else adata
    graph = adata.uns["graph"]
    node_ids = list(graph["milestones"].values())
    graph["milestones"] = {name: node for name, node in zip(new, node_ids)}
    adata.obs.milestones = adata.obs.milestones.cat.rename_categories(new)
    return adata if copy else None
| 31.830709 | 119 | 0.533952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,251 | 0.278417 |
aac7fccb1c9445a4ec71826fa59e6dfe79f78dc7 | 6,786 | py | Python | libai/data/samplers/samplers.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 55 | 2021-12-10T08:47:06.000Z | 2022-03-28T09:02:15.000Z | libai/data/samplers/samplers.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 106 | 2021-11-03T05:16:45.000Z | 2022-03-31T06:16:23.000Z | libai/data/samplers/samplers.py | Oneflow-Inc/libai | e473bd3962f07b1e37232d2be39c8257df0ec0f3 | [
"Apache-2.0"
] | 13 | 2021-12-29T08:12:08.000Z | 2022-03-28T06:59:45.000Z | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow.utils.data import Sampler
class CyclicSampler(Sampler):
    """
    This sampler supports cyclic sampling, and it is also compatible with
    non-data parallelism and data parallelism.

    Arguments:
        dataset: dataset to be sampled.
        micro_batch_size: batch size for per model instance.
            global_batch_size is micro_batch_size times data_parallel_size.
        shuffle: whether to shuffle the dataset.
        consumed_samples: the number of samples that have been trained at the current time,
            used for resuming training (default: ``0``).
        data_parallel_rank: local rank for data parallelism.
        data_parallel_size: the size of data parallelism.
        seed: random seed, used for reproducing experiments (default: ``0``).
    """

    def __init__(
        self,
        dataset,
        micro_batch_size,
        shuffle=False,
        consumed_samples=0,
        data_parallel_rank=0,
        data_parallel_size=1,
        seed=0,
    ):
        self.dataset = dataset
        self.data_size = len(self.dataset)
        self.shuffle = shuffle
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_size = micro_batch_size
        # One "global" step consumes micro_batch_size * data_parallel_size samples.
        self.actual_batch_size = self.micro_batch_size * self.data_parallel_size
        # Samples this rank contributes per epoch (whole global batches only).
        self.data_size_per_epoch = self.data_size // self.actual_batch_size * self.micro_batch_size
        self.consumed_samples = consumed_samples
        self.seed = seed

    def __iter__(self):
        """divide the data into data_parallel_size buckets,
        and shuffle it if `shuffle` is set to `True`.
        Each processor samples from its own buckets and data_loader
        will load the corresponding data.
        """
        # Resume from consumed_samples: figure out the epoch and the offset
        # already processed inside the current epoch.
        epoch = self.consumed_samples // self.data_size_per_epoch
        current_epoch_samples = self.consumed_samples % self.data_size_per_epoch
        batch = []
        while True:
            bucket_offset = current_epoch_samples // self.data_parallel_size
            # Each rank reads from its own contiguous bucket of indices.
            start_idx = self.data_parallel_rank * self.data_size_per_epoch
            if self.shuffle:
                # Seed with (seed + epoch) so every rank shuffles identically
                # per epoch but ordering changes across epochs.
                generator = flow.Generator()
                generator.manual_seed(self.seed + epoch)
                random_idx = flow.randperm(self.data_size_per_epoch, generator=generator).tolist()
                indices = [start_idx + x for x in random_idx[bucket_offset:]]
            else:
                seq_idx = flow.arange(self.data_size_per_epoch).tolist()
                indices = [start_idx + x for x in seq_idx[bucket_offset:]]
            epoch += 1
            if hasattr(self.dataset, "supports_prefetch") and self.dataset.supports_prefetch:
                self.dataset.prefetch(indices)
            for idx in indices:
                batch.append(idx)
                if len(batch) == self.micro_batch_size:
                    # A full micro-batch on every rank = one global batch consumed.
                    self.consumed_samples += self.actual_batch_size
                    yield batch
                    batch = []
            # Next pass starts a fresh epoch from the beginning.
            current_epoch_samples = 0

    def __len__(self):
        return self.data_size

    def set_consumed_samples(self, consumed_samples):
        """You can recover the training iteration by setting `consumed_samples`."""
        self.consumed_samples = consumed_samples

    def set_epoch(self, epoch):
        """Used for restoring training status."""
        self.epoch = epoch
class SingleRoundSampler(Sampler):
    """
    This sampler supports single round sampling (one pass over the dataset),
    and it is compatible with both single-device and data-parallel training.
    Arguments:
        dataset: dataset to be sampled.
        micro_batch_size: batch size for per model instance; global_batch_size
            is micro_batch_size times data_parallel_size.
        shuffle: whether to shuffle the dataset.
        data_parallel_rank: local rank for data parallelism.
        data_parallel_size: the size of data parallelism.
        seed: random seed, used for reproducing experiments (default: ``0``).
        drop_last: whether to drop the remaining data (default: ``False``).
    """
    def __init__(
        self,
        dataset,
        micro_batch_size,
        shuffle=False,
        data_parallel_rank=0,
        data_parallel_size=1,
        seed=0,
        drop_last=False,
    ):
        self.dataset = dataset
        self.data_size = len(self.dataset)
        self.shuffle = shuffle
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_size = data_parallel_size
        self.micro_batch_size = micro_batch_size
        self.seed = seed
        self.drop_last = drop_last
    def __iter__(self):
        """Yield micro-batches of indices from this rank's bucket, once."""
        # Split data_size into data_parallel_size buckets; the first `remain`
        # ranks get one extra sample each so every sample is assigned.
        bucket_size = self.data_size // self.data_parallel_size
        remain = self.data_size % self.data_parallel_size
        start_idx = self.data_parallel_rank * bucket_size
        if self.data_parallel_rank < remain:
            bucket_size += 1
        start_idx += min(self.data_parallel_rank, remain)
        if self.shuffle:
            # Fixed seed: the permutation is reproducible across runs.
            generator = flow.Generator()
            generator.manual_seed(self.seed)
            random_idx = flow.randperm(bucket_size, generator=generator).tolist()
            indices = [start_idx + x for x in random_idx]
        else:
            seq_idx = flow.arange(bucket_size).tolist()
            indices = [start_idx + x for x in seq_idx]
        if hasattr(self.dataset, "supports_prefetch") and self.dataset.supports_prefetch:
            self.dataset.prefetch(indices)
        batch = []
        for idx in indices:
            batch.append(idx)
            if len(batch) == self.micro_batch_size:
                yield batch
                batch = []
        if not self.drop_last:
            # Ranks whose bucket is one sample short pad the tail batch with
            # index 0 so every rank emits the same number of samples.
            if self.data_parallel_rank >= remain and remain > 0:
                batch.append(0)
            if len(batch) > 0:
                yield batch
    def __len__(self):
        """Number of global batches for one pass (rounded per drop_last)."""
        global_batch_size = self.micro_batch_size * self.data_parallel_size
        if self.drop_last:
            return self.data_size // global_batch_size
        else:
            return (self.data_size + global_batch_size - 1) // global_batch_size
| 36.483871 | 99 | 0.645299 | 6,097 | 0.898467 | 2,744 | 0.404362 | 0 | 0 | 0 | 0 | 2,370 | 0.349248 |
aaca2df0baf423c25bdc23220c495efb4199a83e | 1,198 | py | Python | nipype/interfaces/niftyseg/tests/test_lesions.py | mfalkiewicz/nipype | 775e21b78fb1ffa2ff9cb12e6f052868bd44d052 | [
"Apache-2.0"
] | 1 | 2015-01-19T13:12:27.000Z | 2015-01-19T13:12:27.000Z | nipype/interfaces/niftyseg/tests/test_lesions.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/niftyseg/tests/test_lesions.py | bpinsard/nipype | 373bdddba9f675ef153951afa368729e2d8950d2 | [
"Apache-2.0"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import pytest
from ....testing import example_data
from ...niftyreg import get_custom_path
from ...niftyreg.tests.test_regutils import no_nifty_tool
from .. import FillLesions
@pytest.mark.skipif(no_nifty_tool(cmd='seg_FillLesions'),
                    reason="niftyseg is not installed")
def test_seg_filllesions():
    """Exercise the FillLesions interface: command path, mandatory-arg check, cmdline."""
    node = FillLesions()

    # The wrapped executable must resolve through the NIFTYSEGDIR override.
    cmd = get_custom_path('seg_FillLesions', env_dir='NIFTYSEGDIR')
    assert node.cmd == cmd

    # Running without the mandatory inputs must be rejected.
    with pytest.raises(ValueError):
        node.run()

    # Wire up the two mandatory inputs and verify the generated command line.
    in_file = example_data('im1.nii')
    lesion_mask = example_data('im2.nii')
    node.inputs.in_file = in_file
    node.inputs.lesion_mask = lesion_mask
    expected_cmd = '{cmd} -i {in_file} -l {lesion_mask} -o {out_file}'.format(
        cmd=cmd,
        in_file=in_file,
        lesion_mask=lesion_mask,
        out_file='im1_lesions_filled.nii.gz',
    )
    assert node.cmdline == expected_cmd
| 29.219512 | 78 | 0.687813 | 0 | 0 | 0 | 0 | 903 | 0.753756 | 0 | 0 | 417 | 0.34808 |
aaca67fdbf8819510b7cdbb0ad4af6d2dd57073a | 2,358 | py | Python | tests/test_services/test_run_filters/actions.py | Jumpscale/ays_jumpscale8 | 4ff4a2fb3b95de6f46ea494bd5b5a2a0fb9ecdb1 | [
"Apache-2.0"
] | 4 | 2017-06-07T08:10:06.000Z | 2017-11-10T02:20:38.000Z | tests/test_services/test_run_filters/actions.py | Jumpscale/ays9 | 63bd414ff06372ba885c55eec528f427e63bcbe1 | [
"Apache-2.0"
] | 242 | 2017-05-18T10:51:48.000Z | 2019-09-18T15:09:47.000Z | tests/test_services/test_run_filters/actions.py | Jumpscale/ays_jumpscale8 | 4ff4a2fb3b95de6f46ea494bd5b5a2a0fb9ecdb1 | [
"Apache-2.0"
] | 5 | 2017-06-16T15:43:25.000Z | 2017-09-29T12:48:06.000Z | def init_actions_(service, args):
    """
    Return a mapping that describes dependencies between this service's actions.

    See ACTION_DEPS in this module for an example of the expected structure.
    Here the 'test' action simply depends on 'install' having run first.
    """
    # Default dependency wiring for this simple test service.
    return {
        'test': ['install']
    }
def test(job):
"""
Tests run filters
"""
import sys
RESULT_OK = 'OK : %s'
RESULT_FAILED = 'FAILED : %s'
RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
model = job.service.model
model.data.result = RESULT_OK % job.service.name
try:
services_to_check = {
'test_run_filters': {
'instance': 'main',
'actions': [('install', ['ok']), ('test', ['running'])]
},
'test_run_filter1': {
'instance': 'main',
'actions': [('install', ['ok']), ('test', ['running', 'ok', 'scheduled'])]
},
'test_run_filter2': {
'instance': 'main',
'actions': [('install', ['ok']), ('test', ['new'])]
}
}
for actor, actor_info in services_to_check.items():
srv = job.service.aysrepo.servicesFind(actor=actor, name=actor_info['instance'])[0]
for action_info in actor_info['actions']:
if str(srv.model.actions[action_info[0]].state) not in action_info[1]:
model.data.result = RESULT_FAILED % ('Action [%s] on service [%s] has unexpected state. Expected [%s] found [%s]' % (action_info[0],
'%s!%s' % (actor, actor_info['instance']),
action_info[1],
str(srv.model.actions[action_info[0]].state)
))
except:
model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
finally:
job.service.save()
| 39.966102 | 180 | 0.415606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.271416 |
aacab0e2817127cfdd9da58cb922f41c6e1f2756 | 1,297 | py | Python | api/test_processor_api.py | AlexRogalskiy/asma | b028b65e93b0ae4b7540d5ff70e1ff07fd92130f | [
"MIT"
] | 4 | 2020-08-12T04:00:23.000Z | 2022-02-12T13:38:44.000Z | api/test_processor_api.py | nadeembinshajahan/asma | b028b65e93b0ae4b7540d5ff70e1ff07fd92130f | [
"MIT"
] | 2 | 2022-02-12T13:38:50.000Z | 2022-02-12T13:40:09.000Z | api/test_processor_api.py | AlexRogalskiy/asma | b028b65e93b0ae4b7540d5ff70e1ff07fd92130f | [
"MIT"
] | 1 | 2022-02-12T13:38:44.000Z | 2022-02-12T13:38:44.000Z | from fastapi.testclient import TestClient
import os
import sys
sys.path.append("..")
from libs.config_engine import ConfigEngine
from api.config_keys import Config
from api.processor_api import ProcessorAPI
import pytest
# Live processor configuration used by the API under test.
config_path='/repo/config-coral.ini'
config = ConfigEngine(config_path)
app_instance = ProcessorAPI(config)
api = app_instance.app
client = TestClient(api)
sample_config_path='/repo/api/config-sample.ini'
config_backup_path='/repo/config-coral-backup.ini'
# Load the sample config and flatten it into the {section: {key: value}}
# shape that the /set-config and /get-config endpoints exchange.
config_sample = ConfigEngine(sample_config_path)
sections = config_sample.get_sections()
config_sample_json = {}
for section in sections:
    config_sample_json[section] = config_sample.get_section_dict(section)
#@pytest.mark.order1
def test_set_config():
    """POST the sample config and expect it echoed back unchanged."""
    resp = client.post("/set-config", json=config_sample_json)
    assert resp.status_code == 200
    assert resp.json() == config_sample_json
#@pytest.mark.order2
def test_get_config():
    """A freshly built app must serve the config stored by test_set_config."""
    fresh_client = TestClient(ProcessorAPI(ConfigEngine(config_path)).app)
    resp = fresh_client.get("/get-config")
    assert resp.status_code == 200
    assert resp.json() == config_sample_json
| 24.471698 | 73 | 0.762529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.16037 |
aacdab9b402913a1a133e50f920b43e0617690ff | 1,353 | py | Python | setup.py | kiminh/lambda-learner | f409a2982a1fbb19e6331ced66d7342d113449d1 | [
"BSD-2-Clause"
] | 1 | 2021-01-11T18:38:12.000Z | 2021-01-11T18:38:12.000Z | setup.py | kiminh/lambda-learner | f409a2982a1fbb19e6331ced66d7342d113449d1 | [
"BSD-2-Clause"
] | null | null | null | setup.py | kiminh/lambda-learner | f409a2982a1fbb19e6331ced66d7342d113449d1 | [
"BSD-2-Clause"
] | null | null | null | from os import path
from setuptools import find_namespace_packages, setup
this_directory = path.abspath(path.dirname(__file__))
# Resolve README relative to setup.py (this_directory was previously computed
# but unused, so builds from any other working directory failed to find it).
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='lambda-learner',
    namespace_packages=['linkedin'],
    version='0.0.1',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=['Programming Language :: Python :: 3',
                 'Intended Audience :: Science/Research',
                 'Intended Audience :: Developers',
                 'License :: OSI Approved'],
    license='BSD-2-CLAUSE',
    keywords='lambda-learner incremental training',
    package_dir={'': 'src'},
    packages=find_namespace_packages(where='src', exclude=['test*', 'doc']),
    url='https://github.com/linkedin/lambda-learner',
    project_urls={
        'Documentation': 'https://github.com/linkedin/lambda-learner/blob/main/README.md',
        'Source': 'https://github.com/linkedin/lambda-learner',
        'Tracker': 'https://github.com/linkedin/lambda-learner/issues',
    },
    include_package_data=True,
    python_requires='>=3.6',
    install_requires=[
        'numpy >= 1.14',
        'scipy >= 1.0.0',
        'scikit-learn >= 0.18.1',
        'typing-extensions >= 3.7.4',
    ],
    tests_require=[
        'pytest',
    ]
)
| 34.692308 | 90 | 0.632668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.449372 |
aace2561c26b084c7dd7639860b3d85239529375 | 222 | py | Python | graphiql_strawberry_debug_toolbar/serializers.py | przemub/django-graphiql-strawberry-debug-toolbar | 14882b215a63a5b73a26fd9e641ac8fe98f65eaa | [
"MIT"
] | 67 | 2018-03-18T13:06:59.000Z | 2021-12-21T19:07:13.000Z | graphiql_strawberry_debug_toolbar/serializers.py | przemub/django-graphiql-strawberry-debug-toolbar | 14882b215a63a5b73a26fd9e641ac8fe98f65eaa | [
"MIT"
] | 15 | 2018-03-15T13:12:33.000Z | 2022-02-10T14:46:33.000Z | graphiql_strawberry_debug_toolbar/serializers.py | przemub/django-graphiql-strawberry-debug-toolbar | 14882b215a63a5b73a26fd9e641ac8fe98f65eaa | [
"MIT"
] | 15 | 2019-06-19T12:04:53.000Z | 2022-03-16T16:55:09.000Z | from django.core.serializers.json import DjangoJSONEncoder
class CallableJSONEncoder(DjangoJSONEncoder):
    """DjangoJSONEncoder that serializes callables by invoking them first."""

    def default(self, obj):
        # Lazily-evaluated values are called; everything else is delegated.
        return obj() if callable(obj) else super().default(obj)
| 24.666667 | 58 | 0.689189 | 160 | 0.720721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aad00b037048f38be17375ec73355e9d7864cd27 | 9,563 | py | Python | Famcy/_util_/_fsubmission.py | nexuni/Famcy | 80f8f18fe1614ab3c203ca3466b9506b494470bf | [
"Apache-2.0"
] | null | null | null | Famcy/_util_/_fsubmission.py | nexuni/Famcy | 80f8f18fe1614ab3c203ca3466b9506b494470bf | [
"Apache-2.0"
] | 12 | 2022-02-05T04:56:44.000Z | 2022-03-30T09:59:26.000Z | Famcy/_util_/_fsubmission.py | nexuni/Famcy | 80f8f18fe1614ab3c203ca3466b9506b494470bf | [
"Apache-2.0"
] | null | null | null | import abc
import enum
import json
import pickle
import time
import Famcy
import _ctypes
import os
import datetime
from flask import session
from werkzeug.utils import secure_filename
# GLOBAL HELPER
def get_fsubmission_obj(parent, obj_id):
    """Inverse of id(): resolve a live submission object by id through *parent*.

    Only works while the object has not been garbage collected.
    """
    print("get_fsubmission_obj parent: ", parent)
    if not parent:
        # No page context: nothing to resolve against.
        print("cannot find obj")
        return None
    return parent.find_obj_by_id(parent, obj_id)
def alert_response(info_dict, form_id):
    """Build the (html, script) pair for a transient Bootstrap alert tied to *form_id*."""
    alert_html = '''
    <div class="alert %s" id="alert_msg_%s" role="alert">
        %s
    </div>
    ''' % (info_dict["alert_type"], form_id, info_dict["alert_message"])
    # Fade the alert out after two seconds, then remove it from the DOM.
    alert_js = '''
        $("#alert_msg_%s").fadeTo(2000, 500).slideUp(500, function(){
            $("#alert_msg_%s").slideUp(500);
            $("#alert_msg_%s").remove();
        });
    ''' % (form_id, form_id, form_id)
    return alert_html, alert_js
def exception_handler(func):
    """
    Decorator that swallows exceptions raised by the wrapped handler so a
    failing submission callback cannot crash the request cycle.

    Fixes over the previous version:
    - ``except Exception`` instead of a bare ``except`` so SystemExit /
      KeyboardInterrupt still propagate.
    - the wrapped function's return value is passed through on success
      (previously it was always dropped).
    """
    from functools import wraps

    @wraps(func)
    def inner_function(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Best-effort: errors are deliberately suppressed. The commented
            # block below shows the originally intended user-facing alert.
            # fsubmission_obj = get_fsubmission_obj(None, args[1])
            # inner_text, extra_script = alert_response({"alert_type":"alert-warning", "alert_message":"系統異常", "alert_position":"prepend"}, fsubmission_obj.origin.id)
            # args[0].html_prepend('#'+fsubmission_obj.target.id, inner_text)
            # args[0].script(extra_script)
            # args[0].script("$('#loading_holder').css('display','none');")
            return None
    return inner_function
def put_submissions_to_list(fsubmission_obj, sub_dict):
    """
    Flatten the submitted form values into a list ordered like the input form.

    Walks the children of the originating widget's enclosing "input_form"
    (in layout order) and collects ``sub_dict[child.name]`` for every child
    that actually submitted a value.

    Args:
        fsubmission_obj: submission whose ``origin`` locates the input form.
        sub_dict: mapping of child-widget name -> submitted value.

    Returns:
        list: values ordered by the form layout; empty if no input form found.
    """
    input_parent = fsubmission_obj.origin.find_parent(fsubmission_obj.origin, "input_form")
    if not input_parent:
        return []
    # `in sub_dict` instead of `in sub_dict.keys()`: same semantics, idiomatic.
    return [
        sub_dict[child.name]
        for child, _, _, _, _ in input_parent.layout.content
        if child.name in sub_dict
    ]
def allowed_file(filename, extension_list):
    """True if *filename* has an extension (case-insensitive) listed in *extension_list*."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in extension_list
class FResponse(metaclass=abc.ABCMeta):
    """
    Base class for responses sent back through sijax.

    Subclasses implement :meth:`response` to write their payload into the
    sijax response object.
    """

    def __init__(self, target=None):
        # Widget the response should be rendered into (may be assigned later).
        self.target = target
        # Snippet that hides the global loading spinner once a response lands.
        self.finish_loading_script = "$('#loading_holder').css('display','none');"

    def run_all_script_tag(self, html, sijax_response):
        """
        Execute every ``<script>...</script>`` block in *html* through
        ``sijax_response.script()`` and return the html with those blocks
        stripped out.

        Fixes over the previous version: a script tag at position 0 is no
        longer silently dropped, and html after the last script tag is no
        longer truncated by one character.
        """
        open_tag, close_tag = "<script>", "</script>"
        pure_html = ""
        while html:
            start = html.find(open_tag)
            if start == -1:
                # No more scripts: keep the remainder verbatim.
                pure_html += html
                break
            pure_html += html[:start]
            end = html.find(close_tag, start)
            if end == -1:
                # Unterminated tag: treat everything after it as script body.
                sijax_response.script(html[start + len(open_tag):])
                break
            sijax_response.script(html[start + len(open_tag):end])
            html = html[end + len(close_tag):]
        return pure_html

    @abc.abstractmethod
    def response(self, sijax_response):
        """Write this response into the given sijax response object."""
class FSubmissionSijaxHandler(object):
    """
    Sijax entry points that route a submission id to the matching
    submission object on the current page and emit its response.
    """
    # Set by the page machinery before any handler fires; used to resolve ids.
    current_page = None
    @staticmethod
    # @exception_handler
    def famcy_submission_handler(obj_response, fsubmission_id, info_dict, **kwargs):
        """
        This is the main submission handler that handles all
        the submission traffics.
        """
        print("==========================famcy_submission_handler")
        # Get the submission object
        fsubmission_obj = get_fsubmission_obj(FSubmissionSijaxHandler.current_page, fsubmission_id)
        if "jsAlert" in info_dict.keys():
            # Client-side alert round-trip: delegate to the alert handler.
            temp_func = fsubmission_obj.jsAlertHandler
            response_obj = temp_func(fsubmission_obj, info_dict)
            # response_obj = fsubmission_obj.jsAlertHandler(fsubmission_obj, info_dict)
        else:
            info_list = put_submissions_to_list(fsubmission_obj, info_dict)
            # Run user defined handle submission
            # Will assume all data ready at this point
            temp_func = fsubmission_obj.func
            response_obj = temp_func(fsubmission_obj, info_list)
            # response_obj = fsubmission_obj.func(fsubmission_obj, info_list)
        # Response according to the return response
        if isinstance(response_obj, list):
            for res_obj in response_obj:
                res_obj.target = res_obj.target if res_obj.target else fsubmission_obj.target
                res_obj.response(obj_response)
        elif response_obj:
            response_obj.target = response_obj.target if response_obj.target else fsubmission_obj.target
            response_obj.response(obj_response)
        else:
            # No response object: surface a generic warning alert and stop the spinner.
            inner_text, extra_script = alert_response({"alert_type":"alert-warning", "alert_message":"系統異常", "alert_position":"prepend"}, fsubmission_obj.origin.id)
            # args[0] is the sijax response object
            obj_response.html_prepend('#'+fsubmission_obj.target.id, inner_text)
            obj_response.script(extra_script)
            obj_response.script("$('#loading_holder').css('display','none');")
        session["current_page"] = FSubmissionSijaxHandler.current_page
    @staticmethod
    # @exception_handler
    def _dump_data(obj_response, files, form_values, fsubmission_obj, **kwargs):
        # Save the uploaded file (if valid) and feed the outcome to the user handler.
        def dump_files():
            if 'file' not in files:
                return {"indicator": True, "message": 'Bad upload'}
            file_data = files['file']
            file_name = file_data.filename
            if file_name is None:
                return {"indicator": True, "message": 'Nothing uploaded'}
            upload_form = fsubmission_obj.origin.find_parent(fsubmission_obj.origin, "upload_form")
            upload_file = upload_form.find_class(upload_form, "uploadFile")
            filename = ""
            for _upload_file in upload_file:
                if file_data and allowed_file(file_data.filename, _upload_file.value["accept_type"]):
                    print("file_data.save")
                    # Timestamp prefix avoids collisions; secure_filename sanitizes the name.
                    filename = datetime.datetime.now().strftime("%Y%m%d%H%M%S")+"_"+secure_filename(file_data.filename)
                    file_data.save(os.path.join(_upload_file.value["file_path"], filename))
                    file_type = file_data.content_type
                    file_size = len(file_data.read())
            return {"indicator": True, "message": filename}
        temp_func = fsubmission_obj.func
        response_obj = temp_func(fsubmission_obj, [[dump_files()]])
        # Response according to the return response
        if isinstance(response_obj, list):
            for res_obj in response_obj:
                res_obj.target = res_obj.target if res_obj.target else fsubmission_obj.target
                res_obj.response(obj_response)
        elif response_obj:
            response_obj.target = response_obj.target if response_obj.target else fsubmission_obj.target
            response_obj.response(obj_response)
        else:
            # No response object: surface a generic warning alert and stop the spinner.
            inner_text, extra_script = alert_response({"alert_type":"alert-warning", "alert_message":"系統異常", "alert_position":"prepend"}, fsubmission_obj.origin.id)
            # args[0] is the sijax response object
            obj_response.html_prepend('#'+fsubmission_obj.target.id, inner_text)
            obj_response.script(extra_script)
            obj_response.script("$('#loading_holder').css('display','none');")
    @staticmethod
    # @exception_handler
    def upload_form_handler(obj_response, files, form_values):
        print("==========================upload_form_handler")
        # The submission id may arrive as a plain string or a one-element list.
        if isinstance(form_values["fsubmission_obj"], str):
            fsubmission_obj = get_fsubmission_obj(FSubmissionSijaxHandler.current_page, form_values["fsubmission_obj"])
        else:
            fsubmission_obj = get_fsubmission_obj(FSubmissionSijaxHandler.current_page, form_values["fsubmission_obj"][0])
        FSubmissionSijaxHandler._dump_data(obj_response, files, form_values, fsubmission_obj)
        session["current_page"] = FSubmissionSijaxHandler.current_page
class FSubmission:
    """
    Binds a user-defined handler to a widget for the famcy submission system.

    Attributes:
        func: user-defined handler invoked when the submission fires.
        func_link: serializable reference to ``func`` (used by ``tojson``).
        origin: widget the submission originates from.
        target: widget the response is rendered into (defaults to ``origin``).
    """
    def __init__(self, origin):
        self.func = None
        self.func_link = None
        self.origin = origin
        self.target = origin

    def getFormData(self):
        """Return the layout of the origin's parent form (asserts it exists)."""
        data = getattr(self.origin.parent, "layout", None)
        assert data, "Submission origin has no data. "
        return data

    def jsAlertHandler(self, submission_obj, info_dict):
        """
        Default handler for client-side alert submissions.

        info_dict = {"alert_type": "", "alert_message": "", "alert_position": ""}
        """
        print("jsAlertHandler=============")
        return Famcy.UpdateAlert(alert_type=info_dict["alert_type"], alert_message=info_dict["alert_message"], alert_position=info_dict["alert_position"])

    def tojson(self):
        """Serialize the submission wiring (target/origin links and handler link)."""
        # Previous version built a throwaway empty dict first (dead store).
        return json.dumps({"target": self.target.link, "origin": self.origin.link, "func": self.func_link})
class FBackgroundTask(FSubmission):
    """
    Submission variant driven by the background loop rather than user input.
    """
    def __init__(self, origin):
        super(FBackgroundTask, self).__init__(origin)
        self.background_info_dict = {}
        self.obj_key = "background"+str(id(self))
        # if not Famcy.SubmissionObjectTable.has_key(self.obj_key):
        #     Famcy.SubmissionObjectTable[self.obj_key] = self

    def associate(self, function, info_dict=None, target=None, update_attr=None):
        """
        Attach the background handler, rendering target and update attributes.

        ``info_dict``/``update_attr`` previously used mutable ``{}`` defaults,
        which are shared across calls and instances; ``None`` sentinels with a
        fresh dict per call fix that while keeping the call signature compatible.
        """
        self.func = function
        self.target = target if target else self
        self.background_info_dict = {} if info_dict is None else info_dict
        self.target_attr = {} if update_attr is None else update_attr

    def tojson(self, str_format=False):
        """Run the handler once, render the target, and return the payload dict (or JSON string)."""
        self.func(self, [])
        _ = self.target.render_inner()
        content = {"data": self.background_info_dict, "submission_id": str(self.obj_key),
                   "page_id": self.origin.id, "target_id": self.target.id, "target_innerHTML": self.target.body.html, "target_attribute": self.target_attr}
        return content if not str_format else json.dumps(content)
| 33.911348 | 157 | 0.736066 | 7,230 | 0.754146 | 0 | 0 | 4,406 | 0.459581 | 0 | 0 | 3,407 | 0.355377 |
aad089a6f4a448fc23d035f432e9858d598d7704 | 1,982 | py | Python | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | 6 | 2021-03-30T14:08:14.000Z | 2021-09-08T02:21:23.000Z | user/forms.py | apuc/django-rest-framework | 863f2dcca5f2a677ac0e477fc704cc54cd9a53f8 | [
"MIT"
] | null | null | null | from crispy_forms import layout
from crispy_forms.helper import FormHelper
from django.conf import settings
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django import forms
from .models import UserProfile
class RegisterForm(UserCreationForm):
    """Sign-up form (username, email, double password entry, optional photo)
    rendered with a crispy-forms helper that posts to the register API."""
    username = forms.CharField(label='Username', max_length=45)
    email = forms.EmailField(label='Email')
    password1 = forms.CharField(
        min_length=settings.MIN_PASSWORD_LENGTH,
        label='Password',
        strip=False,
        help_text=f'Enter {settings.MIN_PASSWORD_LENGTH} digits and chars',
        widget=forms.PasswordInput()
    )
    password2 = forms.CharField(
        min_length=settings.MIN_PASSWORD_LENGTH,
        label='Repeat the password',
        strip=False,
        widget=forms.PasswordInput()
    )
    photo = forms.ImageField(required=False)
    class Meta:
        # Persisted against the project's custom user model.
        model = UserProfile
        fields = (
            'username',
            'email',
            'password1',
            'password2',
            'photo'
        )
    def crispy_init(self):
        """Initialize crispy-forms helper (layout, submit button, form action)."""
        self.helper = FormHelper()
        self.helper.form_id = 'id-RegistrationForm'
        self.helper.form_class = 'form-group'
        self.helper.form_method = 'post'
        self.helper.form_action = reverse_lazy('user:api-register')
        self.helper.layout = layout.Layout(
            layout.Field('username'),
            layout.Field('email'),
            layout.Field('password1'),
            layout.Field('password2'),
            layout.Field('photo'),
            layout.Div(
                layout.Submit(
                    'submit',
                    'Register',
                    css_class='btn-success my-2 px-4'
                ),
                css_class='text-center'
            )
        )
    def __init__(self, *args, **kwargs):
        # Build the crispy helper as soon as the form is constructed.
        super().__init__(*args, **kwargs)
        self.crispy_init()
| 30.492308 | 75 | 0.592331 | 1,722 | 0.868819 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.174067 |
aad22ce0ae134c7841d5d6eb61bc2075cbcc5f71 | 3,187 | py | Python | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 137 | 2015-10-23T14:58:42.000Z | 2021-11-18T09:59:16.000Z | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 11 | 2015-10-31T06:51:50.000Z | 2022-02-20T20:22:04.000Z | wadi.py | sensepost/wadi | 7d29ee53d63425029c653fb7c20b4ff4c15f289b | [
"CC0-1.0"
] | 62 | 2015-10-23T14:58:49.000Z | 2021-11-18T09:18:13.000Z | import sys
import os
from multiprocessing import Process, Queue, Manager
from threading import Timer
from wadi_harness import Harness
from wadi_debug_win import Debugger
import time
import hashlib
def test(msg):
    """Debug helper: busy-loop forever echoing *msg* to stdout (never returns)."""
    while True:
        print('Process 2:' + msg)
def test2():
    """Debug helper: announce start, pause two seconds, then print forever."""
    print('Process 1')
    time.sleep(2)
    while True:
        print('Process 1')
def run_harness(t):
    """Child-process entry: run the test-case harness (argv[1]/argv[2]) feeding shared list *t*."""
    Harness(sys.argv[1], sys.argv[2], t).run()
def run_debugger(q):
    """Child-process entry: attach the debugger to a fresh IE instance, reporting into *q*."""
    Debugger(q).run_Browser('IE')
def timeout_debug(dp):
    """Watchdog callback: terminate a debugger process *dp* that has run too long."""
    print('[*] Terminating Debugger Process PID: %d' % dp.pid)
    dp.terminate()
class wadi():
    """
    Fuzzing controller (Python 2 source): supervises a test-case server
    process and a browser-under-debugger process, and dumps crash artifacts
    to disk when the debugger reports a crash.
    """
    def __init__(self, args=None):
        # Optional argument bag; nothing is required at construction time.
        if args:
            self.args = args
        else:
            pass
    def writeTestCases(self,tcases,msg):
        """Persist a crash report plus the last 10 test cases under a folder
        named by the md5 of (crash code + crash address)."""
        self.msg = msg[0]
        self.code = msg[1]
        self.add = msg[2]
        self.testcases = tcases
        self.hash = hashlib.md5()
        self.b = self.code+self.add
        self.hash.update(self.b)
        self.dgst = self.hash.hexdigest()
        self.path = "./"+self.dgst
        if os.path.exists(self.path):
            # Same code+address already seen: de-duplicate by folder name.
            print "[*] Duplicate Crash: %s" % self.dgst
        else:
            os.makedirs(self.path)
            f = open(self.path + "/" +self.dgst+".crash","w+b")
            f.write(self.msg)
            f.close()
            print "[*] Written Crash file to: %s" % self.dgst+".crash"
            for i in range(10):
                self.tcase = self.testcases.pop()
                f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
                f2.write(self.tcase)
                f2.close()
                # NOTE(review): this log omits the "_" used in the real filename.
                print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
        print "[*] Last TestCase Folder '%s'" % self.dgst
    def close(self):
        # Hard exit of the controller process.
        sys.exit()
    def run(self):
        """Supervision loop: (re)spawn server/debugger processes, watchdog the
        debugger, and harvest crashes from the shared queue when it exits."""
        self.queue = Manager().list()
        self.tcases = Manager().list()
        self.server_pid = None
        self.debugger_pid = None
        self.init = 0
        while True:
            if not self.server_pid:
                self.server_process = Process(target=run_harness, args=(self.tcases,))
                self.server_process.start()
                self.server_pid = self.server_process.pid
                print '[*] Running Server Process %s ' % (self.server_pid,)
                #self.server_pid =
            if not self.debugger_pid:
                self.debugger_process = Process(target=run_debugger,args=(self.queue,))
                self.debugger_process.start()
                self.debugger_pid = self.debugger_process.pid
                # 120s watchdog kills a hung debugger so the loop can respawn it.
                timer = Timer(120.0,timeout_debug,(self.debugger_process,))
                timer.daemon = True
                timer.start()
            if not self.debugger_process.is_alive():
                print "[*] Debugger Process %s exited" % self.debugger_pid
                timer.cancel()
                self.lenq = len(self.queue)
                self.lentc = len(self.tcases)
                if self.lenq:
                    self.msg = self.queue.pop()
                    #self.msg = self.queue.get()
                    print "[*] Wooops Crash !!!!"
                    print "[*] %s" % self.msg[0]
                else:
                    print "[*] No Crashes"
                #if not self.tcases.empty():
                if self.lentc and self.lenq:
                    #self.tc = self.tcases.get()
                    self.writeTestCases(self.tcases, self.msg)
                else:
                    print "[*] No TestCases"
                # Force a respawn of the debugger on the next pass.
                self.debugger_pid = None
            else:
                pass
# Entry point: build the fuzzing controller and run its supervision loop forever.
if __name__ == '__main__':
    #try:
    w = wadi()
    w.run()
    #except:
    #   w.close()
| 24.898438 | 81 | 0.612488 | 2,453 | 0.769689 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.168811 |
aad2be6c93fd38e19f49dbbdc525b7e3001efbe1 | 7,720 | py | Python | learnIndependentRegressionModel.py | zawlin/multi-modal-regression | 61aa6c066834ab1373275decc38e361db5c2cf04 | [
"MIT"
] | 29 | 2018-06-21T06:46:17.000Z | 2021-09-02T02:47:30.000Z | learnIndependentRegressionModel.py | zawlin/multi-modal-regression | 61aa6c066834ab1373275decc38e361db5c2cf04 | [
"MIT"
] | 1 | 2018-11-15T01:51:47.000Z | 2018-11-21T10:48:31.000Z | learnIndependentRegressionModel.py | zawlin/multi-modal-regression | 61aa6c066834ab1373275decc38e361db5c2cf04 | [
"MIT"
] | 7 | 2018-06-21T06:46:53.000Z | 2021-10-04T09:32:24.000Z | # -*- coding: utf-8 -*-
"""
Independent model based on Geodesic Regression model R_G
"""
import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.functional as F
from dataGenerators import ImagesAll, TestImages, my_collate
from axisAngle import get_error2, geodesic_loss
from poseModels import model_3layer
from helperFunctions import classes
from featureModels import resnet_model
import numpy as np
import scipy.io as spio
import gc
import os
import time
import progressbar
import argparse
from tensorboardX import SummaryWriter
# Command-line configuration: data paths, network layer sizes, optimization settings.
parser = argparse.ArgumentParser(description='Pure Regression Models')
parser.add_argument('--gpu_id', type=str, default='0')
parser.add_argument('--render_path', type=str, default='data/renderforcnn/')
parser.add_argument('--augmented_path', type=str, default='data/augmented2/')
parser.add_argument('--pascal3d_path', type=str, default='data/flipped_new/test/')
parser.add_argument('--save_str', type=str)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--feature_network', type=str, default='resnet')
parser.add_argument('--N0', type=int, default=2048)
parser.add_argument('--N1', type=int, default=1000)
parser.add_argument('--N2', type=int, default=500)
parser.add_argument('--init_lr', type=float, default=1e-4)
parser.add_argument('--num_epochs', type=int, default=3)
args = parser.parse_args()
print(args)
# Pin this run to the requested GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
# Output locations, all keyed by the run name `save_str`.
results_file = os.path.join('results', args.save_str)
model_file = os.path.join('models', args.save_str + '.tar')
plots_file = os.path.join('plots', args.save_str)
log_dir = os.path.join('logs', args.save_str)
# Pose is regressed as a 3-dim axis-angle vector; one class label per category.
ydata_type = 'axis_angle'
ndim = 3
num_classes = len(classes)
mse_loss = nn.MSELoss().cuda()
gve_loss = geodesic_loss().cuda()
ce_loss = nn.CrossEntropyLoss().cuda()
# Datasets: real (augmented) images, rendered images, and the Pascal3D test split.
real_data = ImagesAll(args.augmented_path, 'real', ydata_type)
render_data = ImagesAll(args.render_path, 'render', ydata_type)
test_data = TestImages(args.pascal3d_path, ydata_type)
# Data loaders; real and render loaders are iterated in lockstep during training.
real_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
render_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)
test_loader = DataLoader(test_data, batch_size=32)
print('Real: {0} \t Render: {1} \t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))
max_iterations = min(len(real_loader), len(render_loader))
class IndependentModel(nn.Module):
def __init__(self):
super().__init__()
self.num_classes = num_classes
self.feature_model = resnet_model('resnet50', 'layer4').cuda()
self.pose_model = model_3layer(args.N0, args.N1, args.N2, ndim).cuda()
def forward(self, x):
x = self.feature_model(x)
x = self.pose_model(x)
x = np.pi*F.tanh(x)
return x
model = IndependentModel()
# print(model)
# loss and optimizer
optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# store stuff
writer = SummaryWriter(log_dir)
count = 0
val_loss = []
# OPTIMIZATION functions
def training_init():
global count, val_loss
model.train()
bar = progressbar.ProgressBar(max_value=max_iterations)
for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
# forward steps
xdata_real = Variable(sample_real['xdata'].cuda())
ydata_real = Variable(sample_real['ydata'].cuda())
output_real = model(xdata_real)
xdata_render = Variable(sample_render['xdata'].cuda())
ydata_render = Variable(sample_render['ydata'].cuda())
output_render = model(xdata_render)
output_pose = torch.cat((output_real, output_render))
gt_pose = torch.cat((ydata_real, ydata_render))
loss = mse_loss(output_pose, gt_pose)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# store
count += 1
writer.add_scalar('train_loss', loss.item(), count)
if i % 1000 == 0:
ytest, yhat_test, test_labels = testing()
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
writer.add_scalar('val_loss', tmp_val_loss, count)
val_loss.append(tmp_val_loss)
# cleanup
del xdata_real, xdata_render, ydata_real, ydata_render
del output_real, output_render, sample_real, sample_render, loss, output_pose, gt_pose
bar.update(i)
# stop
if i == max_iterations:
break
render_loader.dataset.shuffle_images()
real_loader.dataset.shuffle_images()
def training():
    """Main pass: identical to training_init, but optimizes the geodesic (GVE) loss."""
    global count, val_loss
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for step, (real_sample, render_sample) in enumerate(zip(real_loader, render_loader)):
        # Forward both domains through the shared model.
        real_x = Variable(real_sample['xdata'].cuda())
        real_y = Variable(real_sample['ydata'].cuda())
        real_out = model(real_x)
        render_x = Variable(render_sample['xdata'].cuda())
        render_y = Variable(render_sample['ydata'].cuda())
        render_out = model(render_x)
        pred_pose = torch.cat((real_out, render_out))
        true_pose = torch.cat((real_y, render_y))
        loss = gve_loss(pred_pose, true_pose)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Log running training loss.
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        if step % 1000 == 0:
            # Periodic validation snapshot.
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # Release GPU tensors before the next iteration.
        del real_x, render_x, real_y, render_y
        del real_out, render_out, real_sample, render_sample, loss, pred_pose, true_pose
        bar.update(step)
        if step == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
def testing():
    """Evaluate the model on the whole test set.

    Returns (ytrue, ypred, labels): ground-truth targets, model outputs and
    sample labels, each concatenated into a single numpy array. Leaves the
    model back in train mode on return.
    """
    model.eval()
    pred_batches = []
    true_batches = []
    label_batches = []
    for sample in test_loader:
        inputs = Variable(sample['xdata'].cuda())
        batch_label = Variable(sample['label'].cuda())
        batch_output = model(inputs)
        pred_batches.append(batch_output.data.cpu().numpy())
        true_batches.append(sample['ydata'].numpy())
        label_batches.append(sample['label'].numpy())
        # drop GPU tensors promptly and force a collection to cap memory use
        del inputs, batch_label, batch_output, sample
        gc.collect()
    ypred = np.concatenate(pred_batches)
    ytrue = np.concatenate(true_batches)
    labels = np.concatenate(label_batches)
    model.train()
    return ytrue, ypred, labels
def save_checkpoint(filename):
    # Persist only the model weights (state_dict); optimizer state is not saved.
    torch.save(model.state_dict(), filename)
# initialization: warm-up training pass, then a baseline evaluation
training_init()
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
for epoch in range(args.num_epochs):
    tic = time.time()
    # NOTE(review): scheduler.step() is called before the epoch's optimizer
    # steps — the pre-1.1 PyTorch ordering; confirm intended on newer PyTorch.
    scheduler.step()
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytest, yhat_test, test_labels = testing()
    print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
    # time and output
    toc = time.time() - tic
    print('Epoch: {0} done in time {1}s'.format(epoch, toc))
    # cleanup
    gc.collect()
writer.close()
# persist the per-validation losses accumulated in the global val_loss list
val_loss = np.stack(val_loss)
spio.savemat(plots_file, {'val_loss': val_loss})
# evaluate the model one final time and save the predictions
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
| 33.71179 | 152 | 0.748964 | 357 | 0.046244 | 0 | 0 | 0 | 0 | 0 | 0 | 1,137 | 0.14728 |
aad37494decad9fd0ad1fb72dcfce3587fe45cdf | 1,033 | py | Python | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | null | null | null | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | 4 | 2021-03-19T01:50:05.000Z | 2021-09-22T18:52:13.000Z | rick_and_morty_app/views.py | esalcedo94/final_project | 7dce4fae8248d820698220d3289bfb49bd96b2cd | [
"MIT"
] | null | null | null | # from django.shortcuts import render, redirect, get_object_or_404
from .forms import CharacterForm
from rick_and_morty_app.models import Character
from django.views.generic import ListView, CreateView, UpdateView, DetailView, DeleteView
from django.urls import reverse_lazy # new
# Create your views here.
class HomePageView(ListView):
    """List all Character objects on the home page."""
    model = Character
    template_name = 'character_list.html'
class CreateCharacterView(CreateView):
    """Create a Character via CharacterForm, then redirect to the list view."""
    model = Character
    form_class = CharacterForm
    template_name = 'character_form.html'
    success_url = reverse_lazy('character_list')
class CharacterDetailView(DetailView):
    """Show the details page for a single Character."""
    model = Character
    template_name = 'character_details.html'
class CharacterUpdate(UpdateView):
    """Edit a Character's name and lastEpisode, then redirect to the list view."""
    model = Character
    fields = ['name', 'lastEpisode']
    template_name = 'character_update.html'
    success_url = reverse_lazy('character_list')
class DeleteCharacter(DeleteView):
    """Confirm and delete a Character, then redirect to the list view."""
    model = Character
    template_name = 'character_delete.html'
    success_url = reverse_lazy('character_list')
| 31.30303 | 89 | 0.771539 | 715 | 0.692159 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.266215 |
aad397b94b0cb0be8ca7c28476744dda7ab4e655 | 339 | py | Python | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | samples/contacts/pathUtils.py | Trevol/Mask_RCNN | 18308082e2c5fd5b4df5d6e40f009b3ebd66c26d | [
"MIT"
] | null | null | null | import os, sys
def mrcnnPath():
    """Return the absolute path two directory levels above this file."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def currentFilePath(file=None):
    """Return the directory holding *file*; falls back to this module's file
    when *file* is falsy (None or an empty string)."""
    target = file or __file__
    return os.path.dirname(os.path.realpath(target))
def mrcnnToPath():
    # Append the repository root to sys.path so its packages become importable.
    sys.path.append(mrcnnPath())
aad3cf7fbb7dfdec62fdd48f0ef851241425453c | 6,840 | py | Python | event_extractor/train/train.py | chenking2020/event_extract_master | 6b8d470d2caa5ec6785eae07bca04e66fb3734b7 | [
"MIT"
] | 30 | 2021-01-22T08:06:07.000Z | 2022-03-25T14:01:25.000Z | event_extractor/train/train.py | chenking2020/EventTrainServer | 6b8d470d2caa5ec6785eae07bca04e66fb3734b7 | [
"MIT"
] | 4 | 2021-03-29T09:28:12.000Z | 2022-03-25T14:01:14.000Z | event_extractor/train/train.py | chenking2020/EventTrainServer | 6b8d470d2caa5ec6785eae07bca04e66fb3734b7 | [
"MIT"
] | 3 | 2021-01-22T08:06:08.000Z | 2022-02-21T04:04:19.000Z | from __future__ import print_function
import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from event_extractor.dataprocess import data_loader
from event_extractor.train.eval import evaluate
import importlib
import time
class TrainProcess(object):
def __init__(self, params):
self.params = params
def load_data(self):
# ToDo 暂时从本地读取文件,以后改成从库中读取,暂时按照本地文件分训练、验证和测试,以后改成自动切分
data_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "data",
self.params["lang"])
self.all_train_sentences = data_loader.load_sentences(os.path.join(data_path, "train.json"),
self.params["lang"], self.params["seq_len"])
self.all_dev_sentences = data_loader.load_sentences(os.path.join(data_path, "dev.json"), self.params["lang"],
self.params["seq_len"])
_w, self.word_to_id, self.id_to_word = data_loader.word_mapping(self.all_train_sentences)
_s, self.seg_to_id, self.id_to_seg = data_loader.seg_mapping(self.all_train_sentences)
self.id2eventtype, self.eventtype2id, self.id2role, self.role2id = data_loader.load_schema(
os.path.join(data_path, "event_schema.json"))
train_data = data_loader.prepare_dataset(self.all_train_sentences, self.eventtype2id, self.role2id,
self.word_to_id, self.seg_to_id)
dev_data = data_loader.prepare_dataset(self.all_dev_sentences, self.eventtype2id, self.role2id,
self.word_to_id, self.seg_to_id)
self.train_manager = data_loader.BatchManager(train_data, self.params["batch_size"], len(self.eventtype2id),
len(self.role2id), is_sorted=True)
self.dev_manager = data_loader.BatchManager(dev_data, self.params["batch_size"], len(self.eventtype2id),
len(self.role2id), is_sorted=True)
def train(self):
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
model_path = os.path.join(BASE_DIR, "checkpoint")
event_module = importlib.import_module(
"event_extractor.model.{}".format(self.params["model_name"]))
event_model = event_module.EventModule(self.params, len(self.word_to_id), len(self.seg_to_id),
len(self.eventtype2id), len(self.role2id))
event_model.rand_init_word_embedding()
event_model.rand_init_seg_embedding()
event_model.rand_init_s1_position_embedding()
event_model.rand_init_k1_position_embedding()
event_model.rand_init_k2_position_embedding()
optimizer = event_model.set_optimizer()
# tot_length = len(self.all_train_sentences)
print("has train data: {}".format(len(self.all_train_sentences)))
print("has dev data: {}".format(len(self.all_dev_sentences)))
best_f1 = float('-inf')
best_f1_epoch = 0
start_time = time.time()
patience_count = 0
for epoch_idx in range(self.params["epoch"]):
event_model.train()
print("-------------------------------------------------------------------------------------")
epoch_loss = 0
iter_step = 0
for batch in self.train_manager.iter_batch(shuffle=True):
text, t1, t2, s1, s2, k1, k2, o1, o2 = batch
iter_step += 1
step_start_time = time.time()
event_model.zero_grad()
loss = event_model(t1, t2, s1, s2, k1, k2, o1, o2)
epoch_loss += event_model.to_scalar(loss)
loss.backward()
# event_model.clip_grad_norm()
optimizer.step()
print("epoch: %s, current step: %s, current loss: %.4f time use: %s" % (
epoch_idx, iter_step, loss / len(t1),
time.time() - step_start_time))
epoch_loss /= iter_step
# update lr
event_model.adjust_learning_rate(optimizer)
f1, p, r = evaluate(event_model, self.dev_manager)
print("dev: f1: {}, p: {}, r: {}".format(f1, p, r))
if f1 >= best_f1:
best_f1 = f1
best_f1_epoch = epoch_idx
patience_count = 0
print('best average f1: %.4f in epoch_idx: %d , saving...' % (best_f1, best_f1_epoch))
try:
event_model.save_checkpoint({
'epoch': epoch_idx,
'state_dict': event_model.state_dict(),
'optimizer': optimizer.state_dict()}, {
'word_to_id': self.word_to_id,
'id_to_word': self.id_to_word,
'seg_to_id': self.seg_to_id,
'id_to_seg': self.id_to_seg,
"id2eventtype": self.id2eventtype,
"eventtype2id": self.eventtype2id,
"id2role": self.id2role,
"role2id": self.role2id
}, {'params': self.params},
os.path.join(model_path, 'event'))
except Exception as inst:
print(inst)
else:
patience_count += 1
print(
'poor current average f1: %.4f, best average f1: %.4f in epoch_idx: %d' % (
f1, best_f1, best_f1_epoch))
print('epoch: ' + str(epoch_idx) + '\t in ' + str(self.params["epoch"]) + ' take: ' + str(
time.time() - start_time) + ' s')
if patience_count >= self.params["patience"] and epoch_idx >= self.params["least_iters"]:
break
if __name__ == '__main__':
    # Example configuration for Chinese data, kept for reference:
    # train_d = TrainProcess(
    #     {"task_name": "event_test", "lang": "zh", "model_name": "dgcnn", "batch_size": 32, "epochs": 200,
    #      "seq_len": 500, "emb_dim": 128, "drop_out": 0.25,
    #      "update": "adam", "lr": 0.0001, "lr_decay": 0.05, "clip_grad": 5, "epoch": 500, "patience": 15,
    #      "least_iters": 50, "gpu": -1})
    # train_d.load_data()
    # train_d.train()
    # Train the English dgcnn model on CPU (gpu=-1) with early stopping.
    train_d = TrainProcess(
        {"task_name": "event_test", "lang": "en", "model_name": "dgcnn", "batch_size": 32, "epochs": 200,
         "seq_len": 500, "emb_dim": 128, "drop_out": 0.25,
         "update": "adam", "lr": 0.0001, "lr_decay": 0.05, "clip_grad": 5, "epoch": 500, "patience": 15,
         "least_iters": 50, "gpu": -1})
    train_d.load_data()
    train_d.train()
| 47.832168 | 117 | 0.550146 | 5,842 | 0.842758 | 0 | 0 | 0 | 0 | 0 | 0 | 1,479 | 0.213358 |
aad4319f7007dc6fffcb823fea0b0e260ea324c5 | 3,298 | py | Python | src/oci/object_storage/models/commit_multipart_upload_part_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/object_storage/models/commit_multipart_upload_part_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/object_storage/models/commit_multipart_upload_part_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CommitMultipartUploadPartDetails(object):
    """
    To use any of the API operations, you must be authorized in an IAM policy. If you are not authorized,
    talk to an administrator. If you are an administrator who needs to write policies to give users access, see
    `Getting Started with Policies`__.
    __ https://docs.cloud.oracle.com/Content/Identity/Concepts/policygetstarted.htm
    """
    def __init__(self, **kwargs):
        """
        Initializes a new CommitMultipartUploadPartDetails object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param part_num:
            The value to assign to the part_num property of this CommitMultipartUploadPartDetails.
        :type part_num: int
        :param etag:
            The value to assign to the etag property of this CommitMultipartUploadPartDetails.
        :type etag: str
        """
        # Maps each attribute name to its declared Swagger/OpenAPI type.
        self.swagger_types = {
            'part_num': 'int',
            'etag': 'str'
        }
        # Maps each Python attribute name to its JSON key in the API payload.
        self.attribute_map = {
            'part_num': 'partNum',
            'etag': 'etag'
        }
        self._part_num = None
        self._etag = None
    @property
    def part_num(self):
        """
        **[Required]** Gets the part_num of this CommitMultipartUploadPartDetails.
        The part number for this part.
        :return: The part_num of this CommitMultipartUploadPartDetails.
        :rtype: int
        """
        return self._part_num
    @part_num.setter
    def part_num(self, part_num):
        """
        Sets the part_num of this CommitMultipartUploadPartDetails.
        The part number for this part.
        :param part_num: The part_num of this CommitMultipartUploadPartDetails.
        :type: int
        """
        self._part_num = part_num
    @property
    def etag(self):
        """
        **[Required]** Gets the etag of this CommitMultipartUploadPartDetails.
        The entity tag (ETag) returned when this part was uploaded.
        :return: The etag of this CommitMultipartUploadPartDetails.
        :rtype: str
        """
        return self._etag
    @etag.setter
    def etag(self, etag):
        """
        Sets the etag of this CommitMultipartUploadPartDetails.
        The entity tag (ETag) returned when this part was uploaded.
        :param etag: The etag of this CommitMultipartUploadPartDetails.
        :type: str
        """
        self._etag = etag
    def __repr__(self):
        # Flat, human-readable rendering of all model attributes.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Attribute-wise equality; None never compares equal.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Defined explicitly for Python 2 compatibility.
        return not self == other
| 31.113208 | 245 | 0.654942 | 2,758 | 0.836264 | 0 | 0 | 2,788 | 0.845361 | 0 | 0 | 2,227 | 0.675258 |
aad4be15ea0538f236e4812d499d7d48b22d6200 | 3,114 | py | Python | sorter/lib/data_handler.py | 1shooperman/gr-sorter | 6efa1b1fc9c7a5d0c8c77d8018122b3bac5730e6 | [
"MIT"
] | null | null | null | sorter/lib/data_handler.py | 1shooperman/gr-sorter | 6efa1b1fc9c7a5d0c8c77d8018122b3bac5730e6 | [
"MIT"
] | 17 | 2018-09-03T15:48:33.000Z | 2021-05-07T20:14:24.000Z | sorter/lib/data_handler.py | 1shooperman/gr-sorter | 6efa1b1fc9c7a5d0c8c77d8018122b3bac5730e6 | [
"MIT"
] | null | null | null | ''' data_handler.py '''
import os
from sorter.lib.db import DB
from sorter.lib.book_utils import get_by_id, get_by_isbn
from sorter.lib.parse_xml import parse_isbn13_response, parse_id_response
def store_data(books, db_file):
    '''
    Persist the book tuples into the rankings table of the given database.
    Each entry in *books* is a 10-tuple matching the column order below.
    '''
    insert_sql = '''INSERT INTO rankings(id, isbn, isbn13, title,
            image_url, publication_year, ratings_count, average_rating,
            author, link) VALUES(?,?,?,?,?,?,?,?,?,?)'''
    database = DB(db_file)
    database.create_connection()
    for record in books:
        database.insertupdate(insert_sql, record)
    database.close_connection()
def get_books(db_file):
    '''
    Fetch every previously stored book row from the rankings table.
    '''
    database = DB(db_file)
    database.create_connection()
    rows = database.query('select * from rankings')
    database.close_connection()
    return rows
def get_books_with_missing_data(db_file):
    '''
    Fetch the stored book rows that still lack a publication year.
    '''
    database = DB(db_file)
    database.create_connection()
    incomplete_rows = database.query('select * from rankings where publication_year is null')
    database.close_connection()
    return incomplete_rows
def dump_data(db_file):
    '''
    Delete the provided data file; a no-op when it is not a regular file.
    '''
    if not os.path.isfile(db_file):
        return
    os.remove(db_file)
def clean_data(db_name, defaults):
    '''
    Plug in missing data for every stored book that lacks a publication year.
    No-op when the database file does not exist.

    Book tuple layout (as stored by store_data):
    book[0] = ID
    book[1] = ISBN
    book[2] = ISBN13
    book[3] = title
    book[4] = image url
    book[5] = pub year
    book[6] = Total Ratings
    book[7] = avg rating
    book[8] = author
    book[9] = link
    '''
    db_file = os.path.abspath(db_name)
    if os.path.isfile(db_file):
        books = get_books_with_missing_data(db_file)
        # BUG FIX: the original used map(), which is a lazy iterator under
        # Python 3 — it was never consumed, so update_book never actually ran.
        # Iterate explicitly so every incomplete book really gets updated.
        for book in books:
            update_book(book, db_file, defaults)
def update_book(book, db_file, defaults):
    '''
    Fill in this book's missing data from the remote API and persist it.
    Prefers an ISBN13 lookup (book[2]); falls back to a lookup by ID (book[0]).
    Does nothing when neither identifier is present.
    '''
    qry = None
    if book[2] is not None:
        # ISBN13 known: only the publication year needs filling in.
        xml_response = get_by_isbn(book[2], defaults)
        new_book = parse_isbn13_response(xml_response)
        qry = 'UPDATE rankings set publication_year = ? where isbn13 = ?'
        vals = [new_book[5], book[2]]
    elif book[0] is not None:
        # Only the ID known: the lookup also recovers both ISBN forms.
        xml_response = get_by_id(book[0], defaults)
        new_book = parse_id_response(xml_response)
        qry = 'UPDATE rankings set publication_year = ?, isbn = ?, isbn13 = ? where id = ?'
        vals = [new_book[5], new_book[1], new_book[2], book[0]]
    if qry is not None:
        database = DB(db_file)
        database.create_connection()
        database.insertupdate(qry, vals)
        database.close_connection()
def manually_update_books(data, db_file):
    '''
    Update books based on parsed POST data.

    :param data: iterable of dicts with keys 'attr', 'value' and 'book_id'
    :param db_file: path to the database file
    :raises ValueError: if an entry names a column outside the rankings schema
    '''
    # SECURITY FIX: 'attr' comes from POST data and is interpolated into the
    # SQL text (a column name cannot be bound as a ? parameter), so restrict
    # it to the known rankings columns to prevent SQL injection.
    allowed_columns = {'isbn', 'isbn13', 'title', 'image_url',
                       'publication_year', 'ratings_count', 'average_rating',
                       'author', 'link'}
    # Validate and build all statements up front, before touching the database.
    updates = []
    for book in data:
        if book['attr'] == 'id':
            continue
        if book['attr'] not in allowed_columns:
            raise ValueError('invalid column name: %r' % (book['attr'],))
        qry = 'UPDATE rankings set %s = ? where id = ?' % book['attr']
        updates.append((qry, [book['value'], int(book['book_id'])]))
    database = DB(db_file)
    database.create_connection()
    for qry, vals in updates:
        database.insertupdate(qry, vals)
    database.close_connection()
aad6602e278463189f96a290a221da480e19eb2d | 3,054 | py | Python | data_conversions/prepare_las_filelists.py | nazarred/PointCNN | 41043270a2ccde4fddf03e7e90e5c3f511c7454c | [
"MIT"
] | null | null | null | data_conversions/prepare_las_filelists.py | nazarred/PointCNN | 41043270a2ccde4fddf03e7e90e5c3f511c7454c | [
"MIT"
] | null | null | null | data_conversions/prepare_las_filelists.py | nazarred/PointCNN | 41043270a2ccde4fddf03e7e90e5c3f511c7454c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Prepare Filelists for Semantic3D Segmentation Task.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import math
import pathlib
import random
import argparse
from datetime import datetime
from logger import setup_logging
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--folder', '-f', help='Path to data folder')
parser.add_argument('--h5_num', '-d', help='Number of h5 files to be loaded each time', type=int, default=4)
parser.add_argument('--repeat_num', '-r', help='Number of repeatly using each loaded h5 list', type=int, default=2)
parser.add_argument(
'--log_path', '-lp', help='Path where log file should be saved.')
args = parser.parse_args()
setup_logging(args.log_path)
logger = logging.getLogger(__name__)
logger.info(args)
root = args.folder if args.folder else '../../data/las/'
splits = ['train', 'val', 'test']
split_filelists = dict()
for split in splits:
if not pathlib.Path(os.path.join(root, split)).exists():
continue
split_filelists[split] = ['./%s/%s\n' % (split, filename) for filename in os.listdir(os.path.join(root, split))
if filename.endswith('.h5')]
train_h5 = split_filelists.get('train')
if train_h5:
random.shuffle(train_h5)
train_list = os.path.join(root, 'train_data_files.txt')
logger.info('{}-Saving {}...'.format(datetime.now(), train_list))
with open(train_list, 'w') as filelist:
list_num = math.ceil(len(train_h5) / args.h5_num)
for list_idx in range(list_num):
train_list_i = os.path.join(root, 'filelists', 'train_files_g_%d.txt' % list_idx)
with open(train_list_i, 'w') as filelist_i:
for h5_idx in range(args.h5_num):
filename_idx = list_idx * args.h5_num + h5_idx
if filename_idx > len(train_h5) - 1:
break
filename_h5 = train_h5[filename_idx]
filelist_i.write('../' + filename_h5)
for repeat_idx in range(args.repeat_num):
filelist.write('./filelists/train_files_g_%d.txt\n' % list_idx)
val_h5 = split_filelists.get('val')
if val_h5:
val_list = os.path.join(root, 'val_data_files.txt')
logger.info('{}-Saving {}...'.format(datetime.now(), val_list))
with open(val_list, 'w') as filelist:
for filename_h5 in val_h5:
filelist.write(filename_h5)
test_h5 = split_filelists.get('test')
if test_h5:
test_list = os.path.join(root, 'test_files.txt')
logger.info('{}-Saving {}...'.format(datetime.now(), test_list))
with open(test_list, 'w') as filelist:
for filename_h5 in test_h5:
filelist.write(filename_h5)
if __name__ == '__main__':
main()
| 37.703704 | 119 | 0.614276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.183366 |
aad86497556c746db67d036ea08fdadb6f84750c | 7,382 | py | Python | survol/sources_types/CIM_ComputerSystem/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | survol/sources_types/CIM_ComputerSystem/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | survol/sources_types/CIM_ComputerSystem/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | """
Computer system.
Scripts related to the class CIM_ComputerSystem.
"""
import sys
import socket
import lib_util
# This must be defined here, because dockit cannot load modules from here,
# and this ontology would not be defined.
def EntityOntology():
	"""Ontology of CIM_ComputerSystem: a single key attribute, the host name."""
	return (["Name"],)
import lib_common
from lib_properties import pc
# This returns a nice name given the parameter of the object.
def EntityName(entity_ids_arr):
	"""Return a display name for the object: simply its first (only) id."""
	return entity_ids_arr[0]
# We do not care about the entity_host as this is simply the machine from which
# this machine was detected, so nothing more than a computer on the same network.
def UniversalAlias(entity_ids_arr,entity_host,entity_class):
	# Deliberate short-circuit: the DNS-based alias below was "TOO SLOW !!!",
	# so the raw (lower-cased) name is returned and the rest is unreachable.
	return "ThisComputer:"+entity_ids_arr[0].lower()
	# --- dead code below, kept for reference ---
	try:
		# (entity_ids_arr=[u'desktop-ni99v8e'], entity_host='192.168.0.14', entity_class=u'CIM_ComputerSystem')
		# might possibly throw:
		# "[Errno 11004] getaddrinfo failed "
		aHostName = lib_util.GlobalGetHostByName(entity_ids_arr[0])
	except:
		aHostName = entity_host
	# Hostnames are case-insensitive, RFC4343 https://tools.ietf.org/html/rfc4343
	return "ThisComputer:"+aHostName.lower()
# This adds the WBEM and WMI urls related to the entity.
def AddWbemWmiServers(grph,rootNode,entity_host, nameSpace, entity_type, entity_id):
	# Adds to the graph the WBEM, WMI and Survol-agent urls related to the entity.
	DEBUG("AddWbemWmiServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s", entity_host,nameSpace,entity_type,entity_id)
	# Default to the local host when no host is given.
	if entity_host:
		host_wbem_wmi = entity_host
	else:
		host_wbem_wmi = lib_util.currentHostname
	# This receives a map and a RDF property, and must add the corresponding nodes to the rootNode
	# in the given graph. The same callback signature is used elsewhere to generate HTML tables.
	def AddWMap(theMap,propData):
		for urlSubj in theMap:
			grph.add( ( rootNode, propData, urlSubj ) )
			for theProp, urlObj in theMap[urlSubj]:
				grph.add( ( urlSubj, theProp, urlObj ) )
	mapWbem = AddWbemServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
	AddWMap(mapWbem,pc.property_wbem_data)
	mapWmi = AddWmiServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
	AddWMap(mapWmi,pc.property_wmi_data)
	mapSurvol = AddSurvolServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
	AddWMap(mapSurvol,pc.property_survol_agent)
def AddWbemServers(entity_host, nameSpace, entity_type, entity_id):
	# Builds a map: WBEM url node -> list of (property, object-node) pairs.
	# Returns an empty map when the lib_wbem support module is not available.
	DEBUG("AddWbemServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
	mapWbem = dict()
	try:
		# Maybe some of these servers are not able to display anything about this object.
		import lib_wbem
		wbem_servers_desc_list = lib_wbem.GetWbemUrlsTyped( entity_host, nameSpace, entity_type, entity_id )
		# sys.stderr.write("wbem_servers_desc_list len=%d\n" % len(wbem_servers_desc_list))
		for url_server in wbem_servers_desc_list:
			# TODO: Filter only entity_host
			# sys.stderr.write("url_server=%s\n" % str(url_server))
			if lib_wbem.ValidClassWbem(entity_type):
				# url_server[0] is the WBEM url, url_server[1] its host name.
				wbemNode = lib_common.NodeUrl(url_server[0])
				if entity_host:
					txtLiteral = "WBEM url, host=%s class=%s"%(entity_host,entity_type)
				else:
					txtLiteral = "WBEM url, current host, class=%s"%(entity_type)
				wbemHostNode = lib_common.gUriGen.HostnameUri( url_server[1] )
				mapWbem[wbemNode] = [
					( pc.property_information, lib_common.NodeLiteral(txtLiteral ) ),
					( pc.property_host, wbemHostNode )
				]
				# TODO: This could try to pen a HTTP server on this machine, possibly with port 80.
				# grph.add( ( wbemHostNode, pc.property_information, lib_common.NodeLiteral("Url to host") ) )
	except ImportError:
		pass
	return mapWbem
def AddWmiServers(entity_host, nameSpace, entity_type, entity_id):
	# Builds a map: WMI url node -> list of (property, object-node) pairs.
	DEBUG("AddWmiServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
	# This will not work on Linux.
	import lib_wmi
	mapWmi = dict()
	if lib_wmi.ValidClassWmi(entity_type):
		# TODO: We may also loop on all machines which may describe this object.
		wmiurl = lib_wmi.GetWmiUrl( entity_host, nameSpace, entity_type, entity_id )
		# sys.stderr.write("wmiurl=%s\n" % str(wmiurl))
		if wmiurl:
			wmiNode = lib_common.NodeUrl(wmiurl)
			if entity_host:
				txtLiteral = "WMI url, host=%s class=%s"%(entity_host,entity_type)
			else:
				txtLiteral = "WMI url, current host, class=%s"%(entity_type)
			mapWmi[wmiNode] = [
				(pc.property_information, lib_common.NodeLiteral(txtLiteral))
			]
			# Link to the host's WMI portal when a remote host is given.
			if entity_host:
				nodePortalWmi = lib_util.UrlPortalWmi(entity_host)
				mapWmi[wmiNode].append(
					(pc.property_rdf_data_nolist2, nodePortalWmi)
				)
	return mapWmi
def AddSurvolServers(entity_host, nameSpace, entity_type, entity_id):
	# Placeholder: should map Survol-agent url nodes like AddWbemServers/AddWmiServers.
	DEBUG("AddSurvolServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
	mapSurvol = dict()
	# TODO: Not implemented yet.
	return mapSurvol
# g = geocoder.ip('216.58.206.37')
# g.json
# {'status': 'OK', 'city': u'Mountain View', 'ok': True, 'encoding': 'utf-8', 'ip': u'216.58.206.37',
# 'hostname': u'lhr35s10-in-f5.1e100.net', 'provider': 'ipinfo', 'state': u'California', 'location': '216.58.206.37',
# 'status_code': 200, 'country': u'US', 'lat': 37.4192, 'org': u'AS15169 Google Inc.', 'lng': -122.0574, 'postal': u'94043',
# 'address': u'Mountain View, California, US'}
#
# g = geocoder.ip('192.168.1.22')
# g.json
# {'status': 'ERROR - No results found', 'status_code': 200, 'encoding': 'utf-8', 'ip': u'192.168.1.22',
# 'location': '192.168.1.22', 'provider': 'ipinfo', 'ok': False}
def AddGeocoder(grph,node,ipv4):
	# Adds geolocation attributes (city, country, lat/lng...) of the IP address
	# to the node, using the optional "geocoder" package. Best-effort: silently
	# does nothing when the package is missing or the lookup fails/times out.
	try:
		import geocoder
	except ImportError:
		return
	try:
		geoc = geocoder.ip(ipv4)
		# BUG FIX: iteritems() is Python-2-only; items() works on both 2 and 3.
		for jsonKey,jsonVal in geoc.json.items():
			# Conversion to str otherwise numbers are displayed as "float".
			grph.add( ( node, lib_common.MakeProp(jsonKey), lib_common.NodeLiteral(str(jsonVal)) ) )
	except Exception:
		# This might be a simple time-out.
		return
# The URL is hard-coded but very important because it allows to visit another host with WMI access.
def AddInfo(grph,node,entity_ids_arr):
	# Adds IP address, FQDN and geolocation data of the machine to its node.
	theHostname = entity_ids_arr[0]
	try:
		ipv4 = lib_util.GlobalGetHostByName(theHostname)
	except:
		# Name resolution failed: tag the node and stop here.
		grph.add( ( node, pc.property_information, lib_common.NodeLiteral("Unknown machine") ) )
		return
	grph.add( ( node, lib_common.MakeProp("IP address"), lib_common.NodeLiteral(ipv4) ) )
	fqdn = socket.getfqdn(theHostname)
	grph.add( ( node, lib_common.MakeProp("FQDN"), lib_common.NodeLiteral(fqdn) ) )
	# No need to do that, because it is done in entity.py if mode!=json.
	# nameSpace = ""
	# AddWbemWmiServers(grph,node,theHostname, nameSpace, "CIM_ComputerSystem", "Name="+theHostname)
	AddGeocoder(grph,node,ipv4)
| 38.649215 | 131 | 0.671227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,015 | 0.408426 |
aad8819b95f363cf2961e65958f9749138888b61 | 133 | py | Python | Primeiros Passos/1-DAY ONE/Sabendo se o nome da cidade tem santo.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | Primeiros Passos/1-DAY ONE/Sabendo se o nome da cidade tem santo.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | Primeiros Passos/1-DAY ONE/Sabendo se o nome da cidade tem santo.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | cidade = str(input('Qual é o Nome da sua Cidade ?: ')).strip()
print('Sua cidade possui o nome Santo?',cidade[:5].upper() == 'SANTO') | 66.5 | 70 | 0.654135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.552239 |
aadbf2b63958e62ec45145d598a2e30bf7ec61e8 | 391 | py | Python | carbonplan_forest_risks/setup/loading.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 20 | 2021-05-01T18:08:07.000Z | 2022-03-09T10:24:53.000Z | carbonplan_forest_risks/setup/loading.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 15 | 2021-03-31T05:20:55.000Z | 2022-02-28T13:02:58.000Z | carbonplan_forest_risks/setup/loading.py | norlandrhagen/forest-risks | 2cbc87064ac05299dba952c9f0cb8022ffd8909a | [
"MIT"
] | 4 | 2020-10-26T20:52:30.000Z | 2021-02-19T07:42:52.000Z | import pathlib
import urlpath
def loading(store=None):
    """Return the base path/URL for the given data store.

    :param store: one of 'gs' (Google Cloud Storage), 'az' (Azure blob),
        or 'local' (a 'workdir' folder under the user's home directory)
    :returns: a urlpath.URL for remote stores, a pathlib.Path for 'local'
    :raises ValueError: if store is None or not one of the known names
    """
    if store is None:
        raise ValueError('data store not specified')
    if store == 'gs':
        base = urlpath.URL('gs://')
    elif store == 'az':
        base = urlpath.URL('https://carbonplan.blob.core.windows.net')
    elif store == 'local':
        base = pathlib.Path(pathlib.Path.home() / 'workdir')
    else:
        # BUG FIX: previously an unknown store fell through and raised an
        # opaque UnboundLocalError on the return statement below.
        raise ValueError('unknown data store: %r' % (store,))
    return base
| 23 | 70 | 0.608696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.253197 |