max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
datasets.py | Confusezius/CVPR2020_PADS | 62 | 12770351 | """
Dataloaders for CUB200-2011, CARS196 and Stanford Online Products.
"""
"""==================================================================================================="""
################### LIBRARIES ###################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, os, sys, pandas as pd, csv, copy
import torch, torch.nn as nn, matplotlib.pyplot as plt, random
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
import pretrainedmodels.utils as utils
import auxiliaries as aux
"""==================================================================================================="""
################ FUNCTION TO RETURN ALL DATALOADERS NECESSARY ####################
def give_dataloaders(dataset, opt):
    """
    Build the PyTorch dataloaders for the dataset selected in ``opt.dataset``.

    Args:
        dataset: legacy positional dataset name (dispatch actually uses opt.dataset).
        opt:     options namespace; needs at least dataset, bs, kernels, source_path.
    Returns:
        dict mapping split name ('training', 'testing', ...) to a DataLoader.
    Raises:
        Exception: if opt.dataset names an unknown dataset.
    """
    ### ImageNet Properties
    opt.mean, opt.std, opt.input_space, opt.input_range = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], 'RGB', [0,1]

    # Backward-compatible alias for an older option name.
    if 'class_samples_per_class' in vars(opt).keys():
        opt.samples_per_class = opt.class_samples_per_class

    if opt.dataset=='cub200':
        datasets = give_CUB200_datasets(opt)
    elif opt.dataset=='cars196':
        datasets = give_CARS196_datasets(opt)
    elif opt.dataset=='online_products':
        datasets = give_OnlineProducts_datasets(opt)
    else:
        # Report the option actually used for dispatch (opt.dataset), not the
        # possibly-stale positional argument.
        raise Exception('No Dataset >{}< available!'.format(opt.dataset))

    dataloaders = {}
    # Renamed loop variable so it no longer shadows the 'dataset' parameter.
    for key, split_dataset in datasets.items():
        if split_dataset is not None:
            is_val = split_dataset.is_validation
            # Validation/test splits are iterated deterministically and keep the
            # final partial batch; training shuffles and drops it.
            dataloaders[key] = torch.utils.data.DataLoader(
                split_dataset, batch_size=opt.bs, num_workers=opt.kernels,
                shuffle=not is_val, pin_memory=True, drop_last=not is_val)
    return dataloaders
"""==================================================================================================="""
################# FUNCTIONS TO RETURN TRAIN/VAL PYTORCH DATASETS FOR CUB200, CARS196 AND STANFORD ONLINE PRODUCTS ####################################
def give_CUB200_datasets(opt):
    """
    This function generates a training and testing dataloader for Metric Learning on the CUB-200-2011 dataset.
    For Metric Learning, the dataset is sorted by name, and the first half is used for training while the last half is used for testing.
    So no random shuffling of classes.
    """
    image_sourcepath = opt.source_path+'/images'
    # Class folders are named '<id>.<name>'; sort numerically by id, skipping macOS '._' artefacts.
    image_classes = sorted([x for x in os.listdir(image_sourcepath) if '._' not in x], key=lambda x: int(x.split('.')[0]))
    # class id -> human-readable class name
    conversion = {int(x.split('.')[0]):x.split('.')[-1] for x in image_classes}
    # class id -> sorted image paths, then flattened into (class id, path) tuples.
    image_list = {int(key.split('.')[0]):sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key) if '._' not in x]) for key in image_classes}
    image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
    image_list = [x for y in image_list for x in y]
    # Regroup into a dict keyed by zero-based class id.
    image_dict = {}
    for key, img_path in image_list:
        key = key-1  # CUB class ids start at 1; shift to start at 0.
        if not key in image_dict.keys():
            image_dict[key] = []
        image_dict[key].append(img_path)
    keys = sorted(list(image_dict.keys()))
    # random.shuffle(keys)
    #Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
    train,test = keys[:len(keys)//2], keys[len(keys)//2:]
    if opt.sampling=='learned':
        # Learned sampling additionally needs a validation split carved from the training data.
        if opt.train_val_split_by_class:
            # Split by whole classes: first fraction of classes -> train, rest -> val.
            train_val_split = int(len(train)*opt.train_val_split)
            train, val = train[:train_val_split], train[train_val_split:]
            train_image_dict, val_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in val}, {key:image_dict[key] for key in test}
        else:
            # Split within each class: evenly spaced sample indices go to train, the rest to val.
            train_image_dict, val_image_dict = {},{}
            for key in train:
                # train_ixs = np.random.choice(len(image_dict[key]), int(len(image_dict[key])*opt.train_val_split), replace=False)
                train_ixs = np.array(list(set(np.round(np.linspace(0,len(image_dict[key])-1,int(len(image_dict[key])*opt.train_val_split)))))).astype(int)
                val_ixs = np.array([x for x in range(len(image_dict[key])) if x not in train_ixs])
                train_image_dict[key] = np.array(image_dict[key])[train_ixs]
                val_image_dict[key] = np.array(image_dict[key])[val_ixs]
    else:
        train_image_dict = {key:image_dict[key] for key in train}
    test_image_dict = {key:image_dict[key] for key in test}
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
    # Training data without augmentation, used for metric evaluation on the train split.
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    train_dataset.conversion = conversion
    test_dataset.conversion = conversion
    eval_dataset.conversion = conversion
    if opt.sampling!='learned':
        return {'training':train_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
    else:
        val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
        val_dataset.conversion = conversion
        return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
def give_CARS196_datasets(opt):
    """
    This function generates a training and testing dataloader for Metric Learning on the CARS-196 dataset.
    For Metric Learning, the dataset is sorted by name, and the first half is used for training while the last half is used for testing.
    So no random shuffling of classes.
    """
    image_sourcepath = opt.source_path+'/images'
    image_classes = sorted([x for x in os.listdir(image_sourcepath)])
    # enumerated class index -> folder/class name
    conversion = {i:x for i,x in enumerate(image_classes)}
    # class index -> sorted image paths, then flattened into (class index, path) tuples.
    image_list = {i:sorted([image_sourcepath+'/'+key+'/'+x for x in os.listdir(image_sourcepath+'/'+key)]) for i,key in enumerate(image_classes)}
    image_list = [[(key,img_path) for img_path in image_list[key]] for key in image_list.keys()]
    image_list = [x for y in image_list for x in y]
    image_dict = {}
    for key, img_path in image_list:
        if not key in image_dict.keys():
            image_dict[key] = []
        image_dict[key].append(img_path)
    keys = sorted(list(image_dict.keys()))
    # random.shuffle(keys)
    #Following "Deep Metric Learning via Lifted Structured Feature Embedding", we use the first half of classes for training.
    train,test = keys[:len(keys)//2], keys[len(keys)//2:]
    if opt.sampling=='learned':
        if opt.train_val_split_by_class:
            # Carve the validation split out of whole training classes.
            train_val_split = int(len(train)*opt.train_val_split)
            train, val = train[:train_val_split], train[train_val_split:]
            train_image_dict, val_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in val}, {key:image_dict[key] for key in test}
        else:
            # Per-class random split between train and validation samples.
            train_image_dict, val_image_dict = {},{}
            for key in train:
                train_ixs = np.random.choice(len(image_dict[key]), int(len(image_dict[key])*opt.train_val_split), replace=False)
                val_ixs = np.array([x for x in range(len(image_dict[key])) if x not in train_ixs])
                train_image_dict[key] = np.array(image_dict[key])[train_ixs]
                val_image_dict[key] = np.array(image_dict[key])[val_ixs]
            test_image_dict = {key:image_dict[key] for key in test}
        val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
        val_dataset.conversion = conversion
    else:
        train_image_dict, test_image_dict = {key:image_dict[key] for key in train}, {key:image_dict[key] for key in test}
        val_dataset = None
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
    # Training data without augmentation, used for metric evaluation on the train split.
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    train_dataset.conversion = conversion
    test_dataset.conversion = conversion
    eval_dataset.conversion = conversion
    return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset}
def give_OnlineProducts_datasets(opt):
    """
    Generate PyTorch datasets for Metric Learning on Stanford Online Products.
    Train/test membership follows the official Ebay_train.txt/Ebay_test.txt info
    files; class ids are shifted to start at 0. Returns training/validation/
    testing/evaluation datasets plus a super-class evaluation dataset.
    """
    image_sourcepath = opt.source_path+'/images'
    training_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_train.txt', header=0, delimiter=' ')
    test_files = pd.read_table(opt.source_path+'/Info_Files/Ebay_test.txt', header=0, delimiter=' ')

    # Map class ids and super-class ids to their (human-readable) folder names.
    conversion, super_conversion = {},{}
    for class_id, path in zip(training_files['class_id'],training_files['path']):
        conversion[class_id] = path.split('/')[0]
    for super_class_id, path in zip(training_files['super_class_id'],training_files['path']):
        # BUGFIX: these entries were previously written into 'conversion', which
        # clobbered regular class names (super-class ids overlap low class ids)
        # and left 'super_conversion' -- attached to the super-class dataset
        # below -- permanently empty.
        super_conversion[super_class_id] = path.split('/')[0]
    for class_id, path in zip(test_files['class_id'],test_files['path']):
        conversion[class_id] = path.split('/')[0]

    # Group image paths per (zero-based) class id / super-class id.
    train_image_dict, test_image_dict, super_train_image_dict = {},{},{}
    for key, img_path in zip(training_files['class_id'],training_files['path']):
        key = key-1
        if not key in train_image_dict.keys():
            train_image_dict[key] = []
        train_image_dict[key].append(image_sourcepath+'/'+img_path)
    for key, img_path in zip(test_files['class_id'],test_files['path']):
        key = key-1
        if not key in test_image_dict.keys():
            test_image_dict[key] = []
        test_image_dict[key].append(image_sourcepath+'/'+img_path)
    for key, img_path in zip(training_files['super_class_id'],training_files['path']):
        key = key-1
        if not key in super_train_image_dict.keys():
            super_train_image_dict[key] = []
        super_train_image_dict[key].append(image_sourcepath+'/'+img_path)

    train_keys = list(train_image_dict.keys())
    if opt.sampling=='learned':
        # Carve a validation split (whole classes) out of the training classes.
        train_val_split = int(len(train_keys)*opt.train_val_split)
        train, val = train_keys[:train_val_split], train_keys[train_val_split:]
        train_image_dict, val_image_dict = {key:train_image_dict[key] for key in train}, {key:train_image_dict[key] for key in val}
        val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
        val_dataset.conversion = conversion
    else:
        val_dataset = None

    super_train_dataset = BaseTripletDataset(super_train_image_dict, opt, is_validation=True)
    train_dataset = BaseTripletDataset(train_image_dict, opt, samples_per_class=opt.samples_per_class)
    test_dataset = BaseTripletDataset(test_image_dict, opt, is_validation=True)
    # Training data without augmentation, used for metric evaluation on the train split.
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    super_train_dataset.conversion = super_conversion
    train_dataset.conversion = conversion
    test_dataset.conversion = conversion
    eval_dataset.conversion = conversion
    return {'training':train_dataset, 'validation':val_dataset, 'testing':test_dataset, 'evaluation':eval_dataset, 'super_evaluation':super_train_dataset}
"""==================================================================================================="""
################## BASIC PYTORCH DATASET USED FOR ALL DATASETS ##################################
class BaseTripletDataset(Dataset):
    """
    Basic PyTorch dataset shared by all benchmarks. In training mode it draws up
    to <samples_per_class> samples from one class before switching to another
    class; in validation mode it simply iterates over all images deterministically.
    """
    def __init__(self, image_dict, opt, samples_per_class=8, is_validation=False):
        """
        Args:
            image_dict:        dict of class key -> list of image file paths.
            opt:               options namespace; must provide at least 'arch'.
            samples_per_class: samples drawn per class before moving on (training only).
            is_validation:     if True, disable augmentation and class-based sampling.
        """
        self.is_validation = is_validation
        self.pars = opt
        self.image_dict = image_dict
        self.samples_per_class = samples_per_class
        #####
        self.init_setup()
        ##### Option 2: Use Mean/Stds on which the networks were trained
        if 'bninception' in opt.arch:
            # NOTE(review): these BNInception statistics look like they assume a
            # different input scaling than the ImageNet defaults -- confirm.
            normalize = transforms.Normalize(mean=[0.502, 0.4588, 0.4078],std=[0.0039, 0.0039, 0.0039])
        else:
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        transf_list = []
        # Random augmentation for training; deterministic resize/center-crop for evaluation.
        if not self.is_validation:
            transf_list.extend([transforms.RandomResizedCrop(size=224), transforms.RandomHorizontalFlip(0.5)])
        else:
            transf_list.extend([transforms.Resize(256), transforms.CenterCrop(224)])
        transf_list.extend([transforms.ToTensor(),
                            normalize])
        self.transform = transforms.Compose(transf_list)
    def init_setup(self):
        """Re-key classes to 0..n-1, flatten the image list and reset sampling state."""
        self.n_files = np.sum([len(self.image_dict[key]) for key in self.image_dict.keys()])
        self.avail_classes = sorted(list(self.image_dict.keys()))
        # Re-index classes to consecutive integers starting at 0.
        self.image_dict = {i:self.image_dict[key] for i,key in enumerate(self.avail_classes)}
        self.avail_classes = sorted(list(self.image_dict.keys()))
        if not self.is_validation:
            #Select current class to sample images from up to <samples_per_class>
            self.current_class = np.random.randint(len(self.avail_classes))
            self.classes_visited = [self.current_class, self.current_class]
            self.n_samples_drawn = 0
        # if self.is_validation or self.samples_per_class==1:
        # Flat (path, class) list used in validation mode and when samples_per_class==1.
        self.image_list = [[(x,key) for x in self.image_dict[key]] for key in self.image_dict.keys()]
        self.image_list = [x for y in self.image_list for x in y]
        # self.sample_probs = np.ones(len(self.image_list))/len(self.image_list)
        self.is_init = True
    def ensure_3dim(self, img):
        """Convert 2-channel-size (grayscale) PIL images to 3-channel RGB."""
        if len(img.size)==2:
            img = img.convert('RGB')
        return img
    def __getitem__(self, idx):
        """Return a (class label, transformed image tensor) tuple."""
        if self.is_init:
            # First call after setup: anchor the sampling at a class derived from idx.
            self.current_class = self.avail_classes[idx%len(self.avail_classes)]
            self.is_init = False
        if not self.is_validation:
            if self.samples_per_class==1:
                return (self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0]))))
            if self.n_samples_drawn==self.samples_per_class:
                #Once enough samples per class have been drawn, we choose another class to draw samples from.
                #Note that we ensure with self.classes_visited that no class is chosen if it had been chosen
                #previously or one before that.
                counter = copy.deepcopy(self.avail_classes)
                for prev_class in self.classes_visited:
                    if prev_class in counter: counter.remove(prev_class)
                self.current_class = counter[idx%len(counter)]
                self.classes_visited = self.classes_visited[1:]+[self.current_class]
                self.n_samples_drawn = 0
            class_sample_idx = idx%len(self.image_dict[self.current_class])
            self.n_samples_drawn += 1
            out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
            if 'bninception' in self.pars.arch:
                # Reverses channel order (presumably RGB->BGR for BNInception -- confirm).
                out_img = out_img[range(3)[::-1],:]
            return (self.current_class,out_img)
        else:
            out_img = self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
            if 'bninception' in self.pars.arch:
                out_img = out_img[range(3)[::-1],:]
            return (self.image_list[idx][-1], out_img)
    def __len__(self):
        """Total number of images across all classes."""
        return self.n_files
| 2.0625 | 2 |
from typing import NewType

# Distinct type alias so RPC endpoint URLs are not interchangeable with
# arbitrary strings under static type checking.
RPCEndpoint = NewType("RPCEndpoint", str)
testcases/test_4_get_domain.py | evilbrave/REST_API_TESTCASES | 1 | 12770353 | <reponame>evilbrave/REST_API_TESTCASES
import requests
from signature import Signature
import common_data
import test_1_device_auth
from requests.utils import quote
# Base URL of the service under test; switch to the local instance if needed.
url = common_data.oss_url
#url = "http://127.0.0.1:8888"
# Endpoint prefix for the domain API.
path = "/v1/domains/"
def init_headers(headers):
    """Populate the common request headers; the signature is filled per test case."""
    headers['X-Api-Key'] = common_data.x_api_key
    headers['X-Signature'] = ""
    return headers

def init_body_content(body_content):
    """Populate the common query parameters; the access token is filled in __main__."""
    body_content['certificate_serial'] = common_data.certificate_serial
    body_content['access_token'] = ""
    return body_content
def testcase_0(headers, body_content, domain):
    """Happy path: correctly signed GET /v1/domains/<domain> should return HTTP 200."""
    headers = headers.copy()
    body_content = body_content.copy()
    # Signature is computed over the query parameters plus the domain.
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 200 :
        print "TEST CASE 0 OK"
    else:
        print "TEST CASE 0 FAILED"
        print response.status_code
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text

def testcase_1(headers, body_content, domain):
    """Missing X-Api-Key header: server should answer HTTP 400."""
    headers = headers.copy()
    body_content = body_content.copy()
    headers.pop('X-Api-Key')
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 :#and response.json()['code'] == "400.0":
        print "TEST CASE 1 OK"
    else:
        print "TEST CASE 1 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
def testcase_2(headers, body_content, domain):
    """Missing X-Signature header: expect HTTP 400 with error code 400.0."""
    headers = headers.copy()
    body_content = body_content.copy()
    headers.pop('X-Signature')
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.0":
        print "TEST CASE 2 OK"
    else:
        print "TEST CASE 2 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text

def testcase_3(headers, body_content, domain):
    """Empty domain path segment: expect HTTP 400."""
    headers = headers.copy()
    body_content = body_content.copy()
    domain = ""
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 :#and response.json()['code'] == "400.0":
        print "TEST CASE 3 OK"
    else:
        print "TEST CASE 3 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
# no certificate serial
def testcase_4(headers, body_content, domain):
    """Missing certificate_serial parameter: expect HTTP 400 with error code 400.2."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content.pop('certificate_serial')
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.2":
        print "TEST CASE 4 OK"
    else:
        print "TEST CASE 4 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text

# no access token
def testcase_5(headers, body_content, domain):
    """Missing access_token parameter: expect HTTP 400 with error code 400.6."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content.pop('access_token')
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.6":
        print "TEST CASE 5 OK"
    else:
        print "TEST CASE 5 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
# invalid x-api-key
def testcase_6(headers, body_content, domain):
    """Invalid X-Api-Key header value: expect HTTP 400."""
    headers = headers.copy()
    body_content = body_content.copy()
    headers['X-Api-Key'] = "INVALID_X_API_KEY"
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400:# and response.json()['code'] == "400.6":
        print "TEST CASE 6 OK"
    else:
        print "TEST CASE 6 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text

# invalid x-signature
def testcase_7(headers, body_content, domain):
    """Invalid X-Signature header value: expect HTTP 400 with error code 400.1."""
    headers = headers.copy()
    body_content = body_content.copy()
    headers['X-Signature'] = "INVALID_X_SIGNATURE"
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.1":
        print "TEST CASE 7 OK"
    else:
        print "TEST CASE 7 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
# invalid domain
def testcase_8(headers, body_content, domain):
    """Domain containing an illegal character ('@'), URL-quoted: expect HTTP 400."""
    headers = headers.copy()
    body_content = body_content.copy()
    domain = "INVALID_DOMAIN@"
    concat_dict = body_content.copy()
    concat_dict['domain'] = quote(domain)
    concat_text = common_data.get_concat_text(concat_dict)
    print concat_text
    # NOTE(review): signature is recomputed over the RAW (unquoted) domain here,
    # overriding the quoted variant above -- confirm which form the server expects.
    concat_text = body_content['access_token'] + body_content['certificate_serial'] + "INVALID_DOMAIN@"
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    print signed_signature
    headers['X-Signature'] = signed_signature
    print quote(domain)
    response = requests.get(url + path + quote(domain), params=body_content, headers=headers)
    if response.status_code == 400:# and response.json()['code'] == "400.6":
        print "TEST CASE 8 OK"
    else:
        print "TEST CASE 8 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text

# invalid certificate serial
def testcase_9(headers, body_content, domain):
    """Unknown certificate_serial value: expect HTTP 400 with error code 400.3."""
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['certificate_serial'] = "INVALID_CERTIFICATE_SERIAL"
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    # Sign with the VALID key so only the serial parameter itself is wrong.
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "400.3":
        print "TEST CASE 9 OK"
    else:
        print "TEST CASE 9 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
# invalid access_token
def testcase_10(headers, body_content, domain):
    """Invalid access_token value: expect HTTP 400 carrying error code 401.0.

    NOTE(review): the expected status is 400 while the error code is 401.0 --
    confirm this combination against the API specification.
    """
    headers = headers.copy()
    body_content = body_content.copy()
    body_content['access_token'] = "INVALID_ACCESS_TOKEN"
    concat_dict = body_content.copy()
    concat_dict['domain'] = domain
    concat_text = common_data.get_concat_text(concat_dict)
    signature = Signature()
    signature.load_key(common_data.certificate_serial)
    signed_signature = signature.sign(concat_text)
    headers['X-Signature'] = signed_signature
    response = requests.get(url + path + domain, params=body_content, headers=headers)
    if response.status_code == 400 and response.json()['code'] == "401.0":
        print "TEST CASE 10 OK"
    else:
        print "TEST CASE 10 FAILED"
        print response.status_code
        print "HTTP Path: " + url + path + domain
        print "HTTP Header:" + str(headers)
        print "HTTP Body:" + str(body_content)
        print response.text
if __name__ == '__main__':
    # set headers
    headers = dict()
    headers = init_headers(headers)
    # set body
    body_content = dict()
    init_body_content(body_content)
    # Obtain a fresh access token through the device-authentication flow.
    sso_tokens = test_1_device_auth.get_device_authentication_token()
    if sso_tokens.has_key('access_token') and sso_tokens.has_key('refresh_token'):
        body_content['access_token'] = sso_tokens['access_token']
    else:
        print "[Error] init access token failed!"
        exit(-1)
    domain = "TEST_DOMAIN"
    # Only the happy-path case runs by default; uncomment the rest as needed.
    testcase_0(headers, body_content, domain)
    # testcase_1(headers, body_content, domain)
    # testcase_2(headers, body_content, domain)
    # testcase_3(headers, body_content, domain)
    # testcase_4(headers, body_content, domain)
    # testcase_5(headers, body_content, domain)
    # testcase_6(headers, body_content, domain)
    # testcase_7(headers, body_content, domain)
    # testcase_8(headers, body_content, domain)
    # testcase_9(headers, body_content, domain)
    # testcase_10(headers, body_content, domain)
rpeakdetection/Evaluation.py | Fabrizio1994/ECGClassification | 10 | 12770354 | from rpeakdetection.Utility import Utility
# Shared annotation helper used to load reference beat locations.
util = Utility()

class Evaluation:
    """Recall/precision evaluation of detected R-peaks against reference annotations."""
    def evaluate(self, rpeaks, name, evaluation_width, rule_based, test_index=None):
        """
        Compare detected peak positions against the annotated beat locations of
        record `name`, counting a detection as correct if it falls within
        +/- evaluation_width/2 samples of a true beat.

        Returns a (recall, precision) tuple.
        """
        real_locations = util.remove_non_beat(name, rule_based)[0]
        if test_index is not None:
            # Restrict evaluation to the test portion of the record.
            real_locations = list(filter(lambda x: x >= test_index, real_locations))
        window_size = int(evaluation_width / 2)
        # Expand each true location into a tolerance window of accepted positions.
        Y = list()
        for y in real_locations:
            Y.extend([y + q for q in range(-window_size, window_size)])
        # Drop detections closer than evaluation_width to the last kept one.
        filtered_peaks = list()
        prev = 0
        for peak in rpeaks:
            if peak - prev > evaluation_width:
                filtered_peaks.append(peak)
                prev = peak
        correct_detected = set(filtered_peaks).intersection(set(Y))
        recall = len(correct_detected) / len(real_locations)
        if len(rpeaks) != 0:
            # Precision is computed over ALL detections, not only the filtered ones.
            precision = len(correct_detected) / len(rpeaks)
        else:
            precision = 0
        return recall, precision
| 2.484375 | 2 |
backend/api/migrations/0011_annotation.py | cerob/bounswe2018group6 | 12 | 12770355 | # Generated by Django 2.1.2 on 2018-12-28 16:58
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the 'Annotation' model: per-user JSON annotations attached to any
    content object via a generic (content_type, object_id) relation."""

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('api', '0010_auto_20181228_1625'),
    ]

    operations = [
        migrations.CreateModel(
            name='Annotation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.PositiveIntegerField()),
                # Arbitrary annotation payload stored as PostgreSQL JSONB.
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # Generic foreign key target: (content_type, object_id).
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotation_set', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 1.875 | 2 |
multivariate.py | ChaofanChen/ORCSimulator | 0 | 12770356 | <reponame>ChaofanChen/ORCSimulator
import matplotlib.pyplot as plt
from geothermal_orc_design import multivariate_optimization
from collections import OrderedDict
from znes_plotting import plot, shared
import pandas as pd
import numpy as np
import itertools
from matplotlib.ticker import MaxNLocator
import json
import sys
# Optional CLI argument: directory containing the configuration file.
cur_dir = sys.argv[1] if len(sys.argv) > 1 else '.'

# Load the optimization setup; the context manager already closes the file,
# so the redundant f.close() was removed.
with open(cur_dir + '/test_multi.json', 'r') as f:
    input_data = json.load(f)

# Run the multivariate ORC optimization. (Removed a dead pre-initialization of
# opt_results that was immediately overwritten by this call's return value.)
result, opt_results = multivariate_optimization(**input_data)

# Bar chart of net power output per working fluid, annotated with the values.
fig, ax = plt.subplots(figsize=(8, 6), dpi=100)
ax.bar(opt_results['fluid'], opt_results['net_power'], color='blue', width=0.4)
for i, v in enumerate(np.array(opt_results['net_power'])):
    ax.text(i - 0.25, v + 0.1, str(round(v, 2)), color='black', fontweight='bold', fontsize=12)
ax.set(xlabel= 'Working fluid', ylabel='Net power output (MW)')
ax.yaxis.label.set_size(18)
ax.xaxis.label.set_size(18)
plt.ylim(10, 13)
ax.tick_params(axis="x", labelsize=15, width=0.5)
ax.tick_params(axis="y", labelsize=15, width=0.5)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(1)
fig.autofmt_xdate()
ax.grid(b=True, which='major', linewidth=0.5)
plt.savefig('diff_working_fluid_net_power.pdf')
plt.show()

# Dump the optimization results as a LaTeX table for the paper.
print(opt_results.to_latex(escape=False, na_rep='-', float_format='%.2f'))
#fluid=list(result.keys())
#
#for a in range(len(fluid)):
# df = pd.DataFrame(list(result.values())[a])
#
# # Plot the surface.
# X=list(df['T_before_tur'])
# Y=list(df['IHE_sizing'])
# Z=list(df['dT_air'])
# C=list(df['net power output'])
#
## from mpl_toolkits.mplot3d import Axes3D
## fig = plt.figure()
## ax = Axes3D(fig)
## surf = ax.plot_trisurf(X, Z, Y, cmap=plt.cm.jet)
## surf = ax.contour3D(X, Z, C, cmap=plt.cm.jet, linewidth=0.01)
## fig.colorbar(surf, shrink=0.5, aspect=5)
## plt.show()
#
# fig = plt.figure(figsize=(16, 12), dpi=100)
# ax = fig.add_subplot(111, projection='3d')
# surf = ax.scatter(X, Z, C, c=C, cmap=plt.cm.jet, s=80)
# ax.set_xlabel('Turbine inlet temperature (°C)')
# ax.set_ylabel('$\Delta T_{air}$ (°C)')
# ax.set_zlabel('Net power output (MW)')
# ax.xaxis.labelpad=12
# ax.yaxis.labelpad=12
# ax.zaxis.labelpad=12
#
# ax.yaxis.label.set_size(20)
# ax.xaxis.label.set_size(20)
# ax.zaxis.label.set_size(20)
# ax.tick_params(axis="both", labelsize=18)
# cbar = fig.colorbar(surf, ax=ax)
# cbar.set_label("Net power output (MW)", size=20)
# cbar.ax.tick_params(labelsize=18)
# plt.savefig('Multivariate_optimization_' + fluid[a] + '.pdf')
# plt.show()
#%%
#for fluid, data in result.items():
# combinations = list(itertools.combinations(input_data['variables'].keys(), 2))
#
# for combination in combinations:
#
# ax = plot.scatter(
# data=data[data['valid']], x=combination[0], y=combination[1],
# xlabel=input_data['variables'][combination[0]]['label'],
# ylabel=input_data['variables'][combination[1]]['label'],
# colormap=plt.cm.get_cmap('RdYlBu'), c=input_data['objective'])
#
# # Create multipage PDF from plots
# shared.create_multipage_pdf(fluid + '_multivariate_scatterplots.pdf')
| 2.234375 | 2 |
tests/test_directory.py | eightBEC/watson-visual-recognition-tooling | 0 | 12770357 | <gh_stars>0
# coding: utf-8
from unittest import TestCase
from .context import vrtool
import os
class DirectoryTest(TestCase):
    """Unit tests for vrtool.Directory against the bundled 'testfiles' fixture tree."""

    def setUp(self):
        # Fixture directory shipped next to this test module.
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        self.directory = vrtool.Directory(path)

    def test_get_image_files(self):
        """Image listing returns all supported extensions regardless of case."""
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        files = self.directory.get_image_files(path)
        self.assertEqual(sorted(files), ['001.JPEG', '002.JPG', '003.jpeg', '004.jpg', '005.png','006.PNG'])

    def test_get_subfolders_all(self):
        """Without a filter, all immediate subfolders are returned."""
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        subfolders = self.directory.get_sub_folders(path)
        self.assertEqual(sorted(subfolders), ['negatives', 'subfolder1', 'subfolder2'])

    def test_get_subfolders_regex(self):
        """A regex filter restricts the returned subfolders."""
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        subfolders = self.directory.get_sub_folders(path, '.*1')
        self.assertEqual(subfolders, ['subfolder1'])

    def test_has_sub_folder(self):
        """Instance-level check for the presence of subfolders."""
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        has_sub_folder = self.directory.has_sub_folder(path)
        self.assertTrue(has_sub_folder)
        path = os.path.join(os.path.dirname(__file__),'testfiles', 'subfolder2')
        has_sub_folder = self.directory.has_sub_folder(path)
        self.assertFalse(has_sub_folder)

    def test_has_sub_folders(self):
        """Static/class-level check for the presence of subfolders."""
        path = os.path.join(os.path.dirname(__file__),'testfiles')
        has_sub_folders = vrtool.Directory.has_sub_folders(path)
        self.assertTrue(has_sub_folders)
        path = os.path.join(os.path.dirname(__file__),'testfiles', 'subfolder2')
        has_sub_folders = vrtool.Directory.has_sub_folders(path)
        self.assertFalse(has_sub_folders)
| 2.65625 | 3 |
plugin.video.vstream/resources/sites/enstream.py | akuala/REPO.KUALA | 2 | 12770358 | <reponame>akuala/REPO.KUALA
#-*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
from resources.lib.gui.hoster import cHosterGui
from resources.lib.gui.gui import cGui
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
from resources.lib.util import Unquote
# Site registration metadata consumed by the vStream plugin framework.
SITE_IDENTIFIER = 'enstream'
SITE_NAME = 'Enstream'
SITE_DESC = 'Regarder tous vos films streaming complets, gratuit et illimité'

URL_MAIN = 'https://ww1.enstream.co/'

# Entry-point tuples: (URL or flag, handler function name).
FUNCTION_SEARCH = 'showMovies'
URL_SEARCH = ('', FUNCTION_SEARCH)
URL_SEARCH_MOVIES = (URL_SEARCH[0], FUNCTION_SEARCH)
MOVIE_MOVIE = (True, 'load')
MOVIE_NEWS = (URL_MAIN + 'films-streaming/', 'showMovies')
MOVIE_GENRES = (True, 'showGenres')
def load():
    """Build the site's root menu: search, latest movies and genre browsing."""
    oGui = cGui()
    # (siteUrl parameter, handler function, label, icon)
    aEntries = (
        (URL_SEARCH[0], 'showSearch', 'Recherche', 'search.png'),
        (MOVIE_NEWS[0], MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png'),
        (MOVIE_GENRES[0], MOVIE_GENRES[1], 'Films (Genres)', 'genres.png'),
    )
    for sSiteUrl, sFunction, sLabel, sIcon in aEntries:
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('siteUrl', sSiteUrl)
        oGui.addDir(SITE_IDENTIFIER, sFunction, sLabel, sIcon, oOutputParameterHandler)
    oGui.setEndOfDirectory()
def showSearch():
    """Prompt the user with the on-screen keyboard and list matching movies."""
    oGui = cGui()
    sQuery = oGui.showKeyBoard()
    if sQuery != False:
        showMovies(sQuery)
    oGui.setEndOfDirectory()
    return
def showGenres():
    """List the site's genre categories as browsable directories."""
    oGui = cGui()
    # (display title, genre listing URL)
    aGenres = [
        ('Action', URL_MAIN + 'genre/action/'),
        ('Animation', URL_MAIN + 'genre/animation/'),
        ('Aventure', URL_MAIN + 'genre/aventure/'),
        ('Biopic', URL_MAIN + 'genre/biopic/'),
        ('Comédie', URL_MAIN + 'genre/comedie/'),
        ('Comédie Dramatique', URL_MAIN + 'genre/comedie-dramatique/'),
        ('Comédie Musicale', URL_MAIN + 'genre/comedie-musical/'),
        ('Drame', URL_MAIN + 'genre/drame/'),
        ('Epouvante Horreur', URL_MAIN + 'genre/epouvante-horreur/'),
        ('Espionnage', URL_MAIN + 'genre/espionnage/'),
        ('Famille', URL_MAIN + 'genre/famille/'),
        ('Fantastique', URL_MAIN + 'genre/fantastique/'),
        ('Guerre', URL_MAIN + 'genre/guerre/'),
        ('Historique', URL_MAIN + 'genre/historique/'),
        ('Judiciaire', URL_MAIN + 'genre/judiciaire/'),
        ('Musical', URL_MAIN + 'genre/musical/'),
        ('Péplum', URL_MAIN + 'genre/peplum/'),
        ('Policier', URL_MAIN + 'genre/policier/'),
        ('Romance', URL_MAIN + 'genre/romance/'),
        ('Science Fiction', URL_MAIN + 'genre/science-fiction/'),
        ('Thriller', URL_MAIN + 'genre/thriller/'),
        ('Western', URL_MAIN + 'genre/western/'),
    ]
    for sTitle, sUrl in aGenres:
        oOutputParameterHandler = cOutputParameterHandler()
        oOutputParameterHandler.addParameter('siteUrl', sUrl)
        oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
    oGui.setEndOfDirectory()
def showMovies(sSearch = ''):
    """List movies, either from a search POST or from a listing/genre page.

    With a non-empty ``sSearch`` the site's search endpoint is POSTed;
    otherwise the page URL comes from the navigation parameters.
    """
    oGui = cGui()
    if sSearch:
        # Search goes through a dedicated POST endpoint with the query in 'q'.
        sUrl = URL_MAIN + 'search.php'
        oRequestHandler = cRequestHandler(sUrl)
        oRequestHandler.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
        oRequestHandler.addParameters('q', Unquote(sSearch))
    else:
        oInputParameterHandler = cInputParameterHandler()
        sUrl = oInputParameterHandler.getValue('siteUrl')
        oRequestHandler = cRequestHandler(sUrl)
    oRequestHandler.addHeaderEntry('Referer', URL_MAIN)
    sHtmlContent = oRequestHandler.request()
    # The HTML markup differs between search results, genre pages and the
    # default listing (which additionally exposes a short description).
    if sSearch:
        sPattern = '<a href="([^"]+)".+?url\((.+?)\).+?<div class="title"> (.+?) </div>'
    elif 'genre/' in sUrl:
        sPattern = 'film-uno"><a href="([^"]+)".+?data-src="([^"]+)".+?alt="([^"]+)"'
    else:
        sPattern = 'film-uno"><a href="([^"]+)".+?data-src="([^"]+)".+?alt="([^"]+)".+?short-story">([^<]+)'
    oParser = cParser()
    aResult = oParser.parse(sHtmlContent, sPattern)
    if (aResult[0] == False):
        oGui.addText(SITE_IDENTIFIER)
    if (aResult[0] == True):
        for aEntry in aResult[1]:
            sUrl = aEntry[0]
            sThumb = aEntry[1]
            sTitle = aEntry[2]
            sDesc = ''
            # Only the default listing pattern captures a 4th (description) group.
            if len(aEntry) > 3:
                sDesc = aEntry[3]
            oOutputParameterHandler = cOutputParameterHandler()
            oOutputParameterHandler.addParameter('siteUrl', sUrl)
            oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
            oOutputParameterHandler.addParameter('sThumb', sThumb)
            oGui.addMovie(SITE_IDENTIFIER, 'showHoster', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
    if not sSearch:
        # Search results are not paginated; listings and genre pages may be.
        sNextPage = __checkForNextPage(sHtmlContent)
        if (sNextPage != False):
            oOutputParameterHandler = cOutputParameterHandler()
            oOutputParameterHandler.addParameter('siteUrl', sNextPage)
            oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler)
    oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
    """Return the absolute URL of the next result page, or False if none."""
    sPattern = 'class=\'Paginaactual\'.+?a href=\'([^"]+?)\''
    aResult = cParser().parse(sHtmlContent, sPattern)
    if aResult[0] == True:
        # The captured href is site-relative; URL_MAIN ends with '/' so drop it.
        return URL_MAIN[:-1] + aResult[1][0]
    return False
def showHoster():
    """List the hoster links available for the selected movie."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sUrl = oInputParameterHandler.getValue('siteUrl')
    sThumb = oInputParameterHandler.getValue('sThumb')
    sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
    oParser = cParser()
    oRequestHandler = cRequestHandler(sUrl)
    sHtmlContent = oRequestHandler.request()
    # Each hoster entry exposes a player id, an access code and a host name.
    sPattern = 'data-url="([^"]+)".+?data-code="([^"]+)".+?mobile">([^<]+)'
    aResult = oParser.parse(sHtmlContent, sPattern)
    if (aResult[0] == False):
        oGui.addText(SITE_IDENTIFIER)
    if (aResult[0] == True):
        for aEntry in aResult[1]:
            sDataUrl = aEntry[0]
            sDataCode = aEntry[1]
            sHost = aEntry[2].capitalize()
            sDesc = ''
            # Skip hosters that are not enabled/supported.
            oHoster = cHosterGui().checkHoster(sHost)
            if not oHoster:
                continue
            sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
            # Intermediate player URL that redirects to the real hoster page.
            lien = URL_MAIN + 'Players.php?PPl=' + sDataUrl + '&CData=' + sDataCode
            oOutputParameterHandler = cOutputParameterHandler()
            oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
            oOutputParameterHandler.addParameter('sThumb', sThumb)
            oOutputParameterHandler.addParameter('siteUrl', lien)
            oOutputParameterHandler.addParameter('referer', sUrl)
            oGui.addLink(SITE_IDENTIFIER, 'showHostersLinks', sTitle, sThumb, sDesc, oOutputParameterHandler)
    oGui.setEndOfDirectory()
def showHostersLinks():
    """Follow the intermediate player URL and hand the real hoster link to the GUI."""
    oGui = cGui()
    oInputParameterHandler = cInputParameterHandler()
    sPlayerUrl = oInputParameterHandler.getValue('siteUrl')
    sDisplayTitle = oInputParameterHandler.getValue('sMovieTitle')
    sThumbnail = oInputParameterHandler.getValue('sThumb')
    sReferer = oInputParameterHandler.getValue('referer')
    # The player URL redirects to the hoster page; only the final URL matters.
    oRequestHandler = cRequestHandler(sPlayerUrl)
    oRequestHandler.addHeaderEntry('Referer', sReferer)
    oRequestHandler.request()
    sHosterUrl = oRequestHandler.getRealUrl()
    oHoster = cHosterGui().checkHoster(sHosterUrl)
    if oHoster != False:
        oHoster.setDisplayName(sDisplayTitle)
        oHoster.setFileName(sDisplayTitle)
        cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumbnail)
    oGui.setEndOfDirectory()
| 2.0625 | 2 |
tests/api/v3_1_0/test_identity_groups.py | CiscoISE/ciscoisesdk | 36 | 12770359 | <reponame>CiscoISE/ciscoisesdk<filename>tests/api/v3_1_0/test_identity_groups.py
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI identity_groups API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
# Skip every test in this module unless the targeted ISE version matches the
# 3.1.0 fixtures these schema ids were generated for.
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_identity_group_by_name(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_1f18bdd1938755409bf6db6b29e85d3a_v3_1_0').validate(obj.response)
    return True


def get_identity_group_by_name(api):
    """Call the endpoint with a placeholder name."""
    return api.identity_groups.get_identity_group_by_name(
        name='string'
    )


@pytest.mark.identity_groups
def test_get_identity_group_by_name(api, validator):
    try:
        assert is_valid_get_identity_group_by_name(
            validator,
            get_identity_group_by_name(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_identity_group_by_name_default(api):
    """Call the endpoint relying on defaults where possible."""
    return api.identity_groups.get_identity_group_by_name(
        name='string'
    )


@pytest.mark.identity_groups
def test_get_identity_group_by_name_default(api, validator):
    try:
        assert is_valid_get_identity_group_by_name(
            validator,
            get_identity_group_by_name_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_get_identity_group_by_id(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_ca3df31c13b857e6b5dbc8357a8ab010_v3_1_0').validate(obj.response)
    return True


def get_identity_group_by_id(api):
    """Call the endpoint with a placeholder id."""
    return api.identity_groups.get_identity_group_by_id(
        id='string'
    )


@pytest.mark.identity_groups
def test_get_identity_group_by_id(api, validator):
    try:
        assert is_valid_get_identity_group_by_id(
            validator,
            get_identity_group_by_id(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_identity_group_by_id_default(api):
    """Call the endpoint relying on defaults where possible."""
    return api.identity_groups.get_identity_group_by_id(
        id='string'
    )


@pytest.mark.identity_groups
def test_get_identity_group_by_id_default(api, validator):
    try:
        assert is_valid_get_identity_group_by_id(
            validator,
            get_identity_group_by_id_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_update_identity_group_by_id(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_1c0689e940ba5526946ad15976cc3365_v3_1_0').validate(obj.response)
    return True


def update_identity_group_by_id(api):
    """Call the update endpoint with placeholder field values."""
    return api.identity_groups.update_identity_group_by_id(
        active_validation=False,
        description='string',
        id='string',
        name='string',
        parent='string',
        payload=None
    )


@pytest.mark.identity_groups
def test_update_identity_group_by_id(api, validator):
    try:
        assert is_valid_update_identity_group_by_id(
            validator,
            update_identity_group_by_id(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def update_identity_group_by_id_default(api):
    """Call the update endpoint with only the required id set."""
    return api.identity_groups.update_identity_group_by_id(
        active_validation=False,
        id='string',
        description=None,
        name=None,
        parent=None,
        payload=None
    )


@pytest.mark.identity_groups
def test_update_identity_group_by_id_default(api, validator):
    try:
        assert is_valid_update_identity_group_by_id(
            validator,
            update_identity_group_by_id_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_get_identity_groups(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_9d904c521059563490c4a93871b33d51_v3_1_0').validate(obj.response)
    return True


def get_identity_groups(api):
    """Call the list endpoint with every paging/filter parameter populated."""
    return api.identity_groups.get_identity_groups(
        filter='value1,value2',
        filter_type='string',
        page=0,
        size=0,
        sortasc='string',
        sortdsc='string'
    )


@pytest.mark.identity_groups
def test_get_identity_groups(api, validator):
    try:
        assert is_valid_get_identity_groups(
            validator,
            get_identity_groups(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_identity_groups_default(api):
    """Call the list endpoint with every optional parameter left unset."""
    return api.identity_groups.get_identity_groups(
        filter=None,
        filter_type=None,
        page=None,
        size=None,
        sortasc=None,
        sortdsc=None
    )


@pytest.mark.identity_groups
def test_get_identity_groups_default(api, validator):
    try:
        assert is_valid_get_identity_groups(
            validator,
            get_identity_groups_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_create_identity_group(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_592250bf19f653f9a5c48d1fb1890409_v3_1_0').validate(obj.response)
    return True


def create_identity_group(api):
    """Call the create endpoint with placeholder field values."""
    return api.identity_groups.create_identity_group(
        active_validation=False,
        description='string',
        name='string',
        parent='string',
        payload=None
    )


@pytest.mark.identity_groups
def test_create_identity_group(api, validator):
    try:
        assert is_valid_create_identity_group(
            validator,
            create_identity_group(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def create_identity_group_default(api):
    """Call the create endpoint with every optional field left unset."""
    return api.identity_groups.create_identity_group(
        active_validation=False,
        description=None,
        name=None,
        parent=None,
        payload=None
    )


@pytest.mark.identity_groups
def test_create_identity_group_default(api, validator):
    try:
        assert is_valid_create_identity_group(
            validator,
            create_identity_group_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
def is_valid_get_version(json_schema_validate, obj):
    """Return True when *obj* looks like a REST response and matches the schema."""
    if not obj:
        return False
    for attr in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attr)
    json_schema_validate('jsd_aab79aee0b455bfea8a6d7c6464a2a09_v3_1_0').validate(obj.response)
    return True


def get_version(api):
    """Call the version endpoint (takes no arguments)."""
    return api.identity_groups.get_version(
    )


@pytest.mark.identity_groups
def test_get_version(api, validator):
    try:
        assert is_valid_get_version(
            validator,
            get_version(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print("ERROR: {error}".format(error=exc))
            raise exc


def get_version_default(api):
    """Call the version endpoint (no optional parameters exist)."""
    return api.identity_groups.get_version(
    )


@pytest.mark.identity_groups
def test_get_version_default(api, validator):
    try:
        assert is_valid_get_version(
            validator,
            get_version_default(api)
        )
    except Exception as exc:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise exc
| 1.6875 | 2 |
test_pyqt2.py | orange5rs/GPRPy | 0 | 12770360 | import sys
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtGui, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MplCanvas(FigureCanvasQTAgg):
    """Qt widget wrapping a Matplotlib figure with a single Axes."""

    def __init__(self, parent=None, width=5, height=4, dpi=100):
        figure = Figure(figsize=(width, height), dpi=dpi)
        # Keep a handle on the axes so callers can plot directly.
        self.axes = figure.add_subplot(111)
        super(MplCanvas, self).__init__(figure)
class MainWindow(QtWidgets.QMainWindow):
    """Main window embedding a Matplotlib canvas and its navigation toolbar."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)

        canvas = MplCanvas(self, width=5, height=4, dpi=100)
        canvas.axes.plot([0, 1, 2, 3, 4], [10, 1, 20, 3, 40])

        # The toolbar takes the canvas first, then the parent window.
        toolbar = NavigationToolbar(canvas, self)

        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(toolbar)
        vbox.addWidget(canvas)

        # QMainWindow needs a central widget to host the layout.
        container = QtWidgets.QWidget()
        container.setLayout(vbox)
        self.setCentralWidget(container)

        self.show()
# Module-level entry point: construct the Qt application, build the window
# and run the event loop until the window is closed.
app = QtWidgets.QApplication(sys.argv)
w = MainWindow()
app.exec_()
| 2.78125 | 3 |
Ogrenciler/Burcu/05DATASTRUCTRE/nesnetaban.py | ProEgitim/Python-Dersleri-BEM | 1 | 12770361 | class Calisan():
def __init__(self,isim,maas):
print("Çalışan sınıfının init fonsksiy") | 1.929688 | 2 |
fridgeai/gui/mainwindow.py | PosaLusa24/fridgeai | 1 | 12770362 | import os
import sqlite3
from fridgeai import camera
from PyQt5 import QtCore, QtGui, QtWidgets
from fridgeai.gui.manual import Ui_Manual
from fridgeai.gui.predict import Ui_predict
from fridgeai.gui.testing import Ui_List
from fridgeai.gui.learn import Ui_Learn
from datetime import date
class Ui_MainWindow(object):
    """Hand-maintained Qt UI class for the fridge dashboard.

    Builds the 1024x600 main screen: an inventory table backed by the
    SQLite database at data/item.db, static status labels (time,
    temperature, pressure/leakage) and buttons opening the Add / Learn /
    Manual / List sub-windows. The table is refreshed once per second.
    """

    def setupUi(self, MainWindow):
        """Create all widgets, wire up signals and start the 1 s refresh timer."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1024, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Full-window dark-blue backdrop.
        self.wallpaper = QtWidgets.QLabel(self.centralwidget)
        self.wallpaper.setGeometry(QtCore.QRect(-7, -5, 1931, 1080))
        self.wallpaper.setStyleSheet("QLabel{\n"
"    background-color:\"#1D283D\"\n"
"}")
        self.wallpaper.setText("")
        self.wallpaper.setObjectName("wallpaper")
        # Decorative background behind the inventory panel.
        self.inventory_wallpaper = QtWidgets.QLabel(self.centralwidget)
        self.inventory_wallpaper.setGeometry(QtCore.QRect(60, 40, 401, 531))
        self.inventory_wallpaper.setText("")
        self.inventory_wallpaper.setPixmap(QtGui.QPixmap("98adaa-2048x1536.png"))
        self.inventory_wallpaper.setObjectName("inventory_wallpaper")
        # "Inventory" heading (text set in retranslateUi).
        self.inventor_title = QtWidgets.QLabel(self.centralwidget)
        self.inventor_title.setGeometry(QtCore.QRect(170, 50, 171, 91))
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        self.inventor_title.setPalette(palette)
        font = QtGui.QFont()
        font.setPointSize(22)
        font.setBold(False)
        font.setWeight(50)
        self.inventor_title.setFont(font)
        self.inventor_title.setObjectName("inventor_title")
        # Thin separator line under the heading.
        self.vector = QtWidgets.QLabel(self.centralwidget)
        self.vector.setGeometry(QtCore.QRect(90, 140, 341, 2))
        self.vector.setStyleSheet("QLabel{\n"
"background-color:black\n"
"}")
        self.vector.setText("")
        self.vector.setObjectName("vector")
        # Inventory table: column 0 = item name, column 1 = expiry date.
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setGeometry(QtCore.QRect(90, 160, 341, 411))
        font = QtGui.QFont()
        font.setPointSize(22)
        self.tableWidget.setFont(font)
        self.tableWidget.setStyleSheet("QTableWidget {background: #98ADAA; color: #FFFFFF; }")
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(4)
        self.tableWidget.setRowCount(10)
        self.tableWidget.horizontalHeader().setVisible(False)
        self.tableWidget.horizontalHeader().setHighlightSections(False)
        self.tableWidget.verticalHeader().setVisible(False)
        self.tableWidget.verticalHeader().setHighlightSections(False)
        self.tableWidget.setShowGrid(False)
        # Status labels; values are static placeholders set in retranslateUi.
        self.Time = QtWidgets.QLabel(self.centralwidget)
        self.Time.setGeometry(QtCore.QRect(600, 20, 291, 151))
        font = QtGui.QFont()
        font.setPointSize(48)
        self.Time.setFont(font)
        self.Time.setObjectName("Time")
        self.temperature = QtWidgets.QLabel(self.centralwidget)
        self.temperature.setGeometry(QtCore.QRect(660, 190, 201, 91))
        font = QtGui.QFont()
        font.setPointSize(48)
        self.temperature.setFont(font)
        self.temperature.setObjectName("temperature")
        self.gas = QtWidgets.QLabel(self.centralwidget)
        self.gas.setGeometry(QtCore.QRect(660, 260, 541, 191))
        font = QtGui.QFont()
        font.setPointSize(48)
        self.gas.setFont(font)
        self.gas.setObjectName("gas")
        # Round action buttons along the bottom-right.
        self.Add = QtWidgets.QPushButton(self.centralwidget)
        self.Add.setGeometry(QtCore.QRect(540, 460, 100, 100))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Add.setFont(font)
        self.Add.setStyleSheet("QPushButton {\n"
"    color: #FFFFFF;\n"
"    border: 4px solid #FFFFFF;\n"
"    border-radius: 50;\n"
"    }\n"
"")
        # Invisible button overlaying the table; opens the full list view.
        self.ListButton = QtWidgets.QPushButton(self.centralwidget)
        self.ListButton.setGeometry(QtCore.QRect(90, 160, 341, 411))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.ListButton.setFont(font)
        self.ListButton.setStyleSheet("QPushButton {\n"
"    \n"
"    border: 0.1px solid #FFFFFF;\n"
"    \n"
"    }\n"
"")
        self.ListButton.setText("")
        self.ListButton.setObjectName("ListButton")
        self.temperature_icon = QtWidgets.QLabel(self.centralwidget)
        self.temperature_icon.setGeometry(QtCore.QRect(550, 210, 101, 71))
        self.temperature_icon.setText("")
        self.temperature_icon.setPixmap(QtGui.QPixmap(os.path.join("data", "temperature-2-64.png")))
        self.temperature_icon.setObjectName("temperature_icon")
        self.pressure_icon = QtWidgets.QLabel(self.centralwidget)
        self.pressure_icon.setGeometry(QtCore.QRect(550, 320, 71, 71))
        self.pressure_icon.setText("")
        self.pressure_icon.setPixmap(QtGui.QPixmap(os.path.join("data", "pressure-64.png")))
        self.pressure_icon.setObjectName("pressure_icon")
        # Small round leakage indicator in the top-left corner.
        self.Water = QtWidgets.QPushButton(self.centralwidget)
        self.Water.setGeometry(QtCore.QRect(10, 10, 41, 41))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Water.setFont(font)
        self.Water.setStyleSheet("QPushButton {\n"" color: #FFFFFF;\n" "\n" " border: 4px solid #FFFFFF;\n"
"    border-radius: 20;\n"
"    }\n"
"")
        self.Water.setText("")
        self.Water.setObjectName("Water")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(60, 20, 81, 21))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.learn = QtWidgets.QPushButton(self.centralwidget)
        self.learn.setGeometry(QtCore.QRect(700, 460, 100, 100))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.learn.setFont(font)
        self.learn.setStyleSheet("QPushButton {\n"
"    color: #FFFFFF;\n"
"    border: 4px solid #FFFFFF;\n"
"    border-radius: 50;\n"
"    }\n"
"")
        self.learn.setObjectName("Learn")
        self.Manual = QtWidgets.QPushButton(self.centralwidget)
        self.Manual.setGeometry(QtCore.QRect(860, 460, 100, 100))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.Manual.setFont(font)
        self.Manual.setStyleSheet("QPushButton {\n"
"    color: #FFFFFF;\n"
"    border: 4px solid #FFFFFF;\n"
"    border-radius: 50;\n"
"    }\n"
"")
        self.Manual.setObjectName("Manual")
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.Add.clicked.connect(self.addItem)
        self.learn.clicked.connect(self.learnItem)
        self.Manual.clicked.connect(self.addManual)
        self.ListButton.clicked.connect(self.showInventory)

        # Populate the inventory table once now; reload() repeats this
        # every second (previously this code was duplicated inline here).
        self.reload()

        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.reload)
        self.timer.setInterval(1000)
        self.timer.start()

    def retranslateUi(self, MainWindow):
        """Set all user-visible (placeholder) texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.inventor_title.setText(_translate(
            "MainWindow", "<html><head/><body><p><span style=\" font-size:24pt; color:#ffffff;\">Inventory</span></p></body></html>"))
        self.Time.setText(_translate(
            "MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">1:18 PM</span></p></body></html>"))
        self.temperature.setText(_translate(
            "MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">37*F</span></p></body></html>"))
        self.gas.setText(_translate(
            "MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">2.4psi</span></p></body></html>"))
        self.label.setText(_translate(
            "MainWindow", "<html><head/><body><p><span style=\" color:#ffffff;\">Leakage</span></p></body></html>"))
        self.Add.setText(_translate("MainWindow", "Add"))
        self.learn.setText(_translate("MainWindow", "Learn"))
        self.Manual.setText(_translate("MainWindow", "Manual"))

    def takeSnap(self):
        """Capture a short burst of frames from the fridge camera."""
        camera.get_frames(shape=(32, 32), count=5, interval=5)

    def addItem(self):
        """Open the prediction window used to add a recognised item."""
        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_predict()
        self.ui.setupUi(self.MainWindow)
        self.MainWindow.show()

    def reload(self):
        """Re-query the inventory database and repaint the table.

        Runs once per second via the QTimer. Items whose end_date equals
        today's date are highlighted in red.
        """
        connection = sqlite3.connect(os.path.join('data', 'item.db'))
        try:
            # fetchall() lets us close the connection before touching widgets.
            # (Previously a new connection was leaked on every tick.)
            rows = connection.execute("SELECT name,end_date FROM Inventory").fetchall()
        finally:
            connection.close()
        self.tableWidget.setRowCount(0)
        for row_number, row_data in enumerate(rows):
            self.tableWidget.insertRow(row_number)
            for column_number, data in enumerate(row_data):
                self.tableWidget.setItem(row_number, column_number, QtWidgets.QTableWidgetItem(str(data)))
                self.tableWidget.setColumnWidth(column_number, 1000)
        for num in range(len(rows)):
            if str(self.tableWidget.item(num, 1).text()) == str(date.today()):
                self.tableWidget.item(num, 0).setBackground(QtGui.QColor(246, 77, 77))

    def learnItem(self):
        """Open the window used to teach the classifier a new item."""
        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_Learn()
        self.ui.setupUi(self.MainWindow)
        self.MainWindow.show()

    def addManual(self):
        """Open the manual item-entry window."""
        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_Manual()
        self.ui.setupUi(self.MainWindow)
        self.MainWindow.show()

    def showInventory(self):
        """Open the full inventory list window."""
        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_List()
        self.ui.setupUi(self.MainWindow)
        self.MainWindow.show()
| 2.03125 | 2 |
tests/core/test_templating.py | AurelienLourot/charm-helpers | 15 | 12770363 | import pkg_resources
import shutil
import tempfile
import unittest
import jinja2
import os.path
import pwd
import grp
import mock
from charmhelpers.core import templating
# Absolute path to the template fixtures shipped alongside this test module.
TEMPLATES_DIR = pkg_resources.resource_filename(__name__, 'templates')
class TestTemplating(unittest.TestCase):
    """Tests for charmhelpers.core.templating.render."""

    def setUp(self):
        """Patch hookenv.charm_dir so templates resolve next to this module."""
        self.charm_dir = pkg_resources.resource_filename(__name__, '')
        self._charm_dir_patch = mock.patch.object(templating.hookenv,
                                                  'charm_dir')
        self._charm_dir_mock = self._charm_dir_patch.start()
        self._charm_dir_mock.side_effect = lambda: self.charm_dir

    def tearDown(self):
        self._charm_dir_patch.stop()

    @staticmethod
    def _read(path):
        """Read a file's contents, closing the handle.

        The tests previously used ``open(path).read()`` which leaked the
        file handle (a ResourceWarning under CPython, worse elsewhere).
        """
        with open(path) as f:
            return f.read()

    @mock.patch.object(templating.host.os, 'fchown')
    @mock.patch.object(templating.host, 'mkdir')
    @mock.patch.object(templating.host, 'log')
    def test_render(self, log, mkdir, fchown):
        """render() fills templates from a templates_dir and chowns the result."""
        with tempfile.NamedTemporaryFile() as fn1, \
                tempfile.NamedTemporaryFile() as fn2:
            context = {
                'nats': {
                    'port': '1234',
                    'host': 'example.com',
                },
                'router': {
                    'domain': 'api.foo.com'
                },
                'nginx_port': 80,
            }
            templating.render('fake_cc.yml', fn1.name,
                              context, templates_dir=TEMPLATES_DIR)
            # assertRegexpMatches is the py2-compatible spelling of assertRegex.
            contents = self._read(fn1.name)
            self.assertRegexpMatches(contents, 'port: 1234')
            self.assertRegexpMatches(contents, 'host: example.com')
            self.assertRegexpMatches(contents, 'domain: api.foo.com')

            templating.render('test.conf', fn2.name, context,
                              templates_dir=TEMPLATES_DIR)
            contents = self._read(fn2.name)
            self.assertRegexpMatches(contents, 'listen 80')
            self.assertEqual(fchown.call_count, 2)
            # Not called, because the target directory exists. Calling
            # it would make the target directory world readable and
            # expose your secrets (!).
            self.assertEqual(mkdir.call_count, 0)

    @mock.patch.object(templating.host.os, 'fchown')
    @mock.patch.object(templating.host, 'mkdir')
    @mock.patch.object(templating.host, 'log')
    def test_render_from_string(self, log, mkdir, fchown):
        """render() accepts an inline template string via config_template."""
        with tempfile.NamedTemporaryFile() as fn:
            context = {
                'foo': 'bar'
            }
            config_template = '{{ foo }}'
            templating.render('somefile.txt', fn.name,
                              context, templates_dir=TEMPLATES_DIR,
                              config_template=config_template)
            contents = self._read(fn.name)
            self.assertRegexpMatches(contents, 'bar')
            self.assertEqual(fchown.call_count, 1)
            # Not called, because the target directory exists. Calling
            # it would make the target directory world readable and
            # expose your secrets (!).
            self.assertEqual(mkdir.call_count, 0)

    @mock.patch.object(templating.host.os, 'fchown')
    @mock.patch.object(templating.host, 'mkdir')
    @mock.patch.object(templating.host, 'log')
    def test_render_loader(self, log, mkdir, fchown):
        """render() accepts a custom jinja2 template_loader."""
        with tempfile.NamedTemporaryFile() as fn1:
            context = {
                'nats': {
                    'port': '1234',
                    'host': 'example.com',
                },
                'router': {
                    'domain': 'api.foo.com'
                },
                'nginx_port': 80,
            }
            template_loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(TEMPLATES_DIR)])
            templating.render('fake_cc.yml', fn1.name,
                              context, template_loader=template_loader)
            contents = self._read(fn1.name)
            self.assertRegexpMatches(contents, 'port: 1234')
            self.assertRegexpMatches(contents, 'host: example.com')
            self.assertRegexpMatches(contents, 'domain: api.foo.com')

    @mock.patch.object(templating.os.path, 'exists')
    @mock.patch.object(templating.host.os, 'fchown')
    @mock.patch.object(templating.host, 'mkdir')
    @mock.patch.object(templating.host, 'log')
    def test_render_no_dir(self, log, mkdir, fchown, exists):
        """render() creates the target directory when it does not exist."""
        exists.return_value = False
        with tempfile.NamedTemporaryFile() as fn1, \
                tempfile.NamedTemporaryFile() as fn2:
            context = {
                'nats': {
                    'port': '1234',
                    'host': 'example.com',
                },
                'router': {
                    'domain': 'api.foo.com'
                },
                'nginx_port': 80,
            }
            templating.render('fake_cc.yml', fn1.name,
                              context, templates_dir=TEMPLATES_DIR)
            contents = self._read(fn1.name)
            self.assertRegexpMatches(contents, 'port: 1234')
            self.assertRegexpMatches(contents, 'host: example.com')
            self.assertRegexpMatches(contents, 'domain: api.foo.com')

            templating.render('test.conf', fn2.name, context,
                              templates_dir=TEMPLATES_DIR)
            contents = self._read(fn2.name)
            self.assertRegexpMatches(contents, 'listen 80')
            self.assertEqual(fchown.call_count, 2)
            # Target directory was created, world readable (!).
            self.assertEqual(mkdir.call_count, 2)

    @mock.patch.object(templating.host.os, 'fchown')
    @mock.patch.object(templating.host, 'log')
    def test_render_2(self, log, fchown):
        """render() honours explicit owner/group arguments."""
        tmpdir = tempfile.mkdtemp()
        fn1 = os.path.join(tmpdir, 'test.conf')
        try:
            context = {'nginx_port': 80}
            templating.render('test.conf', fn1, context,
                              owner=pwd.getpwuid(os.getuid()).pw_name,
                              group=grp.getgrgid(os.getgid()).gr_name,
                              templates_dir=TEMPLATES_DIR)
            contents = self._read(fn1)
            self.assertRegexpMatches(contents, 'something')
        finally:
            shutil.rmtree(tmpdir, ignore_errors=True)

    @mock.patch.object(templating, 'hookenv')
    @mock.patch('jinja2.Environment')
    def test_load_error(self, Env, hookenv):
        """A missing template raises TemplateNotFound and is logged."""
        Env().get_template.side_effect = jinja2.exceptions.TemplateNotFound(
            'fake_cc.yml')
        self.assertRaises(
            jinja2.exceptions.TemplateNotFound, templating.render,
            'fake.src', 'fake.tgt', {}, templates_dir='tmpl')
        hookenv.log.assert_called_once_with(
            'Could not load template fake.src from tmpl.', level=hookenv.ERROR)
| 2.390625 | 2 |
doc/source/EXAMPLES/kmpfit_ODRsinus.py | kapteyn-astro/kapteyn | 3 | 12770364 | #!/usr/bin/env python
#------------------------------------------------------------
# Purpose:    Program finds best-fit parameters of a model
# a*sin(bx+c) with data with errors in both variables
# x and y. It uses the effective variance method for
# kmpfit and the results are compared with SciPy's
# ODR routine.
# It can be used to demonstrate the sensitivity of
# the fit process to initial estimates by varying
# values for beta0
# Vog, 09 Dec, 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from numpy.random import normal
from kapteyn import kmpfit
def model(p, x):
    """Evaluate the sinusoidal model y = a * sin(b*x + c)."""
    amplitude, angular_freq, phase = p
    return amplitude * numpy.sin(angular_freq * x + phase)

def residuals(p, data):
    """Weighted residuals for errors in both x and y (effective variance).

    The x-error is propagated through the model derivative
    d/dx[a*sin(bx+c)] = a*b*cos(bx+c) and folded into the weights.
    """
    a, b, c = p
    x, y, ex, ey = data
    slope = a * b * numpy.cos(b * x + c)
    variance = ey * ey + slope ** 2 * ex * ex
    # Zero variance would divide by zero; give those points zero weight.
    weights = numpy.sqrt(numpy.where(variance == 0.0, 0.0, 1.0 / variance))
    return weights * (y - model(p, x))

def residuals2(p, data):
    """Weighted residuals for data with errors in y only."""
    a, b, c = p
    x, y, ey = data
    weights = numpy.where(ey == 0.0, 0.0, 1.0 / ey)
    return weights * (y - model(p, x))
# Generate noisy data points around the true model a0*sin(b0*x + c0),
# with Gaussian scatter on y and synthetic error bars on both axes.
N = 30
a0 = 2; b0 = 1; c0 = 1
x = numpy.linspace(-3, 7.0, N)
y = model((a0,b0,c0),x) + normal(0.0, 0.3, N)
errx = normal(0.1, 0.2, N)
erry = normal(0.1, 0.3, N)

# It is important to start with realistic initial estimates
beta0 = [1.8,0.9,0.9]
print("\nODR:")
print("==========")
# NOTE(review): ``odr_stop`` is imported but never used here, and it is
# absent from recent SciPy releases — confirm the pinned SciPy version.
from scipy.odr import Data, Model, ODR, RealData, odr_stop
linear = Model(model)
mydata = RealData(x, y, sx=errx, sy=erry)
myodr = ODR(mydata, linear, beta0=beta0, maxit=5000)
myoutput = myodr.run()
print("Fitted parameters:      ", myoutput.beta)
print("Covariance errors:      ", numpy.sqrt(myoutput.cov_beta.diagonal()))
print("Standard errors:        ", myoutput.sd_beta)
print("Minimum chi^2:          ", myoutput.sum_square)
print("Minimum (reduced)chi^2: ", myoutput.res_var)
beta = myoutput.beta

# Prepare fit routine: kmpfit with the effective-variance merit function,
# started from the same initial estimates as ODR for comparison.
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errx, erry))
fitobj.fit(params0=beta0)
print("\n\n======== Results kmpfit with effective variance =========")
print("Fitted parameters:      ", fitobj.params)
print("Covariance errors:      ", fitobj.xerror)
print("Standard errors:        ", fitobj.stderr)
print("Chi^2 min:              ", fitobj.chi2_min)
print("Reduced Chi^2:          ", fitobj.rchi2_min)
print("Status Message:", fitobj.message)

# Compare to a fit with weights for y only
fitobj2 = kmpfit.Fitter(residuals=residuals2, data=(x, y, erry))
fitobj2.fit(params0=beta0)
print("\n\n======== Results kmpfit errors in Y only =========")
print("Fitted parameters:      ", fitobj2.params)
print("Covariance errors:      ", fitobj2.xerror)
print("Standard errors:        ", fitobj2.stderr)
print("Chi^2 min:              ", fitobj2.chi2_min)
print("Reduced Chi^2:          ", fitobj2.rchi2_min)
print("Status Message:", fitobj2.message)

# Some plotting: data with error bars plus the three fits and the truth.
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure(1)
frame = fig.add_subplot(1,1,1, aspect=1, adjustable='datalim')
frame.errorbar(x, y, xerr=errx, yerr=erry,  fmt='bo')
# Plot first fit
frame.plot(x, model(beta,x), '-y', lw=4, label="SciPy's ODR", alpha=0.6)
frame.plot(x, model(fitobj.params,x), 'c', ls='--', lw=2, label="kmpfit (errors in X & Y")
frame.plot(x, model(fitobj2.params,x), 'm', ls='--', lw=2, label="kmpfit (errors in Y only)")
frame.plot(x, model((a0,b0,c0),x), 'r', label="Model with true parameters")
frame.set_xlabel("X")
frame.set_ylabel("Y")
# NOTE(review): "\," and "\s" are unrecognized escape sequences (SyntaxWarning
# on Python 3.12+); consider making this a raw string.
frame.set_title("ODR and kmpfit with weighted fit. Model: $y=a\,\sin(bx+c)$")
frame.grid(True)
leg = frame.legend(loc=2)
show()
inventory/migrations/0001_initial.py | balexander85/inventory | 0 | 12770365 | <reponame>balexander85/inventory
# Generated by Django 2.1 on 2018-08-18 20:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Item, ItemAttachment and ItemType models.

    Auto-generated by Django 2.1. Applied migrations must stay stable, so the
    operations below are left exactly as generated.
    """

    initial = True

    dependencies = []

    operations = [
        # Core inventory item with value, insurance flag and optional image.
        migrations.CreateModel(
            name="Item",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=50)),
                ("insured", models.BooleanField(default=False)),
                (
                    "description",
                    models.CharField(blank=True, default="", max_length=500),
                ),
                (
                    "value",
                    models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
                ),
                (
                    "profile_image",
                    models.ImageField(
                        blank=True,
                        null=True,
                        upload_to="images/%Y/%m",
                        verbose_name="Image",
                    ),
                ),
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("last_updated", models.DateTimeField(auto_now=True)),
            ],
        ),
        # File attachment linked to an Item (e.g. receipts, manuals).
        migrations.CreateModel(
            name="ItemAttachment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=20)),
                (
                    "attachment",
                    models.FileField(
                        blank=True,
                        null=True,
                        upload_to="files/%Y/%m",
                        verbose_name="Attachment",
                    ),
                ),
                (
                    "item",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="inventory.Item",
                    ),
                ),
            ],
        ),
        # Simple lookup table categorising items.
        migrations.CreateModel(
            name="ItemType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=20)),
            ],
        ),
        # Added after ItemType exists so the FK target is defined.
        migrations.AddField(
            model_name="item",
            name="item_type",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="inventory.ItemType",
            ),
        ),
    ]
| 1.867188 | 2 |
kerastools/layers/horde_layers.py | gyfastas/ICCV2019-Horde | 84 | 12770366 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
from kerastools.initializers import RandomMaclaurin
class CompactKOrderPooling(Layer):
    """ Keras layer to compute K-th order moments representation. In the non-trainable case, the Random Maclaurin
    initialization is used while in trainable mode we simply initialize the weights with Glorot uniform initializer.

    :param output_dim: Dimension of the high-order representation.
    :param ho_trainable: if the weights for high-order approximation are trainable.
    """
    def __init__(self,
                 output_dim,
                 ho_trainable=False,
                 **kwargs):
        super(CompactKOrderPooling, self).__init__(**kwargs)
        self.ho_trainable = ho_trainable
        self.output_dim = output_dim
        self.k_order_weights = []  # one 1x1 projection per input tensor
        self.order = 0             # number of inputs, counted in build()

        if ho_trainable:
            self.init_func = "glorot_uniform"
        else:
            self.init_func = RandomMaclaurin()

    def build(self, input_shape):
        # ``input_shape`` is a list of shapes: one projection weight per input.
        for k_shape in input_shape:
            self.order += 1
            self.k_order_weights.append(self.add_weight(name='W' + str(self.order),
                                                        shape=(1, 1, int(k_shape[-1]), self.output_dim),
                                                        initializer=self.init_func,
                                                        trainable=self.ho_trainable,
                                                        constraint=None))
        super(CompactKOrderPooling, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Project each input with a 1x1 conv and multiply them element-wise."""
        if type(inputs) is not list or len(inputs) != self.order:
            # NOTE(review): message says "Compact Bilinear Pooling" although
            # the class is CompactKOrderPooling — kept as-is.
            raise Exception('Compact Bilinear Pooling must be called '
                            'on a list of ' + str(self.order) + ' tensors. Got: ' + str(inputs))

        T = 1.
        for k, inp in enumerate(inputs):
            # Element-wise product of the k projected inputs approximates the
            # K-th order interaction.
            T *= tf.nn.conv2d(input=inp,
                              filter=self.k_order_weights[k],
                              strides=[1, 1, 1, 1],
                              padding="SAME",
                              dilations=[1, 1, 1, 1])  # shape = bs x W x H x dim_intermediate

        return T

    def compute_output_shape(self, input_shape):
        # Spatial dims are preserved; channels become ``output_dim``.
        return input_shape[0][0], input_shape[0][1], input_shape[0][2], self.output_dim

    def get_config(self):
        """Return the layer configuration for serialization."""
        base_config = super(CompactKOrderPooling, self).get_config()
        config = {"output_dim": self.output_dim,
                  "ho_trainable": self.ho_trainable}
        return dict(list(base_config.items()) + list(config.items()))
class PartialKOrderBlock(Layer):
    """ Keras layer to compute approximate bilinear product with either trainable weights or Random Maclaurin init.

    Arguments:
        output_dim: Dimension of the representation.
        only_project_second: Do not add learnable weights for the second entry (cascaded implementation)
        ho_trainable: make high-order weights trainable or not.

    Returns:
        A Keras layer.
    """
    def __init__(self,
                 output_dim,
                 only_project_second=True,
                 ho_trainable=True,
                 **kwargs):
        self.ho_trainable = ho_trainable
        self.output_dim = output_dim
        self.only_project_second = only_project_second

        if ho_trainable:
            self.init_func = "glorot_uniform"
        else:
            self.init_func = RandomMaclaurin()

        super(PartialKOrderBlock, self).__init__(**kwargs)

    def build(self, input_shape):
        # Second input always gets a 1x1 projection to ``output_dim``.
        self.second_block_dim = int(input_shape[1][-1])
        self.proj = self.add_weight(name='w',
                                    shape=(1, 1, self.second_block_dim, self.output_dim),
                                    initializer=self.init_func,
                                    trainable=self.ho_trainable,
                                    constraint=None)
        # The first input is only projected when not in cascaded mode.
        if not self.only_project_second:
            self.first_block_dim = int(input_shape[0][-1])
            self.first_proj = self.add_weight(name='w_first',
                                              shape=(1, 1, self.first_block_dim, self.output_dim),
                                              initializer=self.init_func,
                                              trainable=self.ho_trainable,
                                              constraint=None)

        super(PartialKOrderBlock, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Return the element-wise (Hadamard) product of the projected inputs."""
        if type(inputs) is not list or len(inputs) != 2:
            raise Exception('Partial Hadamard Block must be called '
                            'on a list of 2 tensors. Got: {}'.format(inputs))

        first_block, second_block = inputs
        second_block = tf.nn.conv2d(input=second_block,
                                    filter=self.proj,
                                    strides=[1, 1, 1, 1],
                                    padding="VALID",
                                    dilations=[1, 1, 1, 1])  # shape = bs x W x H x dim_intermediate

        if not self.only_project_second:
            first_block = tf.nn.conv2d(input=first_block,
                                       filter=self.first_proj,
                                       strides=[1, 1, 1, 1],
                                       padding="VALID",
                                       dilations=[1, 1, 1, 1])  # shape = bs x W x H x dim_intermediate

        return first_block * second_block

    def compute_output_shape(self, input_shape):
        # Spatial dims of the first input are preserved; channels become
        # ``output_dim``.
        return input_shape[0][0], input_shape[0][1], input_shape[0][2], self.output_dim

    def get_config(self):
        """Return the layer configuration for serialization."""
        base_config = super(PartialKOrderBlock, self).get_config()
        config = {'output_dim': self.output_dim,
                  'only_project_second': self.only_project_second,
                  'ho_trainable': self.ho_trainable}
        return dict(list(base_config.items()) + list(config.items()))
# Short public aliases for the layers above.
CKOP = CompactKOrderPooling
PKOB = PartialKOrderBlock
| 3.09375 | 3 |
setup.py | bastiendonjon/sanic-jwt | 0 | 12770367 | <filename>setup.py
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Dependencies needed to run the test-suite.
tests_require = [
    "coverage",
    "freezegun",
    # 'pytest-cache',
    "pytest-cov",
    "pytest-flakes",
    "pytest-pep8",
    # 'pytest-sanic',
    "pytest-asyncio",
    "pytest",
    "sanic",
    "pyjwt",
    # 'uvloop', # XXX setup crashes by not being 'able to create executables'
    "cryptography",
    "codecov",
]

# Optional dependency groups, installable as e.g. ``pip install sanic-jwt[docs]``.
extras_require = {
    "docs": [
        # 'sphinx_rtd_theme',
        "Sphinx"
    ],
    "tests": tests_require,
}

# Bug fix: the previous code inserted an empty "all" entry first and then
# extended it while iterating ``extras_require.values()``; the "all" list was
# therefore fed back into itself and every requirement ended up duplicated.
# Building "all" from the other groups *before* inserting the key avoids that.
extras_require["all"] = [req for reqs in extras_require.values() for req in reqs]

setup_requires = ["pytest-runner"]

install_requires = ["pyjwt"]
# Package metadata and entry point for setuptools.
setup(
    name="sanic-jwt",
    version="1.1.2",
    description="JWT oauth flow for Sanic",
    url="https://github.com/ahopkins/sanic-jwt",
    download_url="https://github.com/ahopkins/sanic-jwt/archive/master.zip",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    keywords="sanic oauth authentication jwt",
    # Ship everything except the example app and the test-suite.
    packages=find_packages(exclude=["example", "tests"]),
    install_requires=install_requires,
    extras_require=extras_require,
    setup_requires=setup_requires,
    tests_require=tests_require,
    package_data={},
)
| 1.53125 | 2 |
customExceptions.py | spark-156/python-playbook | 0 | 12770368 | instrument_familes = {
'Strings': ['Guitar', 'Banjo', 'Sitar'],
'Percussion': ['Conga', 'Cymbal', 'Cajon'],
'woodwinds': ['Flute', 'Oboe', 'Clarinet']
}
class KeyError(Exception):
    """Custom exception for a missing instrument family.

    WARNING: this shadows the built-in ``KeyError`` for the rest of the
    module — any ``except KeyError`` written below this point catches this
    class, not the built-in one. Consider renaming (e.g. FamilyKeyError).
    """

    def __init__(self, key):
        # The missing dictionary key, kept for the error message.
        self.key = key

    def __str__(self) -> str:
        return f"Key {self.key} does not exist"
def print_instrument_families() -> None:
    """Print the known instruments grouped by family.

    Raises:
        KeyError: the module's *custom* KeyError if a family name is missing
            from ``instrument_familes``.
    """
    for family in ['Strings', 'Percussion', 'woodwinds']:
        try:
            # Bug fix: the original concatenation lacked the space before
            # "family", printing e.g. "...in the Stringsfamily are".
            print('Some instruments in the ' + family + ' family are: '
                  + ', '.join(instrument_familes[family]))
        except Exception as err:
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the original
            # lookup failure stays visible in the traceback.
            raise KeyError(family) from err
print_instrument_families()
| 3.609375 | 4 |
shuffleindex/utils.py | escudocloud/distributed-shuffleindex | 1 | 12770369 | <reponame>escudocloud/distributed-shuffleindex
from six.moves import xrange
import struct
ENC = 'UTF-16LE'  # little endian (LE) so no BOM character is emitted
FMT = '60s'       # fixed-width struct field of 60 bytes

def encode(data, fmt=FMT, enc=ENC):
    """Encode *data* into a fixed-width binary field using *fmt* and *enc*."""
    raw = data.encode(enc)
    return struct.pack(fmt, raw)

def decode(data, fmt=FMT, enc=ENC):
    """Decode a fixed-width binary field back into a string.

    Struct padding bytes decode to NUL characters; everything from the first
    NUL onwards is stripped.
    """
    (raw,) = struct.unpack(fmt, data)
    text = raw.decode(enc)
    return text.partition('\0')[0]
def chunks(enum, n):
    """Yield successive n-sized chunks from enum"""
    start = 0
    total = len(enum)
    while start < total:
        yield enum[start:start + n]
        start += n
| 2.703125 | 3 |
src/luh3417/replace/__main__.py | WithIO/luh3417 | 0 | 12770370 | from argparse import ArgumentParser
from typing import Optional, Sequence, Text
from luh3417.serialized_replace import walk
from luh3417.utils import make_doer, run_main, setup_logging
doing = make_doer("luh3417.replace")
def parse_args(argv: Optional[Sequence[Text]] = None):
    """Parse command-line arguments for the serialized search-and-replace.

    Args:
        argv: argument list to parse; ``None`` means ``sys.argv[1:]``.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = ArgumentParser(description="Seeks and replaces serialized values")

    parser.add_argument("-i", "--input", required=True, help="Input file name")
    parser.add_argument("-o", "--output", required=True, help="Output file name")
    # Bug fix: these were optional before, so omitting them crashed later
    # with ``TypeError: object of type 'NoneType' has no len()``.
    parser.add_argument(
        "-b", "--before", nargs="+", required=True, help="String(s) to look for"
    )
    parser.add_argument(
        "-a", "--after", nargs="+", required=True, help="String(s) to replace by"
    )
    parser.add_argument(
        "-c", "--charset", default="utf-8", help="What charset to use to read the file"
    )

    args = parser.parse_args(argv)

    if len(args.before) != len(args.after):
        # parser.error() raises SystemExit(2); the unreachable ``exit(1)``
        # that followed it in the original has been removed.
        parser.error("Not the same number of --before and --after")

    return args
def main(argv: Optional[Sequence[Text]] = None):
    """Run the serialized search-and-replace over the input file, line by line."""
    args = parse_args(argv)
    setup_logging()

    # Pair up each "before" string with its "after" counterpart, encoded
    # with the user-selected charset.
    before = [s.encode(args.charset) for s in args.before]
    after = [s.encode(args.charset) for s in args.after]
    replacements = list(zip(before, after))

    with open(args.input, "rb") as src, open(args.output, "wb") as dst:
        for line in src:
            dst.write(walk(line, replacements))
def __main__():
    """Console-script entry point: run ``main`` with shared error handling."""
    return run_main(main, doing)

if __name__ == "__main__":
    __main__()
| 3.421875 | 3 |
to_move/streamlit-simple/app.py | PietroAnsidei/examples | 5 | 12770371 | <filename>to_move/streamlit-simple/app.py
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import streamlit as st
from util import get_results, render_results
# Page header.
st.title("South Park Search")
st.write("Who said what?")

# Sidebar: query input and result-count slider.
st.sidebar.title("Search with Jina")
query = st.sidebar.text_input("What do you wish to search?")
top_k = st.sidebar.slider("Top K", min_value=1, max_value=20, value=10)

# Only hit the search backend when the button is pressed, then render matches.
if st.sidebar.button("Search"):
    results = get_results(query=query, top_k=top_k)
    st.balloons()
    st.markdown(render_results(results))
| 2.78125 | 3 |
03_zhihu/start.py | GongkunJiang/MySpider | 0 | 12770372 | # coding=utf-8
from scrapy import cmdline
# Programmatic equivalent of running ``scrapy crawl zhihuSpider`` in a shell.
# cmdline.execute('scrapy crawl itcast'.split())
cmdline.execute('scrapy crawl zhihuSpider'.split())
Code/MQTT_thing_client_94/src/TimeResource.py | tobiasritscher/SmartHumidor | 0 | 12770373 | <reponame>tobiasritscher/SmartHumidor
#!/usr/bin/env python3
'''
___ ___ ___
___ /\__\ /\ \ /\ \
/\ \ /::| | /::\ \ /::\ \
\:\ \ /:|:| | /:/\:\ \ /:/\ \ \
/::\__\ /:/|:| |__ /::\~\:\ \ _\:\~\ \ \
__/:/\/__/ /:/ |:| /\__\ /:/\:\ \:\__\ /\ \:\ \ \__\
/\/:/ / \/__|:|/:/ / \:\~\:\ \/__/ \:\ \:\ \/__/
\::/__/ |:/:/ / \:\ \:\__\ \:\ \:\__\
\:\__\ |::/ / \:\ \/__/ \:\/:/ /
\/__/ /:/ / \:\__\ \::/ /
\/__/ \/__/ \/__/
File: TimeResource.py
Purpose: Derived class from the
python internal thread class.
                This resource's purpose
is to get the operating
system's time and publish
it under a MQTT topic.
The querying time interval of
the resource is currently
set to 2 seconds.
Remarks: -
Author: <NAME> <<EMAIL>>
Date: 10/2016
'''
import threading
import datetime
import time
import log
import mqttconfig
# How often the worker thread re-checks the shared ``running`` flag while
# sleeping between publishes.
ALIVE_CHECK_INTERVAL_IN_MILLIS = int( 100 )
ALIVE_CHECK_INTERVAL_IN_S = float( ALIVE_CHECK_INTERVAL_IN_MILLIS / 1000 )

# logging setup
logger = log.setup_custom_logger( "mqtt_thing_time_resource" )
class TimeResource( threading.Thread ):
    """Worker thread that periodically publishes the OS time over MQTT.

    The shared boolean ``running`` (guarded by ``lock``) is polled every
    ALIVE_CHECK_INTERVAL_IN_S seconds so the thread can stop promptly even
    during a long publish interval.
    """

    def __init__( self, lock, \
                  mqtt_client, \
                  running, \
                  pub_topic, \
                  pub_interval = float( 2.0 ) ):
        # must be called ...
        threading.Thread.__init__( self )
        self.lock = lock                  # guards access to ``running``
        self.mqtt_client = mqtt_client    # connected MQTT client to publish on
        self.running = running            # shared stop flag, read under lock
        self.pub_topic = pub_topic        # topic the timestamp is published to
        self.pub_interval = pub_interval  # unit is seconds ...

    def query_system_time( self ):
        """Publish the current system time until ``running`` becomes False."""
        keep_querying = bool( False )
        pub_interval_in_millis = int( self.pub_interval * 1000 )
        # Number of short sleeps that make up one publish interval.
        sleep_periods = int( pub_interval_in_millis // ALIVE_CHECK_INTERVAL_IN_MILLIS )

        self.lock.acquire()
        keep_querying = self.running
        self.lock.release()

        while keep_querying:
            payload = str( datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S" ) )
            self.mqtt_client.publish( self.pub_topic, str( payload ), \
                                      mqttconfig.QUALITY_OF_SERVICE, False )

            # Sleep in small slices, re-checking the stop flag each time so a
            # shutdown request is honoured within one check interval.
            for _ in range( 0, sleep_periods ):
                time.sleep( ALIVE_CHECK_INTERVAL_IN_S )

                self.lock.acquire()
                keep_querying = self.running
                self.lock.release()

                if not keep_querying:
                    break

    def run( self ):
        # Thread entry point (invoked by Thread.start()).
        self.query_system_time()
| 2.328125 | 2 |
__init__.py | MattDavies01/Rocket.PY | 1 | 12770374 | # -*- coding: utf-8 -*-
"""
RocketPy is a trajectory simulation for High-Power Rocketry built by
<NAME>. The code allows for a complete 6 degrees of freedom simulation
of a rocket's flight trajectory, including high fidelity variable mass effects
as well as descent under parachutes. Weather conditions, such as wind profile,
can be imported from sophisticated datasets, allowing for realistic scenarios.
Furthermore, the implementation facilitates complex simulations, such as
multi-stage rockets, design and trajectory optimization and dispersion analysis.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 20XX, Project Geronimo"
__license__ = "APACHE LICENSE"
__Version__ = "VERSION 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Standard library.
import re
import math
import bisect
import warnings
import time
from datetime import datetime, timedelta
from inspect import signature, getsourcelines
from collections import namedtuple

# Third-party scientific stack.
import numpy as np
from scipy import integrate
# Bug fix: SciPy's linear-algebra subpackage is ``linalg``; the previous
# ``from scipy import linagl`` raised ImportError on package import.
from scipy import linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

# Package modules.
# NOTE(review): the spellings below ("Fucntion", "Enviroment") look like
# typos but must match the actual module/class names in this package —
# confirm before renaming.
from .Fucntion import Function
from .Environment import Enviroment
from .SolidMotor import SolidMotor
from .Rocket import Rocket
from .Flight import Flight
projects/webptspy/webpts/celery.py | codelieche/testing | 2 | 12770375 | <reponame>codelieche/testing
# -*- coding:utf-8 -*-
from __future__ import absolute_import
import os
import django
from celery import Celery
from django.conf import settings
# Make sure Django knows which settings module to use before Celery loads it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webpts.settings')
# django.setup()

# The broker/backend can be passed directly when instantiating Celery, or be
# configured via BROKER_URL in the Django settings (the approach used below).
app = Celery('webpts')

'''
BROKER_URL = 'redis://127.0.0.1:6379/0'
BACKEND_URL = 'redis://127.0.0.1:6379/1'
app = Celery('webpts', broker=BROKER_URL, backend=BACKEND_URL)
'''

# Pull Celery configuration from the Django settings object and auto-discover
# ``tasks.py`` modules in every installed app.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 1.914063 | 2 |
schema.py | jfjallid/GreenIoT-GraphQL-Demo | 0 | 12770376 | <reponame>jfjallid/GreenIoT-GraphQL-Demo
from graphene import ObjectType, Field, String, Float, DateTime, List, Int, Boolean
from collections import namedtuple
import json
from elasticsearch import Elasticsearch
from dateutil.parser import parse
import datetime
import os
# Elasticsearch connection settings, overridable via environment variables.
# ``or`` (rather than a ``get`` default) preserves the original behaviour of
# falling back when the variable is set but empty.
elastic_host = os.environ.get('ELASTIC_HOST') or 'localhost'
# Environment values arrive as strings; normalise to int so the client always
# receives the same type as the hard-coded default.
elastic_port = int(os.environ.get('ELASTIC_PORT') or 9200)

es = Elasticsearch([{'host': elastic_host, 'port': elastic_port}])
class CustomGrapheneDateTime(DateTime):
    """DateTime scalar that also accepts ISO-formatted strings.

    Elasticsearch returns timestamps as strings; parse them into real
    ``datetime`` objects before delegating to graphene's serializer.
    """

    @staticmethod
    def serialize(date):
        # ``date`` may already be a datetime; only parse when it is a string.
        if isinstance(date, str):
            date = parse(date)
        return DateTime.serialize(date)
class Measurement(ObjectType):
    """GraphQL type for a single SenML-style sensor measurement document."""

    n = String(description='Sensor name')
    u = String(description='Measurement unit e.g., %RH')
    v = Float(description='Integer value')
    vs = String(description='String value')
    vb = Boolean(description='Boolean value')
    ut = Float(description='Update time')
    sum = Float(description='Sum')
    uuid = String(description='Unique measurement ID')
    timestamp = CustomGrapheneDateTime(description='Timestamp for when measurement was received')
class Aggregate(ObjectType):
    """GraphQL type for an aggregated (averaged) sensor value."""

    avg = Float(description='Average value of selected sensor type')
    unit = String(description='Unit of measurement')
def _json_object_hook(d):
return namedtuple('X', d.keys())(*d.values())
def _json2obj(data):
    """Parse a JSON string into nested namedtuples (attribute access)."""
    return json.loads(data, object_hook=_json_object_hook)
def _parse_date(date_string):
try:
date = datetime.datetime.strptime(date_string, '%Y-%m-%dt%H:%M:%S')
except ValueError:
raise ValueError("Incorrect date format, should be yyyy-MM-dd'T'HH:mm:ss")
return date
class Query(ObjectType):
    """Root GraphQL query: raw measurement retrieval and averages."""

    measurements = List(
        Measurement,
        description='Retrieve measurements based on name and type.',
        sensor_name=String(description='name of the sensor e.g., urn:dev:mac:fcc23d000000050f'),
        amount=Int(description='Number of measurements to retrieve. 1-100, default is 10.'),
        sensor_type=String(
            description='Choose sensor type from: temp, humidity, pressure, pm1, pm2_5, pm10, no2'
        ),
        from_date=String(description="UTC Timestamp: yyyy-MM-dd'T'HH:mm:ss, e.g. 2019-01-01T10:00:00"),
        to_date=String(description="UTC Timestamp: yyyy-MM-dd'T'HH:mm:ss, e.g. 2019-01-07T10:00:00"),
    )
    avgbydate = Field(
        Aggregate,
        description='Calculate average value of measurement type, e.g., average temp.',
        sensor_type=String(
            description='Choose sensor type from: temp, humidity, pressure, pm1, pm2_5, pm10, no2'
        ),
        from_date=String(description="UTC Timestamp: yyyy-MM-dd'T'HH:mm:ss, e.g. 2019-01-01T10:00:00"),
        to_date=String(description="UTC Timestamp: yyyy-MM-dd'T'HH:mm:ss, e.g. 2019-01-07T10:00:00"),
    )

    def resolve_measurements(
        _self, info, sensor_name=None, amount=10, sensor_type=None, from_date=None, to_date=None, **kwargs
    ):
        """Fetch raw measurements from Elasticsearch, oldest first."""
        allowed_types = ['temp', 'humidity', 'pressure', 'pm1', 'pm2_5', 'pm10', 'no2', 'SoC:temp', 'WiFi:ESSID',
                         'WiFi:ch', 'hostname', 'uptime']
        # Unknown types and out-of-range amounts silently fall back to
        # defaults instead of erroring.
        if sensor_type not in allowed_types:
            sensor_type = None
        if (amount < 0) or (amount > 100):
            amount = 10
        if not from_date:
            # Default to 1 week back in time
            from_date = (datetime.datetime.utcnow() - datetime.timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S')
        if not to_date:
            to_date = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        # Same-day queries can hit a single daily index; otherwise wildcard.
        if _parse_date(from_date).date() == _parse_date(to_date).date():  # Same day
            index_name = f"measurements-{from_date.split('T')[0]}"
        else:
            index_name = 'measurements-*'
        if not sensor_name:
            sensor_name = 'urn:dev'
        if sensor_type:
            n_query = f'{sensor_name}*{sensor_type}'
        else:
            n_query = f'{sensor_name}*'
        query = {
            'query': {
                'bool': {
                    'filter': [
                        {'wildcard': {'n.keyword': n_query}},
                        {
                            'range': {
                                'timestamp': {
                                    'from': from_date,
                                    'to': to_date,
                                }
                            }
                        }
                    ]
                }
            },
            'size': amount,
            "sort": [
                {"timestamp": {"order": "asc"}}
            ]
        }
        res = es.search(index=index_name, body=query)['hits']['hits']
        # Convert each document into an attribute-accessible namedtuple.
        return [_json2obj(json.dumps(x['_source'])) for x in res]

    def resolve_avgbydate(_self, info, sensor_type=None, from_date=None, to_date=None, **kwargs):
        """Average one sensor type over a date range via an ES aggregation."""
        allowed_types = ['temp', 'humidity', 'pressure', 'pm1', 'pm2_5', 'pm10', 'no2']
        if sensor_type not in allowed_types:
            print(f'Not allowed type: {sensor_type}, using temp instead!')  # Change to logging.info
            sensor_type = 'temp'
        if not from_date:
            # Default to 1 week back in time
            from_date = (datetime.datetime.utcnow() - datetime.timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S')
        if not to_date:
            to_date = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        # Validate the date strings (raises ValueError on bad input).
        _parse_date(from_date)
        _parse_date(to_date)
        query = {
            'query': {
                'bool': {
                    'filter': [
                        {'wildcard': {'n': sensor_type}},
                        {'range': {'timestamp': {'from': from_date, 'to': to_date}}}
                    ]
                }
            },
            '_source': 'false',
            'aggs': {
                'avg': {'avg': {'field': 'v'}},
                'units': {'terms': {'field': 'u.keyword', 'size': '2'}}
            }
        }
        res = es.search(index='measurements-*', body=query, filter_path='aggregations')
        data = dict()
        data['avg'] = res['aggregations']['avg']['value']
        # An average across mixed units would be meaningless — refuse it.
        buckets = len(res['aggregations']['units']['buckets'])
        if buckets > 1:
            raise Exception("Multiple different units in aggregation!")
        elif buckets == 1:
            data['unit'] = res['aggregations']['units']['buckets'][0]['key']
        return _json2obj(json.dumps(data))
| 2.609375 | 3 |
nndet/core/boxes/anchors.py | joeranbosma/nnDetection | 242 | 12770377 | <reponame>joeranbosma/nnDetection
"""
Parts of this code are from torchvision and thus licensed under
BSD 3-Clause License
Copyright (c) <NAME> 2016,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
from typing import Callable, Sequence, List, Tuple, TypeVar, Union
from torchvision.models.detection.rpn import AnchorGenerator
from loguru import logger
from itertools import product
AnchorGeneratorType = TypeVar('AnchorGeneratorType', bound=AnchorGenerator)
def get_anchor_generator(dim: int, s_param: bool = False) -> AnchorGenerator:
    """
    Get anchor generator class for corresponding dimension

    Args:
        dim: number of spatial dimensions
        s_param: enable size parametrization

    Returns:
        Callable: class of anchor generator
    """
    if s_param:
        generators = {2: AnchorGenerator2DS, 3: AnchorGenerator3DS}
    else:
        generators = {2: AnchorGenerator2D, 3: AnchorGenerator3D}
    return generators[dim]
def compute_anchors_for_strides(anchors: torch.Tensor,
                                strides: Sequence[Union[Sequence[Union[int, float]], Union[int, float]]],
                                cat: bool) -> Union[List[torch.Tensor], torch.Tensor]:
    """
    Compute anchors sizes which follow a given sequence of strides

    Args:
        anchors: anchors for stride 0
        strides: sequence of strides to adjust anchors for
        cat: concatenate resulting anchors, if false a Sequence of Anchors
            is returned

    Returns:
        Union[List[torch.Tensor], torch.Tensor]: new anchors
    """
    dim = anchors.shape[1] // 2
    results = [anchors]
    for stride in strides:
        # A scalar stride applies uniformly to every spatial axis.
        if isinstance(stride, (int, float)):
            stride = [stride] * dim
        # Anchor layout is (x1, y1, x2, y2[, z1, z2]); scale each coordinate
        # by the stride of its axis.
        scale = [stride[0], stride[1], stride[0], stride[1]]
        if dim == 3:
            scale.extend([stride[2], stride[2]])
        results.append(anchors * torch.tensor(scale)[None].float())

    if cat:
        return torch.cat(results, dim=0)
    return results
class AnchorGenerator2D(torch.nn.Module):
    def __init__(self, sizes: Sequence[Union[int, Sequence[int]]] = (128, 256, 512),
                 aspect_ratios: Sequence[Union[float, Sequence[float]]] = (0.5, 1.0, 2.0),
                 **kwargs):
        """
        Generator for anchors
        Modified from https://github.com/pytorch/vision/blob/master/torchvision/models/detection/rpn.py

        Args:
            sizes (Sequence[Union[int, Sequence[int]]]): anchor sizes for each feature map
                (length should match the number of feature maps)
            aspect_ratios (Sequence[Union[float, Sequence[float]]]): anchor aspect ratios:
                height/width, e.g. (0.5, 1, 2). if Seq[Seq] is provided, it should have
                the same length as sizes
        """
        super().__init__()

        # Normalise both arguments to one sequence of options per feature map.
        if not isinstance(sizes[0], (list, tuple)):
            sizes = tuple((s,) for s in sizes)
        if not isinstance(aspect_ratios[0], (list, tuple)):
            aspect_ratios = (aspect_ratios,) * len(sizes)
        assert len(sizes) == len(aspect_ratios)

        self.sizes = sizes
        self.aspect_ratios = aspect_ratios
        self.cell_anchors = None  # lazily built per-level base anchors
        self._cache = {}          # grid/stride combination -> generated anchors
        self.num_anchors_per_level: List[int] = None

        if kwargs:
            logger.info(f"Discarding anchor generator kwargs {kwargs}")
    def cached_grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[int]]) -> List[torch.Tensor]:
        """
        Check if combination was already generated before and return that if possible

        Args:
            grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
            strides (Sequence[Sequence[int]]): stride of each feature map

        Returns:
            List[torch.Tensor]: Anchors for each feature maps
        """
        # Cache key is the stringified concatenation of sizes and strides.
        key = str(grid_sizes + strides)
        if key not in self._cache:
            self._cache[key] = self.grid_anchors(grid_sizes, strides)
        # Remember how many anchors each level contributed for this key.
        self.num_anchors_per_level = self._cache[key][1]
        return self._cache[key][0]
    def grid_anchors(self, grid_sizes, strides) -> Tuple[List[torch.Tensor], List[int]]:
        """
        Distribute anchors over feature maps

        Args:
            grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
            strides (Sequence[Sequence[int]]): stride of each feature map

        Returns:
            List[torch.Tensor]: Anchors for each feature maps
            List[int]: number of anchors per level
        """
        assert len(grid_sizes) == len(strides), "Every fm size needs strides"
        assert len(grid_sizes) == len(self.cell_anchors), "Every fm size needs cell anchors"

        anchors = []
        cell_anchors = self.cell_anchors
        assert cell_anchors is not None
        _i = 0

        # modified from torchvision (ordering of axis differs)
        anchor_per_level = []
        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
            size0, size1 = size
            stride0, stride1 = stride
            device = base_anchors.device

            # Shift grid: one offset per feature-map cell, in image coordinates.
            shifts_x = torch.arange(0, size0, dtype=torch.float, device=device) * stride0
            shifts_y = torch.arange(0, size1, dtype=torch.float, device=device) * stride1
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)

            # Broadcast base anchors over all shifts -> all anchors for level.
            _anchors = (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
            anchors.append(_anchors)
            anchor_per_level.append(_anchors.shape[0])
            logger.debug(f"Generated {anchors[_i].shape[0]} anchors and expected "
                         f"{size0 * size1 * self.num_anchors_per_location()[_i]} "
                         f"anchors on level {_i}.")
            _i += 1
        return anchors, anchor_per_level
    @staticmethod
    def generate_anchors(scales: Tuple[int],
                         aspect_ratios: Tuple[float],
                         dtype: torch.dtype = torch.float,
                         device: Union[torch.device, str] = "cpu",
                         ) -> torch.Tensor:
        """
        Generate anchors for a pair of scales and ratios

        Args:
            scales (Tuple[int]): scales of anchors, e.g. (32, 64, 128)
            aspect_ratios (Tuple[float]): aspect ratios of height/width, e.g. (0.5, 1, 2)
            dtype (torch.dtype): data type of anchors
            device (Union[torch.device, str]): target device of anchors

        Returns:
            Tensor: anchors of shape [n(scales) * n(ratios), dim * 2],
                centred at the origin as (-w/2, -h/2, w/2, h/2), rounded
        """
        scales = torch.as_tensor(scales, dtype=dtype, device=device)
        aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
        # sqrt split keeps the anchor area constant across aspect ratios.
        h_ratios = torch.sqrt(aspect_ratios)
        w_ratios = 1 / h_ratios

        ws = (w_ratios[:, None] * scales[None, :]).view(-1)
        hs = (h_ratios[:, None] * scales[None, :]).view(-1)

        base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
        return base_anchors.round()
def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
"""
Set :para:`self.cell_anchors` if it was not already set
Args:
dtype (torch.dtype): data type of anchors
device (Union[torch.device, str]): target device of anchors
Returns:
None
result is saved into attribute
"""
if self.cell_anchors is not None:
return
cell_anchors = [self.generate_anchors(sizes, aspect_ratios, dtype, device)
for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)]
self.cell_anchors = cell_anchors
def forward(self, image_list: torch.Tensor, feature_maps: List[torch.Tensor]) -> List[torch.Tensor]:
"""
Generate anchors for given feature maps
# TODO: update docstring and type
Args:
image_list (torch.Tensor): data structure which contains images and their original shapes
feature_maps (Sequence[torch.Tensor]): feature maps for which anchors need to be generated
Returns:
List[Tensor]: list of anchors (for each image inside the batch)
"""
device = image_list.device
grid_sizes = list([feature_map.shape[2:] for feature_map in feature_maps])
image_size = image_list.shape[2:]
strides = [list((int(i / s) for i, s in zip(image_size, fm_size))) for fm_size in grid_sizes]
self.set_cell_anchors(dtype=feature_maps[0].dtype, device=feature_maps[0].device)
anchors_over_all_feature_maps = self.cached_grid_anchors(grid_sizes, strides)
anchors = []
images_shapes = [img.shape for img in image_list.split(1)]
for i, x in enumerate(images_shapes):
anchors_in_image = []
for anchors_per_feature_map in anchors_over_all_feature_maps:
anchors_in_image.append(anchors_per_feature_map)
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image).to(device) for anchors_per_image in anchors]
# TODO: check with torchvision if this makes sense (if enabled, anchors are newly generated for each run)
# # Clear the cache in case that memory leaks.
# self._cache.clear()
return anchors
def num_anchors_per_location(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]
def get_num_acnhors_per_level(self) -> List[int]:
"""
Number of anchors per resolution
Returns:
List[int]: number of anchors per positions for each resolution
"""
if self.num_anchors_per_level is None:
raise RuntimeError("Need to forward features maps before "
"get_num_acnhors_per_level can be called")
return self.num_anchors_per_level
class AnchorGenerator3D(AnchorGenerator2D):
    """3D extension of :class:`AnchorGenerator2D`: every anchor additionally
    carries a z extent, so anchors have 6 coordinates instead of 4."""
    def __init__(self,
                 sizes: Sequence[Union[int, Sequence[int]]] = (128, 256, 512),
                 aspect_ratios: Sequence[Union[float, Sequence[float]]] = (0.5, 1.0, 2.0),
                 zsizes: Sequence[Union[int, Sequence[int]]] = (4, 4, 4),
                 **kwargs):
        """
        Helper to generate anchors for different input sizes

        Args:
            sizes (Sequence[Union[int, Sequence[int]]]): anchor sizes for each feature map
                (length should match the number of feature maps)
            aspect_ratios (Sequence[Union[float, Sequence[float]]]): anchor aspect ratios:
                height/width, e.g. (0.5, 1, 2). if Seq[Seq] is provided, it should have
                the same length as sizes
            zsizes (Sequence[Union[int, Sequence[int]]]): sizes along z dimension;
                a flat sequence is replicated once per feature-map level
        """
        super().__init__(sizes, aspect_ratios)
        # A flat zsizes sequence is shared across all feature-map levels.
        if not isinstance(zsizes[0], (Sequence, list, tuple)):
            zsizes = (zsizes,) * len(sizes)
        self.zsizes = zsizes
        # Unknown keyword arguments are deliberately ignored (logged only)
        # so configs for other generator variants can be passed through.
        if kwargs:
            logger.info(f"Discarding anchor generator kwargs {kwargs}")
    def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
        """
        Compute anchors for all pairs of scales and ratios and save them inside :param:`cell_anchors`
        if they were not computed before
        Args:
            dtype (torch.dtype): data type of anchors
            device (Union[torch.device, str]): target device of anchors
        Returns:
            None (result is saved into :param:`self.cell_anchors`)
        """
        if self.cell_anchors is not None:
            return
        cell_anchors = [
            self.generate_anchors(sizes, aspect_ratios, zsizes, dtype, device)
            for sizes, aspect_ratios, zsizes in zip(self.sizes, self.aspect_ratios, self.zsizes)
        ]
        self.cell_anchors = cell_anchors
    @staticmethod
    def generate_anchors(scales: Tuple[int], aspect_ratios: Tuple[float], zsizes: Tuple[int],
                         dtype: torch.dtype = torch.float,
                         device: Union[torch.device, str] = "cpu") -> torch.Tensor:
        """
        Generate anchors for a pair of scales and ratios
        Args:
            scales (Tuple[int]): scales of anchors, e.g. (32, 64, 128)
            aspect_ratios (Tuple[float]): aspect ratios of height/width, e.g. (0.5, 1, 2)
            zsizes (Tuple[int]): scale along z dimension
            dtype (torch.dtype): data type of anchors
            device (Union[torch.device, str]): target device of anchors
        Returns:
            Tensor: anchors of shape [n(scales) * n(ratios) * n(zscales) , dim * 2]
        """
        # Start from the 4-coordinate 2D anchors ...
        base_anchors_2d = AnchorGenerator2D.generate_anchors(
            scales, aspect_ratios, dtype=dtype, device=device)
        # ... replicate each 2D anchor once per z size and build the matching
        # (-z, z) extent rows ...
        zanchors = torch.cat(
            [torch.as_tensor([-z, z], dtype=dtype, device=device).repeat(
                base_anchors_2d.shape[0], 1) for z in zsizes], dim=0)
        # ... then append the halved, rounded z extents as columns 4 and 5.
        base_anchors_3d = torch.cat(
            [base_anchors_2d.repeat(len(zsizes), 1), (zanchors / 2.).round()], dim=1)
        return base_anchors_3d
    def grid_anchors(self, grid_sizes: Sequence[Sequence[int]],
                     strides: Sequence[Sequence[int]]) -> Tuple[List[torch.Tensor], List[int]]:
        """
        Distribute anchors over feature maps
        Args:
            grid_sizes (Sequence[Sequence[int]]): spatial sizes of feature maps
            strides (Sequence[Sequence[int]]): stride of each feature map
        Returns:
            List[torch.Tensor]: Anchors for each feature maps
            List[int]: number of anchors per level
        """
        assert len(grid_sizes) == len(strides)
        assert len(grid_sizes) == len(self.cell_anchors)
        anchors = []
        _i = 0
        anchor_per_level = []
        for size, stride, base_anchors in zip(grid_sizes, strides, self.cell_anchors):
            size0, size1, size2 = size
            stride0, stride1, stride2 = stride
            dtype, device = base_anchors.dtype, base_anchors.device
            # Grid offsets of every anchor center along each axis.
            shifts_x = torch.arange(0, size0, dtype=dtype, device=device) * stride0
            shifts_y = torch.arange(0, size1, dtype=dtype, device=device) * stride1
            shifts_z = torch.arange(0, size2, dtype=dtype, device=device) * stride2
            # NOTE(review): meshgrid is called as (x, y, z) here, while the 2D
            # variant uses (y, x) with a "ordering of axis differs" remark --
            # confirm the intended axis ordering is consistent between the two.
            shift_x, shift_y, shift_z = torch.meshgrid(shifts_x, shifts_y, shifts_z)
            shift_x = shift_x.reshape(-1)
            shift_y = shift_y.reshape(-1)
            shift_z = shift_z.reshape(-1)
            # Shifts are repeated per coordinate pair so that adding them to
            # the 6-column base anchors translates both box corners.
            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y, shift_z, shift_z), dim=1)
            # Broadcast: every grid position combined with every base anchor.
            _anchors = (shifts.view(-1, 1, 6) + base_anchors.view(1, -1, 6)).reshape(-1, 6)
            anchors.append(_anchors)
            anchor_per_level.append(_anchors.shape[0])
            logger.debug(f"Generated {_anchors.shape[0]} anchors and expected "
                         f"{size0 * size1 * size2 * self.num_anchors_per_location()[_i]} "
                         f"anchors on level {_i}.")
            _i += 1
        return anchors, anchor_per_level
    def num_anchors_per_location(self) -> List[int]:
        """
        Number of anchors per resolution
        Returns:
            List[int]: number of anchors per positions for each resolution
        """
        return [len(s) * len(a) * len(z) for s, a, z in zip(self.sizes, self.aspect_ratios, self.zsizes)]
class AnchorGenerator2DS(AnchorGenerator2D):
    def __init__(self,
                 width: Sequence[Union[int, Sequence[int]]],
                 height: Sequence[Union[int, Sequence[int]]],
                 **kwargs,
                 ):
        """
        Anchor generator parametrized by explicit per-axis sizes.

        A flat sequence of ints is read as one size per feature map; nested
        sequences give several sizes per feature map.

        Args:
            width: sizes along width dimension, one entry per level
            height: sizes along height dimension, one entry per level
        """
        # TODO: check width and height statements
        super().__init__()
        # Normalize flat sequences into one single-element tuple per level.
        width = width if isinstance(width[0], Sequence) else [(w,) for w in width]
        height = height if isinstance(height[0], Sequence) else [(h,) for h in height]
        self.width = width
        self.height = height
        assert len(self.width) == len(self.height)
        if kwargs:
            logger.info(f"Discarding anchor generator kwargs {kwargs}")
    def set_cell_anchors(self, dtype: torch.dtype,
                         device: Union[torch.device, str] = "cpu") -> None:
        """
        Lazily compute per-level base anchors into :attr:`self.cell_anchors`.

        Args:
            dtype (torch.dtype): data type of anchors
            device (Union[torch.device, str]): target device of anchors

        Returns:
            None (result is saved into :attr:`self.cell_anchors`)
        """
        if self.cell_anchors is not None:
            return
        per_level = []
        for level_width, level_height in zip(self.width, self.height):
            per_level.append(self.generate_anchors(level_width, level_height, dtype, device))
        self.cell_anchors = per_level
    @staticmethod
    def generate_anchors(width: Tuple[int],
                         height: Tuple[int],
                         dtype: torch.dtype = torch.float,
                         device: Union[torch.device, str] = "cpu",
                         ) -> torch.Tensor:
        """
        Build zero-centered anchors (-w/2, -h/2, w/2, h/2) for every
        width/height combination.

        Args:
            width: sizes along width dimension
            height: sizes along height dimension

        Returns:
            Tensor: anchors of shape [n(width) * n(height), dim * 2]
        """
        half_sizes = torch.tensor(list(product(width, height)),
                                  dtype=dtype, device=device) / 2
        half_w, half_h = half_sizes[:, 0], half_sizes[:, 1]
        return torch.stack((-half_w, -half_h, half_w, half_h), dim=1)
    def num_anchors_per_location(self) -> List[int]:
        """
        Number of anchors per spatial position for each resolution.

        Returns:
            List[int]: number of anchors per position, one entry per level
        """
        counts = []
        for level_width, level_height in zip(self.width, self.height):
            counts.append(len(level_width) * len(level_height))
        return counts
class AnchorGenerator3DS(AnchorGenerator3D):
    def __init__(self,
                 width: Sequence[Union[int, Sequence[int]]],
                 height: Sequence[Union[int, Sequence[int]]],
                 depth: Sequence[Union[int, Sequence[int]]],
                 **kwargs,
                 ):
        """
        3D anchor generator parametrized by explicit per-axis sizes.

        A flat sequence of ints is read as one size per feature map; nested
        sequences give several sizes per feature map.

        Args:
            width: sizes along width dimension, one entry per level
            height: sizes along height dimension, one entry per level
            depth: sizes along depth dimension, one entry per level
        """
        # TODO: check width and height statements
        super().__init__()
        # Normalize flat sequences into one single-element tuple per level.
        width = width if isinstance(width[0], Sequence) else [(w,) for w in width]
        height = height if isinstance(height[0], Sequence) else [(h,) for h in height]
        depth = depth if isinstance(depth[0], Sequence) else [(d,) for d in depth]
        self.width = width
        self.height = height
        self.depth = depth
        assert len(self.width) == len(self.height) == len(self.depth)
        if kwargs:
            logger.info(f"Discarding anchor generator kwargs {kwargs}")
    def set_cell_anchors(self, dtype: torch.dtype, device: Union[torch.device, str] = "cpu") -> None:
        """
        Lazily compute per-level base anchors into :attr:`self.cell_anchors`.

        Args:
            dtype (torch.dtype): data type of anchors
            device (Union[torch.device, str]): target device of anchors

        Returns:
            None (result is saved into :attr:`self.cell_anchors`)
        """
        if self.cell_anchors is not None:
            return
        per_level = []
        for level_width, level_height, level_depth in zip(self.width, self.height, self.depth):
            per_level.append(self.generate_anchors(level_width, level_height, level_depth, dtype, device))
        self.cell_anchors = per_level
    @staticmethod
    def generate_anchors(width: Tuple[int],
                         height: Tuple[int],
                         depth: Tuple[int],
                         dtype: torch.dtype = torch.float,
                         device: Union[torch.device, str] = "cpu") -> torch.Tensor:
        """
        Build zero-centered 6-coordinate anchors for every
        (width, height, depth) combination.

        Args:
            width: sizes along width dimension
            height: sizes along height dimension
            depth: sizes along depth dimension

        Returns:
            Tensor: anchors of shape [n(width) * n(height) * n(depth) , dim * 2]
        """
        half_sizes = torch.tensor(list(product(width, height, depth)),
                                  dtype=dtype, device=device) / 2
        half_w, half_h, half_d = half_sizes[:, 0], half_sizes[:, 1], half_sizes[:, 2]
        return torch.stack(
            (-half_w, -half_h, half_w, half_h, -half_d, half_d), dim=1
        )
    def num_anchors_per_location(self) -> List[int]:
        """
        Number of anchors per spatial position for each resolution.

        Returns:
            List[int]: number of anchors per position, one entry per level
        """
        counts = []
        for level_width, level_height, level_depth in zip(self.width, self.height, self.depth):
            counts.append(len(level_width) * len(level_height) * len(level_depth))
        return counts
| 1.515625 | 2 |
output/models/nist_data/list_pkg/int_pkg/schema_instance/nistschema_sv_iv_list_int_length_3_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12770378 | <filename>output/models/nist_data/list_pkg/int_pkg/schema_instance/nistschema_sv_iv_list_int_length_3_xsd/__init__.py<gh_stars>1-10
from output.models.nist_data.list_pkg.int_pkg.schema_instance.nistschema_sv_iv_list_int_length_3_xsd.nistschema_sv_iv_list_int_length_3 import NistschemaSvIvListIntLength3
__all__ = [
"NistschemaSvIvListIntLength3",
]
| 1.078125 | 1 |
aula4_19oct/Greates_Number.py | JessHV/LabADA_GrupoC | 0 | 12770379 | <gh_stars>0
# Exercise 1
'''
Instructor's example.
Convert this function from O(n^2) to an O(n) function.
'''
def greatestNumber(array):
    """Return the largest value in ``array`` (quadratic reference version).

    Deliberately O(n**2): this is the instructor's example that the
    exercise asks to rewrite in O(n).  Returns None for an empty list.
    """
    for candidate in array:
        # candidate is the answer when no other element exceeds it
        if all(other <= candidate for other in array):
            return candidate
'''
O(n) version of the function
'''
def theGreatestNumber(array):
    """Return the largest value in ``array`` in a single pass.

    Time complexity: O(n) -- one comparison per element.
    Raises IndexError for an empty list (same as the original version).
    """
    best = array[0]
    for value in array[1:]:
        if value > best:
            best = value
    return best
'''
Test case
'''
print(theGreatestNumber([1, 12, 3, 4, 5, 6, 7, 8, 9, 10]))
| 3.734375 | 4 |
common/env_wrapper.py | Frager/Deep-Neuroevolution-In-SC2LE | 1 | 12770380 | from common.preprocessor import Preprocessor
class EnvWrapper:
    # Wrapper class for SC2Env.
    # Used to fit the data coming from SC2Env to the agents model and vice versa
    """Adapter between SC2Env and the agent model.

    Converts the agent's actions into the environment's format before
    stepping, and reshapes the environment's timesteps into the dict
    layout the model consumes.
    """
    def __init__(self, env, model_config):
        # env: the wrapped SC2Env instance; model_config drives preprocessing
        self.env = env
        self.model_config = model_config
        self.preprocessor = Preprocessor(model_config)
    def reset(self):
        """Reset the wrapped environment and return preprocessed timesteps."""
        timesteps = self.env.reset()
        return self.preprocess_timesteps(timesteps)
    def step(self, action):
        """Convert `action` to the env's format, step, and preprocess the result."""
        processed_actions = self.preprocessor.preprocess_action(action)
        timesteps = self.env.step(processed_actions)
        return self.preprocess_timesteps(timesteps)
    def preprocess_timesteps(self, timesteps):
        """Unpack a batch of timesteps into a dict of per-field lists."""
        obs_raw = [timestep.observation for timestep in timesteps]
        available_actions_raw = [ob.available_actions for ob in obs_raw]
        rewards = [timestep.reward for timestep in timesteps]
        score_cumulative = [timestep.observation['score_cumulative'] for timestep in timesteps]
        dones = [timestep.last() for timestep in timesteps]
        # Available actions get one hot encoded
        # NOTE(review): the full batch is passed in one call and the result
        # wrapped in a single-element list -- confirm downstream code expects
        # [batch] rather than one entry per timestep.
        available_actions = [self.preprocessor.preprocess_available_actions(available_actions_raw)]
        # raw observations are made to better fit the configuration of the agents model
        processed_obs = self.preprocessor.preprocess_observations(obs_raw)
        return {'observation': processed_obs,
                'rewards': rewards,
                'score_cumulative': score_cumulative,
                'dones': dones,
                'available_actions': available_actions}
    def observation_spec(self):
        # NOTE(review): returns the attribute without calling it -- some
        # pysc2 versions expose observation_spec as a method; confirm.
        return self.env.observation_spec
    def action_spec(self):
        # NOTE(review): same remark as observation_spec above.
        return self.env.action_spec
    def close(self):
        """Shut down the wrapped environment."""
        self.env.close()
| 2.703125 | 3 |
def intersection(l1, l2):
    """Return the multiset intersection of ``l1`` and ``l2``.

    Each value appears min(count in l1, count in l2) times, in the order
    it occurs in ``l1``.  This fixes the duplicate handling flagged by the
    TODO at the bottom of this file: the old ``[v for v in l1 if v in l2]``
    repeated a value as often as it occurred in ``l1`` regardless of its
    count in ``l2`` (e.g. intersecting the factorisations of 2340 and
    11230 yielded [2, 2, 5] instead of [2, 5]).

    Args:
        l1: first list; result order and duplicates follow this list
        l2: second list; elements must be hashable

    Returns:
        list: multiset intersection of the two lists
    """
    from collections import Counter
    remaining = Counter(l2)  # how many of each value l2 still "offers"
    common = []
    for value in l1:
        if remaining[value] > 0:
            common.append(value)
            remaining[value] -= 1
    return common
def divide_into_primes(num, base=2, seq=None):
    """Return the prime factorisation of ``num`` (with multiplicity).

    Recursive trial division; factors come out in ascending order.

    Args:
        num: positive integer to factorise (1 yields no factors)
        base: smallest divisor to try (carries the last factor forward
            between recursive calls)
        seq: optional list the factors are appended to (mutated in place)

    Returns:
        list: ``seq`` extended with the prime factors of ``num``
    """
    factors = [] if seq is None else seq
    if num == 1:
        return factors
    for divisor in range(base, num + 1):
        if num % divisor == 0:
            factors.append(divisor)
            return divide_into_primes(int(num / divisor), divisor, factors)
# Demo: factorise two numbers and intersect their prime factors.
x = divide_into_primes(2340)  # 2340 = 2*2*3*3*5*13
print(x)
y = divide_into_primes(11230)  # 11230 = 2*5*1123
print(y)
# Prime factors the two numbers have in common.
common = intersection(x, y)
print(common)
# TODO there are issues with repeated elements
py/rest/convert.py | woodrow/pyoac | 1 | 12770382 | import py
from py.__.process.cmdexec import ExecutionFailed
# utility functions to convert between various formats
# Output format -> argument for dot's -T option.  The PostScript-derived
# formats (eps/ps/pdf) all start from dot's "ps" output and are converted
# further by the helper functions below.
format_to_dotargument = {"png": "png",
                         "eps": "ps",
                         "ps": "ps",
                         "pdf": "ps",
                         }
def ps2eps(ps):
    """Convert a PostScript file to EPS next to it, via ps2epsi/ps2eps.

    Tries ps2epsi first and falls back to ps2eps if that execution fails.
    Raises SystemExit when neither external tool is installed.
    """
    # XXX write a pure python version
    have_epsi = py.path.local.sysfind("ps2epsi")
    have_ps2eps = py.path.local.sysfind("ps2eps")
    if not (have_epsi or have_ps2eps):
        raise SystemExit("neither ps2eps nor ps2epsi found")
    try:
        target = ps.new(ext=".eps")
        py.process.cmdexec('ps2epsi "%s" "%s"' % (ps, target))
    except ExecutionFailed:
        py.process.cmdexec('ps2eps -l -f "%s"' % ps)
def ps2pdf(ps, compat_level="1.2"):
    """Convert a PostScript file to PDF with ghostscript.

    Returns the py.path of the produced .pdf.  Raises SystemExit if the
    gs executable is not on the PATH.
    """
    if not py.path.local.sysfind("gs"):
        raise SystemExit("ERROR: gs not found")
    pdf = ps.new(ext=".pdf")
    opts = "-dSAFER -dCompatibilityLevel=%s" % compat_level
    cmd = ('gs %s -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite '
           '"-sOutputFile=%s" %s -c .setpdfwrite '
           '-f "%s"') % (opts, pdf, opts, ps)
    py.process.cmdexec(cmd)
    return pdf
def eps2pdf(eps):
    """Convert an EPS file to PDF using the external epstopdf tool."""
    # XXX write a pure python version
    tool = py.path.local.sysfind("epstopdf")
    if not tool:
        raise SystemExit("ERROR: epstopdf not found")
    py.process.cmdexec('epstopdf "%s"' % eps)
def dvi2eps(dvi, dest=None):
    """Convert a DVI file to EPS using dvips.

    Args:
        dvi: py.path object pointing to the input .dvi file
        dest: optional py.path target; defaults to the dvi path with its
            extension replaced by ".eps"

    Raises:
        SystemExit: if the dvips executable is not on the PATH
    """
    if dest is None:
        # BUG FIX: previously read `dest = eps.new(ext=".eps")`, but `eps`
        # is undefined in this scope (NameError).  Derive the default
        # destination from the input dvi path instead.
        dest = dvi.new(ext=".eps")
    if not py.path.local.sysfind("dvips"):
        raise SystemExit("ERROR: dvips not found")
    command = 'dvips -q -E -n 1 -D 600 -p 1 -o "%s" "%s"' % (dest, dvi)
    py.process.cmdexec(command)
def convert_dot(fn, new_extension):
    """Render a graphviz .dot file to png/eps/ps/pdf.

    PostScript-derived targets are first rendered as PostScript by dot and
    then converted further with ps2eps / eps2pdf.  Returns the py.path of
    the produced file.  Raises SystemExit when dot is missing and KeyError
    for an unsupported new_extension.
    """
    if not py.path.local.sysfind("dot"):
        raise SystemExit("ERROR: dot not found")
    result = fn.new(ext=new_extension)
    print result
    arg = "-T%s" % (format_to_dotargument[new_extension], )
    # NOTE(review): paths are interpolated into a shell command without
    # escaping -- filenames containing double quotes would break this.
    py.std.os.system('dot "%s" "%s" > "%s"' % (arg, fn, result))
    if new_extension == "eps":
        # dot wrote PostScript into the .eps-named file: rename it to .ps
        # and derive a proper EPS from it.
        ps = result.new(ext="ps")
        result.move(ps)
        ps2eps(ps)
        ps.remove()
    elif new_extension == "pdf":
        # convert to eps file first, to get the bounding box right
        eps = result.new(ext="eps")
        ps = result.new(ext="ps")
        result.move(ps)
        ps2eps(ps)
        eps2pdf(eps)
        ps.remove()
        eps.remove()
    return result
class latexformula2png(object):
    """Render a LaTeX formula to a PNG file.

    Pipeline: latex -> dvips -> ghostscript; if PIL is available the image
    is rendered at a higher resolution and scaled down for antialiasing.
    All work happens inside a temporary directory; the final PNG is copied
    to ``dest`` from the constructor.
    """
    def __init__(self, formula, dest, temp=None):
        # formula: LaTeX source placed verbatim into the document body
        # dest: target path for the final PNG
        # temp: optional working directory (defaults to a py.test tempdir)
        self.formula = formula
        try:
            import Image
            self.Image = Image
            self.scale = 2 # create a larger image
            self.upscale = 5 # create the image upscale times larger, then scale it down
        except ImportError:
            # without PIL we cannot downscale, so render at the final size
            self.scale = 2
            self.upscale = 1
            self.Image = None
        self.output_format = ('pngmono', 'pnggray', 'pngalpha')[2]
        if temp is None:
            temp = py.test.ensuretemp("latexformula")
        self.temp = temp
        # fixed working-file names inside the temp directory
        self.latex = self.temp.join('formula.tex')
        self.dvi = self.temp.join('formula.dvi')
        self.eps = self.temp.join('formula.eps')
        self.png = self.temp.join('formula.png')
        self.saveas(dest)
    def saveas(self, dest):
        """Run the full latex->dvi->eps->png pipeline and copy to dest."""
        self.gen_latex()
        self.gen_dvi()
        dvi2eps(self.dvi, self.eps)
        self.gen_png()
        self.scale_image()
        self.png.copy(dest)
    def gen_latex(self):
        """Write a minimal single-page LaTeX document around the formula."""
        self.latex.write ("""
        \\documentclass{article}
        \\pagestyle{empty}
        \\begin{document}
        %s
        \\pagebreak
        \\end{document}
        """ % (self.formula))
    def gen_dvi(self):
        """Run latex inside the temp dir, restoring the cwd afterwards."""
        origdir = py.path.local()
        self.temp.chdir()
        py.process.cmdexec('latex "%s"' % (self.latex))
        origdir.chdir()
    def gen_png(self):
        """Rasterize the EPS with ghostscript, cropped to its bounding box."""
        # NOTE(review): tempdir is created but never used -- confirm it can
        # be dropped.
        tempdir = py.path.local.mkdtemp()
        re_bbox = py.std.re.compile('%%BoundingBox:\s*(\d+) (\d+) (\d+) (\d+)')
        eps = self.eps.read()
        x1, y1, x2, y2 = [int(i) for i in re_bbox.search(eps).groups()]
        # page size with a one-point margin on every side
        X = x2 - x1 + 2
        Y = y2 - y1 + 2
        # translation that moves the bounding box to the origin
        mx = -x1
        my = -y1
        ps = self.temp.join('temp.ps')
        source = self.eps
        # wrapper PostScript: white background, translate, then run the EPS
        ps.write("""
        1 1 1 setrgbcolor
        newpath
        -1 -1 moveto
        %(X)d -1 lineto
        %(X)d %(Y)d lineto
        -1 %(Y)d lineto
        closepath
        fill
        %(mx)d %(my)d translate
        0 0 0 setrgbcolor
        (%(source)s) run
        """ % locals())
        # pixel size and dpi scaled by both the display scale and the
        # temporary upscale factor (see __init__)
        sx = int((x2 - x1) * self.scale * self.upscale)
        sy = int((y2 - y1) * self.scale * self.upscale)
        res = 72 * self.scale * self.upscale
        command = ('gs -q -g%dx%d -r%dx%d -sDEVICE=%s -sOutputFile="%s" '
                   '-dNOPAUSE -dBATCH "%s"') % (
                    sx, sy, res, res, self.output_format, self.png, ps)
        py.process.cmdexec(command)
    def scale_image(self):
        """Downscale the upsampled PNG with PIL (no-op without PIL)."""
        if self.Image is None:
            return
        image = self.Image.open(str(self.png))
        # NOTE: integer division (Python 2) -- sizes are truncated
        image.resize((image.size[0] / self.upscale,
                      image.size[1] / self.upscale),
                     self.Image.ANTIALIAS).save(str(self.png))
| 2.421875 | 2 |
apps/shortener/migrations/0009_auto_20200824_2124.py | ShAlireza/Yektanet | 0 | 12770383 | # Generated by Django 3.1 on 2020-08-24 21:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): auto-generated by Django.  If this migration has been
    # applied to any database, do not edit it -- add a follow-up migration
    # instead.

    dependencies = [
        ('shortener', '0008_auto_20200824_1342'),
    ]

    operations = [
        # key becomes unique and db-indexed (8-char short-URL key).
        migrations.AlterField(
            model_name='shortenedurl',
            name='key',
            field=models.CharField(db_index=True, default='', max_length=8, unique=True),
        ),
        # created_at gains a database index.
        migrations.AlterField(
            model_name='visit',
            name='created_at',
            field=models.DateField(auto_now_add=True, db_index=True),
        ),
        # New one-to-one Analytic model attached to each short URL.
        migrations.CreateModel(
            name='Analytic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): default={} is a shared mutable default;
                # Django recommends default=dict.  Flagged only -- changing
                # an applied migration requires a new migration.
                ('all_visits', models.JSONField(default={})),
                ('unique_visits', models.JSONField(default={})),
                ('short_url', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='analytic', to='shortener.shortenedurl')),
            ],
        ),
    ]
| 1.6875 | 2 |
scattertext/termcompaction/ClassPercentageCompactor.py | shettyprithvi/scattertext | 1,823 | 12770384 | <reponame>shettyprithvi/scattertext
import numpy as np
from scattertext.termranking import AbsoluteFrequencyRanker
class ClassPercentageCompactor(object):
    """Remove terms that never account for a minimum share of any
    category's tokens.  The share threshold is derived from ``term_count``
    relative to the smallest category (see :meth:`compact`)."""
    def __init__(self,
                 term_ranker=AbsoluteFrequencyRanker,
                 term_count=2):
        '''
        Limit terms to ones that make up a minimum percentage
        of documents in a category. Given a term_count, set the threshold
        to that of the smallest class.

        Parameters
        ----------
        term_ranker : TermRanker
        term_count : int
        '''
        self.term_ranker = term_ranker
        self.term_count = term_count
    def compact(self, term_doc_matrix, non_text=False):
        '''
        Parameters
        -------
        term_doc_matrix : TermDocMatrix
        non_text : bool
            rank non-text (metadata) features instead of terms

        Returns
        -------
        TermDocMatrix
            a new matrix with the under-threshold terms removed
        '''
        ranker = self.term_ranker(term_doc_matrix)
        if non_text:
            ranker = ranker.use_non_text_features()
        # tdf: term x category count table (presumably a pandas DataFrame --
        # .sum/.index usage below relies on that)
        tdf = ranker.get_ranks()
        tdf_sum = tdf.sum(axis=0)
        # per-category fraction each term contributes
        tdf_portions = tdf / tdf_sum
        # largest fraction that `term_count` tokens represent in any
        # category, i.e. term_count / (smallest category total)
        threshold = np.max(self.term_count / tdf_sum)
        # keep terms exceeding the threshold in at least one category
        terms_to_remove = tdf_portions[~(tdf_portions > threshold).any(axis=1)].index
        return term_doc_matrix.remove_terms(terms_to_remove, non_text=non_text)
| 2.90625 | 3 |
build/PureCloudPlatformClientV2/models/evaluation.py | MyPureCloud/platform-client-sdk-python | 10 | 12770385 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class Evaluation(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Evaluation - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'conversation': 'Conversation',
'evaluation_form': 'EvaluationForm',
'evaluator': 'User',
'agent': 'User',
'calibration': 'Calibration',
'status': 'str',
'answers': 'EvaluationScoringSet',
'agent_has_read': 'bool',
'release_date': 'datetime',
'assigned_date': 'datetime',
'changed_date': 'datetime',
'queue': 'Queue',
'media_type': 'list[str]',
'rescore': 'bool',
'conversation_date': 'datetime',
'conversation_end_date': 'datetime',
'never_release': 'bool',
'resource_id': 'str',
'resource_type': 'str',
'redacted': 'bool',
'is_scoring_index': 'bool',
'authorized_actions': 'list[str]',
'self_uri': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'conversation': 'conversation',
'evaluation_form': 'evaluationForm',
'evaluator': 'evaluator',
'agent': 'agent',
'calibration': 'calibration',
'status': 'status',
'answers': 'answers',
'agent_has_read': 'agentHasRead',
'release_date': 'releaseDate',
'assigned_date': 'assignedDate',
'changed_date': 'changedDate',
'queue': 'queue',
'media_type': 'mediaType',
'rescore': 'rescore',
'conversation_date': 'conversationDate',
'conversation_end_date': 'conversationEndDate',
'never_release': 'neverRelease',
'resource_id': 'resourceId',
'resource_type': 'resourceType',
'redacted': 'redacted',
'is_scoring_index': 'isScoringIndex',
'authorized_actions': 'authorizedActions',
'self_uri': 'selfUri'
}
self._id = None
self._name = None
self._conversation = None
self._evaluation_form = None
self._evaluator = None
self._agent = None
self._calibration = None
self._status = None
self._answers = None
self._agent_has_read = None
self._release_date = None
self._assigned_date = None
self._changed_date = None
self._queue = None
self._media_type = None
self._rescore = None
self._conversation_date = None
self._conversation_end_date = None
self._never_release = None
self._resource_id = None
self._resource_type = None
self._redacted = None
self._is_scoring_index = None
self._authorized_actions = None
self._self_uri = None
@property
def id(self):
"""
Gets the id of this Evaluation.
The globally unique identifier for the object.
:return: The id of this Evaluation.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Evaluation.
The globally unique identifier for the object.
:param id: The id of this Evaluation.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Evaluation.
:return: The name of this Evaluation.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Evaluation.
:param name: The name of this Evaluation.
:type: str
"""
self._name = name
@property
def conversation(self):
"""
Gets the conversation of this Evaluation.
:return: The conversation of this Evaluation.
:rtype: Conversation
"""
return self._conversation
@conversation.setter
def conversation(self, conversation):
"""
Sets the conversation of this Evaluation.
:param conversation: The conversation of this Evaluation.
:type: Conversation
"""
self._conversation = conversation
@property
def evaluation_form(self):
"""
Gets the evaluation_form of this Evaluation.
Evaluation form used for evaluation.
:return: The evaluation_form of this Evaluation.
:rtype: EvaluationForm
"""
return self._evaluation_form
@evaluation_form.setter
def evaluation_form(self, evaluation_form):
"""
Sets the evaluation_form of this Evaluation.
Evaluation form used for evaluation.
:param evaluation_form: The evaluation_form of this Evaluation.
:type: EvaluationForm
"""
self._evaluation_form = evaluation_form
@property
def evaluator(self):
"""
Gets the evaluator of this Evaluation.
:return: The evaluator of this Evaluation.
:rtype: User
"""
return self._evaluator
@evaluator.setter
def evaluator(self, evaluator):
"""
Sets the evaluator of this Evaluation.
:param evaluator: The evaluator of this Evaluation.
:type: User
"""
self._evaluator = evaluator
@property
def agent(self):
"""
Gets the agent of this Evaluation.
:return: The agent of this Evaluation.
:rtype: User
"""
return self._agent
@agent.setter
def agent(self, agent):
"""
Sets the agent of this Evaluation.
:param agent: The agent of this Evaluation.
:type: User
"""
self._agent = agent
@property
def calibration(self):
"""
Gets the calibration of this Evaluation.
:return: The calibration of this Evaluation.
:rtype: Calibration
"""
return self._calibration
@calibration.setter
def calibration(self, calibration):
"""
Sets the calibration of this Evaluation.
:param calibration: The calibration of this Evaluation.
:type: Calibration
"""
self._calibration = calibration
@property
def status(self):
"""
Gets the status of this Evaluation.
:return: The status of this Evaluation.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Evaluation.
:param status: The status of this Evaluation.
:type: str
"""
allowed_values = ["PENDING", "INPROGRESS", "FINISHED"]
if status.lower() not in map(str.lower, allowed_values):
# print("Invalid value for status -> " + status)
self._status = "outdated_sdk_version"
else:
self._status = status
@property
def answers(self):
"""
Gets the answers of this Evaluation.
:return: The answers of this Evaluation.
:rtype: EvaluationScoringSet
"""
return self._answers
@answers.setter
def answers(self, answers):
"""
Sets the answers of this Evaluation.
:param answers: The answers of this Evaluation.
:type: EvaluationScoringSet
"""
self._answers = answers
@property
def agent_has_read(self):
"""
Gets the agent_has_read of this Evaluation.
:return: The agent_has_read of this Evaluation.
:rtype: bool
"""
return self._agent_has_read
@agent_has_read.setter
def agent_has_read(self, agent_has_read):
"""
Sets the agent_has_read of this Evaluation.
:param agent_has_read: The agent_has_read of this Evaluation.
:type: bool
"""
self._agent_has_read = agent_has_read
@property
def release_date(self):
"""
Gets the release_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The release_date of this Evaluation.
:rtype: datetime
"""
return self._release_date
@release_date.setter
def release_date(self, release_date):
"""
Sets the release_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param release_date: The release_date of this Evaluation.
:type: datetime
"""
self._release_date = release_date
@property
def assigned_date(self):
"""
Gets the assigned_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The assigned_date of this Evaluation.
:rtype: datetime
"""
return self._assigned_date
@assigned_date.setter
def assigned_date(self, assigned_date):
"""
Sets the assigned_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param assigned_date: The assigned_date of this Evaluation.
:type: datetime
"""
self._assigned_date = assigned_date
@property
def changed_date(self):
"""
Gets the changed_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The changed_date of this Evaluation.
:rtype: datetime
"""
return self._changed_date
@changed_date.setter
def changed_date(self, changed_date):
"""
Sets the changed_date of this Evaluation.
Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param changed_date: The changed_date of this Evaluation.
:type: datetime
"""
self._changed_date = changed_date
@property
def queue(self):
"""
Gets the queue of this Evaluation.
:return: The queue of this Evaluation.
:rtype: Queue
"""
return self._queue
@queue.setter
def queue(self, queue):
"""
Sets the queue of this Evaluation.
:param queue: The queue of this Evaluation.
:type: Queue
"""
self._queue = queue
@property
def media_type(self):
"""
Gets the media_type of this Evaluation.
List of different communication types used in conversation.
:return: The media_type of this Evaluation.
:rtype: list[str]
"""
return self._media_type
@media_type.setter
def media_type(self, media_type):
"""
Sets the media_type of this Evaluation.
List of different communication types used in conversation.
:param media_type: The media_type of this Evaluation.
:type: list[str]
"""
self._media_type = media_type
@property
def rescore(self):
"""
Gets the rescore of this Evaluation.
Is only true when evaluation is re-scored.
:return: The rescore of this Evaluation.
:rtype: bool
"""
return self._rescore
@rescore.setter
def rescore(self, rescore):
"""
Sets the rescore of this Evaluation.
Is only true when evaluation is re-scored.
:param rescore: The rescore of this Evaluation.
:type: bool
"""
self._rescore = rescore
@property
def conversation_date(self):
"""
Gets the conversation_date of this Evaluation.
Date of conversation. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The conversation_date of this Evaluation.
:rtype: datetime
"""
return self._conversation_date
@conversation_date.setter
def conversation_date(self, conversation_date):
"""
Sets the conversation_date of this Evaluation.
Date of conversation. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param conversation_date: The conversation_date of this Evaluation.
:type: datetime
"""
self._conversation_date = conversation_date
@property
def conversation_end_date(self):
    """
    Gets the conversation_end_date of this Evaluation.
    End date of conversation if it had completed before evaluation creation.
    Null if created before the conversation ended. Date time is represented
    as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

    :return: The conversation_end_date of this Evaluation.
    :rtype: datetime
    """
    return self._conversation_end_date

@conversation_end_date.setter
def conversation_end_date(self, conversation_end_date):
    """
    Sets the conversation_end_date of this Evaluation.
    End date of conversation if it had completed before evaluation creation.
    Null if created before the conversation ended. Date time is represented
    as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

    :param conversation_end_date: The conversation_end_date of this Evaluation.
    :type: datetime
    """
    self._conversation_end_date = conversation_end_date
@property
def never_release(self):
    """
    Gets the never_release of this Evaluation.
    Signifies if the evaluation is never to be released. This cannot be set
    true if release date is also set.

    :return: The never_release of this Evaluation.
    :rtype: bool
    """
    return self._never_release

@never_release.setter
def never_release(self, never_release):
    """
    Sets the never_release of this Evaluation.
    Signifies if the evaluation is never to be released. This cannot be set
    true if release date is also set.

    :param never_release: The never_release of this Evaluation.
    :type: bool
    """
    # NOTE(review): the mutual-exclusion with release date described above is
    # not enforced here — presumably validated server-side; confirm.
    self._never_release = never_release
@property
def resource_id(self):
    """
    Gets the resource_id of this Evaluation.
    Only used for email evaluations. Will be null for all other evaluations.

    :return: The resource_id of this Evaluation.
    :rtype: str
    """
    return self._resource_id

@resource_id.setter
def resource_id(self, resource_id):
    """
    Sets the resource_id of this Evaluation.
    Only used for email evaluations. Will be null for all other evaluations.

    :param resource_id: The resource_id of this Evaluation.
    :type: str
    """
    self._resource_id = resource_id
@property
def resource_type(self):
    """
    Gets the resource_type of this Evaluation.
    The type of resource. Only used for email evaluations. Will be null for
    evaluations on all other resources.

    :return: The resource_type of this Evaluation.
    :rtype: str
    """
    return self._resource_type

@resource_type.setter
def resource_type(self, resource_type):
    """
    Sets the resource_type of this Evaluation.
    The type of resource. Only used for email evaluations. Will be null for
    evaluations on all other resources.

    :param resource_type: The resource_type of this Evaluation.
    :type: str
    """
    allowed_values = ["EMAIL"]
    if resource_type is None:
        # The docstring states the value may be null for non-email
        # evaluations; the previous code crashed here with AttributeError
        # because it called .lower() on None unconditionally.
        self._resource_type = None
    elif resource_type.lower() not in map(str.lower, allowed_values):
        # Unknown enum values are coerced so older SDKs tolerate new
        # server-side values instead of storing garbage.
        self._resource_type = "outdated_sdk_version"
    else:
        self._resource_type = resource_type
@property
def redacted(self):
    """
    Gets the redacted of this Evaluation.
    Is only true when the user making the request does not have sufficient
    permissions to see evaluation.

    :return: The redacted of this Evaluation.
    :rtype: bool
    """
    return self._redacted

@redacted.setter
def redacted(self, redacted):
    """
    Sets the redacted of this Evaluation.
    Is only true when the user making the request does not have sufficient
    permissions to see evaluation.

    :param redacted: The redacted of this Evaluation.
    :type: bool
    """
    self._redacted = redacted
@property
def is_scoring_index(self):
    """
    Gets the is_scoring_index of this Evaluation.

    :return: The is_scoring_index of this Evaluation.
    :rtype: bool
    """
    return self._is_scoring_index

@is_scoring_index.setter
def is_scoring_index(self, is_scoring_index):
    """
    Sets the is_scoring_index of this Evaluation.

    :param is_scoring_index: The is_scoring_index of this Evaluation.
    :type: bool
    """
    self._is_scoring_index = is_scoring_index
@property
def authorized_actions(self):
    """
    Gets the authorized_actions of this Evaluation.
    List of user authorized actions on evaluation. Possible values: edit,
    editScore, editAgentSignoff, delete, viewAudit

    :return: The authorized_actions of this Evaluation.
    :rtype: list[str]
    """
    return self._authorized_actions

@authorized_actions.setter
def authorized_actions(self, authorized_actions):
    """
    Sets the authorized_actions of this Evaluation.
    List of user authorized actions on evaluation. Possible values: edit,
    editScore, editAgentSignoff, delete, viewAudit

    :param authorized_actions: The authorized_actions of this Evaluation.
    :type: list[str]
    """
    # No membership validation against the possible values is performed here.
    self._authorized_actions = authorized_actions
@property
def self_uri(self):
    """
    Gets the self_uri of this Evaluation.
    The URI for this object.

    :return: The self_uri of this Evaluation.
    :rtype: str
    """
    return self._self_uri

@self_uri.setter
def self_uri(self, self_uri):
    """
    Sets the self_uri of this Evaluation.
    The URI for this object.

    :param self_uri: The self_uri of this Evaluation.
    :type: str
    """
    self._self_uri = self_uri
def to_dict(self):
    """
    Returns the model properties as a dict.

    Nested model objects (anything exposing a ``to_dict`` method) are
    serialized recursively, including inside list elements and dict values.
    """
    result = {}

    # ``iteritems`` and ``swagger_types`` come from the generated module's
    # imports/attributes — presumably six.iteritems and the attribute-name ->
    # declared-type mapping; confirm against the file header.
    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
def to_json(self):
    """
    Returns the model as raw JSON.

    ``sanitize_for_serialization`` is supplied by the SDK package (imported
    at module level) and converts values into JSON-safe types.
    """
    return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
    """
    Returns the pretty-printed string representation of the model.
    """
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`: delegates to the pretty-printed dict form.
    """
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are of a compatible type and have equal
    attribute dictionaries.

    The previous implementation accessed ``other.__dict__`` unconditionally,
    which raised AttributeError when compared against objects without a
    ``__dict__`` (e.g. ints or None); such comparisons now return False.
    """
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal.

    Defined as the negation of ``==`` so it stays consistent with __eq__.
    """
    return not self == other
| 1.734375 | 2 |
Image checker/fileHasher.py | subhrapaladhi/File-Data-Change-Detector | 0 | 12770386 | <gh_stars>0
import hashlib
from pymongo import MongoClient
import uuid
import os
class FileHasher:
    """Hash a file's contents as overlapping 3-byte windows.

    Usage: readFileData() -> hasher() -> saveData().  ``hasher`` produces a
    salted SHA-256 digest for every 3-byte sliding window of the file and a
    random 12-character key identifying the record.
    """

    def __init__(self, filename):
        self.filename = filename

    def readFileData(self):
        """Read the whole file into ``self.fileData`` as bytes.

        Uses a context manager so the handle is closed even if read() raises
        (the original leaked the handle on error).
        """
        with open(self.filename, "rb") as file:
            self.fileData = file.read()
        # print('filedata = \n', self.fileData)

    def hasher(self):
        """Build the sliding-window digests, the raw windows, and the key.

        Windows are fileData[i:i+3] for every start position; each digest is
        sha256(window + salt) with a fresh 6-byte random salt per run.
        """
        self.hashArray = []
        self.unhashedDataArray = []
        start = 0
        end = min(len(self.fileData), 3)
        self.salt = os.urandom(6)
        while start < len(self.fileData):
            # Stop once the window would extend past the end of the data.
            if end == len(self.fileData) + 1:
                break
            substr = self.fileData[start:end]
            self.unhashedDataArray.append(substr)
            self.hashArray.append(hashlib.sha256(substr + self.salt).digest())
            start += 1
            end += 1
        # Last 12 characters of a UUID4 serve as the record id.
        self.key = str(uuid.uuid4())[-12:]
        print("key = {}".format(self.key))

    def saveData(self):
        """Persist salt, digests and raw windows to MongoDB under self.key.

        WARNING: the connection string below contains a credential
        placeholder; it should be loaded from the environment rather than
        hard-coded in source.
        """
        client = MongoClient("mongodb+srv://subhra:<MONGODB KEY>@cluster0.stksg.mongodb.net/change_detector?retryWrites=true&w=majority")
        db = client['change_detector']
        hasher_data = db['hasher_data']
        data = {"_id": self.key, "salt": self.salt, "hashedDataArray": self.hashArray, "unhashedDataArray": self.unhashedDataArray}
        hasher_data.insert_one(data)
SPC_trendanalysis/SPC_rules.py | Ann19941118/SPC_trendanalysis | 0 | 12770387 | #-*- coding:utf-8 -*-
"""
Main class that provides SPC analysis. It detects SPC rules violations.
It can draw charts using matplotlib.
:arguments:
data
user data as flat array/list
"""
from utils import *
import numpy as np
import pandas as pd
# Western-Electric / Nelson style SPC rule names.  The string values are
# user-facing (Chinese) and are also used as dictionary keys in the results,
# so they must not be changed.  English glosses are given alongside.
RULE_1_BEYOND_3SIGMA = '1个点落在A区以外'                       # 1 point beyond zone A (3 sigma)
RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE = '3个点中有2个点连续落在B区以外'  # 2 of 3 points beyond zone B, same side
RULE_4_OF_5_BEYOND_1SIGMA = '5个点中有4个点连续落在中心线同一侧C区以外'  # 4 of 5 points beyond zone C, same side
RULE_6_TRENDING = '6个点连续增长或下降'                        # 6 points steadily increasing or decreasing
RULE_8_ON_TWO_SIDE_NONE_C = '8个连续的点落在中心线两侧且无一点在C区'   # 8 points on both sides, none in zone C
RULE_9_ON_ONE_SIDE = '9个连续的点在中心线的同一侧'               # 9 points on the same side of the center line
RULE_14_up_down = '连续14个点交替上下'                         # 14 points alternating up and down
RULE_15_below_1sigma = '15个连续点在在中心线两侧C区'             # 15 points within zone C (both sides)

# All rules, in key order 1..8 (SPC_rule's rule_keys indexes into this list).
RULES_ALL = [RULE_1_BEYOND_3SIGMA,
             RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE,
             RULE_4_OF_5_BEYOND_1SIGMA,
             RULE_6_TRENDING,
             RULE_8_ON_TWO_SIDE_NONE_C,
             RULE_9_ON_ONE_SIDE,
             RULE_14_up_down,
             RULE_15_below_1sigma]

# Maps each rule to (test function from utils, sliding-window length).  The
# test functions are brought in by ``from utils import *`` at the top of the
# module.
RULES_FUNCS = {
    RULE_1_BEYOND_3SIGMA: (test_1_beyond_3sigma, 1),
    RULE_2_OF_3_BEYOND_2SIGMA_ONE_SIDE: (test_2_OF_3_BEYOND_2SIGMA_ONE_SIDE, 3),
    RULE_4_OF_5_BEYOND_1SIGMA: (test_4_OF_5_BEYOND_1SIGMA_ONE_SIDE, 5),
    RULE_6_TRENDING: (test_6_thrund, 6),
    RULE_8_ON_TWO_SIDE_NONE_C: (test_8_BEYOND_1SIGMA, 8),
    RULE_9_ON_ONE_SIDE: (test_violating_runs, 9),
    RULE_14_up_down: (test_14_up_down, 14),
    RULE_15_below_1sigma: (test_15_below_sigma, 15)}
class SPC_rule(object):
    """
    Main class that provides SPC rule analysis: it detects rule violations
    in the given data series against a center line and sigma.

    :arguments:
        data
            user data as flat array/list
    """
    def __init__(self, data, center=None, sigma=None, rule_keys=None):
        '''
        :param data: list / pd.DataFrame / np.ndarray
        :param center: center line (mean); computed from the data if omitted
        :param sigma: sigma; sample std-dev of the data if omitted
        :param rule_keys: list of 1-based rule indices, e.g. [1, 2]:
            1: 1 point beyond zone A (3 sigma)
            2: 2 of 3 consecutive points beyond zone B, same side
            3: 4 of 5 consecutive points beyond zone C, same side
            4: 6 consecutive points increasing or decreasing
            5: 8 consecutive points on both sides, none in zone C
            6: 9 consecutive points on the same side of the center line
            7: 14 consecutive points alternating up and down
            8: 15 consecutive points within zone C on both sides
        '''
        # Normalize the input: DataFrame/ndarray are flattened to shape
        # (1, -1).  NOTE(review): after reshaping, list(data) yields a list
        # containing ONE row array, so len(data) below is 1 — confirm this is
        # the intended windowing behavior for array inputs.
        if isinstance(data, pd.DataFrame):
            data = data.values
            data = data.reshape((1, -1))
            data = list(data)
        if isinstance(data, np.ndarray):
            data = data.reshape((1, -1))
        elif not isinstance(data, list):
            raise TypeError('please input data of list or pd.Dataframe or np.ndarray')
        self.orig_data = data
        # "if not center" also treats an explicit 0 as "unset" — be aware
        # when the center line is legitimately zero.
        if not center:
            center = np.mean(data)
        self.center = center
        if not sigma:
            sigma = np.std(data, ddof=1)
        self.sigma = sigma
        # Default to all rules; otherwise map the 1-based keys onto RULES_ALL.
        if not rule_keys:
            rule_new = RULES_ALL
        else:
            rule_new = []
            for key in rule_keys:
                rule_new.append(RULES_ALL[key-1])
        self.rules = rule_new
        self.length = len(data)
        # Violations are computed eagerly at construction time.
        self.violating_points = self._find_violating_points()

    def __repr__(self):
        # Side effect: prints the violating points before returning the repr.
        print(self.get_violating_points())
        return "<spc: (%d)>" % self.__hash__()

    def _find_violating_points(self):
        """Slide each rule's window over the data; collect the indices of
        every point inside a violating window, keyed by rule name."""
        points_all = {}
        for r in self.rules:
            func, points_num = RULES_FUNCS[r]
            list1 = []
            for i in range(len(self.orig_data)):
                # Skip until a full window of points_num samples is available.
                if i < points_num-1:
                    continue
                if func(self.orig_data[i - points_num+1:i+1], self.center, self.sigma):
                    # Record every index in the violating window.
                    list1.extend(range(i - points_num+1, i+1))
            points_all.setdefault(r, []).extend(list1)
        return points_all

    def get_violating_points(self):
        """Return points that violate rules, deduplicated and sorted,
        keyed by rule name (rules with no violations map to [])."""
        points_all = self.violating_points
        points_dict = {}
        for key, values in points_all.items():
            points_dict[key] = sorted(set(values))
        return points_dict
| 2.625 | 3 |
addons/pythonscript_repl/plugin.py | matheus2740/godot-python | 0 | 12770388 | <filename>addons/pythonscript_repl/plugin.py
from godot import exposed, export, EditorPlugin
from godot import *
# Directory of this script as a Godot resource path ("res://..."), obtained
# by localizing __file__ and stripping the trailing file name.
BASE_RES = str(ProjectSettings.localize_path(__file__)).rsplit("/", 1)[0]
# The REPL panel scene, preloaded once at module import and instanced on
# plugin activation.
PYTHON_REPL_RES = ResourceLoader.load(f"{BASE_RES}/python_repl.tscn")
@exposed(tool=True)
class plugin(EditorPlugin):
    """Editor plugin that docks a Python REPL in the Godot bottom panel."""

    def _enter_tree(self):
        # Initialization of the plugin goes here: instance the REPL scene
        # and dock it in the editor's bottom panel.
        self.repl = PYTHON_REPL_RES.instance()
        self.repl_button = self.add_control_to_bottom_panel(self.repl, "Python REPL")

    def _exit_tree(self):
        # Clean-up of the plugin goes here: undock the panel and schedule
        # the scene instance for deletion, dropping our reference.
        self.remove_control_from_bottom_panel(self.repl)
        self.repl.queue_free()
        self.repl = None

    def _ready(self):
        # Required override; intentionally empty — no per-node setup needed.
        pass
| 2.1875 | 2 |
Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/6_GPACalculatorImproved/gpaCalculatorLetterGrade.py | jeffvswanson/CodingPractice | 0 | 12770389 | <filename>Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/6_GPACalculatorImproved/gpaCalculatorLetterGrade.py
# gpaCalculatorLetterGrade.py
# A program to calculate a student's GPA using letter grades as input.
"""Extend Chapter 10 Programming Exercise 5 by implementing an addLetterGrade
method. This is similar to addGrade except that it accepts a letter grade as
a string (instead of gradePoint). Use the updated class to improve the GPA
calculator by allowing the entry of letter grades."""
from studentRecord import Student
def Intro():
    """Print the program banner and return the student name typed by the user.

    NOTE(review): main() prompts for the name itself and never calls this
    helper, so Intro() currently appears to be dead code — confirm before
    removing.
    """
    print("This program allows you to record a student's grade for a course \
and the credits the course was worth to get the student's GPA.")
    studentName = input("Enter name of the student: ")
    return studentName
def inputGrades(studentName):
    """Interactively collect letter grades and credit hours for a student.

    Prompts repeatedly for a letter grade (``q`` quits) and, for each grade,
    for a positive number of credit hours, recording each pair on a new
    Student via addLetterGrade, and returns the populated Student.

    Fixes the original control flow, which called addLetterGrade at the TOP
    of the loop: that recorded a spurious initial ("F", 0) entry and only
    committed each entered grade one iteration late.
    """
    pupil = Student(studentName, 0, 0, 0, 0, "")
    while True:
        try:
            grade = input("Please enter the letter grade (q to quit): ")
        except (SyntaxError, NameError, TypeError, ValueError):
            print("You have to enter a letter grade.")
            continue
        if grade == "q":
            break
        if any(char.isdigit() for char in grade):
            print("You have to enter a letter grade.")
            continue
        # Keep asking until a positive credit-hour count is supplied.
        while True:
            try:
                creditHours = float(input(
                    "Please enter the credits the course is worth: "))
            except (SyntaxError, NameError, TypeError, ValueError):
                print("You have to enter a positive number greater than zero.")
                continue
            if creditHours <= 0:
                print("You have to enter a positive number greater than zero.")
                continue
            break
        # Record the grade only after both inputs were validated.
        pupil.addLetterGrade(grade, creditHours)
    return pupil
def main():
    """Drive the GPA calculator: gather one student's grades, print the GPA."""
    # Create a new student object with 0 credits and 0 quality points.
    studentName = input("Enter name of the student: ")
    # Prompt the user to input course information (letter grade and credits).
    pupil = inputGrades(studentName)
    # Print the final GPA achieved.
    print("{0}'s GPA is {1}.".format(pupil.getName(), pupil.gpa()))

main()
| 4.5625 | 5 |
tests/integration/test_encrypted_disk/test.py | thealexjo/ClickHouse | 1 | 12770390 | import pytest
from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
FIRST_PART_NAME = "all_1_1_0"
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance("node",
main_configs=["configs/storage.xml"],
tmpfs=["/disk:size=100M"],
with_minio=True)
cluster.start()
yield cluster
finally:
cluster.shutdown()
@pytest.mark.parametrize("policy", ["encrypted_policy", "encrypted_policy_key192b", "local_policy", "s3_policy"])
def test_encrypted_disk(cluster, policy):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')")
node.query("OPTIMIZE TABLE encrypted_test FINAL")
assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
@pytest.mark.parametrize("policy, destination_disks", [("local_policy", ["disk_local_encrypted", "disk_local_encrypted2", "disk_local_encrypted_key192b", "disk_local"]), ("s3_policy", ["disk_s3_encrypted", "disk_s3"])])
def test_part_move(cluster, policy, destination_disks):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
for destination_disk in destination_disks:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk))
assert node.query(select_query) == "(0,'data'),(1,'data')"
with pytest.raises(QueryRuntimeException) as exc:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, destination_disk))
assert("Part '{}' is already on disk '{}'".format(FIRST_PART_NAME, destination_disk) in str(exc.value))
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
@pytest.mark.parametrize("policy,encrypted_disk", [("local_policy", "disk_local_encrypted"), ("s3_policy", "disk_s3_encrypted")])
def test_optimize_table(cluster, policy, encrypted_disk):
node = cluster.instances["node"]
node.query(
"""
CREATE TABLE encrypted_test (
id Int64,
data String
) ENGINE=MergeTree()
ORDER BY id
SETTINGS storage_policy='{}'
""".format(policy)
)
node.query("INSERT INTO encrypted_test VALUES (0,'data'),(1,'data')")
select_query = "SELECT * FROM encrypted_test ORDER BY id FORMAT Values"
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk))
assert node.query(select_query) == "(0,'data'),(1,'data')"
node.query("INSERT INTO encrypted_test VALUES (2,'data'),(3,'data')")
node.query("OPTIMIZE TABLE encrypted_test FINAL")
with pytest.raises(QueryRuntimeException) as exc:
node.query("ALTER TABLE encrypted_test MOVE PART '{}' TO DISK '{}'".format(FIRST_PART_NAME, encrypted_disk))
assert("Part {} is not exists or not active".format(FIRST_PART_NAME) in str(exc.value))
assert node.query(select_query) == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
| 2.125 | 2 |
lfs/catalog/migrations/0007_auto_20210405_1816.py | michael-hahn/django-lfs | 0 | 12770391 | # Generated by Django 3.1.2 on 2021-04-05 18:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the provenance bookkeeping fields (synthesized, taints, trusted)
    to every catalog model.

    Behaviorally identical to the auto-generated migration: the same 42
    AddField operations, for the same models, in the same order — the
    literal list is simply built by a loop instead of being written out.
    """

    dependencies = [
        ('catalog', '0006_auto_20210405_1803'),
    ]

    operations = []
    for _model_name in (
        'deliverytime',
        'file',
        'filterstep',
        'groupspropertiesrelation',
        'image',
        'product',
        'productaccessories',
        'productattachment',
        'productpropertyvalue',
        'productspropertiesrelation',
        'property',
        'propertygroup',
        'propertyoption',
        'staticblock',
    ):
        operations.append(migrations.AddField(
            model_name=_model_name,
            name='synthesized',
            field=models.BooleanField(default=False),
        ))
        operations.append(migrations.AddField(
            model_name=_model_name,
            name='taints',
            field=models.BigIntegerField(default=0),
        ))
        operations.append(migrations.AddField(
            model_name=_model_name,
            name='trusted',
            field=models.BooleanField(default=True),
        ))
    # Keep the loop variable off the class namespace.
    del _model_name
| 1.679688 | 2 |
users/management/commands/load_report_evidence.py | maverick-labs-pune/wikirumours | 0 | 12770392 | import csv
import os
import datetime
from typing import re
from django.core.files import File
from django.core.management import BaseCommand
from django.db import transaction
from django.utils.encoding import force_text
from django.utils.functional import keep_lazy_text
from report.models import Report, WatchlistedReport, EvidenceFile
from users.models import User
class Command(BaseCommand):
    """Management command: attach on-disk evidence files to their reports."""

    help = "load report evidence"

    def handle(self, *args, **kwargs):
        # Delegate to the module-level function so the import logic can be
        # reused outside the management-command machinery.
        import_report_evidence()
def import_report_evidence():
    """Walk the evidence media directory and attach each report's files.

    Each sub-folder is named after a Report.public_id; folders with no
    matching report (and plain files at the top level) are skipped.  All
    work happens inside one database transaction.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with transaction.atomic():
        report_evidence_folder = dir_path + '/../data/media/'
        # report_evidence_folder = dir_path + '/../../../wikirumours/media/'
        directories = os.listdir(report_evidence_folder)
        for folder in directories:
            report_folder_path = os.path.join(report_evidence_folder, folder)
            if os.path.isfile(report_folder_path):
                continue
            # The folder name is the report's public id.
            report_public_id = folder
            report = Report.objects.filter(public_id=report_public_id).first()
            if not report:
                continue
            process_report_folder(report, report_folder_path)
def process_report_folder(report, report_folder_path):
    """Save every regular file in *report_folder_path* as an EvidenceFile
    attached to *report* (uploader unknown, hence None)."""
    files = os.listdir(report_folder_path)
    for file in files:
        existing_file_path = os.path.join(report_folder_path, file)
        if os.path.isfile(existing_file_path):
            with open(existing_file_path, 'rb') as f:
                evidence_file = EvidenceFile(
                    report=report,
                    uploader=None,
                    file=File(f, name=file)
                )
                evidence_file.save()
    # NOTE(review): this bare return is redundant at function level; if it
    # was ever indented inside the loop it would abort after the first file
    # — confirm against version history.
    return
@keep_lazy_text
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a
    clean filename. Specifically, leading and trailing spaces are removed;
    other spaces are converted to underscores; and anything that is not a
    unicode alphanumeric, dash, underscore, or dot, is removed.
    """
    # The module header does `from typing import re`, which binds the
    # deprecated typing.re namespace (it has no sub()), so this function
    # raised AttributeError when called.  Import the real regex module
    # locally so the fix is self-contained.
    import re
    s = force_text(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', s)
| 2.25 | 2 |
pyytdata/util/__init__.py | princekrroshan01/youtube-api-wrapper | 17 | 12770393 | <gh_stars>10-100
from .chnlinfo import ChnlInfo
from .info import Info
from .vidinfo import VidInfo
| 1.101563 | 1 |
pyramid_caching_api/__init__.py | jvanasco/pyramid_caching_api | 0 | 12770394 | <gh_stars>0
from . import api
from . import utils
| 1.140625 | 1 |
makai_kingdom/wish.py | misterfifths/nis_mods | 1 | 12770395 | <filename>makai_kingdom/wish.py
import ctypes as C
from typing import Final
from platform_config import PSP
from astruct import typed_struct
from astruct.type_hints import *
from utils import CountedTable
@typed_struct
class Wish(C.Structure):
    """A single wish record as laid out on disk in WISH.DAT."""

    _pack_ = 1

    # Mana cost to grant the wish.
    mana_cost: CUInt32
    # Level requirement — presumably the minimum level to make the wish;
    # confirm against game behavior.
    level_req: CUInt16
    id: CUInt16
    # Single byte observed to be zero; presumably padding/reserved.
    _zero: CUInt8

    # Fixed string-field lengths differ between the PSP build and the other
    # platform build selected by platform_config.PSP.
    _NAME_LEN: Final[int] = 63 if PSP else 81
    name: CStr[_NAME_LEN]

    _DESCRIPTION_LEN: Final[int] = 64 if PSP else 106
    description: CStr[_DESCRIPTION_LEN]
class WishTable(CountedTable[Wish]):
    """Counted table of Wish records, conventionally stored in WISH.DAT."""

    STANDARD_FILENAME: Final = 'WISH.DAT'

    def __init__(self, buffer: WriteableBuffer, offset: int = 0) -> None:
        """Parse the Wish table from *buffer* starting at *offset*."""
        super().__init__(Wish, buffer, offset)
| 2.234375 | 2 |
src/data_process/convert_face_to_voc.py | MatthewK3023/FoodCalorieDetector | 0 | 12770396 | import os,h5py,cv2,sys,shutil
import numpy as np
from xml.dom.minidom import Document
rootdir="../"
convet2yoloformat=True
convert2vocformat=True
resized_dim=(48, 48)
#最小取20大小的脸,并且补齐
minsize2select=20
usepadding=True
datasetprefix="/home/yanhe/data/widerface"#
def gen_hdf5():
    """Crop every annotated face from the WIDER train split, resize each to
    resized_dim, and dump faces + labels (all 1) into train.h5.

    Python 2 code (print statements); reads the bbx_gt annotation format:
    image path line, face count line, then one "x y w h ..." line per face.
    """
    imgdir=rootdir+"/WIDER_train/images"
    gtfilepath=rootdir+"/wider_face_split/wider_face_train_bbx_gt.txt"
    index =0
    with open(gtfilepath,'r') as gtfile:
        faces=[]
        labels=[]
        while(True ):#and len(faces)<10
            imgpath=gtfile.readline()[:-1]
            if(imgpath==""):
                break;
            print index,imgpath
            img=cv2.imread(imgdir+"/"+imgpath)
            numbbox=int(gtfile.readline())
            bbox=[]
            for i in range(numbbox):
                line=gtfile.readline()
                line=line.split()
                line=line[0:4]
                # Skip degenerate boxes (non-positive width or height).
                if(int(line[3])<=0 or int(line[2])<=0):
                    continue
                bbox=(int(line[0]),int(line[1]),int(line[2]),int(line[3]))
                # Crop rows y:y+h, cols x:x+w, then resize to the fixed dim.
                face=img[int(line[1]):int(line[1])+int(line[3]),int(line[0]):int(line[0])+int(line[2])]
                face=cv2.resize(face, resized_dim)
                faces.append(face)
                labels.append(1)
                cv2.rectangle(img,(int(line[0]),int(line[1])),(int(line[0])+int(line[2]),int(line[1])+int(line[3])),(255,0,0))
            #cv2.imshow("img",img)
            #cv2.waitKey(1)
            index=index+1
        faces=np.asarray(faces)
        labels=np.asarray(labels)
        f=h5py.File('train.h5','w')
        f['data']=faces.astype(np.float32)
        f['label']=labels.astype(np.float32)
        f.close()
def viewginhdf5():
    """Debug helper: display every face stored in train.h5, one per frame."""
    f = h5py.File('train.h5','r')
    f.keys()
    faces=f['data'][:]
    for face in faces:
        # Data was stored as float32; convert back for display.
        face=face.astype(np.uint8)
        cv2.imshow("img",face)
        cv2.waitKey(1)
    f.close()
def convertimgset(img_set="train"):
imgdir=rootdir+"/WIDER_"+img_set+"/images"
gtfilepath=rootdir+"/wider_face_split/wider_face_"+img_set+"_bbx_gt.txt"
imagesdir=rootdir+"/images"
vocannotationdir=rootdir+"/Annotations"
labelsdir=rootdir+"/labels"
if not os.path.exists(imagesdir):
os.mkdir(imagesdir)
if convet2yoloformat:
if not os.path.exists(labelsdir):
os.mkdir(labelsdir)
if convert2vocformat:
if not os.path.exists(vocannotationdir):
os.mkdir(vocannotationdir)
index=0
with open(gtfilepath,'r') as gtfile:
while(True ):#and len(faces)<10
filename=gtfile.readline()[:-1]
if(filename==""):
break;
sys.stdout.write("\r"+str(index)+":"+filename+"\t\t\t")
sys.stdout.flush()
imgpath=imgdir+"/"+filename
img=cv2.imread(imgpath)
if not img.data:
break;
imgheight=img.shape[0]
imgwidth=img.shape[1]
maxl=max(imgheight,imgwidth)
paddingleft=(maxl-imgwidth)>>1
paddingright=(maxl-imgwidth)>>1
paddingbottom=(maxl-imgheight)>>1
paddingtop=(maxl-imgheight)>>1
saveimg=cv2.copyMakeBorder(img,paddingtop,paddingbottom,paddingleft,paddingright,cv2.BORDER_CONSTANT,value=0)
showimg=saveimg.copy()
numbbox=int(gtfile.readline())
bboxes=[]
for i in range(numbbox):
line=gtfile.readline()
line=line.split()
line=line[0:4]
if(int(line[3])<=0 or int(line[2])<=0):
continue
x=int(line[0])+paddingleft
y=int(line[1])+paddingtop
width=int(line[2])
height=int(line[3])
bbox=(x,y,width,height)
x2=x+width
y2=y+height
#face=img[x:x2,y:y2]
if width>=minsize2select and height>=minsize2select:
bboxes.append(bbox)
cv2.rectangle(showimg,(x,y),(x2,y2),(0,255,0))
#maxl=max(width,height)
#x3=(int)(x+(width-maxl)*0.5)
#y3=(int)(y+(height-maxl)*0.5)
#x4=(int)(x3+maxl)
#y4=(int)(y3+maxl)
#cv2.rectangle(img,(x3,y3),(x4,y4),(255,0,0))
else:
cv2.rectangle(showimg,(x,y),(x2,y2),(0,0,255))
filename=filename.replace("/","_")
if len(bboxes)==0:
print "warrning: no face"
continue
cv2.imwrite(imagesdir+"/"+filename,saveimg)
if convet2yoloformat:
height=saveimg.shape[0]
width=saveimg.shape[1]
txtpath=labelsdir+"/"+filename
txtpath=txtpath[:-3]+"txt"
ftxt=open(txtpath,'w')
for i in range(len(bboxes)):
bbox=bboxes[i]
xcenter=(bbox[0]+bbox[2]*0.5)/width
ycenter=(bbox[1]+bbox[3]*0.5)/height
wr=bbox[2]*1.0/width
hr=bbox[3]*1.0/height
txtline="0 "+str(xcenter)+" "+str(ycenter)+" "+str(wr)+" "+str(hr)+"\n"
ftxt.write(txtline)
ftxt.close()
if convert2vocformat:
xmlpath=vocannotationdir+"/"+filename
xmlpath=xmlpath[:-3]+"xml"
doc = Document()
annotation = doc.createElement('annotation')
doc.appendChild(annotation)
folder = doc.createElement('folder')
folder_name = doc.createTextNode('widerface')
folder.appendChild(folder_name)
annotation.appendChild(folder)
filenamenode = doc.createElement('filename')
filename_name = doc.createTextNode(filename)
filenamenode.appendChild(filename_name)
annotation.appendChild(filenamenode)
source = doc.createElement('source')
annotation.appendChild(source)
database = doc.createElement('database')
database.appendChild(doc.createTextNode('wider face Database'))
source.appendChild(database)
annotation_s = doc.createElement('annotation')
annotation_s.appendChild(doc.createTextNode('PASCAL VOC2007'))
source.appendChild(annotation_s)
image = doc.createElement('image')
image.appendChild(doc.createTextNode('flickr'))
source.appendChild(image)
flickrid = doc.createElement('flickrid')
flickrid.appendChild(doc.createTextNode('-1'))
source.appendChild(flickrid)
owner = doc.createElement('owner')
annotation.appendChild(owner)
flickrid_o = doc.createElement('flickrid')
flickrid_o.appendChild(doc.createTextNode('yanyu'))
owner.appendChild(flickrid_o)
name_o = doc.createElement('name')
name_o.appendChild(doc.createTextNode('yanyu'))
owner.appendChild(name_o)
size = doc.createElement('size')
annotation.appendChild(size)
width = doc.createElement('width')
width.appendChild(doc.createTextNode(str(saveimg.shape[1])))
height = doc.createElement('height')
height.appendChild(doc.createTextNode(str(saveimg.shape[0])))
depth = doc.createElement('depth')
depth.appendChild(doc.createTextNode(str(saveimg.shape[2])))
size.appendChild(width)
size.appendChild(height)
size.appendChild(depth)
segmented = doc.createElement('segmented')
segmented.appendChild(doc.createTextNode('0'))
annotation.appendChild(segmented)
for i in range(len(bboxes)):
bbox=bboxes[i]
objects = doc.createElement('object')
annotation.appendChild(objects)
object_name = doc.createElement('name')
object_name.appendChild(doc.createTextNode('face'))
objects.appendChild(object_name)
pose = doc.createElement('pose')
pose.appendChild(doc.createTextNode('Unspecified'))
objects.appendChild(pose)
truncated = doc.createElement('truncated')
truncated.appendChild(doc.createTextNode('1'))
objects.appendChild(truncated)
difficult = doc.createElement('difficult')
difficult.appendChild(doc.createTextNode('0'))
objects.appendChild(difficult)
bndbox = doc.createElement('bndbox')
objects.appendChild(bndbox)
xmin = doc.createElement('xmin')
xmin.appendChild(doc.createTextNode(str(bbox[0])))
bndbox.appendChild(xmin)
ymin = doc.createElement('ymin')
ymin.appendChild(doc.createTextNode(str(bbox[1])))
bndbox.appendChild(ymin)
xmax = doc.createElement('xmax')
xmax.appendChild(doc.createTextNode(str(bbox[0]+bbox[2])))
bndbox.appendChild(xmax)
ymax = doc.createElement('ymax')
ymax.appendChild(doc.createTextNode(str(bbox[1]+bbox[3])))
bndbox.appendChild(ymax)
f=open(xmlpath,"w")
f.write(doc.toprettyxml(indent = ''))
f.close()
#cv2.imshow("img",showimg)
#cv2.waitKey()
index=index+1
def generatetxt(img_set="train"):
    """Write the list of converted image paths for *img_set* to <rootdir>/<img_set>.txt.

    Reads the WIDER-face ground-truth file and emits one line per image,
    pointing at the flattened (slashes replaced by underscores) file name
    under <datasetprefix>/images. Bounding-box lines are skipped over.
    Relies on the module-level globals ``rootdir`` and ``datasetprefix``.
    """
    gtfilepath = rootdir + "/wider_face_split/wider_face_" + img_set + "_bbx_gt.txt"
    # 'with' guarantees both files are closed even if parsing fails mid-way
    # (the original left the output handle open on error).
    with open(rootdir + "/" + img_set + ".txt", "w") as f, \
            open(gtfilepath, 'r') as gtfile:
        while True:
            filename = gtfile.readline()[:-1]
            if filename == "":
                break  # EOF: ground-truth entries are exhausted
            filename = filename.replace("/", "_")
            f.write(datasetprefix + "/images/" + filename + '\n')
            # Skip the bounding-box block that follows each image entry.
            numbbox = int(gtfile.readline())
            for _ in range(numbbox):
                gtfile.readline()
def generatevocsets(img_set="train"):
    """Write the VOC-style ImageSets/Main/<img_set>.txt listing (image ids without extension).

    Reads the WIDER-face ground-truth file and emits one id per image,
    flattened the same way as generatetxt(). Relies on the module-level
    global ``rootdir``.
    """
    main_dir = rootdir + "/ImageSets/Main"
    # makedirs creates intermediate directories in one call.
    if not os.path.exists(main_dir):
        os.makedirs(main_dir)
    gtfilepath = rootdir + "/wider_face_split/wider_face_" + img_set + "_bbx_gt.txt"
    # 'with' guarantees both files are closed even on a parse error.
    with open(main_dir + "/" + img_set + ".txt", 'w') as f, \
            open(gtfilepath, 'r') as gtfile:
        while True:
            filename = gtfile.readline()[:-1]
            if filename == "":
                break  # EOF
            filename = filename.replace("/", "_")
            # Drop the 4-char extension (".jpg") to get the VOC image id.
            f.write(filename[:-4] + '\n')
            # Skip the bounding-box block that follows each image entry.
            numbbox = int(gtfile.readline())
            for _ in range(numbbox):
                gtfile.readline()
def convertdataset():
    """Run the full WIDER->VOC conversion pipeline for both dataset splits."""
    for subset in ("train", "val"):
        convertimgset(subset)
        generatetxt(subset)
        generatevocsets(subset)
if __name__ == "__main__":
    convertdataset()
    # Rename the generated split files to the names VOC tooling expects
    # (train -> trainval, val -> test), both at the root and in ImageSets/Main.
    renames = [
        (rootdir + "/" + "train.txt", rootdir + "/" + "trainval.txt"),
        (rootdir + "/" + "val.txt", rootdir + "/" + "test.txt"),
        (rootdir + "/ImageSets/Main/" + "train.txt", rootdir + "/ImageSets/Main/" + "trainval.txt"),
        (rootdir + "/ImageSets/Main/" + "val.txt", rootdir + "/ImageSets/Main/" + "test.txt"),
    ]
    for src, dst in renames:
        shutil.move(src, dst)
| 2.359375 | 2 |
WORC/addexceptions.py | MStarmans91/WORC | 47 | 12770397 | #!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all WORC-related Exceptions
"""
# pylint: disable=too-many-ancestors
# Because fo inheriting from FastrError and a common exception causes this
# exception, even though this behaviour is desired
class WORCError(Exception):
    """Root of the WORC exception hierarchy.

    Catching this class is sufficient to handle every WORC-specific error.
    """


class WORCNotImplementedError(WORCError, NotImplementedError):
    """Deliberately unimplemented functionality (e.g. meant to be overridden in a subclass)."""


class WORCIOError(WORCError, IOError):
    """IOError raised from within WORC."""


class WORCTypeError(WORCError, TypeError):
    """TypeError raised from within WORC."""


class WORCValueError(WORCError, ValueError):
    """ValueError raised from within WORC."""


class WORCKeyError(WORCError, KeyError):
    """KeyError raised from within WORC."""


class WORCAssertionError(WORCError, AssertionError):
    """AssertionError raised from within WORC."""


class WORCIndexError(WORCError, IndexError):
    """IndexError raised from within WORC."""
| 2.34375 | 2 |
Python/PycharmProjects/aula 12/desafio 045.py | MarcelaSamili/Desafios-do-curso-de-Python | 0 | 12770398 | #Crie um programa que faça o computador jogar Jokenpô com você.
import random
from time import sleep

# Jokenpô (rock-paper-scissors) against the computer.
print('VAMOS <NAME>!')
print('''Coloque:
[1]PEDRA
[2]PAPEL
[3]TESOURA''')
op = input('Qual opção voce escolhe?')
lista = ['1', '2', '3']
pc = random.choice(lista)
# Dramatic countdown before revealing the result.
sleep(1)
print('\033[36mJO!')
sleep(1)
print('\033[36mKEM!')
sleep(1)
print('\033[36mPO!!\033[m')
if op == pc:  # fixed redundant `op == pc or pc == op`
    print('\033[34mEMPATE!\033[m, PC TAMBÉM ESCOLHEU {} '.format(pc))
elif op == '1' and pc == '2':
    print('\033[33mPC GANHOU!\033[m, PC ESCOLHEU {} '.format(pc))
elif op == '2' and pc == '1':
    print('\033[32mJOGADOR GANHOU!\033[m, PC ESCOLHEU {}'.format(pc))
elif op == '2' and pc == '3':
    print('\033[33mPC GANHOU!\033[m, PC ESCOLHEU {}'.format(pc))
elif op == '3' and pc == '2':
    print('\033[32mJOGADOR GANHOU!\033[m, PC ESCOLHEU {}'.format(pc))
elif op == '3' and pc == '1':
    print('\033[33mPC GANHOU!\033[m, PC ESCOLHEU {}'.format(pc))
elif op == '1' and pc == '3':
    # fixed "JOGANDOR" typo in the original message
    print('\033[32mJOGADOR GANHOU!\033[m, PC ESCOLHEU {}'.format(pc))
else:
    # The original silently printed nothing for inputs other than 1/2/3.
    print('\033[31mOPÇÃO INVÁLIDA!\033[m')
| 3.984375 | 4 |
MNIST_framework/data_gen/main.py | vanessadamario/data_efficiency | 0 | 12770399 | <reponame>vanessadamario/data_efficiency
# output_dimension = [28, 36, 40, 56, 80, 120, 160]
# scenario = [1, 2, 4]
import os
import numpy as np
import argparse
from os.path import join
from tensorflow.keras.datasets import mnist
from DATASET_GENERATOR import DatasetGenerator

root_path = '/om/user/vanessad/foveation'

parser = argparse.ArgumentParser()
parser.add_argument('--scenario', type=int, required=True)
parser.add_argument('--output_dimension', type=int, required=True)
# BUG FIX: dataset_name is a string (it is compared to dataset names below);
# the original declared it type=int, which made the flag unusable.
parser.add_argument('--dataset_name', type=str, required=False)
FLAGS = parser.parse_args()
print(FLAGS.output_dimension)
print(FLAGS.scenario)

if FLAGS.dataset_name is None:
    FLAGS.dataset_name = 'standardized_MNIST_dataset'

n_splits = 100  # number of chunks each split is saved in

# Build the output folder hierarchy: <dataset>/exp_<scenario>/dim_<output_dim>/{train,test}.
# exist_ok=True everywhere so re-running the script does not crash.
folder_dataset = join(root_path, FLAGS.dataset_name)
os.makedirs(folder_dataset, exist_ok=True)
folder_scenario = join(folder_dataset, 'exp_%i' % FLAGS.scenario)
os.makedirs(folder_scenario, exist_ok=True)
folder_dimension = join(folder_scenario, 'dim_%i' % FLAGS.output_dimension)
os.makedirs(folder_dimension, exist_ok=True)
folder_train = join(folder_dimension, 'train')
folder_test = join(folder_dimension, 'test')
os.makedirs(folder_train, exist_ok=True)
os.makedirs(folder_test, exist_ok=True)

if FLAGS.dataset_name == 'standardized_MNIST_dataset':
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0  # normalization
    list_of_tr_splits = np.split(x_train, n_splits, axis=0)
    list_of_ts_splits = np.split(x_test, n_splits, axis=0)
elif FLAGS.dataset_name == 'foveated_MNIST_dataset':
    path_to_std_mnist = join(root_path, 'standardized_MNIST_dataset',
                             'exp_%i' % FLAGS.scenario,
                             'dim_%i' % FLAGS.output_dimension)
    # BUG FIX: the original called np.load on a generator expression (the
    # closing parenthesis was misplaced) and wrapped it in a one-element list,
    # and the train filename had a stray leading space (' split_%i.npy').
    list_of_tr_splits = [np.load(join(path_to_std_mnist, 'train', 'split_%i.npy' % j))
                         for j in range(n_splits)]
    list_of_ts_splits = [np.load(join(path_to_std_mnist, 'test', 'split_%i.npy' % j))
                         for j in range(n_splits)]
else:
    raise ValueError("The required dataset still does not exists")

# Transform each chunk and save it under train/ and test/.
for kk, x_train_split in enumerate(list_of_tr_splits):
    DG = DatasetGenerator(x_train_split,
                          output_dim=FLAGS.output_dimension,
                          scenario=FLAGS.scenario)
    DG.run()
    np.save(join(folder_train, 'split_%i.npy' % kk), DG.output)

for kk, x_test_split in enumerate(list_of_ts_splits):
    DG = DatasetGenerator(x_test_split,
                          output_dim=FLAGS.output_dimension,
                          scenario=FLAGS.scenario)
    DG.run()
    np.save(join(folder_test, 'split_%i.npy' % kk), DG.output)
setup.py | thep0y/types-jieba | 0 | 12770400 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: thepoy
# @Email: <EMAIL>
# @File Name: setup.py
# @Created: 2022-03-31 09:43:35
# @Modified: 2022-03-31 18:45:33
import codecs
from setuptools import setup, find_packages
# Read the long description up front; the README handle is closed before
# setup() runs.
with codecs.open("README.md", "r", "utf-8") as fd:
    long_description = fd.read()

setup(
    name="types-jieba",
    version="0.0.2",
    description="""
    jieba 类型库
    """,
    long_description_content_type="text/markdown",
    long_description=long_description,
    author="thepoy",
    author_email="<EMAIL>",
    url="https://github.com/thep0y/types-jieba",
    license="MIT",
    keywords="jieba 中文 分词 类型",
    packages=["jieba-stubs"],
    package_data={
        "jieba-stubs": [
            "__init__.pyi",
            "_compat.pyi",
            "analyse/__init__.pyi",
            "analyse/textrank.pyi",
            "analyse/tfidf.pyi",
            "posseg/__init__.pyi",
            "posseg/char_state_tab.pyi",
            "posseg/prob_emit.pyi",
            "posseg/prob_start.pyi",
            "posseg/prob_trans.pyi",
            "posseg/viterbi.pyi",
        ]
    },
)
| 1.3125 | 1 |
def check(cmd, mf):
    """py2app recipe hook: when the module graph contains 'uuid', declare the
    Windows-only modules it conditionally imports as expected-missing."""
    node = mf.findNode('uuid')
    if not node:
        return None
    return {'expected_missing_imports': {'netbios', 'win32wnet'}}
10. Old Project/Projects/Sampling/wiseml/data_preparation/labeling.py | Collapse24/Work-projects | 0 | 12770402 | import pandas as pd
import numpy as np
def add_fthm_label(data: pd.DataFrame, target_column: str, thresholds: list, window_size: int):
    """
    Fixed Time-Horizon model labeling.

    Labels (column ``target``): 0 = keep, 1 = up, 2 = down.
    Note: ``data`` is modified in place (rows with NaN future prices are dropped).

    :param data: source pandas dataframe
    :param target_column: name of price column at which data will be labeled
    :param thresholds: 'keep' band; thresholds[0] upper limit, thresholds[1] lower limit
    :param window_size: how many observations in the future we predict
    :return: pandas dataframe with target label `target`
    """
    # Future price `window_size` rows ahead; trailing rows become NaN and are dropped.
    data['price_target'] = data[target_column].shift(-window_size)
    data.dropna(inplace=True)
    price = data[target_column]
    data['target'] = 0
    moved_up = price + (price * thresholds[0]) < data['price_target']
    moved_down = price - (price * thresholds[1]) > data['price_target']
    data.loc[moved_up, 'target'] = 1
    data.loc[moved_down, 'target'] = 2
    data.drop(columns=['price_target'], inplace=True)
    return data
# Нужно объеденить с функцией выше или переделать (дублирование кода)
def add_fthm_bin_label(data: pd.DataFrame, target_column: str, threshold: float, window_size: int):
    """
    Fixed Time-Horizon model binary labeling.

    Labels (column ``target``): 0 = keep, 1 = price moved (either direction)
    beyond the threshold band. ``data`` is modified in place.

    :param data: source pandas dataframe
    :param target_column: name of price column at which data will be labeled
    :param threshold: 'keep' threshold
    :param window_size: how many observations in the future we predict
    :return: pandas dataframe with target label `target`
    """
    data['price_target'] = data[target_column].shift(-window_size)
    data.dropna(inplace=True)
    price = data[target_column]
    data['target'] = 0
    # Both up- and down-moves collapse onto the same binary label 1.
    moved_up = price + (price * threshold) < data['price_target']
    moved_down = price - (price * threshold) > data['price_target']
    data.loc[moved_up, 'target'] = 1
    data.loc[moved_down, 'target'] = 1
    data.drop(columns=['price_target'], inplace=True)
    return data
def add_tbm_label(data: pd.DataFrame, target_column: str, thresholds: list, window_size: int):
    """
    Triple-Barrier Model labeling.

    Labels (column ``target``): 0 = keep, 1 = up, 2 = down. Horizons are
    scanned from `window_size` down to 1, so shorter horizons can overwrite
    the label set by longer ones. The last `window_size` rows (which have no
    complete future window) are trimmed from the result.

    :param data: source pandas data frame
    :param target_column: name of price column at which data will be labeled
    :param thresholds: 'keep' band; thresholds[0] upper limit, thresholds[1] lower limit
    :param window_size: how many observations in the future we predict
    :return: pandas data frame with target label `target`
    """
    # Non-positive thresholds disable the corresponding barrier.
    upper_bound = thresholds[0] if thresholds[0] > 0.0 else np.inf
    lower_bound = -thresholds[1] if thresholds[1] > 0.0 else -np.inf

    data['target'] = 0
    for horizon in range(window_size, 0, -1):
        # Forward return over `horizon` steps, aligned to the current row.
        data['wt_pct'] = data[target_column].pct_change(periods=horizon).shift(-horizon)
        data.loc[data['wt_pct'] > upper_bound, 'target'] = 1
        data.loc[data['wt_pct'] < lower_bound, 'target'] = 2
        data.drop(columns='wt_pct', inplace=True)
    return data.iloc[:-window_size, :]
# 1. Необходимо добавить расчет волатильности;
# 2. Добавить документацию.
def vol_horiz(data: pd.DataFrame, price_column='close', horizon=10):
    """Label rows (column ``target_1``) using a per-row volatility band.

    0 = within band, 1 = forward return above ``volatility_wt``,
    2 = forward return below ``-volatility_wt``. Requires a precomputed
    ``volatility_wt`` column; ``data`` is modified in place and the helper
    ``price_target`` column is left on the frame (matching prior behavior).
    """
    data['price_target'] = data[price_column].shift(-horizon)
    data.dropna(inplace=True)
    forward_return = data['price_target'] / data[price_column] - 1
    data['target_1'] = 0
    data.loc[forward_return > data['volatility_wt'], 'target_1'] = 1
    data.loc[forward_return < -data['volatility_wt'], 'target_1'] = 2
    return data
| 3.453125 | 3 |
networking_cisco/plugins/cisco/cfg_agent/cfg_exceptions.py | mail2nsrajesh/networking-cisco | 1 | 12770403 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions by Cisco Configuration Agent."""
from neutron_lib import exceptions
from networking_cisco._i18n import _
class DriverException(exceptions.NeutronException):
    """Base exception for errors raised by routing driver classes."""


class DriverExpectedKeyNotSetException(DriverException):
    """An attribute expected to be set by the plugin is missing."""
    # BUG FIX: the original fragments concatenated to "missing.Driver".
    message = _("Value for expected key: %(key)s is missing. "
                "Driver cannot proceed")


class InitializationException(DriverException):
    """Raised when initialization of a Routing Driver object fails."""
    message = _("Critical device parameter missing. Failed initializing "
                "routing driver object.")


class ConnectionException(DriverException):
    """Connection exception when connecting to an IOS XE hosting device."""
    message = _("Failed connecting to Device. Reason: %(reason)s. "
                "Connection params are User:%(user)s, Host:%(host)s, "
                "Port:%(port)s, Device timeout:%(timeout)s.")


class CSR1kvConfigException(DriverException):
    """Configuration exception thrown when modifying the running config."""
    message = _("Error executing snippet:%(snippet)s. "
                "Hosting device:%(dev_id)s Mgmt IP:%(ip)s "
                "ErrorType:%(type)s ErrorTag:%(tag)s "
                "Config string:%(confstr)s.")


class CSR1kvMissingInterfaceException(DriverException):
    """Raised when an expected interface is absent from the CSR."""
    # BUG FIX: the original fragments concatenated to "interfaceconfig.".
    message = _("Interface corresponding to port:%(id)s and mac-address:"
                "%(mac)s is missing in the CSR. Cannot proceed with "
                "interface config.")


class CSR1kvUnknownValueException(DriverException):
    """CSR1kv exception thrown when an unknown value is received."""
    message = _("Data in attribute: %(attribute)s does not correspond to "
                "expected value. Value received is %(value)s. ")


class DriverNotExist(DriverException):
    """The requested driver does not exist."""
    message = _("Driver %(driver)s does not exist.")


class DriverNotFound(DriverException):
    """No driver could be found for the given resource."""
    message = _("Driver not found for %(resource)s id:%(id)s.")


class DriverNotSetForMissingParameter(DriverException):
    """A driver cannot be selected because a required parameter is missing."""
    message = _("Driver cannot be set for missing parameter:%(p)s.")


class HAParamsMissingException(DriverException):
    """Raised when HA is enabled but the port's HA info is missing."""
    message = _("For router: %(r_id)s and port: %(p_id)s, HA_ENABLED is set, "
                "but port ha info is missing. Port details: %(port)s")
| 1.804688 | 2 |
synapse_antispam/mjolnir/antispam.py | grahamc/mjolnir | 153 | 12770404 | # -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import ALL_RULE_TYPES, RECOMMENDATION_BAN
from .ban_list import BanList
from synapse.types import UserID
logger = logging.getLogger("synapse.contrib." + __name__)
class AntiSpam(object):
    """Synapse spam-checker module that enforces mjolnir ban-list rules."""

    def __init__(self, config, api):
        # Behaviour toggles; each is optional in the module config.
        self.block_invites = config.get("block_invites", True)
        self.block_messages = config.get("block_messages", False)
        self.block_usernames = config.get("block_usernames", False)
        # Rooms whose state events hold the ban-list rules.
        self.list_room_ids = config.get("ban_lists", [])
        self.rooms_to_lists = {}  # type: Dict[str, BanList]
        self.api = api

        # Build the ban lists up front so rules can be matched immediately.
        self.build_lists()

    def build_lists(self):
        for list_room_id in self.list_room_ids:
            self.build_list(list_room_id)

    def build_list(self, room_id):
        logger.info("Rebuilding ban list for %s" % (room_id))
        self.get_list_for_room(room_id).build()

    def get_list_for_room(self, room_id):
        # Lazily create the BanList wrapper for this room.
        if room_id not in self.rooms_to_lists:
            self.rooms_to_lists[room_id] = BanList(api=self.api, room_id=room_id)
        return self.rooms_to_lists[room_id]

    def is_user_banned(self, user_id):
        # The first matching rule decides the outcome.
        for ban_list in self.rooms_to_lists.values():
            for rule in ban_list.user_rules:
                if rule.matches(user_id):
                    return rule.action == RECOMMENDATION_BAN
        return False

    def is_room_banned(self, invite_room_id):
        for ban_list in self.rooms_to_lists.values():
            for rule in ban_list.room_rules:
                if rule.matches(invite_room_id):
                    return rule.action == RECOMMENDATION_BAN
        return False

    def is_server_banned(self, server_name):
        for ban_list in self.rooms_to_lists.values():
            for rule in ban_list.server_rules:
                if rule.matches(server_name):
                    return rule.action == RECOMMENDATION_BAN
        return False

    # --- spam checker interface below here ---

    def check_event_for_spam(self, event):
        room_id = event.get("room_id", "")
        event_type = event.get("type", "")
        state_key = event.get("state_key", None)

        # State events in one of our ban-list rooms trigger a list rebuild.
        if state_key is not None and event_type in ALL_RULE_TYPES and room_id in self.list_room_ids:
            logger.info("Received ban list event - updating list")
            self.get_list_for_room(room_id).build(with_event=event)
            return False  # Ban list updates aren't spam

        if not self.block_messages:
            return False  # not spam (we aren't blocking messages)

        sender = UserID.from_string(event.get("sender", ""))
        return self.is_user_banned(sender.to_string()) or self.is_server_banned(sender.domain)

    def user_may_invite(self, inviter_user_id, invitee_user_id, room_id):
        if not self.block_invites:
            return True  # allowed (we aren't blocking invites)

        sender = UserID.from_string(inviter_user_id)
        blocked = (
            self.is_user_banned(sender.to_string())
            or self.is_room_banned(room_id)
            or self.is_server_banned(sender.domain)
        )
        return not blocked

    def check_username_for_spam(self, user_profile):
        if not self.block_usernames:
            return True  # allowed (we aren't blocking based on usernames)

        # Check whether the user ID or display name matches any of the banned
        # patterns.
        return self.is_user_banned(user_profile["user_id"]) or self.is_user_banned(user_profile["display_name"])

    def user_may_create_room(self, user_id):
        return True  # allowed

    def user_may_create_room_alias(self, user_id, room_alias):
        return True  # allowed

    def user_may_publish_room(self, user_id, room_id):
        return True  # allowed

    @staticmethod
    def parse_config(config):
        return config  # no parsing needed
| 2.15625 | 2 |
dl_tutorials/__init__.py | learnerzhang/AnalyticsVidhya | 1 | 12770405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/21 2:30 PM
# @Author : zhangzhen
# @Site :
# @File : __init__.py
# @Software: PyCharm
import tensorflow as tf
from numpy.random import RandomState as rdm
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
class MLP:
    """Minimal fully-connected network built with TF1-style graph ops.

    Builds placeholders, a stack of dense layers, a cross-entropy loss and an
    Adam training op at construction time; train() then runs mini-batch SGD
    on a synthetic dataset produced by generate_data().
    """

    def __init__(self, in_size=10, out_size=2, hiddens=None, act_function=None) -> None:
        """
        :param in_size: input feature dimension
        :param out_size: output dimension
        :param hiddens: list of hidden-layer widths (default: one layer of 10)
        :param act_function: activation applied to hidden layers (None = linear)
        """
        # BUG FIX: the original used a mutable default argument (hiddens=[10]);
        # a None sentinel gives the same effective default safely.
        if hiddens is None:
            hiddens = [10]
        self.x_dimension = in_size
        self.y_dimension = out_size
        self.build(in_size, out_size, hiddens=hiddens, act_function=act_function)

    def build(self, in_size, out_size, hiddens=None, act_function=tf.nn.relu):
        """Construct placeholders, layers, the loss and the optimizer op."""
        if hiddens is None:  # was a mutable default ([]) in the original
            hiddens = []

        def add_layer(inputs, in_size, out_size, act_function=None):
            # One dense layer: W x + b, optionally wrapped in act_function.
            W = tf.Variable(tf.random_normal([in_size, out_size]))
            b = tf.Variable(tf.constant(0.1, shape=[out_size]))
            Wx_plus_b = tf.matmul(inputs, W) + b
            if act_function:
                outputs = act_function(Wx_plus_b)
            else:
                outputs = Wx_plus_b
            logging.info("tmp hidden layer out: {}".format(outputs))
            return outputs

        self.x = tf.placeholder(dtype=tf.float32, shape=(None, in_size), name='X-input')
        self.y_ = tf.placeholder(dtype=tf.float32, shape=(None, out_size), name='y-input')
        tmp_in_size = in_size
        tmp_inputs = self.x
        for hidden in hiddens:
            tmp_outputs = add_layer(tmp_inputs, tmp_in_size, hidden, act_function=act_function)
            tmp_in_size = hidden
            tmp_inputs = tmp_outputs
        # Output layer is linear; the loss below applies the log directly.
        self.y = add_layer(tmp_inputs, tmp_in_size, out_size, act_function=None)
        logging.info("last out: {}".format(self.y))
        # Cross-entropy with clipping to avoid log(0).
        self.cross_entropy = -tf.reduce_mean(self.y_ * tf.log(tf.clip_by_value(self.y, 1e-10, 1.0)))
        self.step = tf.train.AdamOptimizer(0.001).minimize(self.cross_entropy)
        logging.info("loss: {}".format(self.cross_entropy))

    def train(self, steps=5000, batch_size=8):
        """Run mini-batch training on a freshly generated synthetic dataset."""
        X, Y = self.generate_data(size=128)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for i in range(steps):
                # Cycle through the dataset in fixed-size batches.
                start = (i * batch_size) % self.dataset_size
                end = min(start + batch_size, self.dataset_size)
                sess.run(self.step, feed_dict={self.x: X[start: end], self.y_: Y[start: end]})
                if i % 1000 == 0:
                    total_losses = sess.run(self.cross_entropy, feed_dict={self.x: X, self.y_: Y})
                    logging.info("After {} training steps, crosses entropy on all data is {}".format(i, total_losses))

    def predict(self):
        # Not implemented yet.
        pass

    def generate_data(self, size=128, rdm_seed=1):
        """Generate a random dataset; label is 1 when the feature sum is below x_dimension / 2."""
        r = rdm(rdm_seed)
        self.dataset_size = size  # remembered for batching in train()
        X = r.rand(size, self.x_dimension)
        Y = [[int(sum(xs) < self.x_dimension / 2)] for xs in X]
        return X, Y
if __name__ == '__main__':
    # Train a tiny 2-input, 1-output MLP with the default hidden layer.
    MLP(in_size=2, out_size=1).train()
| 2.546875 | 3 |
python/set_add.py | HannoFlohr/hackerrank | 0 | 12770406 | <reponame>HannoFlohr/hackerrank
# Read N, then N country names; a set keeps only distinct entries.
s = set()
for _ in range(int(input())):
    s.add(input())
print(len(s))

# Equivalent one-line solution, kept as a reference only.
# BUG FIX: the original executed it as code, which consumed the (already
# exhausted) input stream a second time when the script ran.
# print(len(set(input() for _ in range(int(input())))))

# https://www.hackerrank.com/challenges/py-set-add/problem
remediation/aws/delete_s3_bucket/bot.py | peijianju-sonrai/sonrai-bots | 4 | 12770407 | import sonrai.platform.aws.arn
import logging
def run(ctx):
    """Empty and delete the S3 bucket named by the bot context's resource SRN."""
    # Create AWS S3 client
    client = ctx.get_client().get('s3')
    # The bucket name is the last path segment of the SRN.
    bucket_name = ctx.resource_srn.split('/')[-1]

    # Delete every object first; list_objects caps its page size, so keep
    # re-listing until the bucket reports no more contents.
    logging.info('Deleting objects from S3 bucket {}.'.format(bucket_name))
    listing = client.list_objects(Bucket=bucket_name)
    while listing.get('Contents', None):
        for obj in listing['Contents']:
            client.delete_object(Bucket=bucket_name, Key=obj["Key"])
        listing = client.list_objects(Bucket=bucket_name)

    # Only an empty bucket can be deleted.
    logging.info('Deleting S3 bucket {}.'.format(bucket_name))
    client.delete_bucket(Bucket=bucket_name)
| 2.390625 | 2 |
singleton/singleton.py | rlelito/DesignPatterns | 0 | 12770408 | from __future__ import annotations
from typing import Optional
class Singleton(object):
    """Classic singleton: at most one instance may ever be constructed.

    Obtain the shared instance through :meth:`get_instance`; calling the
    constructor after the instance exists raises.
    """

    # The single shared instance, created lazily.
    _instance: Optional[Singleton] = None

    def __init__(self) -> None:
        # Direct construction is allowed exactly once.
        if Singleton._instance is not None:
            raise Exception("Class is a Singleton")
        Singleton._instance = self

    @staticmethod
    def get_instance() -> Singleton:
        """Return the shared instance, creating it on first use."""
        # BUG FIX: the original annotated this -> None although it returns
        # the singleton instance.
        if Singleton._instance is None:
            Singleton()
        return Singleton._instance

    @staticmethod
    def message_1() -> None:
        print("Message 1")

    @staticmethod
    def message_2() -> None:
        print("Message 2")
| 3.375 | 3 |
metaprogramming/metaclass2.py | wapj/pyconkr2019 | 10 | 12770409 | <reponame>wapj/pyconkr2019<filename>metaprogramming/metaclass2.py
def make_class(name):
    """Return a freshly created class: PdfFile when name == "pdf", TxtFile otherwise.

    Each call defines a brand-new class object, illustrating that classes
    are ordinary runtime values in Python.
    """
    if name != "pdf":
        class TxtFile:
            pass
        return TxtFile

    class PdfFile:
        pass
    return PdfFile
# Demo: classes created at runtime by a factory function.
PdfFile = make_class("pdf")
print(PdfFile)
print(PdfFile())

# BUG FIX: the two lines below were pasted interpreter OUTPUT sitting at
# top level, which made the whole file a SyntaxError. Kept as comments:
# <class '__main__.make_class.<locals>.PdfFile'>
# <__main__.make_class.<locals>.PdfFile object at 0x105654240>
| 2.703125 | 3 |
tests/test_geofabrik.py | BLSQ/geohealthaccess | 8 | 12770410 | """Tests for Geofabrik module."""
import os
from datetime import datetime
from tempfile import TemporaryDirectory
import pytest
import vcr
from geohealthaccess.geofabrik import Geofabrik, Page, Region
BASEURL = "http://download.geofabrik.de/"
@vcr.use_cassette("tests/cassettes/geofabrik-index.yaml")
def test_page_parsing_index():
    # The top-level index page lists the continent sub-pages.
    # Expected counts match the recorded VCR cassette fixture.
    url = BASEURL + "index.html"
    page = Page(url)
    assert page.name == "OpenStreetMap Data Extracts"
    assert page.continents) == 8 if False else len(page.continents) == 8


@vcr.use_cassette("tests/cassettes/geofabrik-africa.yaml")
def test_page_parsing_continent():
    # A continent page exposes raw file details, sub-regions and special
    # sub-regions; counts match the recorded cassette.
    url = BASEURL + "africa.html"
    page = Page(url)
    assert page.name == "Africa"
    assert len(page.raw_details) == 37
    assert len(page.subregions) == 55
    assert len(page.special_subregions) == 1


@vcr.use_cassette("tests/cassettes/geofabrik-kenya.yaml")
def test_page_parsing_country():
    # A country (leaf) page only exposes raw file details.
    url = BASEURL + "africa/kenya.html"
    page = Page(url)
    assert page.name == "Kenya"
    assert len(page.raw_details) == 73
@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region():
    # Basic Region attributes; the leading slash in the id is normalized away.
    region = Region("/africa/comores")
    assert region.id == "africa/comores"
    assert region.level == 1
    assert region.name == "Comores"
    assert region.extent.is_valid
    assert region.url == "http://download.geofabrik.de/africa/comores.html"


@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_files():
    # File listing count matches the recorded cassette and includes the
    # latest OSM PBF extract.
    region = Region("/africa/comores")
    assert len(region.files) == 65
    assert "/africa/comores-latest.osm.pbf" in region.files


@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_datasets():
    # Each dataset entry is a dict with a parsed date and http(s) URL/file
    # pointing at an .osm.pbf archive.
    region = Region("africa/comores")
    assert len(region.datasets) == 12
    assert isinstance(region.datasets[0]["date"], datetime)
    assert isinstance(region.datasets[0]["file"], str)
    assert isinstance(region.datasets[0]["url"], str)
    assert region.datasets[0]["url"].startswith("http://")
    assert region.datasets[0]["file"].endswith(".osm.pbf")


@vcr.use_cassette("tests/cassettes/geofabrik-comores.yaml")
def test_region_latest():
    # The most recent dataset is exposed as an .osm.pbf path.
    region = Region("africa/comores")
    assert region.latest.endswith(".osm.pbf")


@vcr.use_cassette("tests/cassettes/geofabrik-france.yaml")
def test_region_subregions():
    # France exposes its 27 regional sub-extracts.
    region = Region("europe/france")
    assert len(region.subregions) == 27
    assert "/europe/france/alsace" in region.subregions
def test_geofabrik_sindex():
    # The spatial index contains one row per Geofabrik region with a valid
    # geometry; the count matches the bundled index data.
    geofab = Geofabrik()
    assert len(geofab.sindex) == 363
    row = geofab.sindex.loc["africa"]
    assert row.name == "africa"
    assert row.geometry.is_valid


def test_geofabrik_search(senegal):
    # `senegal` is a pytest fixture providing the country geometry; the best
    # match is the combined senegal-and-gambia extract with ~0.62 overlap.
    geofab = Geofabrik()
    region_id, match = geofab.search(senegal)
    assert region_id == "africa/senegal-and-gambia"
    assert match == pytest.approx(0.62, rel=0.01)


@vcr.use_cassette("tests/cassettes/geofabrik-saotomeprincipe-download.yaml")
def test_geofabrik_download():
    # Download into a temp dir, then verify the overwrite flag semantics via
    # the file's modification time.
    geofabrik = Geofabrik()
    with TemporaryDirectory(prefix="geohealthaccess_") as tmpdir:
        osmpbf = geofabrik.download("africa/sao-tome-and-principe", tmpdir)
        mtime = os.path.getmtime(osmpbf)
        assert os.path.isfile(osmpbf)
        # should not download again (overwrite=False)
        geofabrik.download("africa/sao-tome-and-principe", tmpdir, overwrite=False)
        assert os.path.getmtime(osmpbf) == mtime
        # should download again (overwrite=True)
        geofabrik.download("africa/sao-tome-and-principe", tmpdir, overwrite=True)
        assert os.path.getmtime(osmpbf) != mtime
| 2.59375 | 3 |
pyvib/utils/sample.py | pawsen/pyvib | 6 | 12770411 | # -*- coding: utf-8 -*-
"""pyvib sample data files"""
import socket
import os.path
import warnings
from shutil import move
from tempfile import TemporaryDirectory
from subprocess import check_call
from .config import get_and_create_sample_dir
__all__ = ['download_sample_data', 'get_sample_file']
# https://api.github.com/repos/pawsen/pyvib_data/contents/pyvib/data/nlbeam
# https://stackoverflow.com/a/18194523/1121523
#_github_downloader = 'https://minhaskamal.github.io/DownGit/#/home?url='
_base_urls = (
'https://api.github.com/repos/pawsen/pyvib_data/contents/pyvib/data/',
#'https://github.com/pawsen/pyvib_data/tree/master/pyvib/data/',
)
# files or folders to download
sample_files = {
"NLBEAM": "nlbeam",
"2DOF": "2dof",
"BOUCWEN": "boucwen",
"SILVERBOX": "silverbox",
}
def download_sample_data(show_progress=True):
"""
Download all sample data at once. This will overwrite any existing files.
Parameters
----------
show_progress: `bool`
Show a progress bar during download
Returns
-------
None
"""
for filename in sample_files.values():
get_sample_file(filename, url_list=_base_urls, overwrite=True)
def get_sample_file(filename, url_list=_base_urls, overwrite=False):
"""
Downloads a sample file. Will download a sample data file and move it to
the sample data directory. Also, uncompresses zip files if necessary.
Returns the local file if exists.
Parameters
----------
filename: `str`
Name of the file
url_list: `str` or `list`
urls where to look for the file
show_progress: `bool`
Show a progress bar during download
overwrite: `bool`
If True download and overwrite an existing file.
timeout: `float`
The timeout in seconds. If `None` the default timeout is used from
`astropy.utils.data.Conf.remote_timeout`.
Returns
-------
result: `str`
The local path of the file. None if it failed.
"""
# Creating the directory for sample files to be downloaded
sampledata_dir = get_and_create_sample_dir()
src = os.path.join(sampledata_dir, filename)
if not overwrite and os.path.isfile(src):
return src
else:
# check each provided url to find the file
for base_url in url_list:
try:
url = base_url + filename
with TemporaryDirectory() as d:
rc = check_call(['github-download.sh', url], cwd=d)
# move files to the data directory
move(d, src)
return src
except (socket.error, socket.timeout) as e:
warnings.warn("Download failed with error {}. \n"
"Retrying with different mirror.".format(e))
# if reach here then file has not been downloaded.
warnings.warn("File {} not found.".format(filename))
return None
| 2.65625 | 3 |
python/search/past_file_search.py | Ericizepic/discordfs | 0 | 12770412 | """Search for files purely in discord."""
from .async_search_client import AsyncSearchClient
import discord
from typing import List, Dict
from fuzzywuzzy import fuzz
from utils import attachment_to_search_dict
import datetime
class PastFileSearch(AsyncSearchClient):
    """Search for files in discord with just discord."""

    def __init__(self, thresh: int = 85):
        """
        Create a DiscordSearch object.

        It's annoying to need bot_user but we do this to enable searching on files from other bots.

        Args:
            thresh: Minimum fuzzy-match ratio (0-100) a filename or message
                content must score to be considered a match.
        """
        # Ids removed via remove_doc(); always excluded from search results.
        self.banned_file_ids = set()
        self.thresh = thresh
        self.user = None

    def initialize(self, bot_user: str, *args, **kwargs) -> bool:
        """
        Initialize past file search.

        Args:
            bot_user: The bot username.

        Returns:
            True (initialization cannot fail).
        """
        self.user = bot_user
        return True

    def match(self, message: discord.Message, filename: str, **kwargs) -> List[discord.Attachment]:
        """
        Match the message against possible arguments.

        Args:
            message: The message to test
            filename: Query filename, fuzzy-matched against attachment names.
            kwargs: Optional filters: content, after, before, author, channel,
                mimetype, banned_ids.

        Returns:
            A list of discord.Attachments that match the query.
        """
        # Ignore messages without attachments and this bot's own messages.
        if not message.attachments or message.author == self.user:
            return []
        if kwargs.get("content"):
            if fuzz.partial_ratio(kwargs['content'].lower(), message.content.lower()) < self.thresh:
                return []
        if kwargs.get("after"):
            if message.created_at < kwargs["after"]:
                return []
        if kwargs.get("before"):
            if message.created_at > kwargs["before"]:
                return []
        if kwargs.get("author"):
            if message.author != kwargs["author"]:
                return []
        if kwargs.get("channel"):
            if message.channel != kwargs["channel"]:
                return []
        res = filter(lambda atch: fuzz.partial_ratio(atch.filename.lower(),
                                                     filename.lower()) > self.thresh, message.attachments)
        # Fix: apply BOTH optional filters. Previously, when a mimetype was
        # given the function returned early and banned_ids was never applied.
        if kwargs.get("mimetype"):
            res = (attachment for attachment in res if attachment.content_type == kwargs["mimetype"])
        if kwargs.get("banned_ids"):
            res = (attachment for attachment in res if attachment.id not in kwargs["banned_ids"])
        return list(res)

    async def search(self, filename: str, onii_chan, ctx_channel, *args, **kwargs) -> List[Dict]:
        """
        Iterate through previous messages in a discord channel for files.

        Args:
            filename: The query
            onii_chan: Fallback channel (currently superseded by ctx_channel).
            ctx_channel: The channel the command was invoked from.
            kwargs: Search parameters

        Returns:
            A list of dicts of files.
        """
        if self.user is None or not isinstance(filename, str):
            # Fix: return an empty list, matching the declared List[Dict]
            # return type (previously returned the empty string "").
            return []
        files = []
        # NOTE(review): this unconditionally overrides the onii_chan argument
        # with ctx_channel — confirm whether the parameter is still needed.
        onii_chan = ctx_channel
        if kwargs.get('channel'):
            onii_chan = kwargs['channel']
        if kwargs.get('banned_ids'):
            kwargs['banned_ids'].update(self.banned_file_ids)
        else:
            kwargs['banned_ids'] = self.banned_file_ids
        matched_messages = onii_chan.history(limit=int(1e9), before=kwargs.get('before'), after=kwargs.get('after'))
        async for message in matched_messages:
            matched = self.match(message, filename, **kwargs)
            files.extend([{**attachment_to_search_dict(message, atch), 'url': atch.url,
                           'jump_url': message.jump_url} for atch in matched])
        return files

    async def create_doc(self, *args, **kwargs):
        """We don't maintain search indices in this class, so this is not needed."""
        return

    async def clear(self, *args, **kwargs):
        """We don't maintain search indices in this class, so this is not needed."""
        return

    async def remove_doc(self, file_ids: list, *args, **kwargs):
        """Update banned ids with the file ids."""
        # Fix: update() bans each id individually. The original
        # add(tuple(file_ids)) stored a single tuple, so membership tests
        # against individual attachment ids never matched.
        self.banned_file_ids.update(file_ids)
        return
| 3.3125 | 3 |
GeoLiberator/geoliberator.py | jakeee51/GeoLiberator | 4 | 12770413 | <reponame>jakeee51/GeoLiberator
# -*- coding: utf-8 -*-
'''
Author: <NAME>
Application Name: GeoLiberator
Functionality Purpose: Instill data quality upon address data
Version: Beta
'''
#8/6/20
import re
import sys
import time
import pandas as pd
#Account for post cardinal direction
#Account for '&' and 'STS' and multiple street types
#Create custom address formatter
#Options to return Street Type, Cardinal Direction
#Implement __main__.py for cli tools (use argparse)
#https://realpython.com/command-line-interfaces-python-argparse/
#Function to parse city
#Allow street types, wordTypes & Cities to be appended to library or option to use one's own library
reason = ["Invalid parse argument given", "File type not supported", "Use 'address_field' argument for csv & excel files"]
class AddressError(BaseException):
    """Raised for unrecoverable address-parsing failures."""
    pass
class ArgumentError(Exception):
    """Raised when a caller passes an invalid argument (see `reason` list)."""
    pass
class Unbuffered(object):
    """Stream proxy that forwards writes to the wrapped stream and flushes
    immediately, effectively disabling output buffering."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        target = self.stream
        target.write(data)
        target.flush()

    def writelines(self, datas):
        target = self.stream
        target.writelines(datas)
        target.flush()

    def __getattr__(self, attr):
        # Delegate everything else (encoding, isatty, ...) to the wrapped stream.
        return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
class GeoLiberator:
    '''
    A class for instantiating governance & uniformity on full addresses
    GeoLiberator class takes in entire address as first argument and returns the instantiated GeoLiberator Object.
    One may manipulate object using member functions in order to parse certain properties of the address.
    Second 'log' argument set to return value by default or write to new specified file name.
    Third 'mode' argument set to False by default or True to print output results.
    Sample Code:
    from geoliberator import *
    GL_Object = GeoLiberator("123 Sample Street, New York 12345")
    GL_Object.getAddress(log="output_log.txt") #This code appends parsed address to a log file (useful in loops)
    '''
    def __init__(self, addr):
        """Store the raw address and build the lookup tables used by the parsers."""
        self.addr = str(addr)
        # US state name -> USPS abbreviation.
        self.states = {"Alabama": "AL", "Alaska": "AK", "Arizona": "AZ", "Arkansas": "AR", "California": "CA",
                       "Colorado": "CO", "Connecticut": "CT", "Delaware": "DE", "District of Columbia": "DC",
                       "Florida": "FL", "Georgia": "GA", "Hawaii": "HI", "Idaho": "ID", "Illinois": "IL",
                       "Indiana": "IN", "Iowa": "IA", "Kansas": "KS", "Kentucky": "KY", "Louisiana": "LA",
                       "Maine": "ME", "Maryland": "MD", "Massachusetts": "MA", "Michigan": "MI", "Minnesota": "MN",
                       "Mississippi": "MS", "Missouri": "MO", "Montana": "MT", "Nebraska": "NE", "Nevada": "NV",
                       "New Hampshire": "NH", "New Jersey": "NJ", "New Mexico": "NM", "New York": "NY",
                       "North Carolina": "NC", "North Dakota": "ND", "Ohio": "OH", "Oklahoma": "OK", "Oregon": "OR",
                       "Pennsylvania": "PA", "Rhode Island": "RI", "South Carolina": "SC", "South Dakota": "SD",
                       "Tennessee": "TN", "Texas": "TX", "Utah": "UT", "Vermont": "VT", "Virginia": "VA",
                       "Washington": "WA", "West Virginia": "WV", "Wisconsin": "WI", "Wyoming": "WY"}
        # Canonical street type -> accepted spellings/abbreviations (longest first).
        self.streetTypes = {"HEIGHTS": ["HEIGHTS","HTS"],
                            "ROAD": ["ROAD","RD","RO"],
                            "AVENUE": ["AVENUE","AVEN","AVE","AV","AE"],
                            "DRIVE": ["DRIVE","DRIV","DR"],
                            "PLACE": ["PLACE","PLAC","PLCE","PLC","PL"],
                            "BOULEVARD": ["BOULEVARD","BLVD","BOUL","BLV","BO"],
                            "COURT": ["COURT","CRT","CT"],
                            "PARKWAY": ["PARKWAY","PKWAY","PKWY","PWAY","PWY","PKY"],
                            "HIGHWAY": ["HIGHWAY","HWAY","HWY"],
                            "EXPRESSWAY": ["EXPRESSWAY","EXPRESWY","EXPRESWAY","EXPREWAY","EXPWA","EXPWY","EXPY","EXWY","EWY","EXP"],
                            "PLAZA": ["PLAZA","PLAZ","PLZA","PLZ"],
                            "BRIDGE": ["BRIDGE","BRDG","BRG","BR"],
                            "CONCOURSE": ["CONCOURSE","CONC","CNCRS","CON","CO"],
                            "TERRACE": ["TERRACE","TERR","TER","TE"],
                            "CRESCENT": ["CRESCENT","CRESCNT","CRES"],
                            "ALLEY": ["ALLEY","ALY"], "GARDENS": ["GARDENS","GDNS"],
                            "PARK": ["PARK","PRK","PK"],
                            "HILL": ["HILL","HL"], "LANE": ["LANE","LN"],
                            "PROMENADE": ["PROMENADE","PROM"], "COURSE": ["COURSE","CRSE"],
                            "FREEWAY": ["FREEWAY","FWY"], "TURNPIKE": ["TURNPIKE","TPKE"],
                            "SQUARE": ["SQUARE","SQ"], "CIRCLE": ["CIRCLE","CIR"],
                            "CLOSE": ["CLOSE","CLOS"], "VILLAGE": ["VILLAGE","VLG"],
                            "RIDGE": ["RIDGE","RDG"], "COVE": ["COVE","CV"],
                            "TRAIL": ["TRAIL","TRL"], "GREEN": ["GREEN","GRN"], "CAMP": ["CAMP","CP"],
                            "STREET": ["STREET","STREE","STRE","STR","ST"],
                            "SLIP": ["SLIP"],"LOOP": ["LOOP"], "WAY": ["WAY"],"EST": ["EST"],"ROW": ["ROW"],"OVAL": ["OVAL"],"PATH": ["PATH"]}
        # Street names that carry no street type (mostly NYC-specific); the
        # final dict element holds named streets with alternate spellings.
        self.wordTypes = ['ARCADIA', 'ATLANTIC', 'ATLANTIC COMMONS', 'BATH', 'BAYSIDE',
                          'BAYVIEW', 'BAYWAY', 'BCH RESERVATION', 'BOARDWALK',
                          'BOULEVARD', 'BOWERY', 'BRANT', 'BRIGHTON 1', 'BRIGHTON 2', 'BRIGHTON 3',
                          'BRIGHTON 4', 'BRIGHTON 7', 'BROADWAY ATRIUM', 'CENTRE MALL', 'CHESTER',
                          'CLINTON', 'CROSS BRONX EP SR', 'CROSS BRONX EP SR', 'CUMBERLAND', 'DEAUVILLE',
                          'DEVON', 'ESSEX', 'FLEET', 'FULTON', 'GOTHAM', 'GREENWAY', 'GREENWAY',
                          'GREENWICH MEWS', 'HAMILTON', 'HILLCREST', 'HUDSON', 'IRVING', 'JAMAICA', 'JONES',
                          'KILDARE', 'KINGSBOROUGH 2', 'KINGSBOROUGH 3', 'KINGSBOROUGH 4', 'KINGSBOROUGH 5',
                          'KINGSBOROUGH 6', 'KINGSBOROUGH 7', 'LAFAYETTE', 'LINCOLN', 'MARION', 'MONUMENT',
                          'ELLIOTT', 'OXFORD', 'NAVY', 'NEPTUNE', 'NEW ENGLAND THRUWAY', 'NEWPORT',
                          'NORTH RIVER PIERS', 'NORTHERN BL SR', 'OCEAN DRIVEWAY', 'OLIVE', 'PELHAM',
                          'PINEAPPLE', 'PLOUGHMANS BUSH', 'POMANDER', 'QUEENS MIDTOWN EP SR',
                          'QUEENS MIDTOWN EP SR', 'REGAL', 'ROOSEVELT', 'SEA BREEZE', 'STAGG', 'SUFFOLK',
                          'TEN EYCK', 'UTICA', 'WASHINGTON', 'WASHINGTON MEWS',
                          {"ESPLANADE": ["ESPLANADE","ESPL"], 'BROADWAY': ['BROADWAY','BRDWY','BDWY','BWAY','BWY']}]
        # Flatten every accepted street-type spelling into one lookup list.
        hold = ''
        for typ in self.streetTypes:
            hold += re.sub(r"[\[\]' ]", '', str(self.streetTypes[typ])) + ','
        self.streetTypesAll = list(hold.strip(',').split(','))
    def __none_other(self, *args):
        """Return True only if none of *args* is the sentinel string "OTHER"."""
        for val in args:
            if val == "OTHER":
                return False
        return True
    def _get_compass(self, direc):
        """Expand a one/two-letter cardinal abbreviation; False if unrecognized."""
        if 'N' == direc or "NO" == direc:
            return "NORTH"
        elif 'S' == direc or "SO" == direc:
            return "SOUTH"
        elif 'E' == direc:
            return "EAST"
        elif 'W' == direc:
            return "WEST"
        else:
            return False
    def _ordinal_add(self, num):
        """Append the English ordinal suffix to the numeric string *num*."""
        nn = ''
        # 11/12/13 are irregular: always 'th'.
        if num == '11' or num == '12' or num == '13':
            return num + 'th'
        if re.search(r"(?<!1)1$", num):
            nn = num + 'st'
        elif num[-1] == '2':
            nn = num + 'nd'
        elif num[-1] == '3':
            nn = num + 'rd'
        else:
            nn = num + 'th'
        return nn
    def _search_cycle(self, g, sF):
        """Extract a standardized street name from the cleaned address *g*.

        When *sF* (saint flag) is True, word-name matching is skipped and
        "SAINT " is prefixed to any pattern match. Returns "OTHER" on failure.
        """
        new_find = ''
        if sF == False:
            for stre in self.wordTypes: #Check for word/name street types
                getStreet = re.search(fr"(?!\d)?(\W|^)({stre})(\W|$)", g)
                # The final wordTypes element is a dict of named streets with
                # alternate spellings; handle it separately.
                if type(stre) == dict:
                    streA = '|'.join(stre["ESPLANADE"])
                    streB = '|'.join(stre["BROADWAY"])
                    if re.search(fr"(?!\d)?\W?({streA})(\W|$)", g):
                        new_find = "ESPLANADE"
                        break
                    if re.search(fr"(?!\d)?\W?({streB})(\W|$)", g):
                        new_find = "BROADWAY"
                        break
                if getStreet:
                    new_find = getStreet.group(2)
                    break
        for key, val in self.streetTypes.items():
            if new_find != '':
                break
            sType = '|'.join(val)
            # Pattern 1: abbreviated cardinal prefix (N/S/E/W/NO/SO) + name + type.
            getStreetPattern1 = re.search(fr"(?!\d)?(\W|^|\d)([NSEW]|NO|SO)(\.? ?\d+(ST)? ?|(\. ?| )([A-Z]+ )+)({sType})\.?(?=\W|$)", g)
            # Pattern 2: optional full cardinal + numeric or word name + type.
            getStreetPattern2 = re.search(fr"(?!\d)?( ?(NORTH |SOUTH |EAST |WEST )?[^\W]?\d+(ST)? ?|([A-Z]+ )+)({sType})\.?((?=\W)|$)", g)
            # Pattern 3: "AVENUE X" style names, where the type comes first.
            getStreetPattern3 = re.search(r"(?!\d)?(AVENUE|AVEN\.?|AVE\.?|AV\.?|AE\.?) ([A-Z]|OF ([A-Z]+ )?[A-Z]+)(?=\W|$)", g)
            if getStreetPattern1:
                if getStreetPattern1.group(4) in self.streetTypes[key] or getStreetPattern1.group(7) in self.streetTypes[key]:
                    new_find = self._get_compass(getStreetPattern1.group(2)) + ' ' + getStreetPattern1.group(3).strip('. ') + f" {key}"
                    break
            elif getStreetPattern2:
                if getStreetPattern2.group(3) in self.streetTypes[key] or getStreetPattern2.group(5) in self.streetTypes[key]:
                    new_find = getStreetPattern2.group(1).strip(' ') + f" {key}"
                    break
            elif getStreetPattern3: #IMPLEMENT CARDINAL DIRECTIONS
                new_find = "AVENUE " + getStreetPattern3.group(2)
                break
        if sF == True and new_find != '':
            # Re-insert "SAINT" after any cardinal prefix, else at the front.
            cardir = re.search(r"(NORTH|SOUTH|EAST|WEST) ", str(new_find))
            if cardir:
                new_find = re.sub(fr"{cardir[1]} ", f"{cardir[1]} SAINT ", new_find)
            else:
                new_find = "SAINT " + str(new_find)
        if new_find == '':
            new_find = "OTHER"
        return new_find
    def get_zip(self, log=''):
        """Return the first 5-digit ZIP code found, or "OTHER".

        If *log* is given, the result is also appended to that file.
        """
        get = (self.addr).upper(); full_zip = '' #Uppercase and create get zipcode to return
        get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
        if re.search(r"\b\d{5}\b", get):
            full_zip = re.search(r"\b\d{5}\b", get).group()
        else:
            full_zip = "OTHER"
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_zipcodes"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(full_zip + '\n')
            nf.close()
        return full_zip
    def get_state(self, log=''):
        """Return the full state name (upper-cased) for any USPS abbreviation
        found in the address, or "OTHER". Optionally appends to *log*.
        """
        get = (self.addr).upper(); full_state = '' #Uppercase and find full state to return
        get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
        for key, val in self.states.items():
            if re.search(fr"\b{val}\b", get):
                full_state = key
        if full_state == '':
            full_state = "OTHER"
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_states"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(full_state + '\n')
            nf.close()
        return str(full_state).upper()
    def getAddressNum(self, log=''):
        """Return the leading house number (hyphenated if compound), or "OTHER".

        Optionally appends the result to *log*.
        """
        get = (self.addr).upper(); new_addr_num = '' #Uppercase and create get house number to return
        if not get[0].isdigit():
            # Drop a leading state abbreviation so the number parse can anchor at ^.
            for key, val in self.states.items():
                if re.search(fr"^{val}\b", get):
                    get = re.sub(fr"^{val}(\W|$)", ' ', get)
            get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
            get = re.sub(r"(?<=2)(ND)|(?<=[4-9]|0)(TH|RTH)", '', get) #Strip any char of ordinal numbers
            get = re.sub(r"(?<=[^1]3)(RD)", '', get); get = re.sub(r"(?<=11)(TH)", '', get)
        else:
            get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
            get = re.sub(r"(?<=2)(ND)|(?<=[4-9]|0)(TH|RTH)", '', get) #Strip any char of ordinal numbers
            get = re.sub(r"(?<=[^1]3)(RD)", '', get); get = re.sub(r"(?<=11)(TH)", '', get)
        for val in self.wordTypes: #Word Street Names
            if type(val) == dict:
                streA = '|'.join(val["ESPLANADE"])
                streB = '|'.join(val["BROADWAY"])
                grab = re.search(fr"(?!\d)?\W?({streA}|{streB})(\W|$)", get); wType = ''
                if grab:
                    wType = grab.group(1)
                group1 = fr"(^\d+([- ]\d+)?)(?= ?[NSEW][. ] ?({wType})\.?(\W|$))"
                group2 = fr"(^\d+([- ]\d+)?)(?= (NORTH |SOUTH |EAST |WEST )?({val})\.?(\W|$))"
                group3 = r"(?=\d+([- ]\d+)? (AVENUE|AVEN\.?|AVE\.?|AV\.?|AE\.?) ([A-Z]|OF ([A-Z]+ )?[A-Z]+)(?=\W|$))^\d+([- ]\d+)?"
                gANpat1 = re.search(fr"{group1}|{group2}", get)
                gANpat2 = re.search(fr"{group3}", get)
                if gANpat1:
                    new_addr_num = gANpat1.group().replace(' ', '-')
                elif gANpat2:
                    new_addr_num = gANpat2.group().replace(' ', '-')
            else:
                group1 = fr"(^\d+([- ]\d+)?)(?= ?[NSEW][. ] ?({val})\.?(\W|$))"
                group2 = fr"(^\d+([- ]\d+)?)(?= (NORTH |SOUTH |EAST |WEST )?({val})\.?(\W|$))"
                group3 = r"(?=\d+([- ]\d+)? (AVENUE|AVEN\.?|AVE\.?|AV\.?|AE\.?) ([A-Z]|OF ([A-Z]+ )?[A-Z]+)(?=\W|$))^\d+([- ]\d+)?"
                gANpat1 = re.search(fr"{group1}|{group2}", get)
                gANpat2 = re.search(fr"{group3}", get)
                if gANpat1:
                    new_addr_num = gANpat1.group().replace(' ', '-')
                elif gANpat2:
                    new_addr_num = gANpat2.group().replace(' ', '-')
        for key, val in self.streetTypes.items(): #Regular Street Names
            sType = '|'.join(val)
            group = fr"(^\d+([- ]\d+)?)(?= ?[NSEW][. ]([A-Z]+ )+({sType})\.?(\W|$))"
            group1 = fr"(^\d+([- ]\d+)?)(?= ?[NSEW][. ]? ?\d+ ?({sType})\.?(\W|$))"
            group2 = fr"(^\d+([- ]\d+)?)(?=( ?(NORTH|SOUTH|EAST|WEST)? )((\w+\.? ?)+)({sType})\.?(\W|$))"
            group3 = r"(?=\d+([- ]\d+)? (AVENUE|AVEN\.?|AVE\.?|AV\.?|AE\.?) ([A-Z]|OF ([A-Z]+ )?[A-Z]+)(?=\W|$))^\d+([- ]\d+)?"
            gANpat1 = re.search(fr"{group}|{group1}|{group2}", get)
            gANpat2 = re.search(group3, get)
            if gANpat1:
                new_addr_num = gANpat1.group().replace(' ', '-')
            elif gANpat2:
                new_addr_num = gANpat2.group().replace(' ', '-')
        if new_addr_num == '':
            new_addr_num = "OTHER"
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_address_numbers"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(new_addr_num + '\n')
            nf.close()
        return str(new_addr_num)
    def getStreet(self, log=''):
        """Return the standardized street name (with ordinal numbers and
        expanded 'SAINT'/'FORT' prefixes), or "OTHER". Optionally logs.
        """
        get = (self.addr).upper(); new_street = ''; saintFlag = False #Uppercase and get street name to return
        if '/' in get:
            for key, val in self.states.items():
                if re.search(fr"\b{val}\b", get):
                    get = re.sub(fr"(^|\W){val}(\W|$)", ' ', get)
            get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
            get = re.sub(r"(?<=2)(ND)|(?<=[4-9]|0)(TH|RTH)", '', get) #Strip any char of ordinal numbers
            get = re.sub(r"(?<=[^1]3)(RD)", '', get); get = re.sub(r"(?<=11)(TH)", '', get)
        else:
            get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
            get = re.sub(r"(?<=2)(ND)|(?<=[4-9]|0)(TH|RTH)", '', get) #Strip any char of ordinal numbers
            get = re.sub(r"(?<=[^1]3)(RD)", '', get); get = re.sub(r"(?<=11)(TH)", '', get)
        if re.search(r"(\W|^)(ST|SNT)\W", get): #Check for 'Saint'
            get1 = re.sub(r"(\W|^)(ST|SNT)\W", ' ', get)
            saintFlag = True
            new_street = self._search_cycle(get1, saintFlag)
            # If the SAINT interpretation failed, retry on the original text.
            if new_street == "OTHER":
                saintFlag = False
                new_street = self._search_cycle(get, saintFlag)
        else:
            new_street = self._search_cycle(get, saintFlag)
        new_street = re.sub(r"^FT\W| FT\W", "FORT ", new_street) #Replace 'FT' with 'FORT'
        new_street = re.sub(r"(?<=1)ST", '', new_street) #Strip 1st ordinal number
        if re.search(r"\d+", new_street): #Apply ordinal numbers
            ordNum = self._ordinal_add(str(re.search(r"\d+", new_street).group()))
            new_street = re.sub(r"\d+", ordNum, new_street)
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_streets"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(new_street + '\n')
            nf.close()
        return new_street
    def getAddress(self, log=''):
        """Return "<house number> <street>", or "OTHER" if either part failed.

        Optionally appends the result to *log*.
        """
        get = (self.addr).upper(); new_addr = '' #Uppercase and create new address to return
        gS = GeoLiberator(get).getStreet()
        gAN = GeoLiberator(get).getAddressNum()
        if self.__none_other(gS, gAN):
            new_addr = gAN + ' ' + gS
        else:
            new_addr = "OTHER"
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName == '' or fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_addresses"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(new_addr + '\n')
            nf.close()
        return new_addr
    def full_address(self, log=''):
        """Return "<address>, <STATE> <zip>", or "OTHER" if any part failed.

        Optionally appends the result to *log*.
        """
        get = (self.addr).upper(); full_addr = '' #Uppercase and create 'full' address to return
        get = (re.sub(r"[\t!#$@%^*+=`~/]+| +", ' ', get)).strip(' ') #Strip any anomalies
        gA = GeoLiberator(get).getAddress()
        gST = GeoLiberator(get).get_state()
        gZP = GeoLiberator(get).get_zip()
        if self.__none_other(gA, gST, gZP):
            full_addr = gA + ', ' + gST + ' ' + gZP
        else:
            full_addr = "OTHER"
        if log != '': #Write to new or specified file
            fileName = re.sub(r"\..+", '', log)
            if fileName.isdigit() or re.search(r'[\/:*?"<>|]', fileName):
                fileName = "newly_parsed_addresses"
            nf = open(f"{fileName}.txt", 'a')
            nf.write(full_addr + '\n')
            nf.close()
        return full_addr
#Count the lines in a file
def file_len(file_name):
    """Return the number of lines in *file_name* (0 for an empty file).

    Fix: the original enumerate-based version raised UnboundLocalError when
    the file was empty, because the loop variable was never assigned.
    """
    count = 0
    with open(file_name) as f:
        for count, _ in enumerate(f, start=1):
            pass
    return count
#Takes text file as input and parse argument to determine which address property to be standardized
def autoGeoLiberate(file_path: str, address_field='', parse="address", write=''):
    """Parse every address in *file_path* (txt, csv or xlsx) in bulk.

    Args:
        file_path: Input file. csv/xlsx files additionally require
            *address_field*, the column holding the addresses.
        address_field: Column name for csv/xlsx input; empty for plain text.
        parse: Which component to extract ("address", "number" or "street";
            other keywords raise ArgumentError).
        write: Output log file name; when given, results are appended there
            instead of printed.

    Raises:
        ArgumentError: On an invalid *parse* keyword, unsupported file type,
            or missing *address_field* for csv/xlsx input.
    """
    mode = True; lines = ''
    if not re.search(r"address|number|street", parse):
        raise ArgumentError(reason[0])
    if write != '':
        mode = False
    if address_field == '':
        if re.search(r".xlsx?$", file_path) or re.search(r".csv$", file_path):
            raise ArgumentError(reason[2])
        with open(file_path) as f:
            lines = f.readlines()
    else:
        if re.search(r".xlsx?$", file_path):
            df = pd.read_excel(file_path, usecols=[str(address_field)])
        elif re.search(r".csv$", file_path):
            df = pd.read_csv(file_path, usecols=[str(address_field)])
        else:
            raise ArgumentError(reason[1])
        lines = df[str(address_field)]
    # With "--status"/"-S" on the command line and a write target, render a
    # console progress bar while logging results (no per-line printing).
    if len(sys.argv) == 2 and write != '':
        if sys.argv[1] == '--status' or sys.argv[1] == "-S":
            FL = file_len(file_path)
            # One bar segment per ~2.5% of the file (40 segments total).
            barIncr = int(FL * .025); barNum = 0; dashNum = 40; c = 0; lc = 0
            print('|' + ('-' * 40) + '|' + " [0.00%]", end=''); sys.stdout.flush()
            for line in lines:
                perc = (lc/FL)
                bar = '\r|' + ('█' * barNum) + ('-' * dashNum) + '|' + " [{:>.2%}]".format(perc)
                print(bar, end=''); sys.stdout.flush()
                c += 1; lc += 1
                if c == barIncr:
                    if barNum < 39:
                        c = 0; barNum += 1; dashNum -= 1
                        print(bar, end=''); sys.stdout.flush()
                elif lc == FL:
                    print('\r|' + ('█' * 40) + '|' + " [100%]  "); sys.stdout.flush()
                adr = GeoLiberator(str(line))
                # NOTE(review): in this branch the address/number/street
                # results are only written to the log, never captured/printed.
                if parse.lower() == "address":
                    adr.getAddress(log=write)
                elif parse.lower() == "number":
                    adr.getAddressNum(log=write)
                elif parse.lower() == "street":
                    adr.getStreet(log=write)
                elif parse.lower() == "state":
                    adr.get_state(log=write)
                elif parse.lower() == "zipcode":
                    out = adr.get_zip(log=write)
                elif parse.lower() == "full":
                    out = adr.full_address(log=write)
    else:
        if mode == False:
            print("Running...")
        for line in lines:
            adr = GeoLiberator(str(line))
            if parse.lower() == "address":
                out = adr.getAddress(log=write)
            elif parse.lower() == "number":
                out = adr.getAddressNum(log=write)
            elif parse.lower() == "street":
                out = adr.getStreet(log=write)
            elif parse.lower() == "state":
                out = adr.get_state(log=write)
            elif parse.lower() == "zipcode":
                out = adr.get_zip(log=write)
            elif parse.lower() == "full":
                out = adr.full_address(log=write)
            if mode == True:
                print(out)
    print("Done!")
#Takes address as input and parse argument to determine which address property to be standardized
def geoLiberate(addr: str, parse="address"):
    """Print the standardized component of *addr* selected by *parse*.

    Supported keywords: address, number, street, state, zipcode, full.
    Raises ArgumentError for any other value (or a non-string *parse*).
    """
    liberator = GeoLiberator(str(addr))
    try:
        actions = {
            "address": liberator.getAddress,
            "number": liberator.getAddressNum,
            "street": liberator.getStreet,
            "state": liberator.get_state,
            "zipcode": liberator.get_zip,
            "full": liberator.full_address,
        }
        print(actions[parse.lower()]())
    except (AttributeError, KeyError):
        raise ArgumentError(reason[0])
#Returns standardized address based on parse argument
def parse_address(addr: str, parse="address") -> str:
    """Return the standardized component of *addr* selected by *parse*.

    Supported keywords: address, number, street, state, zipcode, full.
    Raises ArgumentError for any other value (or a non-string *parse*).
    """
    liberator = GeoLiberator(str(addr))
    try:
        actions = {
            "address": liberator.getAddress,
            "number": liberator.getAddressNum,
            "street": liberator.getStreet,
            "state": liberator.get_state,
            "zipcode": liberator.get_zip,
            "full": liberator.full_address,
        }
        return str(actions[parse.lower()]())
    except (AttributeError, KeyError):
        raise ArgumentError(reason[0])
| 3.203125 | 3 |
elasticai/creator/vhdl/generator_functions_for_one_lstm_cell.py | es-ude/elastic-ai.creator | 1 | 12770414 | <reponame>es-ude/elastic-ai.creator
import random
from functools import partial
import numpy as np
import torch
from elasticai.creator.qat.layers import QLSTMCell
from elasticai.creator.vhdl.number_representations import (
FloatToSignedFixedPointConverter,
)
from elasticai.creator.vhdl.rom import Rom
def float_list_to_fixed_point(values: list[float], frac_bits: int) -> list[int]:
    """Convert floats to signed fixed-point integers with *frac_bits* fraction bits."""
    converter = FloatToSignedFixedPointConverter(
        bits_used_for_fraction=frac_bits, strict=False
    )
    return [converter(value) for value in values]
def generate_rom_file(
    file_path: str,
    weights_or_bias_list: list[list[int]],
    nbits: int,
    name: str,
    index: int,
) -> None:
    """
    generates the rom files for the weights and bias
    Args:
        file_path (str): paths where files should be stored
        weights_or_bias_list (list[list[int]]): list with four lists with the fixed point values for each weight or bias
        nbits (int): number of bits
        name (str): name for the file
        index (int): index where content is stored in weights_or_bias_list
    """
    with open(file_path, "w") as out_file:
        rom = Rom(
            rom_name=name + "_rom",
            data_width=nbits,
            values=list(weights_or_bias_list[index]),
            resource_option="auto",
        )
        # Write the generated VHDL line by line, newline-terminated.
        out_file.writelines(code_line + "\n" for code_line in rom())
def inference_model(
    lstm_signal_cell: QLSTMCell,
    frac_bits: int,
    input_size: int,
    hidden_size: int,
) -> tuple[list[int], list[int], np.array]:
    """
    do inference on defined QLSTM Cell
    Args:
        lstm_signal_cell (QLSTMCell): current QLSTM Cell
        frac_bits (int): number of fraction bits
        input_size (int): input size of QLSTM Cell
        hidden_size (int): hidden size of QLSTM Cell
    Returns:
        returns three lists/arrays
        the first and second list are the x_h input and cx of the lstm cell
        the third array is the hx of the lstm cell
    """
    # Deterministic inputs so the generated test data is reproducible.
    torch.manual_seed(0)
    random.seed(0)
    inputs = torch.randn(2, 1, input_size)  # (time_steps, batch, input_size)
    hx = torch.randn(1, hidden_size)  # (batch, hidden_size), this is the hidden states
    cx = torch.randn(1, hidden_size)  # this the cell states
    for step in range(inputs.size()[0]):
        # Concatenated [x, h] vector fed into the cell at this time step.
        x_h_input = np.hstack(
            (inputs[step].detach().numpy().flatten(), hx.detach().numpy().flatten())
        )
        hx, cx = lstm_signal_cell(inputs[step], (hx, cx))
    to_fixed = partial(float_list_to_fixed_point, frac_bits=frac_bits)
    return (
        to_fixed(x_h_input),
        to_fixed(cx.detach().numpy().flatten()),
        to_fixed(hx.detach().numpy().flatten()),
    )
def define_weights_and_bias(
    lstm_signal_cell: QLSTMCell,
    frac_bits: int,
    len_weights: int,
    len_bias: int,
) -> tuple[list[list[int]], list[list[int]]]:
    """
    calculates the weights and bias for the given QLSTM Cell
    Args:
        lstm_signal_cell (QLSTMCell): current QLSTM Cell
        frac_bits (int): number of fraction bits
        len_weights (int): (input_size + hidden_size) * hidden_size
        len_bias (int): hidden_size
    Returns:
        returns two lists, one for the weights and one for the bias
        in each list are four list of strings with the hex numbers of the weights or bias
    """
    for param_name, param in lstm_signal_cell.named_parameters():
        if param_name == "weight_ih":
            weight_ih = param.detach().numpy()
        elif param_name == "weight_hh":
            weight_hh = param.detach().numpy()
        elif param_name == "bias_ih":
            bias_ih = param.detach().numpy()
        elif param_name == "bias_hh":
            bias_hh = param.detach().numpy()
    # Stack input-to-hidden and hidden-to-hidden weights side by side, then
    # flatten; the combined bias is the elementwise sum of both bias vectors.
    weights = np.hstack((weight_ih, weight_hh)).flatten().flatten()
    bias = bias_hh + bias_ih
    # Gate order in the stacked parameters: input (i), forget (f), cell (g), output (o).
    gate_weights = [weights[len_weights * k: len_weights * (k + 1)] for k in range(4)]
    gate_biases = [bias[len_bias * k: len_bias * (k + 1)] for k in range(4)]
    to_fixed_point = partial(float_list_to_fixed_point, frac_bits=frac_bits)
    fixed_point_weights = [to_fixed_point(w) for w in gate_weights]
    fixed_point_bias = [to_fixed_point(b) for b in gate_biases]
    return fixed_point_weights, fixed_point_bias
| 2.171875 | 2 |
jobs/send_zipfile_to_teacher.py | jin-hao-chen/filegoback | 0 | 12770415 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PROJ_DIR)
import pymysql
import datetime
import fire
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler
from libs import email_sender
from apps import utils
# Module-level singletons created at import time.
# NOTE(review): credentials below are hard-coded placeholders; load them from
# configuration or environment variables before deploying.
sender = email_sender.EmailSender(sender='jin_hao_chen',
                                  password='<PASSWORD>',
                                  receiver='<EMAIL>')
connection = pymysql.connect(host='localhost',
                             port=3306,
                             user='root',
                             password='<PASSWORD>',
                             db='file_go',
                             charset='utf8mb4')
# Blocking scheduler: scheduler.start() (in run()) blocks the main thread.
scheduler = BlockingScheduler()
def send_zipfile_to_teacher():
    """Zip the current category's uploads and e-mail the archive, then
    deregister this one-shot job from the scheduler."""
    print('Running send_zipfile_to_teacher')
    sql = """
    SELECT name FROM category WHERE is_current = 1
    """
    # Look up which homework category is currently active.
    cursor = connection.cursor()
    cursor.execute(sql)
    category = cursor.fetchone()[0]
    cursor.close()
    # Archive ./uploads/<category> into ./sends/<category>.zip and mail it.
    email_sender.zip_dir('./uploads/' + category, './sends/' + category + '.zip')
    subject = '计科1701算法导论作业: ' + category
    msg = sender.make_msg_with_zipfile(subject,
                                       utils.get_datetime(),
                                       subject,
                                       './sends/', category + '.zip')
    sender.send(msg)
    print('DONE')
    scheduler.remove_job('send_zipfile_to_teacher')
def run(year, month, day, hour, minute):
    """Schedule the one-shot send job for the given local date/time and block."""
    when = datetime.datetime(year, month, day, hour, minute)
    scheduler.add_job(func=send_zipfile_to_teacher,
                      next_run_time=when,
                      id='send_zipfile_to_teacher')
    # BlockingScheduler: this call does not return until shutdown.
    scheduler.start()
def main():
    """CLI entry point: expose this module's functions via python-fire."""
    fire.Fire()


if __name__ == '__main__':
    # Fix: the guard previously read "if __name__ == '__main__':egg", which
    # referenced the undefined name `egg` (NameError when run as a script)
    # and left main() outside the guard, so it also ran on import.
    main()
7 term/Local-Computer-Networks-System-Software/Lab 1/server_package/commands/echo_command.py | Vanya112/BSUIR_Labs | 24 | 12770416 | <reponame>Vanya112/BSUIR_Labs
from server_package.commands.command import Command
from server_package.client_descriptor import ClientDescriptor
from shared.utils.message import compose_message
class EchoCommand(Command):
    """Command that echoes the received payload back to the sending client."""

    _data: str
    _client: ClientDescriptor

    def __init__(self, configuration: dict, client: ClientDescriptor):
        self._client = client
        self._data = configuration['payload']

    def execute(self):
        """Log the payload and send it back over the client's connection."""
        print(f'Echoed data from client: {self._data}')
        reply = compose_message({'payload': self._data})
        self._client.connection.sendall(reply)
| 2.6875 | 3 |
tests/utils/test_task_handler_with_custom_formatter.py | npodewitz/airflow | 8,092 | 12770417 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pytest
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, TaskInstance
from airflow.operators.empty import EmptyOperator
from airflow.utils.log.logging_mixin import set_context
from airflow.utils.state import DagRunState
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
# Fixed execution date used for every DagRun created in this module.
DEFAULT_DATE = datetime(2019, 1, 1)
# Name of the handler entry swapped out in DEFAULT_LOGGING_CONFIG['handlers'].
TASK_HANDLER = 'task'
TASK_HANDLER_CLASS = 'airflow.utils.log.task_handler_with_custom_formatter.TaskHandlerWithCustomFormatter'
# Original handler config, restored after this module's tests finish.
PREV_TASK_HANDLER = DEFAULT_LOGGING_CONFIG['handlers']['task']
DAG_ID = "task_handler_with_custom_formatter_dag"
TASK_ID = "task_handler_with_custom_formatter_task"
@pytest.fixture(scope="module", autouse=True)
def custom_task_log_handler_config():
    """Swap the 'task' handler for TaskHandlerWithCustomFormatter for this
    module's tests, restoring the original handler configuration afterwards."""
    DEFAULT_LOGGING_CONFIG['handlers']['task'] = {
        'class': TASK_HANDLER_CLASS,
        'formatter': 'airflow',
        'stream': 'sys.stdout',
    }
    logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
    # Re-enable the root logger in case a previous test disabled it.
    logging.root.disabled = False
    yield
    DEFAULT_LOGGING_CONFIG['handlers']['task'] = PREV_TASK_HANDLER
    logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
@pytest.fixture()
def task_instance():
    """Yield a TaskInstance backed by a fresh manual DagRun; clean up DB runs
    after the test."""
    dag = DAG(DAG_ID, start_date=DEFAULT_DATE)
    task = EmptyOperator(task_id=TASK_ID, dag=dag)
    dagrun = dag.create_dagrun(DagRunState.RUNNING, execution_date=DEFAULT_DATE, run_type=DagRunType.MANUAL)
    ti = TaskInstance(task=task, run_id=dagrun.run_id)
    # Make sure the instance's logger actually emits during the test.
    ti.log.disabled = False
    yield ti
    clear_db_runs()
def assert_prefix(task_instance: TaskInstance, prefix: str) -> None:
    """Assert that setting the task context prepends *prefix* to the custom
    task handler's log format string."""
    handler = None
    for candidate in task_instance.log.handlers:
        if candidate.name == TASK_HANDLER:
            handler = candidate
            break
    assert handler is not None, "custom task log handler not set up correctly"
    assert handler.formatter is not None, "custom task log formatter not set up correctly"
    expected_format = f"{prefix}:{handler.formatter._fmt}"
    set_context(task_instance.log, task_instance)
    assert expected_format == handler.formatter._fmt
def test_custom_formatter_default_format(task_instance):
    """The default format provides no prefix."""
    # With task_log_prefix_template unset, the expected prefix is empty.
    assert_prefix(task_instance, "")
@conf_vars({("logging", "task_log_prefix_template"): "{{ti.dag_id }}-{{ ti.task_id }}"})
def test_custom_formatter_custom_format_not_affected_by_config(task_instance):
    """A configured prefix template is rendered from the task instance and
    prepended to the handler's format string."""
    assert_prefix(task_instance, f"{DAG_ID}-{TASK_ID}")
| 1.84375 | 2 |
sort/radixSort.py | heqin-zhu/algorithm | 2 | 12770418 | ''' mbinary
#########################################################################
# File : radixSort.py
# Author: mbinary
# Mail: <EMAIL>
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-07-06 15:52
# Description:
#########################################################################
'''
from random import randint
from quickSort import quickSort
from time import time
def radixSort(lst, radix=10):
    """LSD radix sort for non-negative integers.

    Args:
        lst: sequence of non-negative ints (returned list is newly built).
        radix: bucket base, default 10.

    Returns:
        A new list with the elements of lst in ascending order.

    Fixes: handles the empty input (max([]) raised ValueError) and replaces
    the quadratic sum(ls, []) flatten with a linear comprehension.
    """
    if not lst:
        return []
    buckets = [[] for _ in range(radix)]
    mx = max(lst)
    weight = 1
    # One distribution pass per digit, least significant first.
    while mx >= weight:
        for n in lst:
            buckets[(n // weight) % radix].append(n)
        weight *= radix
        lst = [n for bucket in buckets for n in bucket]
        buckets = [[] for _ in range(radix)]
    return lst
def countSort(lst, mn, mx):
    """Counting sort for integers known to lie in the inclusive range [mn, mx].

    Returns a new sorted list; lst is left untouched.
    """
    counts = [0] * (mx - mn + 1)
    for value in lst:
        counts[value - mn] += 1
    result = []
    for offset, count in enumerate(counts):
        result.extend([offset + mn] * count)
    return result
def timer(funcs, span, num=1000000):
    """Benchmark each function in *funcs* on a shared random integer list.

    Every function receives its own copy of the same data so timings are
    comparable; wall-clock results are printed, nothing is returned.
    """
    sample = [randint(0, span) for _ in range(num)]
    print('range({}), {} items'.format(span, num))
    for fn in funcs:
        working_copy = sample.copy()
        start = time()
        fn(working_copy)
        elapsed = time() - start
        print('{}: {}s'.format(fn.__name__, elapsed))
if __name__ == '__main__':
    # Compare quickSort, radixSort and built-in sorted on two profiles:
    # a huge value range with few items, then a small range with many items.
    timer([quickSort, radixSort, sorted], 1000000000000, 1000)
    timer([quickSort, radixSort, sorted], 10000, 100000)
    lst = [randint(0, 100) for i in range(1000)]
    # Sanity-check countSort against the built-in sort.
    print(countSort(lst, 0, 100) == sorted(lst))
| 3.25 | 3 |
ibmcnx/config/WebSessionTO.py | stoeps13/ibmcnx2 | 11 | 12770419 | <reponame>stoeps13/ibmcnx2
'''
Set WebSession Timeout in all application servers
Author: <NAME>
Mail: <EMAIL>
Documentation: http://scripting101.stoeps.de
Version: 5.0.1
Date: 09/19/2015
License: Apache 2.0
'''
import ibmcnx.functions
def getAnswer(question):
    """Prompt repeatedly until the user types a purely numeric answer.

    Returns the answer as a string (callers concatenate it into wsadmin
    configuration strings).
    """
    answer = ''
    # Fix: use isdigit() instead of isnumeric(). This script runs under
    # wsadmin's Jython 2.x (note the Python-2 print statements below), where
    # byte strings -- the type raw_input returns -- have no isnumeric()
    # method, so the original loop condition raised AttributeError.
    while not answer.isdigit():
        answer = raw_input('\t' + question)
    return answer
# Collect every application server in the cell via the wsadmin AdminTask API.
wasServers = []
wasServers = AdminTask.listServers(
    '[-serverType APPLICATION_SERVER]').splitlines()
print '\n'
timeoutValue = getAnswer(
    "Which value should be set as websession timeout (integer)? ")
print '\n'
for wasServer in wasServers:
    # TuningParams holds the HTTP session settings for this server; set the
    # session invalidation timeout on each application server.
    tuningVM = AdminConfig.list('TuningParams', wasServer)
    AdminConfig.modify(
        tuningVM, '[[invalidationTimeout "' + timeoutValue + '"]]')
    print "\tSession Timeout set for " + wasServer.split('|')[0].split('(')[0] + ":\t\t" + timeoutValue
print '\n'
# Persist the configuration changes to the WAS repository.
ibmcnx.functions.saveChanges()
print '\n'
main.py | mark-lvl/recommendation-engine | 1 | 12770420 | # import libraries
import pandas as pd
import os
import sys
from operator import itemgetter
from collections import defaultdict
class MovieRecommendation:
    """Apriori-based movie recommender over the MovieLens ratings dump.

    Pipeline: load_data -> data_prep -> create_freq_itemsets ->
    extract_association_rules -> report_associations / evaluate_model.
    """
    user_threshold: int
    min_support: int
    min_confidence: float

    def __init__(self):
        """Initialise tunable thresholds and empty data placeholders."""
        # users in train set
        self.user_threshold = 200
        # minimum support for movies
        self.min_support = 50
        # Defining a minimum for confidence level
        self.min_confidence = 0.9
        self.itemsets = {}
        # Dataset placeholders
        self.ratings_full = {}
        self.ratings = {}  # Minimise version of the whole dataset
        self.movies = {}
        self.users_movies = {}
        self.significant_rules = {}

    def load_data(self):
        """Read ratings.dat and movies.dat from ./input into DataFrames."""
        # Setting path
        data_folder = os.path.join(os.path.curdir, "input")
        rating_filename = os.path.join(data_folder, "ratings.dat")
        movie_name_filename = os.path.join(data_folder, "movies.dat")
        # Defining dateparser for reading date column
        date_parser = lambda x: pd.to_datetime(x, unit='s')
        # Reading the reviews file and defining the columns name
        self.ratings_full = pd.read_csv(rating_filename,
                                        delimiter="::",
                                        header=None,
                                        names=['UserID', 'MovieID', 'Rating', 'Datetime'],
                                        parse_dates=['Datetime'],
                                        date_parser=date_parser,
                                        engine='python')
        # Reading movies dat file and setting column names
        self.movies = pd.read_csv(movie_name_filename,
                                  delimiter="::",
                                  header=None,
                                  encoding="mac-roman",
                                  engine='python')
        self.movies.columns = ['MovieID', 'Title', 'Genres']

    def data_prep(self, rating_threshold=3, user_threshold=200):
        """Flag favorable ratings and carve out the training subset.

        Args:
            rating_threshold: ratings strictly above this count as favorable.
            user_threshold: users with UserID <= this form the train set.

        NOTE(review): this local user_threshold does not update
        self.user_threshold, which evaluate_model uses for the test split --
        passing a non-default value here desynchronises the two splits.
        """
        # Adding Favorable feature if user rated over 3
        self.ratings_full['Favorable'] = self.ratings_full['Rating'] > rating_threshold
        # Make a sample dataset to make our Apriori algorithm faster
        self.ratings = self.ratings_full[self.ratings_full.UserID <= user_threshold]
        # Filtering the dataset for only favorable movies
        favorable_ratings = self.ratings[self.ratings['Favorable']]
        # List of movies which each user considered as favorable
        self.users_movies = dict((user_id, frozenset(movies))  # why frozenset, only cuz of speed in search
                                 for user_id, movies in
                                 favorable_ratings.groupby("UserID")["MovieID"])

    def create_initial_itemset(self):
        """Build length-1 itemsets from movies whose favorable count exceeds min_support."""
        # Frequency of each movie given a favorable review
        movie_freq = self.ratings[['MovieID', 'Favorable']].groupby('MovieID').sum()
        """
        The structure of itemsets will be as a dictionary with following format:
        Structure:
            {length_of_itemset: {(set_of_movies_list_in_current_itemset): frequency_of_itemset},}
        Key: int
            length of itemset
        Value: dict
            Key: frozenset
                a frozenset of list of involving movies in current itemset
            Value: int
                how many times current combination of movies occurred in user ratings
        Example:
            for a itemset comprises of 3 movies
            {(movie_1,movie_15,movie_495) : 59}
        """
        # itemset_length=1 are a list of all movies which have rating more than min_support
        self.itemsets[1] = dict((frozenset((movie_id,)), row["Favorable"])
                                for movie_id, row in movie_freq.iterrows()
                                if row["Favorable"] > self.min_support)
        print("[length:itemsets]: ({}:{})".format(1, len(self.itemsets[1])))
        sys.stdout.flush()

    def create_freq_itemsets(self, superset_max_size=15):
        """Iteratively grow frequent itemsets (Apriori) up to superset_max_size."""
        print("Itemsets creation is in progress, be patient...\n")
        sys.stdout.flush()
        # Creating the first itemsets
        self.create_initial_itemset()
        # Creating further itemsets with size bigger than 2
        for superset_length in range(2, superset_max_size + 1):
            # Finding candidate itemsets in various lengths up to super_max_size based on preceding itemset
            candidate_superset_freq = defaultdict(int)
            for user_id, user_movies in self.users_movies.items():
                for itemset in self.itemsets[superset_length - 1]:
                    # Check if itemset is a subset of user favorite movies
                    if itemset.issubset(user_movies):
                        # Construct superset with union of current itemset and each of another movies
                        # which user liked separately
                        for other_reviewed_movie in user_movies - itemset:  # exclude current movies in itemset first
                            current_superset = itemset | frozenset((other_reviewed_movie,))  # union each remaining itemset
                            # increase the frequency of recent superset which just occurred
                            candidate_superset_freq[current_superset] += 1
            # Checking for frequency of any recent built itemset (candidates) again minimum threshold
            superset = dict([(candidate_superset, candidate_superset_frequency)
                             for candidate_superset, candidate_superset_frequency in candidate_superset_freq.items()
                             if candidate_superset_frequency >= self.min_support])
            print("[length:itemsets]: ({}:{})".format(superset_length, len(superset)))
            sys.stdout.flush()
            if len(superset):
                self.itemsets[superset_length] = superset
            elif len(superset) == 0:
                # No candidate reached min_support: longer itemsets cannot exist.
                print("No further exploring.")
                sys.stdout.flush()
                break
        # Itemsets in length 1 are not useful for recommending system so we can drop it
        del self.itemsets[1]
        print('\nItemsets total count: {0}'.format(sum(len(itemsets) for itemsets in self.itemsets.values())))

    def extract_association_rules(self):
        """
        In order to identifying association rules we have to iterate over all itemsets and within each itemset
        pick each member and consider it as conclusion and all others as premises at a time.
        """
        candidate_rules = []
        for itemset_length, itemset_dict in self.itemsets.items():
            for itemset in itemset_dict.keys():
                # selecting each item in itemset and consider it as conclusion
                for conclusion in itemset:
                    # making premise set by excluding conclusion
                    premise = itemset - set((conclusion,))
                    candidate_rules.append((premise, conclusion))
        # Next, we compute the confidence of each of these rules.
        valid_rule = defaultdict(int)
        invalid_rule = defaultdict(int)
        for user, fav_movies in self.users_movies.items():
            for candidate_rule in candidate_rules:
                premise, conclusion = candidate_rule
                # If user liked all premise movies
                if premise.issubset(fav_movies):
                    # If user liked conclusion movie too
                    if conclusion in fav_movies:
                        # Then rule should be considered as valid
                        valid_rule[candidate_rule] += 1
                    else:
                        invalid_rule[candidate_rule] += 1
        # Calculating confidence level for candidate_rules
        # NOTE(review): raises ZeroDivisionError for a rule whose premise is
        # not a subset of any user's favorites (both counters stay 0).
        rules_confidence = {
            candidate_rule: valid_rule[candidate_rule] / float(valid_rule[candidate_rule] +
                                                               invalid_rule[candidate_rule])
            for candidate_rule in candidate_rules}
        # Filter out the rules with poor confidence
        self.significant_rules = {rule: confidence for rule, confidence in rules_confidence.items()
                                  if confidence > self.min_confidence}
        print("Among {} candidate rules only which {} of them are significant.".format(len(candidate_rules),
                                                                                       len(self.significant_rules)))

    def get_movie_name(self, movie_id):
        """Return the title of *movie_id* from the movies DataFrame."""
        return self.movies.loc[self.movies["MovieID"] == movie_id, 'Title'].values[0]

    def report_associations(self, rule_count=10):
        """Print the top *rule_count* significant rules with movie titles."""
        # Sorting significant rules dictionary based on significant level
        sorted_confidence = sorted(self.significant_rules.items(), key=itemgetter(1), reverse=True)
        # NOTE(review): IndexError if fewer than rule_count significant rules exist.
        for index in range(rule_count):
            (premise, conclusion) = sorted_confidence[index][0]
            premise_names = "\n ".join(self.get_movie_name(movie_id=mov_id) for mov_id in premise)
            conclusion_name = self.get_movie_name(movie_id=conclusion)
            print("Rule rank #{0} (confidence {1:.3f}):".format(index + 1,
                                                                self.significant_rules[(premise, conclusion)]))
            print("If a person recommends:\n {0} \nThey will also recommend: \n {1}".format(premise_names,
                                                                                            conclusion_name))
            print("\n")

    def evaluate_model(self, rule_count=10):
        """Re-score the top rules on held-out users (UserID > self.user_threshold)."""
        # Make a test dataset to evaluate model
        test_df = self.ratings_full[self.ratings_full.UserID > self.user_threshold]
        test_fav = test_df[test_df["Favorable"]]
        test_users_movies = dict((test_user_id, frozenset(movies))
                                 for test_user_id, movies in
                                 test_fav.groupby("UserID")["MovieID"])
        candidate_rules = []
        for itemset_length, itemset_dict in self.itemsets.items():
            for itemset in itemset_dict.keys():
                for conclusion in itemset:
                    premise = itemset - set((conclusion,))
                    candidate_rules.append((premise, conclusion))
        # Same evaluation as what we have done in extarcting association rules
        valid_rule = defaultdict(int)
        invalid_rule = defaultdict(int)
        for user, fav_movies in test_users_movies.items():
            for candidate_rule in candidate_rules:
                premise, conclusion = candidate_rule
                # If user liked all premise movies
                if premise.issubset(fav_movies):
                    # If user liked conclusion movie too
                    if conclusion in fav_movies:
                        # Then rule should be considered as valid
                        valid_rule[candidate_rule] += 1
                    else:
                        invalid_rule[candidate_rule] += 1
        # NOTE(review): ZeroDivisionError possible when a premise matches no test user.
        test_confidence = {candidate_rule: valid_rule[candidate_rule] / float(
            valid_rule[candidate_rule] + invalid_rule[candidate_rule])
            for candidate_rule in candidate_rules}
        sorted_confidence = sorted(self.significant_rules.items(), key=itemgetter(1), reverse=True)
        for index in range(rule_count):
            (premise, conclusion) = sorted_confidence[index][0]
            premise_names = "\n ".join(self.get_movie_name(movie_id=mov_id) for mov_id in premise)
            conclusion_name = self.get_movie_name(movie_id=conclusion)
            print("Rule rank #{0} \n({1:.3f} confidence )\n({2:.3f} test confidence):".format(index + 1,
                                                                                              self.significant_rules[
                                                                                                  (premise, conclusion)],
                                                                                              test_confidence.get(
                                                                                                  (premise, conclusion),
                                                                                                  -1)))
            print("If a person recommends:\n {0} \nThey will also recommend: \n {1}".format(premise_names,
                                                                                            conclusion_name))
            print("\n")
if __name__ == '__main__':
    # End-to-end run of the recommendation pipeline on ./input data.
    engine = MovieRecommendation()
    # Load datasets
    engine.load_data()
    # Minimizing the dataset size
    engine.data_prep(user_threshold=200)
    # Constructing itemsets
    engine.create_freq_itemsets(superset_max_size=15)
    # Making association rules
    engine.extract_association_rules()
    # Printing reports of extracted rules
    engine.report_associations()
    # Evaluate model
    engine.evaluate_model()
| 3.09375 | 3 |
limitedink/utils/utils_dataset.py | huashen218/LimitedInk | 3 | 12770421 | <reponame>huashen218/LimitedInk
import os
import json
import torch
import bisect
from tqdm import tqdm
import numpy as np
from itertools import chain
from copy import deepcopy as copy
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset
from typing import Any, Callable, List, Tuple, Dict, Set, Union
from dataclasses import dataclass, asdict, is_dataclass
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import logging
logger = logging.getLogger(__name__)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
"""Step1: Read Annotations"""
@dataclass(eq=True, frozen=True)
class Evidence:
    """
    (docid, start_token, end_token) form the only official Evidence; sentence level annotations are for convenience.

    Args:
        text: Some representation of the evidence text
        docid: Some identifier for the document
        start_token: The canonical start token, inclusive
        end_token: The canonical end token, exclusive
        start_sentence: Best guess start sentence, inclusive
        end_sentence: Best guess end sentence, exclusive

    Note:
        The -1 defaults are "unknown" sentinels. eq=True + frozen=True make
        instances hashable, so they can be stored in the frozensets of
        evidence groups built by annotations_from_jsonl.
    """
    text: Union[str, List[int]]
    docid: str
    start_token: int=-1
    end_token: int=-1
    start_sentence: int=-1
    end_sentence: int=-1
@dataclass(eq=True, frozen=True)
class Annotation:
    """A labeled instance: a query over one or more documents plus the
    evidence groups that justify its classification.

    Each element of `evidences` is an "evidence group" -- a tuple of Evidence
    objects that together suffice to answer the query. Multiple groups mean
    multiple independent justifications.

    Fields:
        annotation_id: unique identifier of this annotation.
        classification: the gold label string.
        evidences: frozenset of evidence-group tuples.
        query: the query string (or token-id list).
        query_type: optional extra information about the query.
        docids: optional set of document ids where evidence may be found.
    """
    annotation_id: str
    classification: str
    evidences: Set[Tuple[Evidence]]
    query: Union[str, List[int]]
    query_type: str = None
    docids: Set[str] = None

    def all_evidences(self) -> Tuple[Evidence]:
        """Flatten every evidence group into one tuple of Evidence objects."""
        flattened = chain.from_iterable(self.evidences)
        return tuple(flattened)
def annotations_from_jsonl(fp: str) -> List[Annotation]:
    """Parse one Annotation per line from the jsonl file at *fp*.

    Each line's 'evidences' list-of-lists is rebuilt as a frozenset of
    Evidence tuples so annotations are hashable and order-insensitive.
    """
    annotations = []
    with open(fp, 'r') as handle:
        for raw_line in handle:
            record = json.loads(raw_line)
            record['evidences'] = frozenset(
                tuple(Evidence(**ev) for ev in group)
                for group in record['evidences']
            )
            annotations.append(Annotation(**record))
    return annotations
def load_jsonl(fp: str) -> List[dict]:
    """Read the jsonl file at *fp* and return one decoded dict per line."""
    with open(fp, 'r') as handle:
        return [json.loads(line) for line in handle]
def write_jsonl(jsonl, output_file):
    """Serialize each record of *jsonl* as one JSON object per line.

    Keys are emitted sorted so the output is deterministic.
    """
    with open(output_file, 'w') as sink:
        for record in jsonl:
            sink.write(json.dumps(record, sort_keys=True))
            sink.write('\n')
"""Step2: Read Full Texts"""
def load_documents(data_dir: str, docids: Set[str]=None) -> Dict[str, List[List[str]]]:
    """Loads a subset of available documents from disk.

    Each document is assumed to be serialized as newline ('\n') separated sentences.
    Each sentence is assumed to be space (' ') joined tokens.

    Returns a mapping docid -> list of sentences, each a list of tokens;
    empty lines and empty tokens are dropped. Delegates to
    load_documents_from_file when a consolidated 'docs.jsonl' exists.
    """
    jsonl_path = os.path.join(data_dir, 'docs.jsonl')
    docs_dir = os.path.join(data_dir, 'docs')
    if os.path.exists(jsonl_path):
        # The two storage layouts are mutually exclusive.
        assert not os.path.exists(docs_dir)
        return load_documents_from_file(data_dir, docids)
    if docids is None:
        wanted = sorted(os.listdir(docs_dir))
    else:
        wanted = sorted(set(str(d) for d in docids))
    documents = dict()
    for docid in wanted:
        with open(os.path.join(docs_dir, docid), 'r') as handle:
            stripped = [raw.strip() for raw in handle.readlines()]
            sentences = [line for line in stripped if line]
            documents[docid] = [
                [tok for tok in sentence.strip().split(' ') if tok]
                for sentence in sentences
            ]
    return documents
def load_documents_from_file(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:
    """Loads a subset of available documents from 'docs.jsonl' file on disk.

    Each document is assumed to be serialized as newline ('\n') separated sentences.
    Each sentence is assumed to be space (' ') joined tokens.
    """
    records = load_jsonl(os.path.join(data_dir, 'docs.jsonl'))
    documents = {record['docid']: record['document'] for record in records}
    if docids is None:
        wanted = sorted(list(documents.keys()))
    else:
        wanted = sorted(set(str(d) for d in docids))
    result = dict()
    for docid in wanted:
        result[docid] = [
            line.strip().split(' ') for line in documents[docid].split('\n')
        ]
    return result
def load_flattened_documents(data_dir: str, docids: Set[str]) -> Dict[str, List[str]]:
    """Loads a subset of available documents from disk.

    Returns each document as a single flat token list (sentence boundaries
    discarded).
    """
    return {
        docid: list(chain.from_iterable(sentences))
        for docid, sentences in load_documents(data_dir, docids).items()
    }
"""Step3: Generate Token-wise Examples"""
class Example(object):
    """Container for one classification instance plus its rationale evidence."""
    def __init__(self, id, doc_id, query, query_type, doc_toks, sentences, evidence_spans, evidence_toks, label):
        # NOTE: `id` shadows the builtin but is kept for interface stability.
        self.id = id                          # annotation id (unique per example)
        self.doc_id = doc_id                  # id of the source document
        self.query = query                    # query/question string (may be None)
        self.query_type = query_type          # optional extra query metadata
        self.doc_toks = doc_toks              # flat token list of the document
        self.sentences = sentences            # document as list of token lists
        self.evidence_spans = evidence_spans  # (start, end) token spans of evidence
        self.evidence_toks = evidence_toks    # token lists extracted per span
        self.label = label                    # classification label string
### Truncate for Sentences
### Truncate for Sentences
def read_examples(configs, data_dir, annotated_dataset, documents, split):
    """Convert ERASER-style annotations plus raw documents into Example objects.

    Args:
        configs: dict; uses 'task', 'truncate' and 'max_num_sentences'.
        data_dir: directory containing '<split>.jsonl'.
        annotated_dataset: list of Annotation objects for this split.
        documents: mapping docid -> list of sentences (each a token list).
        split: split name used to locate the jsonl file.

    Returns:
        List of Example. When configs['truncate'] is set, each document is cut
        to the tf-idf-best window of 'max_num_sentences' sentences (stride 5)
        relative to the query, and evidence spans are re-based onto that
        window; the in-memory jsonl annotations are rewritten accordingly
        (this variant does not write them back to disk).
    """
    def is_whitespace(c):
        # NOTE(review): helper is currently unused in this function.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False
    examples = []
    no_annot = 0
    no_evidence_in_window = 0
    truncated_annotation_dataset = {}
    for example_no, item in enumerate(annotated_dataset):
        evidences = item.evidences
        label = item.classification
        evidence_list = []
        start_tokens = []
        end_tokens = []
        document_list = []
        for g in evidences:
            # List made up of one or more Evidence objects
            # Sometimes entire sentences in the document are turned on sometimes its just words in combined tokens
            for e in g:
                evidence_list.append(e)
                start_tokens.append(e.start_token)
                # seems like boolq end token is not inclusive
                if "boolq" in configs['task']:
                    end_tokens.append(e.end_token + 1)
                else:
                    end_tokens.append(e.end_token)
                document_list.append(e.docid)
        document_list = list(set(document_list))
        # These tasks have one document per annotation, keyed by annotation id.
        if configs['task'] in ["beer", "imdb", "twitter", "sst"]:
            document_list = [item.annotation_id]
        # Add for SciFact ['4414547', '5956380'] docid
        if "scifact" in configs['task']:
            if len(document_list) > 1:
                document_list_new = []
                document_list_new.append(document_list[0])
                document_list = document_list_new
        assert len(document_list) < 2
        if len(document_list) == 0:
            # Annotation carries no evidence docids; fall back to docids/id.
            no_annot += 1
            document_list = item.docids
            if not document_list:
                document_list = [item.annotation_id]
        doc = documents[document_list[0]]
        tokens = [item for sublist in doc for item in sublist]
        sentences = doc
        # Unique evidence pairs
        evidence_spans = list(set(zip(start_tokens, end_tokens)))
        evidence_tokens = []  # List of lists
        for span in evidence_spans:
            evidence_tokens.append(tokens[span[0]:span[1]])
        # Label
        label = item.classification
        query = item.query
        annotation_id = item.annotation_id
        query_type = item.query_type
        # if evidence inference or boolq: do tfidf over of query over 20 sentence windows with stride 5
        if configs['truncate']:
            tfidf_vectorizer = TfidfVectorizer(use_idf=True)
            all_docs = [query]
            doc_spans = []
            sent_spans = []
            sentence_starts = [0] + np.cumsum([len(s) for s in sentences])[:-1].tolist()
            for span_start in range(0, len(sentences) - configs['max_num_sentences'] + 1, 5):
                sentences_in_span = sentences[span_start:span_start + configs['max_num_sentences']]
                paragraph = [tok for sentence in sentences_in_span for tok in sentence]
                doc_spans.append((sentence_starts[span_start], len(paragraph)))
                sent_spans.append((span_start, span_start + configs['max_num_sentences']))
                all_docs.append(" ".join(paragraph))
            # tfidf vectorize
            # put the last configs['max_num_sentences'] in another doc
            last_paragraph = [tok for sentence in sentences[-configs['max_num_sentences']:] for tok in sentence]
            if len(sentences) - configs['max_num_sentences'] > 0:
                all_docs.append(" ".join(last_paragraph))
                sent_spans.append((len(sentences) - configs['max_num_sentences'], len(sentences)))
                doc_spans.append((sentence_starts[len(sentences) - configs['max_num_sentences']], len(last_paragraph)))
            else:
                # doc has fewer than max_num_sentences
                all_docs.append(" ".join(last_paragraph))
                sent_spans.append((0, len(sentences)))
                doc_spans.append((sentence_starts[0], len(last_paragraph)))
            tfidf_vecs = tfidf_vectorizer.fit_transform(all_docs)
            # Similarity of the query (row 0) against every candidate window.
            cosine_similarities = linear_kernel(tfidf_vecs[0:1], tfidf_vecs).flatten()
            # How often does the best window containing evidence (see what % of time some evidence s
            best_window = np.argsort(cosine_similarities[1:])[::-1][0]
            tokens = all_docs[best_window+1].split()  # possible there is an error here since enough
            sentences = sentences[sent_spans[best_window][0]:sent_spans[best_window][1]]
            best_span = (doc_spans[best_window][0], doc_spans[best_window][0] + doc_spans[best_window][1])
            evidence_in_window = False
            new_evidence_spans = []
            for ev_no, evidence_span in enumerate(evidence_spans):
                if evidence_span[0] >= best_span[0] and evidence_span[1] <= best_span[1]:
                    evidence_in_window = True
                    new_evidence_spans.append((evidence_span[0] - best_span[0], evidence_span[1] - best_span[0]))
                ### Added for non-empty []
                else:
                    # Evidence fell outside the window: mark the whole window.
                    new_evidence_spans.append((0, best_span[1]-best_span[0]))
            ## new evidence spans
            evidence_spans = new_evidence_spans
            truncated_annotation_dataset[(annotation_id, document_list[0])] = (best_span[0], best_span[1],
                                                                               sent_spans[best_window][0],
                                                                               sent_spans[best_window][1])
            if evidence_in_window:
                no_evidence_in_window += 1
        examples.append(Example(
            id = annotation_id,
            doc_id = document_list[0],
            query=query,
            query_type = query_type,
            doc_toks=tokens,
            sentences = sentences,
            evidence_spans=evidence_spans,
            evidence_toks=evidence_tokens,
            label=label
        ))
    # The idea is to rewrite the jsonl so that when its loaded again used again,
    # the annotations of rationales are for the selected BERT window
    # if split is not "train":
    if configs['truncate']:
        json_data = load_jsonl(os.path.join(data_dir, split + '.jsonl'))
        for ex_no, pt in enumerate(json_data):
            key = pt['annotation_id'], pt['docids'][0]
            new_window = truncated_annotation_dataset[key]
            new_evidences = []
            for ev_grp in pt['evidences']:
                new_evidence_grp = []
                for ev in ev_grp:
                    if ev['start_token'] >= new_window[0] and ev['end_token'] <= new_window[1]:
                        new_start_token = ev['start_token'] - new_window[0]
                        new_end_token = new_start_token + (ev['end_token'] - ev['start_token'])
                        new_evidence_grp.append({
                            "docid" : pt['docids'][0],
                            "start_token" : new_start_token,
                            "start_sentence" : ev['start_sentence'] - new_window[2],
                            "end_token": new_end_token,
                            "end_sentence" : ev['end_sentence'] - new_window[2],
                            "text" : ev['text']
                        })
                    ### Added for non-empty
                    else:
                        new_start_token = 0
                        new_end_token = new_window[1]-new_window[0]
                        new_evidence_grp.append({
                            "docid" : pt['docids'][0],
                            "start_token" : new_start_token,
                            "start_sentence" : 0,
                            "end_token": new_end_token,
                            "end_sentence" : new_window[3] - new_window[2],
                            "text" : ev['text']
                        })
                if len(new_evidence_grp) > 0:
                    new_evidences.append(new_evidence_grp)
            # replace current data
            json_data[ex_no]['evidences'] = new_evidences
    return examples
### Truncate for Sentences
def read_examples_truncate_tokens(configs, data_dir, annotated_dataset, documents, split):
    """Variant of read_examples that also persists the rewritten annotations.

    Same windowing/truncation logic as read_examples (minus the beer/imdb and
    scifact special cases), but when configs['truncate'] is set the re-based
    jsonl annotations are written to '<data_dir>_truncated/<split>.jsonl'.

    Returns:
        List of Example instances.
    """
    def is_whitespace(c):
        # NOTE(review): helper is currently unused in this function.
        if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
            return True
        return False
    examples = []
    no_annot = 0
    no_evidence_in_window = 0
    truncated_annotation_dataset = {}
    for example_no, item in enumerate(annotated_dataset):
        evidences = item.evidences
        label = item.classification
        evidence_list = []
        start_tokens = []
        end_tokens = []
        document_list = []
        for g in evidences:
            # List made up of one or more Evidence objects
            # Sometimes entire sentences in the document are turned on sometimes its just words in combined tokens
            for e in g:
                evidence_list.append(e)
                start_tokens.append(e.start_token)
                # seems like boolq end token is not inclusive
                if "boolq" in configs['task']:
                    end_tokens.append(e.end_token + 1)
                else:
                    end_tokens.append(e.end_token)
                document_list.append(e.docid)
        document_list = list(set(document_list))
        assert len(document_list) < 2
        if len(document_list) == 0:
            # Annotation carries no evidence docids; fall back to docids/id.
            no_annot += 1
            document_list = item.docids
            if not document_list:
                document_list = [item.annotation_id]
        doc = documents[document_list[0]]
        tokens = [item for sublist in doc for item in sublist]
        sentences = doc
        # Unique evidence pairs
        evidence_spans = list(set(zip(start_tokens, end_tokens)))
        evidence_tokens = []  # List of lists
        for span in evidence_spans:
            evidence_tokens.append(tokens[span[0]:span[1]])
        # Label
        label = item.classification
        query = item.query
        annotation_id = item.annotation_id
        query_type = item.query_type
        # if evidence inference or boolq: do tfidf over of query over 20 sentence windows with stride 5
        if configs['truncate']:
            tfidf_vectorizer = TfidfVectorizer(use_idf=True)
            all_docs = [query]
            doc_spans = []
            sent_spans = []
            sentence_starts = [0] + np.cumsum([len(s) for s in sentences])[:-1].tolist()
            for span_start in range(0, len(sentences) - configs['max_num_sentences'] + 1, 5):
                sentences_in_span = sentences[span_start:span_start + configs['max_num_sentences']]
                paragraph = [tok for sentence in sentences_in_span for tok in sentence]
                doc_spans.append((sentence_starts[span_start], len(paragraph)))
                sent_spans.append((span_start, span_start + configs['max_num_sentences']))
                all_docs.append(" ".join(paragraph))
            # tfidf vectorize
            # put the last configs['max_num_sentences'] in another doc
            last_paragraph = [tok for sentence in sentences[-configs['max_num_sentences']:] for tok in sentence]
            if len(sentences) - configs['max_num_sentences'] > 0:
                all_docs.append(" ".join(last_paragraph))
                sent_spans.append((len(sentences) - configs['max_num_sentences'], len(sentences)))
                doc_spans.append((sentence_starts[len(sentences) - configs['max_num_sentences']], len(last_paragraph)))
            else:
                # doc has fewer than max_num_sentences
                all_docs.append(" ".join(last_paragraph))
                sent_spans.append((0, len(sentences)))
                doc_spans.append((sentence_starts[0], len(last_paragraph)))
            tfidf_vecs = tfidf_vectorizer.fit_transform(all_docs)
            # Similarity of the query (row 0) against every candidate window.
            cosine_similarities = linear_kernel(tfidf_vecs[0:1], tfidf_vecs).flatten()
            # How often does the best window containing evidence (see what % of time some evidence s
            best_window = np.argsort(cosine_similarities[1:])[::-1][0]
            tokens = all_docs[best_window+1].split()  # possible there is an error here since enough
            sentences = sentences[sent_spans[best_window][0]:sent_spans[best_window][1]]
            best_span = (doc_spans[best_window][0], doc_spans[best_window][0] + doc_spans[best_window][1])
            evidence_in_window = False
            new_evidence_spans = []
            for ev_no, evidence_span in enumerate(evidence_spans):
                if evidence_span[0] >= best_span[0] and evidence_span[1] <= best_span[1]:
                    evidence_in_window = True
                    new_evidence_spans.append((evidence_span[0] - best_span[0], evidence_span[1] - best_span[0]))
                ### Added for non-empty []
                else:
                    # Evidence fell outside the window: mark the whole window.
                    new_evidence_spans.append((0, best_span[1]-best_span[0]))
            ## new evidence spans
            evidence_spans = new_evidence_spans
            truncated_annotation_dataset[(annotation_id, document_list[0])] = (best_span[0], best_span[1],
                                                                               sent_spans[best_window][0],
                                                                               sent_spans[best_window][1])
            if evidence_in_window:
                no_evidence_in_window += 1
        examples.append(Example(
            id = annotation_id,
            doc_id = document_list[0],
            query=query,
            query_type = query_type,
            doc_toks=tokens,
            sentences = sentences,
            evidence_spans=evidence_spans,
            evidence_toks=evidence_tokens,
            label=label
        ))
    # The idea is to rewrite the jsonl so that when its loaded again used again,
    # the annotations of rationales are for the selected BERT window
    # if split is not "train":
    if configs['truncate']:
        json_data = load_jsonl(os.path.join(data_dir, split + '.jsonl'))
        for ex_no, pt in enumerate(json_data):
            key = pt['annotation_id'], pt['docids'][0]
            new_window = truncated_annotation_dataset[key]
            new_evidences = []
            for ev_grp in pt['evidences']:
                new_evidence_grp = []
                for ev in ev_grp:
                    if ev['start_token'] >= new_window[0] and ev['end_token'] <= new_window[1]:
                        new_start_token = ev['start_token'] - new_window[0]
                        new_end_token = new_start_token + (ev['end_token'] - ev['start_token'])
                        new_evidence_grp.append({
                            "docid" : pt['docids'][0],
                            "start_token" : new_start_token,
                            "start_sentence" : ev['start_sentence'] - new_window[2],
                            "end_token": new_end_token,
                            "end_sentence" : ev['end_sentence'] - new_window[2],
                            "text" : ev['text']
                        })
                    ### Added for non-empty
                    else:
                        new_start_token = 0
                        new_end_token = new_window[1]-new_window[0]
                        new_evidence_grp.append({
                            "docid" : pt['docids'][0],
                            "start_token" : new_start_token,
                            "start_sentence" : 0,
                            "end_token": new_end_token,
                            "end_sentence" : new_window[3] - new_window[2],
                            "text" : ev['text']
                        })
                if len(new_evidence_grp) > 0:
                    new_evidences.append(new_evidence_grp)
            # replace current data
            json_data[ex_no]['evidences'] = new_evidences
        # write the new json data
        save_dir = data_dir + "_truncated"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        write_jsonl(json_data, os.path.join(save_dir, split + '.jsonl'))
    return examples
"""Step4: Convert Examples to Features (Token)"""
class TokenFeature(object):
    """Model-ready token-level features for one example.

    Holds the padded/truncated input ids, attention mask, segment ids,
    p_mask (1 = not a candidate token), the human-evidence mask, and
    bookkeeping ids produced by convert_examples_to_features.
    """
    def __init__(self,
                 unique_id,
                 annotation_id,
                 doc_id,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 input_ids,
                 input_mask,
                 segment_ids,
                 cls_index,
                 p_mask,
                 paragraph_len,
                 class_label,
                 evidence_mask):
        self.unique_id = unique_id                  # running feature id
        self.annotation_id = annotation_id          # source annotation id
        self.doc_id = doc_id                        # source document id
        self.doc_span_index = doc_span_index        # window index (always 0 here)
        self.tokens = tokens                        # wordpiece tokens incl. [CLS]/[SEP]
        self.token_to_orig_map = token_to_orig_map  # wordpiece pos -> original token idx
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        self.paragraph_len = paragraph_len          # number of document sub-tokens
        self.class_label = class_label
        # Bug fix: the original assignment ended with a trailing comma, which
        # silently wrapped the mask in a 1-tuple (cf. SentenceFeature, which
        # stores the bare value).
        self.evidence_mask = evidence_mask
def convert_examples_to_features(configs, examples, tokenizer,
                                 max_seq_length, max_query_length,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
                                 cls_token_segment_id=0, pad_token_segment_id=0,
                                 mask_padding_with_zero=True):
    """Tokenize each Example into one fixed-length TokenFeature.

    Sequence layout: [CLS] <query, padded to max_query_length-1> [SEP]
    <document sub-tokens> [SEP] <padding to max_seq_length>.

    Args:
        configs: dict; uses 'model', 'task' and 'classes'.
        examples: list of Example objects.
        tokenizer: sub-word tokenizer with tokenize / convert_tokens_to_ids.
        max_seq_length: total padded sequence length.
        max_query_length: fixed query budget (query segment is always this long).

    Returns:
        List of TokenFeature, one per example.
    """
    features = []
    token_length = []           # per-example sub-token counts (diagnostics only)
    evidence_token_length = []  # per-example last-span lengths (diagnostics only)
    if "roberta" in configs['model']:  # Change the start and end token
        cls_token = "<s>"
        sep_token = "</s>"
    evidence_classes = dict((y, x) for (x, y) in enumerate(configs['classes']))
    unique_id = 1000000000
    for (example_index, example) in enumerate(examples):
        query_tokens = []
        if example.query is not None:
            query_tokens = tokenizer.tokenize(example.query)
            query_tokens = query_tokens[:max_query_length-1]
            if len(query_tokens) < max_query_length-1:
                # NOTE(review): pads the *token* list with pad_token (an id,
                # default 0) rather than a pad token string -- confirm the
                # tokenizer's convert_tokens_to_ids tolerates mixed entries.
                while len(query_tokens) < max_query_length-1:
                    query_tokens.append(pad_token)
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        example.doc_toks = [tok.strip() for tok in example.doc_toks]
        for (i, token) in enumerate(example.doc_toks):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        if configs['task'] not in ["beer", "twitter", "sst"]:  ### No human annotation
            # Map original-token evidence spans onto sub-token positions.
            evidence_span_positions = []
            for span in example.evidence_spans:
                if "scifact" in configs['task']:  # Add for SciFact [out of index]
                    if span[0] > len(orig_to_tok_index)-1:
                        span = tuple([0, len(orig_to_tok_index)-1])
                tok_start_position = orig_to_tok_index[span[0]]
                if span[1] < len(example.doc_toks) - 1:
                    tok_end_position = orig_to_tok_index[span[1] + 1] - 1
                else:
                    tok_end_position = len(all_doc_tokens) - 1
                evidence_span_positions.append((tok_start_position, tok_end_position))
            # Add dataset stats
            token_length.append(len(all_doc_tokens))
            evidence_token_length.append(tok_end_position-tok_start_position+1)
        # Not breaking up doc_tokens for now since its just a classification,
        # TODO: Introduce global normalization
        doc_span_index = 0
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3  # NOTE: currently unused
        tokens = []
        segment_ids = []
        p_mask = []  # p_mask (inverted) is turned off for [CLS]=0 and [Paragraph]=0 tokens only, others ([Query]=1, [SEP]=1)
        token_to_orig_map = {}
        # Human annotation evidence
        evidence_mask = []
        # CLS (Required for classification)
        tokens.append(cls_token)
        segment_ids.append(cls_token_segment_id)
        p_mask.append(0)
        cls_index = 0
        evidence_mask.append(0)
        # Query (if not Empty)
        for token in query_tokens:
            tokens.append(token)
            segment_ids.append(sequence_a_segment_id)
            p_mask.append(1)
            evidence_mask.append(0)
        # SEP token -> Paragraph
        tokens.append(sep_token)
        segment_ids.append(sequence_a_segment_id)
        p_mask.append(1)
        evidence_mask.append(0)
        for i, tok in enumerate(all_doc_tokens):
            token_to_orig_map[len(tokens)] = tok_to_orig_index[i]
            tokens.append(all_doc_tokens[i])
            segment_ids.append(sequence_b_segment_id)
            p_mask.append(0)
            evidence_mask.append(0)
        paragraph_len = len(all_doc_tokens)
        if configs['task'] not in ["beer", "twitter", "sst"]:  # No human annotation evidence
            evidence_length = 0
            for span in evidence_span_positions:
                ### Add Query Span Here ###
                # Offset by the [CLS] + fixed-length query segment.
                for i in range(span[0]+max_query_length+1, span[1]+max_query_length+1+1):
                    evidence_mask[i] = 1
                    evidence_length += 1
        # SEP token -> Pad / Truncate
        tokens.append(sep_token)
        segment_ids.append(sequence_b_segment_id)
        p_mask.append(1)
        evidence_mask.append(0)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(pad_token)
            input_mask.append(0 if mask_padding_with_zero else 1)
            segment_ids.append(pad_token_segment_id)
            p_mask.append(1)
            evidence_mask.append(0)
        # Truncate
        if len(input_ids) > max_seq_length:
            input_ids = input_ids[:max_seq_length-1]
            input_mask = input_mask[:max_seq_length-1]
            segment_ids = segment_ids[:max_seq_length-1]
            # Bug fix: the original did `p_mask = segment_ids[:max_seq_length-1]`,
            # clobbering p_mask with segment ids; truncate p_mask itself.
            p_mask = p_mask[:max_seq_length-1]
            evidence_mask = evidence_mask[:max_seq_length-1]
            # Add SEP token after this
            input_ids.append(tokenizer._convert_token_to_id(sep_token))
            input_mask.append(1)
            segment_ids.append(sequence_b_segment_id)
            p_mask.append(1)
            evidence_mask.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(evidence_mask) == max_seq_length
        label = evidence_classes[example.label] if configs['task'] not in ["beer"] else 0  # Beer is Regression Task
        features.append(
            TokenFeature(
                annotation_id=example.id,
                doc_id = example.doc_id,
                unique_id=unique_id,
                doc_span_index=doc_span_index,
                tokens=tokens,
                token_to_orig_map=token_to_orig_map,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                cls_index=cls_index,
                p_mask=p_mask,
                paragraph_len=paragraph_len,
                class_label=label,
                evidence_mask=evidence_mask,
            ))
        unique_id += 1
    return features
"""Step4: Convert Examples to Features (Sentence)"""
class SentenceFeature(object):
def __init__(self,
unique_id,
annotation_id,
doc_id,
doc_span_index,
tokens,
sentences,
gold_sentences,
token_to_orig_map,
input_ids,
input_mask,
segment_ids,
cls_index,
p_mask,
sentence_starts,
sentence_ends,
sentence_mask,
paragraph_len,
class_label,
# evidence_mask):
evidence_mask=None):
self.unique_id = unique_id
self.annotation_id = annotation_id
self.doc_id = doc_id
self.doc_span_index = doc_span_index
self.tokens = tokens
self.sentences = sentences
self.gold_sentences =gold_sentences
self.token_to_orig_map = token_to_orig_map
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.sentence_starts = sentence_starts
self.sentence_ends = sentence_ends
self.sentence_mask = sentence_mask
self.paragraph_len = paragraph_len
self.class_label = class_label
self.evidence_mask = evidence_mask
def convert_examples_to_sentence_features(configs, examples, tokenizer, max_seq_length, max_query_length,
cls_token_at_end=False, cls_token='[CLS]', sep_token='[SEP]',
pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0, mask_padding_with_zero=True):
# Enumerating sentences (for multirc, fever and boolq that is already being done)
# F start and ends to sample from
# Issue is how to deconvolute and spread the results back
features = []
num_sentences = []
if "roberta" in configs['model']:
# Change the start and end token
cls_token = "<s>"
sep_token = "</s>"
evidence_classes = dict((y, x) for (x, y) in enumerate(configs['classes']))
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
"""Each example is one instance in input. -> Start to Parse Query
"""
query_tokens = []
if example.query is not None:
query_tokens = tokenizer.tokenize(example.query)
query_tokens = query_tokens[:max_query_length-1]
if len(query_tokens) < max_query_length-1:
while len(query_tokens) < max_query_length-1:
query_tokens.append(pad_token)
""" Start to Parse Sentences and Tokens
"""
all_sentences = []
sentence_starts = []
sentence_ends = []
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
token_cnt = 0
for sent_no, sent in enumerate(example.sentences):
sent_tokens = [tok.strip() for tok in sent]
sent_tokenized = []
sentence_starts.append(len(all_doc_tokens))
for token in sent_tokens:
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
all_doc_tokens.append(sub_token)
tok_to_orig_index.append(token_cnt)
token_cnt += 1
sent_tokenized += sub_tokens
sentence_ends.append(len(all_doc_tokens))
# If ther are sentences with fewer than 5 tokens ignore them
# if len(sent_tokenized) < 5:
# continue
all_sentences.append(sent_tokenized)
all_sentences_untruncated = copy(all_sentences)
sentence_starts_untruncated = copy(sentence_starts)
sentence_ends_untruncated = copy(sentence_ends)
# truncate if all_doc_tokens + query + 2 > max_seq_len
if len(query_tokens) + len(all_doc_tokens) + 3 > max_seq_length:
sentence_break = bisect.bisect_left(sentence_starts, max_seq_length - len(query_tokens)- 2) - 1
surplus = (max_seq_length - len(query_tokens) - 3) - sentence_starts[sentence_break] #>=0
last_sentence = all_sentences[sentence_break][:surplus]
all_sentences = all_sentences[:sentence_break] + [last_sentence]
sentence_starts = sentence_starts[:sentence_break+1]
sentence_ends = sentence_ends[:sentence_break] + [sentence_starts[-1] + len(last_sentence)]
all_doc_tokens = all_doc_tokens[:sentence_ends[-1]]
# update tok_to_orig and vice versa??
sentence_mask = [1] * configs['max_num_sentences']
num_valid_sentences = configs['max_num_sentences']
if len(all_sentences) > configs['max_num_sentences']:
# Shave off requisite from sentence_ends, sentence_starts, all_sentences and all_doc_tokens
sentence_starts = sentence_starts[:configs['max_num_sentences']]
sentence_ends = sentence_ends[:configs['max_num_sentences']]
all_sentences = all_sentences[:configs['max_num_sentences']]
all_doc_tokens = all_doc_tokens[:sentence_ends[-1]]
num_valid_sentences = configs['max_num_sentences']
elif len(all_sentences) < configs['max_num_sentences']:
# sentence_starts, sentence_ends need to be expanded
# tokens can be added anymore
num_valid_sentences = len(all_sentences)
num_sentences_to_add = configs['max_num_sentences'] - len(all_sentences)
for sent_no in range(num_sentences_to_add):
# add position of the ending SEP token
sentence_starts.append(-1)
sentence_ends.append(-1)
all_sentences.append([])
sentence_mask[configs['max_num_sentences'] - sent_no - 1] = 0
# all_doc_tokens cant take more tokens and will be separately padded if required
# Construct the sequence of CLS Q SEP DOC SEP
for sent_num, sent in enumerate(all_sentences):
assert sent == all_doc_tokens[sentence_starts[sent_num]:sentence_ends[sent_num]]
doc_span_index = 0
# Fill up tokens from query and sentence tokens
tokens = []
segment_ids = []
p_mask = [] # p_mask (inverted) is turned off for CLS and Paragraph tokens only
token_to_orig_map = {}
# CLS (Required for classification)
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# Query tokens
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# SEP token
# The only difference between this setting and the setting for cognitive features
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
for i, tok in enumerate(all_doc_tokens):
token_to_orig_map[len(tokens)] = tok_to_orig_index[i]
tokens.append(all_doc_tokens[i])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
# the only changes in all_doc_tokens is shortening it, so tok_to_orig_index is still valid upto len(all_doc_tokens)
paragraph_len = len(all_doc_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1 if input_ids[i] != 0 else 0 for i in range(len(input_ids))]
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# Evidence labels
evidence_sentences = [0]* configs['max_num_sentences']
gold_sentences = [0] * len(all_sentences_untruncated)
doc_offset = len(query_tokens) + 2
for span in example.evidence_spans:
tok_start_position = orig_to_tok_index[span[0]]
if span[1] < len(example.doc_toks) - 1:
tok_end_position = orig_to_tok_index[span[1] + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
range_start = bisect.bisect_left(sentence_starts[:num_valid_sentences], tok_start_position)
range_end = bisect.bisect_left(sentence_ends[:num_valid_sentences], tok_end_position)+1
if range_start < len(sentence_starts) and \
not tok_start_position == sentence_starts[range_start]:
range_start -= 1
sentence_membership = range(range_start, range_end)
tok_seq_start_position, tok_seq_end_position = tok_start_position + doc_offset, tok_end_position + doc_offset
for j in sentence_membership:
if j > len(evidence_sentences) - 1:
# The sentence that was found is beyond what fits in this bert window
continue
evidence_sentences[j] = 1
range_start = bisect.bisect_left(sentence_starts_untruncated, tok_start_position)
range_end = bisect.bisect_left(sentence_ends_untruncated, tok_end_position)+1
if range_start < len(sentence_starts_untruncated) and \
not tok_start_position == sentence_starts_untruncated[range_start]:
range_start -= 1
sentence_membership_untruncated = range(range_start, range_end)
for j in sentence_membership_untruncated:
if j > len(gold_sentences) - 1:
# The sentence that was found is beyond what fits in this bert window
continue
gold_sentences[j] = 1
# map from sentence in 0, args.max_num_sentences to the 512 tokens (store it in p_mask)
# p_mask only contains sentence membership of the paragrph and is of length 512 - max_quey_len
p_mask = np.zeros(max_seq_length-max_query_length+1)
last_sent_id = None
for sent_no, sent_boundary in enumerate(zip(sentence_starts, sentence_ends)):
if sent_boundary != (-1,-1):
# 1 offset is to accomodate first SEP token
p_mask[sent_boundary[0]+1:sent_boundary[1]+1] = sent_no
last_sent_id = sent_no
# SEP tokens part of first and last sentence
# handle empty sentences that have no tokens
p_mask[sentence_ends[last_sent_id]+1] = last_sent_id
p_mask[0] = 0
# the rest of the tokens will have 0, but will not be included because of the input_mask
# After this operation all the useless sentences will point to the first SEP
sentence_starts = np.asarray(sentence_starts) + max_query_length + 1
sentence_ends = np.asarray(sentence_ends) + max_query_length + 1
# Label (Intern class label)
if len(evidence_classes) == 1:
label = example.label
else:
label = evidence_classes[example.label]
features.append(SentenceFeature(
unique_id=unique_id,
annotation_id = example.id,
doc_id = example.doc_id,
doc_span_index=doc_span_index,
tokens=tokens,
sentences = all_sentences_untruncated,
gold_sentences = gold_sentences,
token_to_orig_map=token_to_orig_map,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
sentence_starts=sentence_starts,
sentence_ends=sentence_ends,
sentence_mask=sentence_mask,
paragraph_len=paragraph_len,
class_label=label,
evidence_mask=evidence_sentences))
num_sentences.append(len(all_sentences_untruncated))
return features
| 2.125 | 2 |
idl2py/wcs/add_distort.py | RapidLzj/idl2py | 0 | 12770422 | <reponame>RapidLzj/idl2py
"""
By Dr <NAME> -Q, NAOC
v1 2019-04-27
"""
import numpy as np
from..util import *
def add_distort():
pass
# pro add_distort, hdr, astr
#; NAME:
#; ADD_DISTORT
#; PURPOSE:
#; Add the distortion parameters in an astrometry structure to a FITS header.
#; EXPLANATION:
#; Called by PUTAST to add SIP (http://fits.gsfc.nasa.gov/registry/sip.html )
#; or TNX ( http://fits.gsfc.nasa.gov/registry/tnx.html ) distortion
#; parameters in an astrometry structure to a FITS header
#;
#; Prior to April 2012, PUTAST did not add distortion parameters so one
#; had to call ADD_DISTORT after PUTAST.
#;
#; IDL> putast,h ,astr0
#; IDL> add_distort,h,astr0
#;
#; CALLING SEQUENCE:
#; add_distort, hdr, astr
#;
#; INPUTS:
#; HDR - FITS header, string array. HDR will be updated to contain
#; the supplied astrometry.
#; ASTR - IDL structure containing values of the astrometry parameters
#; CDELT, CRPIX, CRVAL, CTYPE, LONGPOLE, PV2, and DISTORT
#; See EXTAST.PRO for more info about the structure definition
#;
#; PROCEDURES USED:
#; SXADDPAR, TAG_EXIST()
#; REVISION HISTORY:
#; Written by <NAME> May 2005
#; Enforce i+j = n for ij coefficients of order n <NAME> April 2012
#; Support IRAF TNX distortion M. Sullivan March 2014
#;;-
# npar = N_params()
#
# if ( npar LT 2 ) then begin ;Was header supplied?
# print,'Syntax: ADD_DISTORT, Hdr, astr'
# return
# endif
#
# add_distort = tag_exist(astr,'distort')
# IF(~ add_distort)THEN RETURN
#
# IF(astr.distort.name EQ 'SIP') then begin
#
# sxaddpar,hdr,'CTYPE1','RA---TAN-SIP'
# sxaddpar,hdr,'CTYPE2','DEC--TAN-SIP'
# distort = astr.distort
# a_dimen = size(distort.a,/dimen)
# b_dimen = size(distort.b,/dimen)
# ap_dimen = size(distort.ap,/dimen)
# bp_dimen = size(distort.bp,/dimen)
#
# if a_dimen[0] GT 0 then begin
# a_order = a_dimen[0]-1
# sxaddpar, hdr, 'A_ORDER', a_order, /savec, $
# 'polynomial order, axis 1, detector to sky '
# for i=0, a_order do begin
# for j = 0, a_order-i do begin
# aij = distort.a[i,j]
# if aij NE 0.0 then $
# sxaddpar, hdr, 'A_' + strtrim(i,2)+ '_' + strtrim(j,2), aij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# if b_dimen[0] GT 0 then begin
# b_order = b_dimen[0]-1
# sxaddpar, hdr, 'B_ORDER', a_order, /savec , $
# 'polynomial order, axis 2, detector to sky'
# for i=0, b_order do begin
# for j = 0, b_order-i do begin
# bij = distort.b[i,j]
# if bij NE 0.0 then $
# sxaddpar, hdr, 'B_' + strtrim(i,2)+ '_' + strtrim(j,2), bij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# if ap_dimen[0] GT 0 then begin
# ap_order = ap_dimen[0]-1
# sxaddpar, hdr, 'AP_ORDER', a_order, /savec, $
# ' polynomial order, axis 1, sky to detector '
# for i=0, ap_order do begin
# for j = 0, ap_order-i do begin
# apij = distort.ap[i,j]
# if apij NE 0.0 then $
# sxaddpar, hdr, 'AP_' + strtrim(i,2)+ '_' + strtrim(j,2), apij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
#
# if bp_dimen[0] GT 0 then begin
# bp_order = bp_dimen[0]-1
# sxaddpar, hdr, 'BP_ORDER', a_order, /savec, $
# ' polynomial order, axis 2, sky to detector '
# for i=0, bp_order do begin
# for j = 0, bp_order-i do begin
# bpij = distort.bp[i,j]
# if bpij NE 0.0 then $
# sxaddpar, hdr, 'BP_' + strtrim(i,2)+ '_' + strtrim(j,2), bpij, $
# ' distortion coefficient', /savec
# endfor
# endfor
# endif
#
# ENDIF ELSE IF(astr.distort.name EQ 'TNX')THEN BEGIN
#
# sxaddpar, hdr,'WAT0_001','system=image'
#
# string1='wtype=tnx axtype=ra lngcor = "3.'
# string1+= ' '+STRN(astr.distort.lngcor.xiorder,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.etaorder,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.xterms,FORMAT='(F2.0)')
# string1+= ' '+STRN(astr.distort.lngcor.ximin,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.ximax,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.etamin,FORMAT='(F19.16)')
# string1+= ' '+STRN(astr.distort.lngcor.etamax,FORMAT='(F19.16)')
# FOR i=0,N_ELEMENTS(astr.distort.lngcor.coeff)-1 DO BEGIN
# string1+=' '+STRN(astr.distort.lngcor.coeff[i],FORMAT='(F19.16)')
# ENDFOR
# string1+= '"'
#
# string2='wtype=tnx axtype=dec latcor = "3. '
# string2+= ' '+STRN(astr.distort.latcor.xiorder,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.etaorder,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.xterms,FORMAT='(F2.0)')
# string2+= ' '+STRN(astr.distort.latcor.ximin,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.ximax,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.etamin,FORMAT='(F19.16)')
# string2+= ' '+STRN(astr.distort.latcor.etamax,FORMAT='(F19.16)')
# FOR i=0,N_ELEMENTS(astr.distort.latcor.coeff)-1 DO BEGIN
# string2+= ' '+STRN(astr.distort.latcor.coeff[i],FORMAT='(F19.16)')
# ENDFOR
# string2+= '"'
#
# len1=STRLEN(string1)
# n1=len1/70
# IF(len1 MOD 68 GT 0)THEN n1++
# FOR i=0,n1-1 DO BEGIN
# s=STRMID(string1,i*68,68)
#; PRINT,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),' ',s
# sxaddpar, hdr,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),s
# ENDFOR
# len2=STRLEN(string2)
# n2=len2/70
# IF(len2 MOD 68 GT 0)THEN n2++
# FOR i=0,n2-1 DO BEGIN
# s=STRMID(string2,i*68,68)
#; PRINT,'WAT1_'+STRN(i+1,FORMAT='(I3.3)'),' ',s
# sxaddpar, hdr,'WAT2_'+STRN(i+1,FORMAT='(I3.3)'),s
# ENDFOR
#
# ENDIF
#
# return
# end
| 2.59375 | 3 |
mpf/platforms/visual_pinball_engine/generate.py | enteryourinitials/mpf | 0 | 12770423 | """Generate python files from protobufs."""
import glob
import re
from grpc_tools import protoc
# Compile every .proto under ./protobuf into *_pb2.py / *_pb2_grpc.py modules.
_proto_sources = list(glob.iglob('./protobuf/*.proto'))
_protoc_args = [
    'grpc_tools.protoc',
    '--proto_path=protobuf/',
    '--python_out=.',
    '--grpc_python_out=.',
]
protoc.main(_protoc_args + _proto_sources)

# Rewrite absolute `import xxx_pb2` statements in the generated modules into
# package-relative ones (`from . import xxx_pb2`) so imports resolve in-package.
for script in glob.iglob('./*_pb2*.py'):
    with open(script, 'r+') as handle:
        original = handle.read()
        patched = re.sub(r'\n(import .+_pb2.*)', '\nfrom . \\1', original)
        # Overwrite the file in place with the patched contents.
        handle.seek(0)
        handle.write(patched)
        handle.truncate()
| 2.46875 | 2 |
MAX6675/demo.py | jonalter/micropython-hw-lib | 18 | 12770424 | <filename>MAX6675/demo.py
from max6675 import MAX6675
from machine import Pin
import time
# Thermocouple wiring: serial data out, clock, and chip-select pins.
so = Pin(12, Pin.IN)
sck = Pin(14, Pin.OUT)
cs = Pin(16, Pin.OUT)

# Named `sensor` rather than `max` so the built-in max() is not shadowed.
sensor = MAX6675(sck, cs, so)

# Print ten temperature readings, one per second.
for _ in range(10):
    print(sensor.read())
    time.sleep(1)
pylti1p3/contrib/django/oidc_login.py | edubadges/pylti1.3 | 62 | 12770425 | from django.http import HttpResponse # type: ignore
from pylti1p3.oidc_login import OIDCLogin
from pylti1p3.request import Request
from .cookie import DjangoCookieService
from .redirect import DjangoRedirect
from .request import DjangoRequest
from .session import DjangoSessionService
class DjangoOIDCLogin(OIDCLogin):
    """OIDC third-party login flow wired to Django's HTTP machinery.

    Adapts the framework-agnostic ``OIDCLogin`` base with Django-specific
    request, cookie, session and redirect implementations.
    """

    def __init__(self, request, tool_config, session_service=None, cookie_service=None, launch_data_storage=None):
        # Accept either an already-adapted pylti1p3 Request or a raw Django one.
        if isinstance(request, Request):
            django_request = request
        else:
            django_request = DjangoRequest(request)
        # Fall back to the Django-backed services when none are supplied.
        cookie_service = cookie_service or DjangoCookieService(django_request)
        session_service = session_service or DjangoSessionService(request)
        super(DjangoOIDCLogin, self).__init__(django_request, tool_config, session_service, cookie_service,
                                              launch_data_storage)

    def get_redirect(self, url):
        """Wrap *url* in a redirect that carries the cookie service along."""
        return DjangoRedirect(url, self._cookie_service)

    def get_response(self, html):
        """Render *html* as a plain Django ``HttpResponse``."""
        return HttpResponse(html)
| 1.96875 | 2 |
sciquence/sequences/cutting.py | krzjoa/sciquence | 8 | 12770426 | # <NAME> 2017
# sciquence: Time series & sequences in Python
#
# Functions for sequence processing
# Author: <NAME>
#
# License: MIT
import numpy as np
from itertools import groupby
############## Getting sequences ##############
def seq(array):
    '''
    Cut the input array into maximal runs of identical, consecutive elements.

    Parameters
    ----------
    array: ndarray
        Numpy array

    Returns
    -------
    seq_list: list of ndarray
        One array per run of equal values, in order of appearance.

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0])
    >>> print sq.seq(x)
    [array([1, 1, 1, 1, 1, 1]), array([0, 0, 0, 0, 0, 0]), array([1, 1, 1, 1, 1]), array([0, 0, 0, 0])]

    '''
    runs = []
    for _value, run in groupby(array):
        runs.append(np.array(list(run)))
    return runs
def specseq(array, element):
    '''
    Return only the runs of consecutive values equal to the given element.

    Parameters
    ----------
    array: ndarray
        Numpy array
    element: object
        Element

    Returns
    -------
    seq_list: list of ndarray
        List of sequences consisting of the requested element only.

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 44, 44, 44, 44, 44, 1, 1, 0, 0, 0, 0])
    >>> print sq.specseq(x, 44)
    [array([44, 44, 44, 44, 44])]

    '''
    matching = []
    for value, run in groupby(array):
        if value == element:
            matching.append(np.array(list(run)))
    return matching
def nseq(array):
    '''
    Return the runs of consecutive falsy (zero) elements.

    Parameters
    ----------
    array: array-like
        Numpy array

    Returns
    -------
    seq_list: list of ndarray
        List of negative sequences

    Examples
    --------
    >>> from sciquence import sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0])
    >>> print sq.nseq(x)
    [array([0, 0, 0, 0, 0, 0]), array([0, 0, 0, 0])]

    '''
    negatives = []
    for value, run in groupby(array):
        # Truthiness test (matches the positive counterpart pseq).
        if not value:
            negatives.append(np.array(list(run)))
    return negatives
def pseq(array):
    '''
    Return the runs of consecutive truthy (one) elements.

    Parameters
    ----------
    array: array-like
        Numpy array

    Returns
    -------
    seq_list: list of ndarray
        List of positive sequences

    Examples
    --------
    >>> from sciquence import sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0])
    >>> print sq.pseq(x)
    [array([1, 1, 1, 1, 1, 1]), array([1, 1, 1, 1, 1])]

    '''
    positives = []
    for value, run in groupby(array):
        # Truthiness test (matches the negative counterpart nseq).
        if value:
            positives.append(np.array(list(run)))
    return positives
def seqi(array):
    '''
    Get list of sequences and the corresponding list of index arrays.

    Parameters
    ----------
    array: ndarray
        Numpy array

    Returns
    -------
    seq_list: list of ndarray
        List of sequences
    idx_list: list of ndarray
        List of sequences indices

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 44, 44, 44, 44, 44, 1, 1, 0, 0, 0, 0])
    >>> print sq.seqi(x)
    ([array([1, 1, 1, 1, 1, 1]), array([0, 0, 0, 0, 0, 0]), array([1]),
      array([44, 44, 44, 44, 44]), array([1, 1]), array([0, 0, 0, 0])],
     [array([0, 1, 2, 3, 4, 5]), array([ 6,  7,  8,  9, 10, 11]), array([12]),
      array([13, 14, 15, 16, 17]), array([18, 19]), array([20, 21, 22, 23])])

    '''
    # TODO: optimize
    lseq = seq(array)
    indices = []
    last_index = 0
    for s in lseq:
        # Wrap in np.array (instead of a bare range) so the return type matches
        # the docstring and the sibling functions nseqi/pseqi/specseqi.
        indices.append(np.array(range(last_index, last_index + len(s))))
        last_index += len(s)
    return lseq, indices
def nseqi(array):
    '''
    Get the index arrays of negative sequences (runs of zeros).

    Parameters
    ----------
    array: ndarray
        Numpy array

    Returns
    -------
    idx_list: list of ndarray
        Index positions of every run of zeros, in order of appearance.

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0])
    >>> print sq.nseqi(x)
    [array([ 6,  7,  8,  9, 10, 11]), array([17, 18, 19, 20])]

    '''
    # NOTE: the previous version also accumulated the matching runs into a
    # local list that was never used; that dead code has been removed.
    indices = []
    last_index = 0
    for s in seq(array):
        if s[0] == 0:
            indices.append(np.array(range(last_index, last_index + len(s))))
        last_index += len(s)
    return indices
def pseqi(array):
    '''
    Get the index arrays of positive sequences (runs of ones).

    Parameters
    ----------
    array: ndarray
        Numpy array

    Returns
    -------
    idx_list: list of ndarray
        Index positions of every run of ones, in order of appearance.

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0])
    >>> print sq.pseqi(x)
    [array([0, 1, 2, 3, 4, 5]), array([12, 13, 14, 15, 16])]

    '''
    # NOTE: the previous version also accumulated the matching runs into a
    # local list that was never used; that dead code has been removed.
    indices = []
    last_index = 0
    for s in seq(array):
        if s[0] == 1:
            indices.append(np.array(range(last_index, last_index + len(s))))
        last_index += len(s)
    return indices
def specseqi(array, elem):
    '''
    Get the index arrays of sequences consisting of a specific element.

    Parameters
    ----------
    array: ndarray
        Numpy array
    elem: object
        A sequence element

    Returns
    -------
    idx_list: list of ndarray
        Index positions of every run equal to ``elem``, in order of appearance.

    Examples
    --------
    >>> import sciquence.sequences as sq
    >>> import numpy as np
    >>> x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 44, 44, 44, 44, 44, 1, 1, 0, 0, 0, 0])
    >>> print sq.specseqi(x, 44)
    [array([13, 14, 15, 16, 17])]

    '''
    # NOTE: the previous version also accumulated the matching runs into a
    # local list that was never used; that dead code has been removed.
    indices = []
    last_index = 0
    for s in seq(array):
        if s[0] == elem:
            indices.append(np.array(range(last_index, last_index + len(s))))
        last_index += len(s)
    return indices
############### Splitting into chunks ###########
def chunk(array, chunk_size):
    '''
    Split numpy array into chunks of equal length (the last chunk may be shorter).

    Parameters
    ----------
    array: ndarray
        A numpy array
    chunk_size: int
        Desired length of a single chunk

    Returns
    -------
    chunks: list of ndarray
        Chunks of equal length

    Examples
    --------
    >>> import numpy as np
    >>> import sciquence.sequences as sq
    >>> x = np.array([1,2,3,4,5,6,7,8,9,10])
    >>> sq.chunk(x, 3)
    [array([1, 2, 3]), array([4, 5, 6]), array([7, 8, 9]), array([10])]

    '''
    chunks = []
    # range() instead of the Python-2-only xrange(): works on both Python 2 and 3.
    for i in range(0, len(array), chunk_size):
        chunks.append(array[i:i + chunk_size])
    return chunks
if __name__ == '__main__':
    # Smoke test: indices of the run of 44s in a mixed sequence.
    # print() call form (not the Python-2 print statement, which is a
    # SyntaxError under Python 3 and made the whole module unimportable there);
    # a single-argument print(...) is valid on Python 2 as well.
    x = np.array([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 44, 44, 44, 44, 44, 1, 1, 0, 0, 0, 0])
    print(specseqi(x, 44))
| 3.46875 | 3 |
mmskeleton/utils/__init__.py | sj-li/PR-GCN | 5 | 12770427 | <filename>mmskeleton/utils/__init__.py
from .importer import import_obj, call_obj, set_attr, get_attr
from .checkpoint import load_checkpoint
# Names re-exported as the package's public API (used by `from ... import *`).
__all__ = ['import_obj', 'call_obj', 'set_attr', 'get_attr', 'load_checkpoint']
backup-23.09.2021/core/find_hot_functions.py | ComputerSystemsLaboratory/Code-Size-Prediction | 0 | 12770428 | import os
import csv
import yaml
import argparse
import numpy as np
from operator import itemgetter
from os import listdir
from os.path import isfile, join
""" Find the biggest files
"""
def Main():
    """CLI entry point: print the ``<name>.ll`` files of the hottest functions.

    Loads a YAML mapping of function name -> instruction count for the chosen
    dataset, ranks functions by count (descending) and prints the first
    ``max_hot_function`` names with an ``.ll`` suffix appended, one per line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset",
                        metavar='p0',
                        nargs='?',
                        const=1,
                        help='datasetname',
                        type=str,
                        default='Mibench-f-complete.yaml')
    parser.add_argument("max_hot_function",
                        metavar='p1',
                        nargs='?',
                        const=2,
                        help='The first n hot functions',
                        type=int,
                        default=300)
    args = parser.parse_args()

    # Location of the per-dataset instruction-count YAML files.
    statistics_path = '/home/andrefz/research/m-project/core-massalin/tools/inst-count-pass/'
    with open(statistics_path + args.dataset) as handle:
        instruction_counts = yaml.safe_load(handle)

    # Rank by instruction count, highest first, and print the top n names.
    ranked = sorted(instruction_counts.items(), key=itemgetter(1), reverse=True)
    for function_name, _count in ranked[:args.max_hot_function]:
        print(function_name + '.ll')
# Run the CLI when executed as a script.
if __name__ == '__main__':
    Main()
| 2.734375 | 3 |
example.py | Techno263/python-minecraft-data | 0 | 12770429 | import minecraft_data
# Java edition minecraft-data
mcd = minecraft_data("1.13")
print(mcd.version)
# Look up an item/block by numeric id, then by name.
print(mcd.find_item_or_block(1))
print(mcd.find_item_or_block('stone'))
# Indexed data tables exposed by the wrapper.
print(mcd.recipes['5'][0])
print(mcd.windows['minecraft:brewing_stand'])
print(mcd.effects_name['Haste'])
# Pocket Edition minecraft-data ("pe" selects the Bedrock/PE dataset)
mcd_pe = minecraft_data("1.0", "pe")
print(mcd_pe.version)
print(mcd_pe.find_item_or_block('stone'))
| 2.4375 | 2 |
misago/threads/tests/test_post_model.py | HenryChenV/iJiangNan | 1 | 12770430 | <reponame>HenryChenV/iJiangNan<filename>misago/threads/tests/test_post_model.py
from datetime import timedelta
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from misago.categories.models import Category
from misago.threads.checksums import update_post_checksum
from misago.threads.models import Post, Thread
UserModel = get_user_model()
class PostModelTests(TestCase):
    """Tests for Post model's merge() and move() behaviors."""
    def setUp(self):
        # Fixture: one user, one thread in the first category, one first post.
        self.user = UserModel.objects.create_user("Bob", "<EMAIL>", "<PASSWORD>")
        datetime = timezone.now()
        self.category = Category.objects.all_categories()[:1][0]
        self.thread = Thread(
            category=self.category,
            started_on=datetime,
            starter_name='Tester',
            starter_slug='tester',
            last_post_on=datetime,
            last_poster_name='Tester',
            last_poster_slug='tester',
        )
        self.thread.set_title("Test thread")
        self.thread.save()
        self.post = Post.objects.create(
            category=self.category,
            thread=self.thread,
            poster=self.user,
            poster_name=self.user.username,
            poster_ip='127.0.0.1',
            original="Hello! I am test message!",
            parsed="<p>Hello! I am test message!</p>",
            checksum="nope",
            posted_on=datetime,
            updated_on=datetime,
        )
        # Recompute a valid checksum for the post body before linking it.
        update_post_checksum(self.post)
        self.post.save(update_fields=['checksum'])
        self.thread.first_post = self.post
        self.thread.last_post = self.post
        self.thread.save()
    def test_merge_invalid(self):
        """see if attempts for invalid merges fail"""
        # can't merge with itself
        with self.assertRaises(ValueError):
            self.post.merge(self.post)
        other_user = UserModel.objects.create_user("Jeff", "<EMAIL>", "<PASSWORD>")
        other_thread = Thread.objects.create(
            category=self.category,
            started_on=timezone.now(),
            starter_name='Tester',
            starter_slug='tester',
            last_post_on=timezone.now(),
            last_poster_name='Tester',
            last_poster_slug='tester',
        )
        # can't merge with other users posts
        with self.assertRaises(ValueError):
            self.post.merge(
                Post.objects.create(
                    category=self.category,
                    thread=self.thread,
                    poster=other_user,
                    poster_name=other_user.username,
                    poster_ip='127.0.0.1',
                    original="Hello! I am test message!",
                    parsed="<p>Hello! I am test message!</p>",
                    checksum="nope",
                    posted_on=timezone.now() + timedelta(minutes=5),
                    updated_on=timezone.now() + timedelta(minutes=5),
                )
            )
        # can't merge across threads
        with self.assertRaises(ValueError):
            self.post.merge(
                Post.objects.create(
                    category=self.category,
                    thread=other_thread,
                    poster=self.user,
                    poster_name=self.user.username,
                    poster_ip='127.0.0.1',
                    original="Hello! I am test message!",
                    parsed="<p>Hello! I am test message!</p>",
                    checksum="nope",
                    posted_on=timezone.now() + timedelta(minutes=5),
                    updated_on=timezone.now() + timedelta(minutes=5),
                )
            )
        # can't merge with events
        with self.assertRaises(ValueError):
            self.post.merge(
                Post.objects.create(
                    category=self.category,
                    thread=self.thread,
                    poster=self.user,
                    poster_name=self.user.username,
                    poster_ip='127.0.0.1',
                    original="Hello! I am test message!",
                    parsed="<p>Hello! I am test message!</p>",
                    checksum="nope",
                    posted_on=timezone.now() + timedelta(minutes=5),
                    updated_on=timezone.now() + timedelta(minutes=5),
                    is_event=True,
                )
            )
    def test_merge(self):
        """merge method merges two posts into one"""
        # Same poster, same thread: a valid merge target.
        other_post = Post.objects.create(
            category=self.category,
            thread=self.thread,
            poster=self.user,
            poster_name=self.user.username,
            poster_ip='127.0.0.1',
            original="I am other message!",
            parsed="<p>I am other message!</p>",
            checksum="nope",
            posted_on=timezone.now() + timedelta(minutes=5),
            updated_on=timezone.now() + timedelta(minutes=5),
        )
        other_post.merge(self.post)
        # Merged content is appended into the surviving post, which stays valid.
        self.assertIn(other_post.original, self.post.original)
        self.assertIn(other_post.parsed, self.post.parsed)
        self.assertTrue(self.post.is_valid)
    def test_move(self):
        """move method moves post to other thread"""
        new_thread = Thread.objects.create(
            category=self.category,
            started_on=timezone.now(),
            starter_name='Tester',
            starter_slug='tester',
            last_post_on=timezone.now(),
            last_poster_name='Tester',
            last_poster_slug='tester',
        )
        self.post.move(new_thread)
        self.assertEqual(self.post.thread, new_thread)
| 2.390625 | 2 |
balanced_news/news/getnews/news_sites/getslate.py | thearyanmittal/news-aggregator | 0 | 12770431 | from ...models import Headline
import requests
from bs4 import BeautifulSoup
from datetime import datetime, time, timedelta
from dateparser import parse
def getslate(per_site):
    """Scrape up to ``per_site`` headlines from Slate's news-and-politics page.

    For each article link found, creates and saves a left-leaning ``Headline``
    with title, url, image and an estimated age in minutes (``mins_ago``;
    1441 = "older than today").

    NOTE(review): ``headline.delete()`` below is called on an unsaved model
    instance — in recent Django versions deleting an unsaved object raises;
    ``continue`` alone may be sufficient. Confirm against the Django version.
    NOTE(review): the ``delta.hour*60 + delta.minute`` arithmetic takes the
    time-of-day fields of a datetime, not a true elapsed duration — looks
    fragile around midnight; verify.
    """
    url = 'https://slate.com/news-and-politics'
    html = requests.get(url).text
    soup = BeautifulSoup(html, 'lxml')
    # All article anchors inside the topic-stories list.
    articles = soup.find('div', class_='topic-stories-list').find_all('a')
    i = 0
    for art in articles:
        if i < per_site:
            headline = Headline()
            headline.leaning = 'left'
            headline.url = art['href']
            try:
                headline.title = art.find('span').text
            except AttributeError:
                # Anchor without a title span: discard and move on.
                headline.delete()
                continue
            headline.img = art.find('img')['data-src']
            pub_date = parse(art.find('span', class_="topic-story__date").text, languages=['en'])
            if pub_date < datetime(year=datetime.now().year, month=datetime.now().month, day=datetime.now().day):
                # Published before today: flag as "more than a day old".
                headline.mins_ago = 1441
            else:
                # Last 8 characters of the byline hold the publication time.
                pub_time = art.find('div', class_='topic-story__byline').text.strip()[-8:].strip()
                pub_time = parse(pub_time).time()
                delta = datetime.now() - timedelta(hours=pub_time.hour, minutes=pub_time.minute, seconds=pub_time.second)
                headline.mins_ago = delta.hour*60 + delta.minute + 60 #for some reason, always short 1 hour
            headline.save()
            i += 1
        else:
            break
| 2.78125 | 3 |
sts.py | dhruvildave/DenkInterpreter | 1 | 12770432 | ###############################################################################
# #
# SYMBOLS, TABLES, SEMANTIC ANALYSIS #
# #
###############################################################################
from astvisitor import NodeVisitor
from base import _SHOULD_LOG_SCOPE, _SHOULD_LOG_STACK, ErrorCode, SemanticError
from lex import TokenType
class Symbol:
    """Base class for entries in a scoped symbol table.

    A symbol is a name optionally paired with a type symbol. Subclasses
    (VarSymbol, BuiltinTypeSymbol, ProcedureSymbol) specialize it.
    """

    def __init__(self, name, type=None):
        self.name = name
        self.type = type

    def __repr__(self):
        # Generic fallback repr for debuggability; subclasses override
        # with richer formats of the same "<Class(name=...)>" style.
        return "<{class_name}(name='{name}')>".format(
            class_name=self.__class__.__name__, name=self.name,
        )
class VarSymbol(Symbol):
    """Symbol for a declared variable: pairs a name with its type symbol."""

    def __init__(self, name, type):
        super().__init__(name, type)

    def __str__(self):
        # f-string form; output is identical to the previous .format() version.
        return f"<{self.__class__.__name__}(name='{self.name}', type='{self.type}')>"

    __repr__ = __str__
class BuiltinTypeSymbol(Symbol):
    """Symbol for a language built-in type such as INTEGER or REAL."""

    def __init__(self, name):
        super().__init__(name)

    def __str__(self):
        # A builtin type displays as just its name.
        return self.name

    def __repr__(self):
        return f"<{self.__class__.__name__}(name='{self.name}')>"
class ProcedureSymbol(Symbol):
    """Symbol for a declared procedure together with its formal parameters."""

    def __init__(self, name, params=None):
        super().__init__(name)
        # A fresh list per instance (avoids the shared-mutable-default pitfall).
        self.params = [] if params is None else params

    def __str__(self):
        return f"<{self.__class__.__name__}(name={self.name}, parameters={self.params})>"

    __repr__ = __str__
class ScopedSymbolTable:
    """Symbol table for one lexical scope, chained to its enclosing scope.

    lookup() searches this scope first and then walks outward through
    ``enclosing_scope`` unless ``current_scope_only`` is requested.
    """

    # Flipped to True once a RETURN statement is recorded for this scope.
    hasReturnStatement = False

    def __init__(self, scope_name, scopeType, scope_level, enclosing_scope=None):
        self._symbols = {}
        self.scope_name = scope_name
        self.scopeType = scopeType
        self.scope_level = scope_level
        self.enclosing_scope = enclosing_scope
        self._init_builtins()

    def _init_builtins(self):
        # Every scope is seeded with the language's built-in types.
        for type_name in ("INTEGER", "REAL"):
            self.insert(BuiltinTypeSymbol(type_name))

    def __str__(self):
        title = "SCOPE (SCOPED SYMBOL TABLE)"
        out = ["\n", title, "=" * len(title)]
        enclosing_name = self.enclosing_scope.scope_name if self.enclosing_scope else None
        header_rows = (
            ("Scope name", self.scope_name),
            ("Scope level", self.scope_level),
            ("Enclosing scope", enclosing_name),
        )
        for label, value in header_rows:
            out.append("%-15s: %s" % (label, value))
        subtitle = "Scope (Scoped symbol table) contents"
        out.append(subtitle)
        out.append("-" * len(subtitle))
        for key, value in self._symbols.items():
            out.append("%7s: %r" % (key, value))
        out.append("\n")
        return "\n".join(out)

    __repr__ = __str__

    def log(self, msg):
        # Honors the module-level logging switch imported from `base`.
        if _SHOULD_LOG_SCOPE:
            print(msg)

    def insert(self, symbol):
        self.log(f"Insert: {symbol.name}")
        self._symbols[symbol.name] = symbol

    def lookup(self, name, current_scope_only=False):
        self.log(f"Lookup: {name}. (Scope name: {self.scope_name})")
        # Either a Symbol instance or None.
        found = self._symbols.get(name)
        if found is not None:
            return found
        if current_scope_only:
            return None
        # Recursively go up the chain and look the name up there.
        if self.enclosing_scope is not None:
            return self.enclosing_scope.lookup(name)
class SemanticAnalyzer(NodeVisitor):
def __init__(self, scope):
self.current_scope = ScopedSymbolTable("initial", TokenType.PROGRAM, 1)
_SHOULD_LOG_SCOPE = scope
def log(self, msg):
if _SHOULD_LOG_SCOPE:
print(msg)
def error(self, error_code, token):
raise SemanticError(
error_code=error_code,
token=token,
message=f"{error_code.value} -> {token}",
)
def visit_Block(self, node):
for declaration in node.declarations:
self.visit(declaration)
self.visit(node.compound_statement)
def visit_Program(self, node):
self.log("ENTER scope: global")
global_scope = ScopedSymbolTable(
scope_name="global",
scopeType=TokenType.PROGRAM,
scope_level=1,
enclosing_scope=self.current_scope, # None
)
self.current_scope = global_scope
# visit subtree
self.visit(node.block)
self.log(global_scope)
self.current_scope = self.current_scope.enclosing_scope
self.log("LEAVE scope: global")
def visit_Compound(self, node):
for child in node.children:
self.visit(child)
def visit_NoOp(self, node):
pass
def visit_Type(self, node):
pass
def visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.right)
def visit_ProcedureDecl(self, node):
proc_name = node.procName
proc_symbol = ProcedureSymbol(proc_name)
self.current_scope.insert(proc_symbol)
self.log(f"ENTER scope: {proc_name}")
# Scope for parameters and local variables
procedure_scope = ScopedSymbolTable(
scope_name=proc_name,
scopeType=TokenType.PROCEDURE,
scope_level=self.current_scope.scope_level + 1,
enclosing_scope=self.current_scope,
)
self.current_scope = procedure_scope
# Insert parameters into the procedure scope
for param in node.params:
param_type = self.current_scope.lookup(param.type_node.value)
param_name = param.var_node.value
var_symbol = VarSymbol(param_name, param_type)
self.current_scope.insert(var_symbol)
proc_symbol.params.append(var_symbol)
self.visit(node.blockNode)
self.log(procedure_scope)
self.current_scope = self.current_scope.enclosing_scope
self.log(f"LEAVE scope: {proc_name}")
def visit_FunctionDecl(self, node):
funcName = node.funcName
funcSymbol = ProcedureSymbol(funcName)
self.current_scope.insert(funcSymbol)
self.log("Enter Scope:{}".format(funcName))
procedureScope = ScopedSymbolTable(
funcName,
TokenType.FUNCTION,
self.current_scope.scope_level + 1,
self.current_scope,
)
self.current_scope = procedureScope
for param in node.params:
paramType = self.current_scope.lookup(param.type_node.value)
paramName = param.var_node.value
varSymbol = VarSymbol(paramName, paramType)
self.current_scope.insert(varSymbol)
funcSymbol.params.append(varSymbol)
# print(paramName)
self.visit_Type(node.returnType)
self.visit(node.blockNode)
self.log("{}".format(procedureScope))
if procedureScope.hasReturnStatement == False:
self.error(ErrorCode.MISSING_RETURN, node.token)
self.current_scope = self.current_scope.enclosing_scope
self.log("Leave scope : {}".format(funcName))
def visit_VarDecl(self, node):
type_name = node.type_node.value
type_symbol = self.current_scope.lookup(type_name)
# We have all the information we need to create a variable symbol.
# Create the symbol and insert it into the symbol table.
var_name = node.var_node.value
var_symbol = VarSymbol(var_name, type_symbol)
# Signal an error if the table already has a symbol
# with the same name
if self.current_scope.lookup(var_name, current_scope_only=True):
self.error(
error_code=ErrorCode.DUPLICATE_ID, token=node.var_node.token,
)
self.current_scope.insert(var_symbol)
def visit_Assign(self, node):
# right-hand side
# self.visit(node.right)
# # left-hand side
# self.visit(node.left)
varName = node.left.value
currentScope = self.current_scope
# priprint(varName,currentScope.scopeType,currentScope.scope_name)
if (
currentScope.scopeType == TokenType.FUNCTION
and varName == currentScope.scope_name
):
currentScope.hasReturnStatement = True
else:
VarSymbol = self.current_scope.lookup(varName)
if VarSymbol == None:
self.error(error_code=ErrorCode.ID_NOT_FOUND, token=node.token)
self.visit(node.right)
def visit_Var(self, node):
var_name = node.value
var_symbol = self.current_scope.lookup(var_name)
if var_symbol is None:
self.error(error_code=ErrorCode.ID_NOT_FOUND, token=node.token)
def visit_Num(self, node):
pass
def visit_String(self, node):
pass
def visit_UnaryOp(self, node):
self.visit(node.right)
def visit_ProcedureCall(self, node):
for param_node in node.actual_params:
self.visit(param_node)
def visit_Call(self, node):
for param_node in node.actualParams:
self.visit(param_node)
def visit_Readint(self, node):
return
def visit_Readfloat(self, node):
return
def visit_Readstring(self, node):
return
def visit_WritelnCall(self, node):
for param_node in node.actual_params:
self.visit(param_node)
def visit_Condition(self, node):
self.visit(node.condition)
self.visit(node.then)
if node.myElse != None:
self.visit(node.myElse)
def visit_Then(self, node):
self.visit(node.child)
def visit_MyElse(self, node):
self.visit(node.child)
def visit_While(self, node):
self.visit(node.condition)
def visit_MyDo(self, node):
self.visit(node.child)
def visit_MyBoolean(self, node):
return node.value
| 2.421875 | 2 |
shapesDetector.py | charlesaurav13/OpenCV | 22 | 12770433 | <gh_stars>10-100
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def shapes():
img = cv.imread('./img/shapess.jpg')
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(img_gray, 240, 255, cv.THRESH_BINARY)
contours, _ = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
white = np.ones((img.shape[0], img.shape[1], 3))
for c in contours:
approx = cv.approxPolyDP(c, 0.01*cv.arcLength(c, True), True)
cv.drawContours(img, [approx], 0, (0, 255, 0), 5)
x = approx.ravel()[0]
y = approx.ravel()[1] - 5
if len(approx) == 3:
cv.putText(img, "Triangle", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
elif len(approx) == 4:
x1, y1, w, h = cv.boundingRect(approx)
aspect_ratio = float(w) / float(h)
print(aspect_ratio)
if aspect_ratio >= 0.95 and aspect_ratio <= 1.05:
cv.putText(img, "Square", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
else:
cv.putText(img, "Rectangle", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
elif len(approx) == 5:
cv.putText(img, "Pentagon", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
elif len(approx) == 10:
cv.putText(img, "Star", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
else:
cv.putText(img, "Circle", (x, y),
cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)
cv.imshow("Shapes", img)
cv.waitKey()
cv.destroyAllWindows()
if __name__ == "__main__":
shapes()
| 2.84375 | 3 |
tests/test_raw.py | fredstro/mrq | 745 | 12770434 | from mrq.job import Job
import datetime
from mrq.queue import Queue
import time
import pytest
@pytest.mark.parametrize(["p_queue", "p_pushback", "p_timed", "p_flags"], [
["test_timed_set", False, True, "--greenlets 10"],
["pushback_timed_set", True, True, "--greenlets 10"],
["test_sorted_set", False, False, "--greenlets 1"]
])
def test_raw_sorted(worker, p_queue, p_pushback, p_timed, p_flags):
worker.start(flags="%s --config tests/fixtures/config-raw1.py" %
p_flags, queues=p_queue)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
current_time = int(time.time())
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
# Schedule one in the past, one in the future
worker.send_raw_tasks(p_queue, {
"aaa": current_time - 10,
"bbb": current_time + 5,
"ccc": current_time + 10
}, block=False)
# Re-schedule
worker.send_raw_tasks(p_queue, {
"ccc": current_time + 6
}, block=False)
time.sleep(3)
if not p_timed:
assert Queue(p_queue).size() == 0
assert test_collection.count() == 3
assert list(test_collection.find(projection={"params": 1, "_id": 0}).limit(1)) == [
{"params": {"sorted_set": "aaa"}}
]
return
if p_pushback:
assert Queue(p_queue).size() == 3
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc", b"aaa"])
else:
assert Queue(p_queue).size() == 2
assert set(Queue(p_queue).list_raw_jobs()) == set([b"bbb", b"ccc"])
# The second one should not yet even exist in mrq_jobs
assert jobs_collection.count() == 1
assert list(jobs_collection.find())[0]["status"] == "success"
assert list(test_collection.find(projection={"params": 1, "_id": 0})) == [
{"params": {"timed_set": "aaa"}}
]
# Then wait for the second job to be done
time.sleep(5)
if p_pushback:
assert Queue(p_queue).size() == 3
else:
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 3
assert list(jobs_collection.find())[1]["status"] == "success"
assert list(jobs_collection.find())[2]["status"] == "success"
assert list(jobs_collection.find())[2]["worker"]
assert test_collection.count() == 3
@pytest.mark.parametrize("has_subqueue", [False, True])
@pytest.mark.parametrize(["p_queue", "p_set"], [
["test_raw", False],
["test_set", True]
])
def test_raw_set(worker, has_subqueue, p_queue, p_set):
flags = "--greenlets 10 --config tests/fixtures/config-raw1.py"
if has_subqueue:
flags = "%s --subqueues_refresh_interval=0.1" % flags
# worker should dequeue all subqueues
p_queue = "%s/" % p_queue
worker.start(flags=flags, queues=p_queue)
if has_subqueue:
# queue tasks in p_queue/subqueue
p_queue = "%ssubqueue" % p_queue
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["aaa", "bbb", "ccc", "bbb"], block=True)
assert Queue(p_queue).size() == 0
if p_set:
assert jobs_collection.count() == 3
assert jobs_collection.count({"status": "success"}) == 3
assert test_collection.count() == 3
else:
assert jobs_collection.count() == 4
assert jobs_collection.count({"status": "success"}) == 4
assert test_collection.count() == 4
def test_raw_started(worker):
worker.start(
flags="--greenlets 2 --config tests/fixtures/config-raw1.py", queues="teststarted_raw teststartedx")
worker.send_raw_tasks("teststarted_raw", ["f1", "f2", "f3"], block=False)
time.sleep(2)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 2
worker.mongodb_jobs.tests_flags.insert({"flag": "f1"})
time.sleep(1)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 1
assert jobs_collection.find({"status": "started", "queue": "teststartedx"}).count() == 2
assert jobs_collection.count() == 3
worker.mongodb_jobs.tests_flags.insert({"flag": "f2"})
worker.mongodb_jobs.tests_flags.insert({"flag": "f3"})
time.sleep(1)
worker.stop(block=True, deps=False)
assert jobs_collection.find({"status": "success", "queue": "teststartedx"}).count() == 3
assert jobs_collection.count() == 3
worker.stop_deps()
@pytest.mark.parametrize(["p_queue"], [
["test_raw"],
["test_set"],
["test_timed_set"]
])
def test_raw_remove(worker, p_queue):
worker.start_deps()
worker.send_raw_tasks(
p_queue, ["aa", "bb", "cc"], block=False, start=False)
assert Queue(p_queue).size() == 3
Queue(p_queue).remove_raw_jobs(["aa", "cc"])
assert Queue(p_queue).size() == 1
worker.stop_deps()
def test_raw_exception(worker):
p_queue = "testexception_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, ["msg1"], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "failed"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default")
worker.send_task(
"mrq.basetasks.utils.JobAction",
{
"id": failjob["_id"],
"action": "requeue"
},
block=True
)
assert Queue("default").size() == 0
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "queued"
assert list(jobs_collection.find({"_id": {"$ne": failjob["_id"]}}))[
0]["status"] == "success"
worker.stop(deps=False)
worker.start(
deps=False, flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues="default testx")
worker.wait_for_idle()
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 2
assert Queue("testx").size() == 0
assert list(jobs_collection.find({"_id": failjob["_id"]}))[
0]["status"] == "failed"
def test_raw_retry(worker):
p_queue = "testretry_raw"
worker.start(
flags="--greenlets 10 --config tests/fixtures/config-raw1.py", queues=p_queue)
jobs_collection = worker.mongodb_jobs.mrq_jobs
assert jobs_collection.count() == 0
assert Queue(p_queue).size() == 0
worker.send_raw_tasks(p_queue, [0], block=True)
failjob = list(jobs_collection.find())[0]
assert Queue("default").size() == 0
assert Queue("testx").size() == 1
assert Queue(p_queue).size() == 0
assert jobs_collection.count() == 1
assert failjob["status"] == "queued"
assert failjob["queue"] == "testx"
@pytest.mark.parametrize(["p_queue", "p_greenlets"], [x1 + x2 for x1 in [
["test_raw default test"],
# ["default test_raw test"],
# ["default test_raw test_set"],
# ["test_set test_raw default"],
# ["test test2 test_set test_raw default"]
] for x2 in [
# [1],
[2],
# [10]
]])
def test_raw_mixed(worker, p_queue, p_greenlets):
worker.start_deps()
worker.send_raw_tasks(
"test_raw", ["aaa", "bbb", "ccc"], start=False, block=False)
worker.send_task("tests.tasks.general.MongoInsert", {
"not_raw": "ddd"
}, start=False, block=False)
assert Queue("test_raw").size() == 3
assert Queue("default").size() == 1
worker.start(flags="--greenlets %s --config tests/fixtures/config-raw1.py" %
p_greenlets, queues=p_queue, deps=False)
test_collection = worker.mongodb_logs.tests_inserts
jobs_collection = worker.mongodb_jobs.mrq_jobs
time.sleep(3)
assert Queue("test_raw").size() == 0
assert Queue("default").size() == 0
assert test_collection.count() == 4
assert jobs_collection.count() == 4
assert jobs_collection.find({"status": "success"}).count() == 4
assert list(jobs_collection.find({"status": "success"}))[0]["worker"]
def test_raw_no_storage(worker):
""" Test tasks that don't store unless they go to error status like 'failed' """
worker.start(
flags="--config tests/fixtures/config-raw1.py",
queues="default testnostorage_raw"
)
jobs_collection = worker.mongodb_jobs.mrq_jobs
test_collection = worker.mongodb_logs.tests_inserts
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.MongoInsert 3"
], block=False)
time.sleep(2)
# No started inserted.
assert jobs_collection.count() == 0
time.sleep(2)
# No success either, but we did insert
assert test_collection.count() == 1
assert jobs_collection.count() == 0
test_collection.remove({})
# However failed tasks get stored.
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.RaiseException 0"
], block=False)
time.sleep(2)
# Failed was inserted.
assert jobs_collection.count({"status": "failed", "path": "tests.tasks.general.RaiseException"}) == 1
# If we requeue and don't raise, should be OK and inserted this time, even in success
# no_storage depends on a raw queue, not a task path.
_id = jobs_collection.find_one()["_id"]
jobs_collection.update({"_id": _id}, {"$set": {"path": "tests.tasks.general.MongoInsert"}})
job = Job(_id).fetch(full_data=True)
job.requeue(queue="default")
time.sleep(1)
assert test_collection.count() == 1
assert jobs_collection.count() == 1
assert jobs_collection.count({"status": "success"}) == 1
jobs_collection.remove({})
# Test with retry: should be inserted
worker.send_raw_tasks("testnostorage_raw", [
"tests.tasks.general.Retry 0"
], block=False)
assert jobs_collection.count({"status": "started"}) == 0
time.sleep(2)
assert jobs_collection.count({"status": "retry"}) == 1
| 2.0625 | 2 |
qf_lib/common/utils/miscellaneous/kelly.py | webclinic017/qf-lib | 198 | 12770435 | <reponame>webclinic017/qf-lib
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
def kelly(qf_series: QFSeries) -> float:
"""
Calculates the value of the Kelly Criterion (the fraction of money that should be invested) for the series
of returns/prices.
Kelly Criterion assumptions:
1. You trade the same way you traded in the past.
2. Each return corresponds to one trade.
3. Returns are normally distributed (calculated value will be close to the ideal kelly value even for highly skewed
returns. Test showed that the difference of up to 10% (relative) might occur for extremely skewed distributions.
Parameters
----------
qf_series: QFSeries
timeseries of returns/prices. Each return/price must correspond to one trade.
Returns
-------
float
fraction of money that should be invested
"""
# it is important to convert a series to simple returns and not log returns
returns_tms = qf_series.to_simple_returns() # type: SimpleReturnsSeries
mean = returns_tms.mean()
variance = returns_tms.var()
kelly_criterion_value = mean / variance
return kelly_criterion_value
def kelly_binary(win_probability: float, win_size: float, lose_size: float) -> float:
"""
Calculates the value of the Kelly Criterion (the fraction of money that should be invested) for a bet
that has two possible outcomes.
NOTE: This method should not be used to estimate the kelly value for a timeseries.
Parameters
----------
win_probability:float
probability of winning. Assumes that probability of losing is 1 - win_probability.
win_size: float
gain if we win.
For example: 0.7 means that we get additional 70% of what we bet. (if we bet 10$ and we win we now have 17$)
new_value = old_value * (1 + win_size)
lose_size: float
lose if we lose. This value should be negative.
For example: -0.2 means that we lose 20% of what we bet. (if we bet 10$ and we lose we now have 8$)
new_value = old_value * (1 + lose_size)
Returns
-------
float
fraction of money that should be invested
"""
kelly_value = (-win_size * win_probability + lose_size * win_probability - lose_size) / (win_size * lose_size)
return kelly_value
| 2.78125 | 3 |
exercicios/exercicio030.py | NicoCassio/cursoemvideo-python | 0 | 12770436 | <gh_stars>0
n = int(input('Número: '))
print('PAR' if n % 2 == 0 else 'ÍMPAR')
| 3.234375 | 3 |
homework_4.py | pyaiveoleg/computing_practicum | 0 | 12770437 | import math
from typing import Callable
class Method:
@staticmethod
def calculate(f: Callable, a: float, b: float):
pass
@staticmethod
def name():
pass
class left_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f(a) * (b - a)
@staticmethod
def name():
return "Формула левого прямоугольника"
class right_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f(b) * (b - a)
@staticmethod
def name():
return "Формула правого прямоугольника"
class middle_rectangle(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return f((a + b) / 2) * (b - a)
@staticmethod
def name():
return "Формула среднего прямоугольника"
class trapezoid(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return (f(a) + f(b)) / 2 * (b - a)
@staticmethod
def name():
return "Формула трапеции"
class simpson(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
return (b - a) / 6 * (f(a) + 4 * f((a + b) / 2) + f(b))
@staticmethod
def name():
return "Формула Симпсона"
class three_eights(Method):
@staticmethod
def calculate(f: Callable, a: float, b: float):
h = (b - a) / 3
return (b - a) * (f(a) + 3 * f(a + h) + 3 * f(a + 2 * h) + f(b)) / 8
@staticmethod
def name():
return "Формула 3 / 8"
def run():
polynomials = [
(lambda x: 5, lambda x: 5 * x, "y = 5"),
(lambda x: 3.7 * x - 2.39, lambda x: 3.7 * (x ** 2) / 2 - 2.39 * x, "y = 3.7 * x - 2.39"),
(lambda x: x ** 2 - 4 * x + 1.18, lambda x: (x ** 3) / 3 - 2 * x ** 2 + 1.18 * x, "y = x ** 2 - 4 * x + 1.18"),
(
lambda x: -17 * x ** 3 - 118 * x ** 2 + 10 * x + 27,
lambda x: -17 * (x ** 4) / 4 - 118 * (x ** 3) / 3 + 10 * (x ** 2) / 2 + 27 * x,
"y = -17 * x ** 3 - 118 * x ** 2 + 10 * x + 27"
)
]
functions = [
(lambda x: math.exp(-x) - (x ** 2) / 2, lambda x: -math.exp(-x) - (x ** 3) / 6, "y = math.exp(-x) - (x ** 2) / 2")
]
print("Задание 4. Приближённое вычисление интеграла по квадратурным формулам.")
print("Задача: Вычислите определённый интеграл от заданной функции f(x), используя квадратурные формулы.")
print("Введите нижний предел интегрирования (a):")
a = float(input())
print("Введите верхний предел интегрирования (b):")
b = float(input())
print("----------------------------------------------")
for f, integral_f, formula in polynomials + functions:
print(f"Рассматриваем функцию: {formula}")
for method in [right_rectangle, left_rectangle, middle_rectangle, trapezoid, simpson, three_eights]:
print(method.name())
res = method.calculate(f, a, b)
print(f"Результат вычисления: {res}")
print(f"Точное значение интеграла: {(integral_f(b) - integral_f(a))}")
print(f"Абсолютная фактическая погрешность: {abs((integral_f(b) - integral_f(a)) - res)}")
print("----------------------------------------------")
if __name__ == "__main__":
run()
| 3.84375 | 4 |
servo/stats/queries.py | ipwnosx/Servo | 0 | 12770438 | # -*- coding: utf-8 -*-
import decimal
from django.db import connection
class StatsManager:
def __init__(self):
self.cursor = connection.cursor()
def _result(self, args):
result = []
self.cursor.execute(self.sql, args)
for k, v in self.cursor.fetchall():
if isinstance(v, decimal.Decimal):
v = float(v)
result.append((k, v,))
return result
def cases_per_tech(self, location, queues, labels, start, end):
users = User.object.filter(location=location)
def statuses_per_location(self, timescale, location, status, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, se.triggered_at))*1000 as p,
COUNT(*) AS v
FROM servo_order so, servo_event se
WHERE (se.triggered_at, se.triggered_at) OVERLAPS (%s, %s)
AND se.action = 'set_status'
AND se.object_id = so.id
AND so.location_id = %s
AND se.description = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, location, status])
def statuses_per_user(self, timescale, user, status, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, se.triggered_at))*1000 as p,
COUNT(*) AS v
FROM servo_order so, servo_event se
WHERE (se.triggered_at, se.triggered_at) OVERLAPS (%s, %s)
AND se.action = 'set_status'
AND se.object_id = so.id
AND so.user_id = %s
AND se.description = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, user, status])
def sales_invoices(self, timescale, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, so.created_at))*1000 as p,
SUM(total_gross) AS v
FROM servo_invoice si, servo_order so
WHERE (si.created_at, si.created_at) OVERLAPS (%s, %s)
AND si.order_id = so.id
AND so.queue_id = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, queue])
def sales_purchases(self, timescale, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, po.created_at))*1000 as p,
SUM(total) AS v
FROM servo_purchaseorder po, servo_order so
WHERE (po.created_at, po.created_at) OVERLAPS (%s, %s)
AND po.sales_order_id = so.id
AND so.queue_id = %s
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, start, end, queue])
def sales_parts_per_labtier(self, start, end):
self.sql = """SELECT labour_tier, count(*)
FROM servo_product p, servo_servicepart sp, servo_serviceorderitem soi
WHERE soi.product_id = p.id
AND sp.order_item_id = soi.id
AND (soi.created_at, soi.created_at) OVERLAPS (%s, %s)
AND char_length(labour_tier) = 4
GROUP BY labour_tier
ORDER BY labour_tier"""
return self._result([start, end])
def order_runrate(self, timescale, location, user, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, started_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE user_id = %s
AND location_id = %s
AND (started_at, started_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, user, location, start, end])
def turnaround_per_location(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
EXTRACT(HOUR FROM AVG(closed_at - created_at)) as v
FROM servo_order
WHERE closed_at IS NOT NULL
AND location_id = %s
AND queue_id IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def runrate_per_location(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND closed_at IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def distribution_per_location(self, start, end):
result = []
self.sql = """SELECT l.title, COUNT(*)
FROM servo_order o LEFT OUTER JOIN servo_location l on (o.location_id = l.id)
WHERE (o.created_at, o.created_at) OVERLAPS (%s, %s)
GROUP BY l.title"""
self.cursor.execute(self.sql, [start, end])
for k, v in self.cursor.fetchall():
result.append({'label': k, 'data': v})
return result
def orders_created_by(self, timescale, location, user, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND created_by_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, user, start, end])
def orders_created_at(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def orders_closed_at(self, timescale, location, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND (closed_at, closed_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, start, end])
def orders_closed_in(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND queue_id = %s
AND (closed_at, closed_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
def order_count(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
COUNT(*) AS v
FROM servo_order
WHERE location_id = %s
AND queue_id = %s
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
def order_turnaround(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
EXTRACT(HOUR FROM AVG(closed_at - created_at)) as v
FROM servo_order
WHERE closed_at IS NOT NULL
AND location_id = %s
AND queue_id = %s
AND queue_id IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
def order_turnaround(self, timescale, location, queue, start, end):
self.sql = """SELECT EXTRACT(EPOCH FROM date_trunc(%s, created_at))*1000 as p,
EXTRACT(HOUR FROM AVG(closed_at - created_at)) as v
FROM servo_order
WHERE closed_at IS NOT NULL
AND location_id = %s
AND queue_id = %s
AND queue_id IS NOT NULL
AND (created_at, created_at) OVERLAPS (%s, %s)
GROUP BY p
ORDER BY p ASC"""
return self._result([timescale, location, queue, start, end])
| 2.171875 | 2 |
src/spider/rssSpider.py | huobaolajiao/RsshubReader | 10 | 12770439 | # -*- coding: utf-8 -*-
"""
@author: wangyouqish
"""
import sys
sys.path.append("..")
import time,datetime
import pytz
import requests
import feedparser
import threading
import database.dbConn as dbConn
import log.logCenter as logCenter
def getFeedFromLink(url, name):
    """Download the feed at *url* and parse it with feedparser.

    Args:
        url: full URL of the RSS/Atom feed.
        name: human-readable task name, used only in log messages.

    Returns:
        The parsed feedparser result, or None when the download fails or
        the server answers 404/403.  NOTE(review): a malformed feed still
        returns a result object (with empty ``entries``); callers must
        cope with that.
    """
    head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'}
    try:
        page = requests.get(url, headers=head, timeout=(10, 10))
        # Force utf-8 so .text decodes consistently regardless of the
        # server-declared charset.
        page.encoding = 'utf-8'
        page_content = page.text
        if page.status_code == 404 or page.status_code == 403:
            logger.error(name + " 404/403 failed " + url)
            return None
    except requests.RequestException:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only network-level failures mean "download failed".
        logger.error(name + " download failed " + url)
        return None
    rss = feedparser.parse(page_content)
    return rss
def getRsshubFeed(router, name, recommendedServerID):
    """Fetch a rsshub route, trying the recommended server first.

    Args:
        router: rsshub route path, appended to each server base address.
        name: task name, used for logging.
        recommendedServerID: ID of the server to try first; 0 means
            "no recommendation", go straight to the full server list.

    Returns:
        The first successfully parsed feed, or None when every known
        server fails.
    """
    conn = dbConn.getConn()
    cSer = conn.cursor()
    rss = None
    # Try the recommended server first.  Parameterized query instead of
    # str.format() to avoid SQL injection / quoting problems; the query is
    # also only executed when it is actually needed (the old code ran it
    # unconditionally and then ignored the cursor for ID == 0).
    if recommendedServerID != 0:
        cursorSer = cSer.execute(
            "SELECT ID,Adress,FuncNum,FirstCheck,LastCheck from rsshubServers where ID=?",
            (recommendedServerID,))
        for row in cursorSer:
            rss = getFeedFromLink(row[1] + router, name)
            if rss is not None:
                break
    # Fall back to every known server until one succeeds.  The old code
    # kept iterating (guarded by a flag) instead of breaking early.
    if rss is None:
        cursorSer = cSer.execute(
            "SELECT ID,Adress,FuncNum,FirstCheck,LastCheck from rsshubServers")
        for row in cursorSer:
            rss = getFeedFromLink(row[1] + router, name)
            if rss is not None:
                break
    conn.close()
    return rss
def saveOneNormalRss(url, name, unread):
    """Download one plain RSS feed and persist its new entries.

    Args:
        url: feed URL.
        name: unique task name (key in ``normalrsslinks`` / ``rssData``).
        unread: current unread counter for this feed; the number of newly
            inserted items is added and written back to the task row.
    """
    conn = dbConn.getConn()
    old = 0
    new = 0
    rss = getFeedFromLink(url, name)
    if rss is None:
        return
    for post in rss.entries:
        title = post.title
        summary = post.summary
        link = post.link
        try:
            # Case 1: the entry carries a publish time parsed by feedparser.
            itemTimestamp = int(time.mktime(post.published_parsed))
        except Exception:
            try:
                # Case 2: fall back to the feed-level GMT update time taken
                # from the generated XML, converted to Asia/Shanghai.
                gmtTime = rss.feed.updated
                local_tz = pytz.timezone('Asia/Shanghai')
                utcDT = datetime.datetime.strptime(gmtTime, "%a, %d %b %Y %H:%M:%S GMT")
                itemTimestamp = int(time.mktime(utcDT.replace(tzinfo=pytz.utc).astimezone(local_tz).timetuple()))
            except Exception:
                try:
                    # Case 3: +0800 (Beijing) time string; kept only to
                    # accommodate the cili001 feed's date format.
                    bjTime = post.published
                    bjT = time.strptime(bjTime, "%a,%d %b %Y %H:%M:%S +0800")
                    itemTimestamp = int(time.mktime(bjT))
                except Exception:
                    # Last resort: stamp the item with "now".
                    itemTimestamp = int(time.time())
        try:
            # Parameterized insert: the old str.format() version produced a
            # broken statement whenever a title/summary contained a single
            # quote, and then miscounted the item as "old".
            conn.execute(
                "INSERT INTO rssData (rssName,title,summary,timestamp,link) VALUES (?,?,?,?,?)",
                (name, title, summary, itemTimestamp, link))
            new = new + 1
        except Exception:
            old = old + 1  # insert failed -> item already stored, count as old
    try:
        unread = unread + new
        # Record the fetch time and new unread count on the task row.
        conn.execute(
            "UPDATE normalrsslinks set lastget=?, unread=? where name=?",
            (int(time.time()), unread, name))
        conn.commit()
        conn.close()
    except Exception:
        logger.warning("sqlerror " + name)
    logger.info(name + " add new:" + str(new) + " old:" + str(old))
def saveOneRsshub(router, name, recommendedServerID, unread):
    """Download one rsshub feed (trying multiple servers) and store its entries.

    Mirrors saveOneNormalRss, but resolves the feed through getRsshubFeed and
    updates the rsshubtasks table instead.

    Args:
        router: rsshub route path.
        name: task name (key in rsshubtasks).
        recommendedServerID: preferred rsshub server ID (0 = none).
        unread: the task's current unread count.
    """
    conn = dbConn.getConn()
    old = 0
    new = 0
    rss = getRsshubFeed(router, name, recommendedServerID)
    if rss is None:
        # All servers failed: bad network, or the router is invalid/broken.
        logger.warning(name + " all download failed " + router)
        conn.close()  # BUG FIX: connection used to leak on download failure
        return
    for post in rss.entries:
        title = post.title
        summary = post.summary
        link = post.link
        # Resolve the entry timestamp: per-entry time, then feed GMT time, then now.
        try:
            itemTimestamp = int(time.mktime(post.published_parsed))
        except Exception:
            try:
                gmtTime = rss.feed.updated
                local_tz = pytz.timezone('Asia/Shanghai')
                utcDT = datetime.datetime.strptime(gmtTime, "%a, %d %b %Y %H:%M:%S GMT")
                itemTimestamp = int(time.mktime(utcDT.replace(tzinfo=pytz.utc).astimezone(local_tz).timetuple()))
            except Exception:
                itemTimestamp = int(time.time())
        try:
            # BUG FIX: parameterized query — quotes in titles/summaries used to
            # break the string-formatted SQL and be miscounted as old entries.
            conn.execute(
                "INSERT INTO rssData (rssName,title,summary,timestamp,link) VALUES (?,?,?,?,?)",
                (name, title, summary, itemTimestamp, link))
            new = new + 1
        except Exception:
            old = old + 1  # insert failed: entry already stored
    try:
        unread = unread + new
        conn.execute(
            "UPDATE rsshubtasks set lastget=? ,unread=? where name=?",
            (int(time.time()), unread, name))
        conn.commit()
        conn.close()
    except Exception:
        logger.warning("sqlerror " + name)
    logger.info("rsshubtask " + name + " add new:" + str(new) + " old:" + str(old))
def updateAllTasksTitle():
    """Refresh the cached feed title for every active task.

    BUG FIX: getFeedFromLink/getRsshubFeed return None on download failure;
    the old code then crashed with AttributeError on rss.feed.title. Failed
    downloads are now skipped with a warning.
    """
    conn = dbConn.getConn()
    cTask = conn.cursor()
    cursor = cTask.execute("SELECT link , name , round , lastget ,title from normalrsslinks where active=1")
    for row in cursor:
        rss = getFeedFromLink(row[0], row[1])
        if rss is None:
            logger.warning(row[1] + " title update skipped, download failed")
            continue
        # Parameterized query: titles containing quotes no longer break the SQL.
        conn.execute("UPDATE normalrsslinks set title=? where name=?", (rss.feed.title, row[1]))
    cursor = cTask.execute("SELECT router , name , round , lastget , recommendedServerID, title from rsshubtasks where active=1")
    for row in cursor:
        rss = getRsshubFeed(row[0], row[1], row[4])
        if rss is None:
            logger.warning(row[1] + " title update skipped, download failed")
            continue
        conn.execute("UPDATE rsshubtasks set title=? where name=?", (rss.feed.title, row[1]))
    conn.commit()
    conn.close()
def getAllRssData():
    """Run every due RSS task (normal + rsshub) in worker threads and wait.

    A task is due when now - lastget exceeds its configured round interval.
    Each worker uses its own DB connection, so this function's connection is
    only used for reading the task tables.
    """
    # BUG FIX: the original called dbConn.getConn() twice and leaked the first
    # connection; a single connection is enough here.
    conn = dbConn.getConn()
    taskDoCount = 0
    taskWaitCount = 0
    cTask = conn.cursor()
    thread_list = []
    # Normal RSS tasks.
    cursor = cTask.execute("SELECT link , name , round , lastget ,unread from normalrsslinks where active=1")
    for row in cursor:
        if (int(time.time()) - int(row[3])) > row[2]:
            t = threading.Thread(target=saveOneNormalRss, args=(row[0], row[1], row[4]))
            t.start()
            logger.debug(row[1] + " " + row[0] + " start")
            taskDoCount = taskDoCount + 1
            thread_list.append(t)
        else:
            logger.debug(row[1] + " less than round")
            taskWaitCount = taskWaitCount + 1
    # rsshub router tasks.
    cursor = cTask.execute("SELECT router , name , round , lastget , recommendedServerID ,unread from rsshubtasks where active=1")
    for row in cursor:
        if (int(time.time()) - int(row[3])) > row[2]:
            t = threading.Thread(target=saveOneRsshub, args=(row[0], row[1], row[4], row[5]))
            t.start()
            logger.debug("rsshub " + row[1] + " " + row[0] + " start")
            taskDoCount = taskDoCount + 1
            thread_list.append(t)
        else:
            logger.debug("rsshub " + row[1] + " less than round")
            taskWaitCount = taskWaitCount + 1
    for t in thread_list:
        t.join()
    logger.info(str(taskDoCount) + " tasks done " + str(taskWaitCount) + " tasks wait")
    conn.commit()
    conn.close()
# Above: the rsshub router tasks.
# Below: main entry; module-level initialization runs on import, outside the main guard.
logger=logCenter.getLogger("rssSpider")  # shared module logger used by all functions above
if __name__ == "__main__":
    getAllRssData()
| 2.5625 | 3 |
collectd.py | allegro/collectd | 0 | 12770440 | import re
import time
import socket
import struct
import logging
import traceback
from functools import wraps
try:
from Queue import Queue, Empty # Python 2
except ImportError:
from queue import Queue, Empty # Python 3
from collections import defaultdict
from threading import RLock, Thread, Semaphore
__all__ = ["Connection", "start_threads"]
__version_info__ = (1, 0, 2, "final", 0)
__version__ = "{0}.{1}.{2}".format(*__version_info__)
logger = logging.getLogger("collectd")
SEND_INTERVAL = 10 # seconds between snapshot/send cycles
MAX_PACKET_SIZE = 1024 # bytes; UDP packets are chunked to fit this
PLUGIN_TYPE = "gauge"
# Part type codes of the collectd binary network protocol.
TYPE_HOST = 0x0000
TYPE_TIME = 0x0001
TYPE_PLUGIN = 0x0002
TYPE_PLUGIN_INSTANCE = 0x0003
TYPE_TYPE = 0x0004
TYPE_TYPE_INSTANCE = 0x0005
TYPE_VALUES = 0x0006
TYPE_INTERVAL = 0x0007
LONG_INT_CODES = [TYPE_TIME, TYPE_INTERVAL]
STRING_CODES = [TYPE_HOST, TYPE_PLUGIN, TYPE_PLUGIN_INSTANCE, TYPE_TYPE, TYPE_TYPE_INSTANCE]
# Value kinds inside a VALUES part, with their struct pack formats.
VALUE_COUNTER = 0
VALUE_GAUGE = 1
VALUE_DERIVE = 2
VALUE_ABSOLUTE = 3
VALUE_CODES = {
    VALUE_COUNTER: "!Q",
    VALUE_GAUGE: "<d",
    VALUE_DERIVE: "!q",
    VALUE_ABSOLUTE: "!Q"
}
def pack_numeric(type_code, number):
    """Encode *number* as a collectd numeric part: type, length (12), signed 64-bit value."""
    header = struct.pack("!HH", type_code, 12)
    payload = struct.pack("!q", number)
    return header + payload
def pack_string(type_code, string):
    # Collectd string part: 4-byte header (type, total length including the
    # trailing NUL) followed by the string and "\0".
    # NOTE(review): Python 2 only — on Python 3, bytes + str raises TypeError;
    # confirm before running on 3.x.
    return struct.pack("!HH", type_code, 5 + len(string)) + string + "\0"
def pack_value(name, value):
    """Encode one named gauge value: a TYPE_INSTANCE part followed by a VALUES part."""
    type_part = pack(TYPE_TYPE_INSTANCE, name)
    values_header = struct.pack("!HHH", TYPE_VALUES, 15, 1)
    values_payload = struct.pack("<Bd", VALUE_GAUGE, value)
    return type_part + values_header + values_payload
def pack(id, value):
    """Dispatch to the right part encoder based on the part type code.

    A string *id* is treated as a value name (packed as a TYPE_INSTANCE +
    VALUES pair); numeric ids must be one of the known part type codes.
    """
    if isinstance(id, basestring):
        return pack_value(id, value)
    if id in LONG_INT_CODES:
        return pack_numeric(id, value)
    if id in STRING_CODES:
        return pack_string(id, value)
    raise AssertionError("invalid type code " + str(id))
def message_start(when=None, host=socket.gethostname(), plugin_inst="", plugin_name="any"):
    """Encode the common header parts that every packet of one send repeats."""
    header_parts = [
        (TYPE_HOST, host),
        (TYPE_TIME, when or time.time()),
        (TYPE_PLUGIN, plugin_name),
        (TYPE_PLUGIN_INSTANCE, plugin_inst),
        (TYPE_TYPE, PLUGIN_TYPE),
        (TYPE_INTERVAL, SEND_INTERVAL),
    ]
    return "".join(pack(code, val) for code, val in header_parts)
def messages(counts, when=None, host=socket.gethostname(), plugin_inst="", plugin_name="any"):
    """Split encoded counts into packets no larger than MAX_PACKET_SIZE.

    Every packet starts with the common header; values too large to fit in a
    packet even on their own are silently dropped.
    """
    header = message_start(when, host, plugin_inst, plugin_name)
    encoded = [pack(stat, value) for stat, value in counts.items()]
    encoded = [e for e in encoded if len(header) + len(e) <= MAX_PACKET_SIZE]
    if not encoded:
        return []
    packets = []
    chunk, chunk_len = [header], len(header)
    for e in encoded:
        if chunk_len + len(e) > MAX_PACKET_SIZE:
            packets.append("".join(chunk))
            chunk, chunk_len = [header], len(header)
        chunk.append(e)
        chunk_len += len(e)
    packets.append("".join(chunk))
    return packets
def sanitize(s):
    """Collapse each run of non-alphanumeric characters to "_" and trim the edges."""
    collapsed = re.sub(r"[^a-zA-Z0-9]+", "_", s)
    return collapsed.strip("_")
def swallow_errors(func):
    """Decorator making *func* best-effort: any exception is logged and discarded."""
    @wraps(func)
    def safe_call(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except:
            # Logging itself may fail (e.g. during interpreter teardown);
            # this decorator must never let anything propagate.
            try:
                logger.error("unexpected error", exc_info = True)
            except:
                pass
    return safe_call
def synchronized(method):
    """Decorator: serialize calls to *method* under the instance's ``_lock``."""
    @wraps(method)
    def locked(self, *args, **kwargs):
        self._lock.acquire()
        try:
            return method(self, *args, **kwargs)
        finally:
            self._lock.release()
    return locked
class Counter(object):
    """Thread-safe accumulator for one stat category.

    Counts are kept per (specific, stat) pair; snapshot() flattens them into
    collectd metric names and resets the values to zero.
    """
    def __init__(self, category):
        self.category = category
        self._lock = RLock()
        # counts[specific][stat] -> running float total
        self.counts = defaultdict(lambda: defaultdict(float))
    @swallow_errors
    @synchronized
    def record(self, *args, **kwargs):
        """Add each keyword value under every given specific (and under "")."""
        # NOTE(review): basestring makes this Python 2 only — confirm before 3.x.
        for specific in list(args) + [""]:
            assert isinstance(specific, basestring)
            for stat, value in kwargs.items():
                assert isinstance(value, (int, float))
                self.counts[str(specific)][str(stat)] += value
    @swallow_errors
    @synchronized
    def set_exact(self, **kwargs):
        """Overwrite (not add to) the un-specific value of each given stat."""
        for stat, value in kwargs.items():
            assert isinstance(value, (int, float))
            self.counts[""][str(stat)] = value
    @synchronized
    def snapshot(self):
        """Return {metric_name: value} for all counts, resetting them to 0."""
        totals = {}
        for specific,counts in self.counts.items():
            for stat in counts:
                # Metric name is "category-specific-stat" with empty segments collapsed.
                name_parts = map(sanitize, [self.category, specific, stat])
                name = "-".join(name_parts).replace("--", "-")
                totals[name] = counts[stat]
                counts[stat] = 0.0
        return totals
class Connection(object):
    """Interned handle to one collectd destination.

    ``__new__`` interns instances by their full parameter tuple, so the same
    arguments always yield the same Connection. Attribute access lazily
    creates Counter categories, e.g. ``conn.requests.record(...)``.
    """
    _lock = RLock() # class-level lock, only used for __new__
    instances = {}
    @synchronized
    def __new__(cls, hostname = socket.gethostname(),
            collectd_host = "localhost", collectd_port = 25826,
            plugin_inst = "", plugin_name = "any"):
        # Return the interned instance for these parameters, creating it once.
        id = (hostname, collectd_host, collectd_port, plugin_inst, plugin_name)
        if id in cls.instances:
            return cls.instances[id]
        else:
            inst = object.__new__(cls)
            cls.instances[id] = inst
            return inst
    def __init__(self, hostname = socket.gethostname(),
            collectd_host = "localhost", collectd_port = 25826,
            plugin_inst = "", plugin_name = "any"):
        # __init__ runs on every construction, including interned instances,
        # so only initialize the first time around.
        if "_counters" not in self.__dict__:
            self._lock = RLock()
            self._counters = {}
            self._plugin_inst = plugin_inst
            self._plugin_name = plugin_name
            self._hostname = hostname
            self._collectd_addr = (collectd_host, collectd_port)
    @synchronized
    def __getattr__(self, name):
        """Lazily create and return the Counter named *name* (no leading "_")."""
        if name.startswith("_"):
            raise AttributeError("{0} object has no attribute {1!r}".format(self.__class__.__name__, name))
        if name not in self._counters:
            self._counters[name] = Counter(name)
        return self._counters[name]
    @synchronized
    def _snapshot(self):
        # Snapshot only counters that have recorded anything.
        return [c.snapshot() for c in self._counters.values() if c.counts]
snaps = Queue()  # pending [timestamp, stats, connection] snapshots awaiting send
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # shared UDP socket for all sends
def take_snapshots():
    """Snapshot every Connection's counters and queue them for sending."""
    for conn in Connection.instances.values():
        per_counter = conn._snapshot()
        if not per_counter:
            continue
        merged = {}
        for snap in per_counter:
            merged.update(snap)
        snaps.put([int(time.time()), merged, conn])
def send_stats(raise_on_empty = False):
    """Send one queued snapshot to its collectd destination, packet by packet."""
    try:
        when, stats, conn = snaps.get(timeout = 0.1)
    except Empty:
        if raise_on_empty:
            raise
        return
    for packet in messages(stats, when, conn._hostname, conn._plugin_inst, conn._plugin_name):
        sock.sendto(packet, conn._collectd_addr)
def daemonize(func, sleep_for = 0):
    """Run *func* forever in a daemon thread, logging (never propagating) errors."""
    @wraps(func)
    def loop():
        while True:
            try:
                func()
            except:
                # Prefer the logger; fall back to stderr if logging itself fails.
                try:
                    logger.error("unexpected error", exc_info = True)
                except:
                    traceback.print_exc()
            time.sleep(sleep_for)
    worker = Thread(target = loop)
    worker.daemon = True
    worker.start()
single_start = Semaphore()  # one-shot guard: start_threads may only run once per process
def start_threads():
    """Start the snapshot and send daemon threads (callable only once)."""
    # A second call fails this assert: the semaphore is already taken.
    assert single_start.acquire(blocking = False)
    daemonize(take_snapshots, sleep_for = SEND_INTERVAL)
    daemonize(send_stats)
| 2.203125 | 2 |
tantum/model/vit_attention.py | dmitryshendryk/tantum | 0 | 12770441 |
import os
import cv2
import math
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
import timm
class VIT_Attention(nn.Module):
    """ViT backbone with optional 4-quadrant attention pooling and multi-sample dropout.

    The input image can be split into four quadrants whose backbone features
    are blended with learned softmax attention weights; the pooled feature is
    then classified by a linear head.
    """

    def __init__(self,
                 arch_name,
                 pretrained=False,
                 img_size=256,
                 multi_drop=False,
                 multi_drop_rate=0.5,
                 att_layer=False,
                 att_pattern="A",
                 num_classes=5,
                 num_dropouts=5
                 ):
        """
        Args:
            arch_name: timm model name for the backbone.
            pretrained: load pretrained backbone weights.
            img_size: kept for backward compatibility; not used here —
                presumably consumed by callers/transforms (confirm).
            multi_drop: average the head over several dropout masks.
            multi_drop_rate: dropout probability for multi-sample dropout.
            att_layer: enable 4-quadrant attention pooling.
            att_pattern: "A" (MLP scorer) or "B" (linear scorer).
            num_classes: output dimension of the head (default 5, as before).
            num_dropouts: number of dropout samples (default 5, as before).
        """
        super().__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.multi_drop = multi_drop
        self.model = timm.create_model(arch_name, pretrained=pretrained)
        n_features = self.model.head.in_features
        self.model.head = nn.Identity()  # strip timm's classifier; we pool + classify ourselves
        # Generalized: head size was hard-coded to 5 classes.
        self.head = nn.Linear(n_features, num_classes)
        self.head_drops = nn.ModuleList()
        for _ in range(num_dropouts):
            self.head_drops.append(nn.Dropout(multi_drop_rate))
        # NOTE: self.att_layer doubles as the enable flag (False) or the scorer module.
        self.att_layer = att_layer
        if att_layer:
            if att_pattern == "A":
                self.att_layer = nn.Sequential(
                    nn.Linear(n_features, 256),
                    nn.Tanh(),
                    nn.Linear(256, 1),
                )
            elif att_pattern == "B":
                self.att_layer = nn.Linear(n_features, 1)
            else:
                raise ValueError("invalid att pattern")

    def forward(self, x):
        if self.att_layer:
            # Split the image into four quadrants, encode each, and blend the
            # four feature vectors with softmax attention weights.
            l = x.shape[2] // 2
            h1 = self.model(x[:, :, :l, :l])
            h2 = self.model(x[:, :, :l, l:])
            h3 = self.model(x[:, :, l:, :l])
            h4 = self.model(x[:, :, l:, l:])
            w = F.softmax(torch.cat([
                self.att_layer(h1),
                self.att_layer(h2),
                self.att_layer(h3),
                self.att_layer(h4),
            ], dim=1), dim=1)
            h = h1 * w[:, 0].unsqueeze(-1) + \
                h2 * w[:, 1].unsqueeze(-1) + \
                h3 * w[:, 2].unsqueeze(-1) + \
                h4 * w[:, 3].unsqueeze(-1)
        else:
            h = self.model(x)
        if self.multi_drop:
            # Multi-sample dropout: average the head over the dropout masks.
            output = None
            for dropout in self.head_drops:
                sample = self.head(dropout(h))
                output = sample if output is None else output + sample
            output /= len(self.head_drops)
        else:
            output = self.head(h)
        return output
| 2.21875 | 2 |
pyecharts/charts/mixins.py | CharileWithZoe/pyecharts | 11,032 | 12770442 | from ..render import engine
class ChartMixin:
    """Shared chart behaviour: registering custom JS functions and loading JS deps."""

    def add_js_funcs(self, *fns):
        """Register raw JavaScript snippets to embed in the page; returns self."""
        for snippet in fns:
            self.js_functions.add(snippet)
        return self

    def load_javascript(self):
        """Resolve this chart's JavaScript dependencies via the render engine."""
        return engine.load_javascript(self)
class CompositeMixin(ChartMixin):
    """Mixin for containers holding several charts; supports iteration and len()."""

    def __iter__(self):
        return iter(self._charts)

    def __len__(self):
        return len(self._charts)
| 2.0625 | 2 |
webtreasures/models/__init__.py | songzxDev/fortunecat | 0 | 12770443 | from webtreasures.models.fortunedrp import *
| 1.1875 | 1 |
src/backup.py | Kynda/Public-Utility-Scripts | 0 | 12770444 | #! /usr/bin/env python
from scp import SCPClient
import datetime
import paramiko
import argparse
import tarfile
import os
# Parse Command Line Arguments
description = '''
This script creates a tarball of a specified destination and copies it to a remote destination
using ssh on the destination. Requires configured ssh keys on remote host to work. Include
trailing '/' on directory paths!
'''
parser = argparse.ArgumentParser(description)
parser.add_argument('-s', required=True, dest='src', help="Path to directory to backup.")
parser.add_argument('-d', required=True, dest='dest', help="Path to directory to put backup. (Note previous backups in this destination will be deleted!)")
parser.add_argument('-r', required=True, dest='host', help="Remote host to put backup on." )
parser.add_argument('-u', required=True, dest='user', help="Remote host user name")
parser.add_argument('-p', required=False, dest='port', type=int, default=22, help="Remote host port")
parser.add_argument('-f', required=False, dest='file', help="Alternative prefix for resulting tar file.")
parser.add_argument('--remove', required=False, dest='rm', help="Attempt to unlink any existing backup files in destination.")
args = parser.parse_args()
# Get ymd
now = datetime.datetime.now();
ymd = str(now.year) + '-' + str( now.month) + '-' + str(now.day)
# Generate file name parts
pre = 'backup-'
ext = '.tgz'
if args.file:
pre = args.file
# Local tarball filename
tgz = '/tmp/'+pre+'-'+ymd+ext
# Create Archive
print 'Creating archive for ' + tgz
with tarfile.open( tgz, mode='w:gz') as tarball:
tarball.add( args.src, arcname=os.path.basename( args.src ) )
# Open SSH Client
print 'Opening SSH Connection'
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.connect( args.host, args.port, args.user )
# If RM remove pre-existing backup files
if args.rm:
print 'Removing files with pattern ' + args.dest + pre + '*' + ext
ssh.exec_command('rm ' + args.dest + pre + '*' + ext )
# SCP tarfile to Remote
print 'Copying tarball to remote destination'
scp = SCPClient( ssh.get_transport() )
scp.put( tgz, args.dest )
# Remove the local tarball
print 'Removing local copy of tarball'
os.unlink( tgz )
| 3.296875 | 3 |
model/encoder.py | ppujol76/-Pere_Transformers | 0 | 12770445 | from torch.functional import Tensor
import torchvision.models as models
import torch.nn as nn
class Encoder_VGG16(nn.Module):
    """Frozen VGG16 feature extractor returning flattened spatial features.

    For a (B, 3, 224, 224) input: the conv base yields (B, 512, 7, 7),
    and the last two dims are flattened to give (B, 512, 49).
    """

    def __init__(self):
        super(Encoder_VGG16, self).__init__()
        backbone = models.vgg16(pretrained=True)
        self.conv_base = backbone.features
        # Freeze the whole backbone: it is used for inference only.
        for p in self.conv_base.parameters():
            p.requires_grad = False
        # Collapse the spatial dims (H, W) of the feature maps into one axis.
        self.flat = nn.Flatten(2, 3)

    def forward(self, x):
        feature_maps = self.conv_base(x)   # (B, 512, H', W')
        return self.flat(feature_maps)     # (B, 512, H'*W')
class Encoder_ResNet50(nn.Module):
    """Frozen ResNet-50 feature extractor returning flattened spatial features.

    For a (B, 3, 224, 224) input: the trimmed backbone yields (B, 2048, 7, 7),
    and the last two dims are flattened to give (B, 2048, 49).
    """

    def __init__(self):
        super(Encoder_ResNet50, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Drop the final avgpool and fc layers, keeping only the conv trunk.
        self.conv_base = nn.Sequential(*list(backbone.children())[:-2])
        # Freeze the whole backbone: it is used for inference only.
        for p in self.conv_base.parameters():
            p.requires_grad = False
        # Collapse the spatial dims (H, W) of the feature maps into one axis.
        self.flat = nn.Flatten(2, 3)

    def forward(self, x):
        feature_maps = self.conv_base(x)   # (B, 2048, H', W')
        return self.flat(feature_maps)     # (B, 2048, H'*W')
class Encoder_DenseNet(nn.Module):
    """Frozen DenseNet-161 feature extractor with flattened spatial dims and ReLU.

    Output shape is (B, C, H'*W') — note: channels-first like the other
    encoders; C is densenet161's final feature width (2208 for torchvision's
    densenet161 — confirm against the installed torchvision version).
    """

    def __init__(self):
        super(Encoder_DenseNet, self).__init__()
        backbone = models.densenet161(pretrained=True)
        self.conv_base = backbone.features
        # Freeze the whole backbone: it is used for inference only.
        for p in self.conv_base.parameters():
            p.requires_grad = False
        # Collapse the spatial dims (H, W) of the feature maps into one axis.
        self.flat = nn.Flatten(2, 3)
        # DenseNet's `features` output is pre-activation, so apply a ReLU here
        # (unlike the VGG/ResNet encoders, whose outputs pass through no extra
        # activation in this module).
        self.relu = nn.ReLU()

    def forward(self, x):
        feature_maps = self.conv_base(x)   # (B, C, H', W')
        flattened = self.flat(feature_maps)  # (B, C, H'*W')
        return self.relu(flattened)
sdk/python/kfp/_credentials.py | Strasser-Pablo/pipelines | 1 | 12770446 | <reponame>Strasser-Pablo/pipelines<gh_stars>1-10
# Copyright 2021 Arrikto Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from kubernetes.client.configuration import Configuration
__all__ = [
"TokenCredentialsBase",
]
class TokenCredentialsBase(abc.ABC):
    """Base class for credentials that supply a bearer token to API clients."""

    def refresh_api_key_hook(self, config: Configuration):
        """Install this credential's current token on *config*.

        Helper for registering token refresh with swagger-generated clients.
        The kubernetes client's Configuration is interchangeable with
        kfp_server_api.configuration.Configuration here.

        Args:
            config (kubernetes.client.configuration.Configuration):
                The configuration object that the client uses.
        """
        config.api_key["authorization"] = self.get_token()

    @abc.abstractmethod
    def get_token(self):
        """Return the current token string; subclasses must implement."""
        raise NotImplementedError()
| 2.328125 | 2 |
maze.py | ChaitanyaBhat/Maze-PyGame | 0 | 12770447 | <reponame>ChaitanyaBhat/Maze-PyGame
import pygame
import time
from pygame.locals import *
pygame.init()
class Player(pygame.sprite.Sprite):
    """The bird sprite the user steers through the maze, one 15px cell at a time."""

    def __init__(self, *kwargs):
        super(Player, self).__init__(*kwargs)
        self.surf = pygame.image.load('bird.png')
        self.rect = self.surf.get_rect(topleft=(2 * 15 + 2, 6 * 15 + 2))

    def update(self, pressedKey):
        """Move one cell in the pressed direction; step back out on a wall hit."""
        dx = dy = 0
        if pressedKey[K_a] or pressedKey[K_LEFT]:
            dx = -15
        elif pressedKey[K_d] or pressedKey[K_RIGHT]:
            dx = 15
        elif pressedKey[K_w] or pressedKey[K_UP]:
            dy = -15
        elif pressedKey[K_s] or pressedKey[K_DOWN]:
            dy = 15
        self.rect.move_ip(dx, dy)
        # Collision detection: undo the move for every wall we just entered.
        for wall in walls:
            if self.rect.colliderect(wall.rect):
                self.rect.move_ip(-dx, -dy)
class Board:
    """Maze layout: 1 = wall cell, 0 = open cell (9 rows x 15 columns)."""

    _ROWS = (
        "111111111111111",
        "100000001000001",
        "111011100010001",
        "101010111010101",
        "100000101000101",
        "101110001011101",
        "100010101110001",
        "100000100000001",
        "111111111111111",
    )

    def __init__(self):
        # Expand the compact string rows into the list-of-int grid callers expect.
        self.layout = [[int(cell) for cell in row] for row in self._ROWS]
class Wall(object):
    """A single 15x15 wall cell; registers itself in the global `walls` list."""

    def __init__(self, pos):
        walls.append(self)
        x, y = pos
        self.rect = pygame.Rect(x, y, 15, 15)
class Target():
    """The bird-nest goal sprite, placed at maze cell (9, 5) in 15px units."""
    def __init__(self):
        # self.rect = pygame.Rect(9*15+2,5*15,10,10)
        self.surf = pygame.image.load('birdnest1.png')
        # self.surf.set_colorkey((255,255,255), RLEACCEL)
        self.rect = self.surf.get_rect(topleft = (9*15,5*15))
def message(msg, color, screen):
    """Render *msg* in bold italic (default font, size 30) at (40, 40) on *screen*."""
    font = pygame.font.SysFont(None, 30, bold=1, italic=1)
    rendered = font.render(msg, True, color)
    screen.blit(rendered, (40, 40))
player = Player()
board = Board()
target = Target()
walls =[]
# sending walls position to Wall class
# Walk the layout grid; every 1 cell becomes a Wall at its 15px-scaled position.
posX = posY = 0
for row in board.layout:
    for column in row:
        if column == 1:
            Wall((posX,posY))
            # pygame.draw.rect(screen,brown,(posX,posY,15,15))
        posX += 15
    posX = 0
    posY += 15
def gameLoop():
    """Main pygame loop: draw the maze, move the player, end when the nest is reached."""
    width = 225
    height = 135
    screen = pygame.display.set_mode((width,height))
    pygame.display.set_caption("Resolve Me")
    brown =(100,20,20)
    clock = pygame.time.Clock()
    running = True
    while running:
        # Quit on window close or Escape.
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    running = False
        # Redraw the scene every frame: background, walls, player, target.
        screen.fill((0,255,255))
        for wall in walls:
            pygame.draw.rect(screen,brown,wall.rect)
        screen.blit(player.surf,player.rect)
        # pygame.draw.rect(screen,(0,0,255),player.rect)
        screen.blit(target.surf,target.rect)
        # pygame.draw.rect(screen,(255,0,0),target.rect)
        pressedKey = pygame.key.get_pressed()
        player.update(pressedKey)
        # end of game when the target reached
        if player.rect.colliderect(target.rect):
            message("Game Over",(255,155,50),screen)
            running = False
        pygame.display.update()
        clock.tick(10)  # cap the loop at 10 FPS
gameLoop()
time.sleep(1)
pygame.quit()
quit() | 3.375 | 3 |
meteor_aggregations_main.py | rahulthewaffle/aim-expedia-takehome | 0 | 12770448 | <reponame>rahulthewaffle/aim-expedia-takehome<filename>meteor_aggregations_main.py
import sys
import os
import argparse
from meteor_aggs_utils import setup_logger
from meteor_aggs import final_aggregations
if __name__ == '__main__':
    # CLI: both arguments are optional; argparse supplies the defaults directly
    # instead of the previous post-hoc `x if x else default` fallbacks.
    parser = argparse.ArgumentParser()
    parser.add_argument('--bucket', action='store', type=str, dest='bucket',
                        default='majorly-meteoric')
    parser.add_argument('--logdir', action='store', type=str, dest='logdir',
                        default=os.path.join(os.getcwd(), 'tmp', 'logs'))
    args = parser.parse_args()

    logger = setup_logger(args.logdir)
    logger.info(f'{sys.argv[0]} starting up, arguments: Bucket = {args.bucket}, logdir = {args.logdir}.')

    # Run the aggregations, then log and echo both results.
    avg_mass, max_years = final_aggregations(args.bucket, logger)

    avg_string = f'Average meteorite mass is {avg_mass}.'
    logger.info(avg_string)
    print(avg_string)

    max_string = f'The year(s) with the highest meteorite count(s) is {max_years}.'
    logger.info(max_string)
    print(max_string)
assets/blender/delete_selected_recursive.py | revyTH/ludik | 0 | 12770449 | # Deletes selected objects recursively from the object hierarchy
import bpy

# Depth-first walk from the active object: select every descendant, then
# delete everything currently selected in a single operator call.
pending = [bpy.context.object]
while pending:
    current = pending.pop()
    for child in getattr(current, "children", ()):
        child.select = True
        pending.append(child)
bpy.ops.object.delete()
attre2vec/aggregators/base.py | attre2vec/attre2vec | 0 | 12770450 | """Implementations of edge walk aggregators."""
import abc
import torch
from torch import nn
class BaseAggregator(abc.ABC, nn.Module):
    """Abstract base for modules that pool an edge walk into one feature vector."""

    def __init__(self):
        """Initialize the module and pick the compute device (CUDA if available)."""
        super().__init__()
        use_cuda = torch.cuda.is_available()
        self._device = torch.device('cuda' if use_cuda else 'cpu')

    @abc.abstractmethod
    def aggregate(
        self,
        edge_features: torch.Tensor,
        nodes_features: torch.Tensor,
    ) -> torch.Tensor:
        """Aggregate a single edge walk into a feature vector."""
        pass
| 2.734375 | 3 |