Dataset schema (each record lists these fields in this order, pipe-separated, with the file content followed by the three trailing statistics):

- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
6fd08494a3ca1279fc553e7ba2f5e4e6e8104fd1
| 8,400
|
py
|
Python
|
metrics/get_predictions_metrics.py
|
sparks-baird/RoboCrab
|
9b7577911e42c87b911ebb4d9d3e691a58c63cbe
|
[
"MIT"
] | 44
|
2020-02-20T09:56:39.000Z
|
2022-03-09T08:49:31.000Z
|
metrics/get_predictions_metrics.py
|
sparks-baird/RoboCrab
|
9b7577911e42c87b911ebb4d9d3e691a58c63cbe
|
[
"MIT"
] | 21
|
2020-02-24T14:30:05.000Z
|
2022-03-26T19:38:42.000Z
|
metrics/get_predictions_metrics.py
|
sparks-baird/RoboCrab
|
9b7577911e42c87b911ebb4d9d3e691a58c63cbe
|
[
"MIT"
] | 13
|
2020-12-15T13:53:12.000Z
|
2022-03-31T12:25:26.000Z
|
import pandas as pd
import os
from utils.composition import _fractional_composition
def norm_form(formula):
comp = _fractional_composition(formula)
form = ''
for key, value in comp.items():
form += f'{key}{str(value)[0:9]}'
return form
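# Illustrative example (assuming _fractional_composition returns an element -> fraction mapping):
# norm_form('Al2O3') would give roughly 'Al0.4O0.6', each fraction truncated to at most 9 characters.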
def count_elems(string):
count = 0
switch = 1
for c in string:
if c.isalpha():
count += switch
switch = 0
if c.isnumeric():
switch = 1
return count
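# Example: count_elems('Al0.4O0.6') == 2; the switch flag counts each element symbol once and
# re-arms only after the numeric (fraction) part that follows it.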
# %%
if __name__ == '__main__':
print('processing all model predictions and calculating metrics')
print('this will take a few minutes...')
# %%
results_path = 'publication_predictions'
benchmark_path = 'data/benchmark_data'
test_directories = os.listdir(results_path)
benchmark_props = os.listdir(benchmark_path)
benchmark_test_directories = [test for test in test_directories if "benchmark" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for benchmark in benchmark_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in benchmark_test_directories:
df_train_orig = pd.read_csv(f'{benchmark_path}/{benchmark}/train.csv',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{benchmark_path}/{benchmark}/val.csv',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
files = os.listdir(f'{results_path}/{directory}')
file = [file for file in files if benchmark in file and 'test' in file]
if len(file) > 0:
models.append(directory.split('_')[0])
file = file[0]
df = pd.read_csv(f'{results_path}/{directory}/{file}',
keep_default_na=False, na_values=[''])
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {benchmark} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[benchmark, 'mean_test'] = act.mean()
df_stats.at[benchmark, 'std_test'] = act.std()
df_stats.at[benchmark, 'n_test'] = act.count()
df_stats.at[benchmark, 'mean_train'] = df_train['target'].mean()
df_stats.at[benchmark, 'std_train'] = df_train['target'].std()
df_stats.at[benchmark, 'n_train'] = df_train_orig.shape[0]
df_stats.at[benchmark, 'n_val'] = df_val.shape[0]
df_stats.at[benchmark, 'n_total'] = n_total
df_stats.at[benchmark, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[benchmark, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[benchmark, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae = df_diff.abs().mean()
test_maes[benchmark] = df_mae
dataset_results[benchmark] = df_compositions
dataset_preds[benchmark] = df_preds
dataset_acts[benchmark] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
out = pd.concat([out_1, df_stats], axis=1)
df_benchmark = out.copy()
# %%
results_path = 'publication_predictions'
matbench_path = 'data/matbench_cv'
test_directories = os.listdir(results_path)
matbench_props = os.listdir(matbench_path)
matbench_test_directories = [test for test in test_directories if "matbench" in test]
dataset_results = {}
dataset_preds = {}
dataset_acts = {}
test_maes = pd.DataFrame()
df_stats = pd.DataFrame()
for matbench in matbench_props:
df_compositions = pd.DataFrame()
df_preds = pd.DataFrame()
df_acts = pd.DataFrame()
models = []
for directory in matbench_test_directories:
train_files = os.listdir(f'{matbench_path}/{matbench}')
train_files = [file for file in train_files if 'train' in file]
test_files = os.listdir(f'{results_path}/{directory}')
test_files = [file for file in test_files if matbench in file and 'test' in file]
for i, (train_file, test_file) in enumerate(zip(train_files, test_files)):
df_train_orig = pd.read_csv(f'{matbench_path}/{matbench}/{train_file}',
keep_default_na=False, na_values=[''])
df_val = pd.read_csv(f'{matbench_path}/{matbench}/{train_file.replace("train", "val")}',
keep_default_na=False, na_values=[''])
df_train = pd.concat([df_train_orig, df_val], ignore_index=True)
df_train['formula'] = [norm_form(formula) for formula in df_train['formula']]
df_train.index = df_train['formula']
# test_file is already paired with train_file by the zip above, so no separate existence check is needed
models.append(directory.split('_')[0]+f'_{i}')
df = pd.read_csv(f'{results_path}/{directory}/{test_file}',
keep_default_na=False, na_values=[''])
df.index = df['formula'].values
composition = df['formula']
pred = df['predicted']
act = df['actual']
print(f'processing {matbench} {models[-1]}')
df_compositions = pd.concat([df_compositions, composition], axis=1)
df_preds = pd.concat([df_preds, pred], axis=1)
df_acts = pd.concat([df_acts, act], axis=1)
n_total = act.count() + df_val.shape[0] + df_train_orig.shape[0]
df_stats.at[matbench, 'mean_test'] = act.mean()
df_stats.at[matbench, 'std_test'] = act.std()
df_stats.at[matbench, 'n_test'] = act.count()
df_stats.at[matbench, 'mean_train'] = df_train['target'].mean()
df_stats.at[matbench, 'std_train'] = df_train['target'].std()
df_stats.at[matbench, 'n_train'] = df_train_orig.shape[0]
df_stats.at[matbench, 'n_val'] = df_val.shape[0]
df_stats.at[matbench, 'n_total'] = n_total
df_stats.at[matbench, 'prop_train'] = df_train_orig.shape[0] / n_total
df_stats.at[matbench, 'prop_val'] = df_val.shape[0] / n_total
df_stats.at[matbench, 'prop_test'] = act.count() / n_total
df_compositions.columns = models
df_preds.columns = models
df_acts.columns = models
df_diff = df_preds - df_acts
df_mae_cv = df_diff.abs()
df_mae = pd.DataFrame()
model_names = []
model_names = list(dict.fromkeys(x[:-2] for x in models))
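# The columns of df_mae_cv come in blocks of 5 per model (one per CV fold); below, the worst (max)
# absolute error across a composition's folds is taken first, then averaged over compositions per model.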
for j, model in enumerate(model_names):
df_mae.loc[model, 0] = df_mae_cv.iloc[:, (j)*5:(j+1)*5].max(axis=1).mean()
test_maes[matbench] = df_mae[0]
dataset_results[matbench] = df_compositions
dataset_preds[matbench] = df_preds
dataset_acts[matbench] = df_acts
maes = test_maes.T
model_names = ['roost', 'mat2vec', 'onehot', 'elemnet', 'rf']
out_1 = maes[model_names]
out = pd.concat([out_1, df_stats], axis=1)
df_matbench = out.copy()
# %%
df_all = pd.concat([df_benchmark, df_matbench], axis=0, ignore_index=False)
rename_dict = {'mat2vec': 'crabnet', 'onehot': 'hotcrab'}
df_all = df_all.rename(columns=rename_dict)
df_all.to_csv('metrics/all_metrics.csv', index_label='property')
| 43.076923
| 104
| 0.577738
|
2f12f4e95050f417c0829adab8a631326ae7c192
| 8,720
|
py
|
Python
|
Utils/download_packs_and_docker_images.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 1
|
2022-03-15T13:16:24.000Z
|
2022-03-15T13:16:24.000Z
|
Utils/download_packs_and_docker_images.py
|
mazmat-panw/content
|
024a65c1dea2548e2637a9cbbe54966e9e34a722
|
[
"MIT"
] | 87
|
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Utils/download_packs_and_docker_images.py
|
henry-sue-pa/content
|
043c6badfb4f9c80673cad9242fdea72efe301f7
|
[
"MIT"
] | null | null | null |
# Prerequisites to run this script:
#
# 1. Python 3.8+
# 2. requests python lib should be installed (can be installed by running "pip install requests" or "pip3 install requests")
# 3. docker is installed (if docker is not installed, you can skip the docker download using the `-sd` option)
# 4. docker python is installed (install it by running "pip install docker" or "pip3 install docker" or use the `-sd` option)
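# Example invocations (output directory name is illustrative):
#   python Utils/download_packs_and_docker_images.py -p "Base,AutoFocus" -o ./xsoar_offline
#   python Utils/download_packs_and_docker_images.py -p packs_list.txt -sd --insecure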
import argparse
import os
import tempfile
from zipfile import ZIP_DEFLATED, ZipFile
from pathlib import Path
import requests
ID_SET_URL = "https://storage.googleapis.com/marketplace-dist/content/id_set.json"
BUCKET_PACKS_URL = "https://marketplace-dist.storage.googleapis.com/content/packs"
def load_bucket_id_set(verify_ssl: bool) -> dict:
""" Loads the bucket id_set.json"""
r = requests.request(method='GET', url=ID_SET_URL, verify=verify_ssl)
return r.json()
def create_content_item_id_set(id_set_list: list) -> dict:
""" Given an id_set.json content item list, creates a dictionary representation"""
res = dict()
for item in id_set_list:
for key, val in item.items():
res[key] = val
return res
def zip_folder(source_path, output_path):
""" Zips the folder and its containing files"""
with ZipFile(output_path + '.zip', 'w', ZIP_DEFLATED) as source_zip:
for root, dirs, files in os.walk(source_path, topdown=True):
for f in files:
full_file_path = os.path.join(root, f)
source_zip.write(filename=full_file_path, arcname=f)
def get_docker_images_with_tag(pack_names: dict, id_set_json: dict) -> set:
""" Given a pack name returns its docker images with its latest tag"""
print('Starting to collect docker images')
integration_names_id_set = create_content_item_id_set(id_set_json['integrations'])
script_names_id_set = create_content_item_id_set(id_set_json['scripts'])
docker_images = set()
for pack_d_name, pack_name in pack_names.items():
if pack_name not in id_set_json['Packs']:
print(f"\tPack {pack_d_name} was not found in id_set.json.")
continue
content_items = id_set_json['Packs'][pack_name]['ContentItems']
integrations = content_items['integrations'] if 'integrations' in content_items else []
scripts = content_items['scripts'] if 'scripts' in content_items else []
if integrations:
print(f"\t{pack_d_name} docker images found for integrations:")
for integration in integrations:
if 'docker_image' in integration_names_id_set[integration]:
docker_image = integration_names_id_set[integration]['docker_image']
print(f"\t\t{docker_image} - used by {integration}")
docker_images.add(docker_image)
if scripts:
print(f"\t{pack_d_name} docker images found for scripts:")
for script in scripts:
if 'docker_image' in script_names_id_set[script]:
docker_image = script_names_id_set[script]['docker_image']
print(f"\t\t{docker_image} - used by {script}")
docker_images.add(docker_image)
return docker_images
def get_pack_names(pack_display_names: list, id_set_json: dict) -> dict:
""" Given pack_display_names try and parse it into a pack name as appears in content repo"""
pack_names = dict()
if 'Packs' not in id_set_json:
raise ValueError('Packs is missing from id_set.json.')
d_names_id_set = dict()
# create display name id_set.json
for pack_name, pack_value in id_set_json['Packs'].items():
d_names_id_set[pack_value['name']] = pack_name
# create result given display name id_set.json
for d_name in pack_display_names:
if d_name not in d_names_id_set:
print(f"Couldn't find pack {d_name}. Skipping pack.")
continue
pack_names[d_name] = d_names_id_set[d_name]
return pack_names
def download_and_save_packs(pack_names: dict, id_set_json: dict, output_path: str, verify_ssl: bool) -> None:
""" Download and save packs under """
if 'Packs' not in id_set_json:
raise ValueError('Packs missing from id_set.json.')
id_set_packs = id_set_json['Packs']
print("Starting to download packs")
temp_dir = tempfile.TemporaryDirectory()
try:
for pack_d_name, pack_name in pack_names.items():
if pack_name not in id_set_packs:
print(f"\tCouldn't find {pack_d_name} in id_set.json. Skipping pack download.")
continue
pack_version = id_set_packs[pack_name]['current_version']
print(f"\tDownloading {pack_d_name} Pack")
r = requests.request(method='GET',
url=f'{BUCKET_PACKS_URL}/{pack_name}/{pack_version}/{pack_name}.zip',
verify=verify_ssl)
with open(os.path.join(temp_dir.name, pack_name + '.zip'), 'wb') as f:
f.write(r.content)
zip_folder(temp_dir.name, output_path)
finally:
temp_dir.cleanup()
def download_and_save_docker_images(docker_images: set, output_path: str) -> None:
""" Downloads and saves the docker images into docker.zip in output_path"""
import docker # import docker only when required
print("Starting to download docker images for given packs")
cli = docker.from_env(timeout=120)
temp_dir = tempfile.TemporaryDirectory()
try:
for image in docker_images:
print(f"\tDownloading docker image: {image}")
image_pair = image.split(':')
image_data = cli.images.pull(image_pair[0], image_pair[1])
image_file_name = os.path.join(temp_dir.name, os.path.basename(f"{image_pair[0]}_{image_pair[1]}.tar"))
with open(image_file_name, 'wb') as f:
for chunk in image_data.save(named=True):
f.write(chunk)
zip_folder(temp_dir.name, output_path)
finally:
temp_dir.cleanup()
print("Finished docker images download")
def options_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Downloads XSOAR packs as zip and their latest docker images as tar.")
parser.add_argument('-p', '--packs',
help="A list of pack names as they appear in https://xsoar.pan.dev/marketplaceEither provided "
"via a path to a file that contains the packs list (separated by new lines) or "
"a string of comma separated packs (e.g. Base,AutoFocus)",
required=True)
parser.add_argument('-o', '--output_path',
help="The path where the files will be saved to.",
required=False, default=".")
parser.add_argument('-sp', '--skip_packs',
help="Don't download packs.",
required=False, action='store_true')
parser.add_argument('-sd', '--skip_docker',
help="Don't download docker images.",
required=False, action='store_true')
parser.add_argument('--insecure',
help="Skip certificate validation.", dest='insecure', action='store_true')
parser.set_defaults(skip_packs=False, skip_docker=False, insecure=False)
return parser.parse_args()
def main():
options = options_handler()
output_path = options.output_path
packs = options.packs
if os.path.isfile(packs):
pack_display_names = []
with open(packs) as file:
for line in file:
pack_display_names.append(line.rstrip())
else:
pack_display_names = packs.split(',')
verify_ssl = not options.insecure
id_set_json = load_bucket_id_set(verify_ssl)
pack_names = get_pack_names(pack_display_names, id_set_json)
Path(output_path).mkdir(parents=True, exist_ok=True)
if not options.skip_packs and pack_names:
download_and_save_packs(pack_names, id_set_json, os.path.join(output_path, 'packs'), verify_ssl)
else:
print('Skipping packs.zip creation')
if pack_names:
docker_images = get_docker_images_with_tag(pack_names, id_set_json)
if not options.skip_docker:
download_and_save_docker_images(docker_images, os.path.join(output_path, 'docker'))
else:
print('Skipping dockers.zip creation')
else:
print('Skipping docker images collection since no packs were found')
if __name__ == '__main__':
main()
| 43.6
| 125
| 0.652638
|
e24849c1589ecafda5e401d1c491fc6ee9506089
| 11,840
|
py
|
Python
|
examples/Nolan/AFRL/Hipersonica/hyper23.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-03-26T03:00:03.000Z
|
2019-03-26T03:00:03.000Z
|
examples/Nolan/AFRL/Hipersonica/hyper23.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | null | null | null |
examples/Nolan/AFRL/Hipersonica/hyper23.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-07-14T22:53:52.000Z
|
2019-07-14T22:53:52.000Z
|
if __name__ == "__main__":
import numpy as np
import beluga.Beluga as Beluga
import beluga.bvpsol as bvpsol
import beluga.bvpsol.algorithms as algorithms
import beluga.optim.Problem
from beluga.optim.problem import *
from beluga.continuation import *
import logging
# Import Libraries for Matrix Calculations
from sympy import symbols, Matrix, Transpose, simplify, diff
from sympy import sin
from sympy import cos, acos
from sympy import sqrt
from sympy import exp
from sympy import atan
from numpy import pi
writeEqn = True
simpList = False
if writeEqn:
writeList = []
# Constants
rho0, h_ref, A_ref, r_e, mass, mu = symbols('rho0, h_ref, A_ref, r_e, mass, mu')
sig_h, sig_t, sig_v, sig_g, sig_r, sig_b, Dt = symbols('sig_h, sig_t, sig_v, sig_g, sig_r, sig_b Dt')
theta_b = symbols('theta_b')
# Primary States
h, theta, v, gam = symbols('h, theta, v, gam')
# Control
a_trig, a_max, u, u_max = symbols('a_trig, a_max, u, u_max')
# alpha = a_max*sin(a_trig)
alpha = symbols('alpha')
# Secondary States
rho = rho0 * exp(-h/h_ref)
Cl = 1.5658*alpha*180/pi + -0.00000
Cd = 1.6537*(alpha*180/pi)**2 + 0.0612
D = 0.5*rho*v**2*Cd*A_ref
L = 0.5*rho*v**2*Cl*A_ref
r = r_e + h
# Primary State Rates
h_dot = v*sin(gam)
theta_dot = v*cos(gam)/r
v_dot = -D/mass - mu*sin(gam)/r**2
gam_dot = L/(mass*v) + (v/r - mu/(v*r**2))*cos(gam)
alpha_dot = u_max*sin(u)
writeList = [h_dot, theta_dot, v_dot, gam_dot, alpha_dot]
# Covariance Calculations
p11, p12, p13, p14, \
p22, p23, p24, \
p33, p34, \
p44 \
= symbols('p11 p12 p13 p14 \
p22 p23 p24 \
p33 p34 \
p44')
P = Matrix([[p11, p12, p13, p14],
[p12, p22, p23, p24],
[p13, p23, p33, p34],
[p14, p24, p34, p44]])
F = Matrix([[diff(h_dot, h), diff(theta_dot, h), diff(v_dot, h), diff(gam_dot, h)],
[diff(h_dot, theta), diff(theta_dot, theta), diff(v_dot, theta), diff(gam_dot, theta)],
[diff(h_dot, v), diff(theta_dot, v), diff(v_dot, v), diff(gam_dot, v)],
[diff(h_dot, gam), diff(theta_dot, gam), diff(v_dot, gam), diff(gam_dot, gam)]]).T
G = Matrix([[0, 0],
[0, 0],
[1, 0],
[0, 1]])
theta_r = theta - theta_b
Rho = sqrt(
r_e ** 2 + r ** 2 - 2 * r * r_e * cos(theta - theta_b)) # sqrt(2*r_e*(r_e + h)*(1 - cos(theta_r)) + h**2)
H = Matrix([[diff(Rho, h), diff(Rho, theta), diff(Rho, v), diff(Rho, gam)]])
Q = Dt * Matrix([[sig_v ** 2, 0],
[0, sig_g ** 2]])
R = Dt * Matrix([[sig_r ** 2]])
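# The expression below is the continuous-time Riccati form of EKF covariance propagation:
# P_dot = F*P + P*F.T - P*H.T*inv(R)*H*P + G*Q*G.T, with H linearising the range measurement Rho.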
P_dot = (F*P + P*F.T - P*H.T*(R**-1)*H*P + G*Q*G.T)
Dim = P_dot.shape
for i in range(0, Dim[0]):
for j in range(i, Dim[1]):
# print(P_dot[i, j])
writeList.append(P_dot[i, j])
# h_new, theta_new, v_new, gam_new = symbols('h_new, theta_new, v_new, gam_new')
# h_scale, theta_scale, v_scale, gam_scale = symbols('h_scale, theta_scale, v_scale, gam_scale')
states = [h, theta, v, gam, a_trig,
p11, p12, p13, p14,
p22, p23, p24,
p33, p34,
p44]
h_s, theta_s, v_s, gam_s, \
p11_s, p12_s, p13_s, p14_s, \
p22_s, p23_s, p24_s, \
p33_s, p34_s, \
p44_s = \
symbols('h_s, theta_s, v_s, gam_s, \
p11_s, p12_s, p13_s, p14_s, \
p22_s, p23_s, p24_s, \
p33_s, p34_s, \
p44_s')
scales = [h_s, theta_s, v_s, gam_s, 1,
p11_s, p12_s, p13_s, p14_s,
p22_s, p23_s, p24_s,
p33_s, p34_s,
p44_s]
h_n, theta_n, v_n, gam_n, \
p11_n, p12_n, p13_n, p14_n, \
p22_n, p23_n, p24_n, \
p33_n, p34_n, \
p44_n = \
symbols('h_n, theta_n, v_n, gam_n, \
p11_n, p12_n, p13_n, p14_n, \
p22_n, p23_n, p24_n, \
p33_n, p34_n, \
p44_n')
states_new = [h_n, theta_n, v_n, gam_n, a_trig,
p11_n, p12_n, p13_n, p14_n,
p22_n, p23_n, p24_n,
p33_n, p34_n,
p44_n]
# print(writeList)
Z1 = zip(writeList, scales)
scaledList = []
for item, Scale in Z1:
# print(item)
item = item/Scale
Z2 = zip(states, states_new, scales)
# print(item)
# for state, new, scale in Z2:
# print(state)
# print(new)
# print(scale)
for state, new, scale in Z2:
# print(new)
item = item.subs(state, scale*new)
# print(item)
scaledList.append(item)
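# At this point each rate has been divided by its state's scale and every state symbol replaced by
# scale*new_symbol, i.e. the equations of motion are rewritten in nondimensionalised (scaled) variables.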
k = 1
with open("eqns.txt", "w") as my_file:
for item in scaledList:
if simpList:
# print('* ' + str(item))
item = simplify(item)
# print('# ' + str(item))
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(scaledList)))
k += 1
k = 1
alfa = symbols('alpha')
with open("eqnsUnscaled.txt", "w") as my_file:
for item in writeList:
item = item.subs(a_max*sin(a_trig),alfa)
my_file.write(str(item) + "\n")
# print(" Wrote " + str(k) + "/" + str(len(writeList)))
k += 1
''' Start Optimal Control Calculations '''
# Read Covariance State Rates from File
with open("eqns.txt", "r") as f:
eqnsList = list(f)
# for item in P_dot_eqns:
# print(item)
# Rename this and/or move to optim package?
problem = beluga.optim.Problem('hyperKalman7')
# problem = beluga.optim.Problem()
# Define independent variables
problem.independent('t', 's')
# rho = 'rho0*exp(-h/H)'
# Cl = '(1.5658*alfa + -0.0000)'
# Cd = '(1.6537*alfa^2 + 0.0612)'
# Cl = 'CLfunctio0n(alfa)'
# Cd = 'CDfunction(alfa)'
# D = '(0.5*'+rho+'*v^2*'+Cd+'*Aref)'
# L = '(0.5*'+rho+'*v^2*'+Cl+'*Aref)'
# r = '(re+h)'
# Define equations of motion
problem.state('h_n', eqnsList[0]+'+ ep/h_s*cos(u)', 'm') \
.state('theta_n', eqnsList[1], 'rad') \
.state('v_n', eqnsList[2], 'm/s') \
.state('gam_n', eqnsList[3], 'rad') \
.state('alpha', eqnsList[4], 'rad') \
.state('p11_n', eqnsList[5], 'm**2') \
.state('p12_n', eqnsList[6], 'm') \
.state('p13_n', eqnsList[7], 'm**2/s') \
.state('p14_n', eqnsList[8], 'm') \
.state('p22_n', eqnsList[9], 'rad')\
.state('p23_n', eqnsList[10], 'rad*m/s') \
.state('p24_n', eqnsList[11], 'rad**2') \
.state('p33_n', eqnsList[12], 'm**2/s**2') \
.state('p34_n', eqnsList[13], 'm/s') \
.state('p44_n', eqnsList[14], 'rad**2') \
# Define controls
problem.control('u', 'rad')
# Define costs
# problem.cost['path'] = Expression('p11', 'm^2/s^2')
# problem.cost['path'] = Expression('0.001', 's')
problem.cost['terminal'] = Expression('p22_n', '1')
# Define constraints
problem.constraints() \
.initial('h_n-h_n_0', 'm') \
.initial('theta_n-theta_n_0', 'rad') \
.initial('v_n-v_n_0', 'm/s') \
.initial('gam_n-gam_n_0', 'rad') \
.initial('p11_n-p11_n_0', 'm**2') \
.initial('p12_n-p12_n_0', 'm') \
.initial('p13_n-p13_n_0', 'm**2/s') \
.initial('p14_n-p14_n_0', 'm') \
.initial('p22_n-p22_n_0', 'rad^2') \
.initial('p23_n-p23_n_0', 'rad*m/s') \
.initial('p24_n-p24_n_0', 'rad**2') \
.initial('p33_n-p33_n_0', 'm**2/s**2') \
.initial('p34_n-p34_n_0', 'm/s') \
.initial('p44_n-p44_n_0', 'rad**2') \
.terminal('h_n-h_n_f', 'm') \
.terminal('theta_n-theta_n_f', 'rad')
# Define constants
problem.constant('mu', 3.986e5*1e9, 'm^3/s^2') # Gravitational parameter, m^3/s^2
problem.constant('rho0', 1.2, 'kg/m^3') # Sea-level atmospheric density, kg/m^3
problem.constant('h_ref', 7500, 'm') # Scale height for atmosphere of Earth, m
problem.constant('mass', 750/2.2046226, 'kg') # Mass of vehicle, kg
problem.constant('r_e', 6378000, 'm') # Radius of planet, m
problem.constant('A_ref', pi*(24*.0254/2)**2, 'm^2') # Reference area of vehicle, m^2
problem.constant('rn', 1/12*0.3048, 'm') # Nose radius, m
problem.constant('Dt', 0.1, 's') # time step
problem.constant('sig_v', 10.0, 'm/s**2') # var in v
problem.constant('sig_g', 0.1*pi/180, 'rad/s') # var in gam
problem.constant('sig_r', 100.0, 'm') # var in range
problem.constant('theta_b', -2*pi/180, 'rad') # location of kalmanBeacon
problem.constant('a_max', 10.0*pi/180, 'rad')
problem.constant('u_max', 0.25*pi/180, 'rad/s')
problem.constant('h_s', 1000, 'rad')
problem.constant('theta_s', 1, 'rad')
problem.constant('v_s', 100, 'rad')
problem.constant('gam_s', 1, 'rad')
problem.constant('p11_s', 1e5, 'rad')
problem.constant('p12_s', 1e-2, 'rad')
problem.constant('p13_s', 1e2, 'rad')
problem.constant('p14_s', 1e-1, 'rad')
problem.constant('p22_s', 1e-10, 'rad')
problem.constant('p23_s', 1e-5, 'rad')
problem.constant('p24_s', 1e-8, 'rad')
problem.constant('p33_s', 1e1, 'rad')
problem.constant('p34_s', 1e-3, 'rad')
problem.constant('p44_s', 1e-6, 'rad')
problem.constant('ep', 40, 'rad')
problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd', tolerance=1e-4, max_iterations=1000, verbose=True, cached=False, number_arcs=16)
# problem.bvp_solver = algorithms.SingleShooting(derivative_method='fd',tolerance=1e-4, max_iterations=1000, verbose=True, cached=False)
problem.scale.unit('m', 1) \
.unit('s', 1) \
.unit('kg', 1) \
.unit('rad', 1)
# Define quantity (not implemented at present)
# Is this actually an Expression rather than a Value?
# problem.quantity = [Value('tanAng','tan(theta)')]
problem.guess.setup('auto', start=[80, 0, 50, -89*pi/180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], costate_guess=[0, 0, 0, 0, 0.0001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], time_integrate=2.5) # costate_guess=[0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# problem.guess.setup('auto',start=[80000,3.38575809e-21,5000,7.98617365e-02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],direction='forward',time_integrate=229.865209,costate_guess =[-1.37514494e+01,3.80852584e+06,-3.26290152e+03,-2.31984720e-14,0.00,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01])
# Figure out nicer way of representing this. Done?
problem.steps.add_step().num_cases(6) \
.terminal('theta_n', 1*pi/180) \
.terminal('h_n', 0)
# problem.steps.add_step().num_cases(15) \
# .terminal('theta_n', 5)
# problem.steps.add_step().num_cases(21) \
# .terminal('theta', 10*pi/180)
Beluga.run(problem, display_level=logging.DEBUG)
| 35.987842
| 288
| 0.509459
|
7a63af95b623349d41866798a766b2bfb8754e26
| 5,297
|
py
|
Python
|
src/esapy/api_growi.py
|
KosukeMizuno/esapy
|
9b4955f3e50a83f8336a5d155cd47c27c3a135ae
|
[
"MIT"
] | 6
|
2020-02-25T00:53:09.000Z
|
2021-07-21T16:49:41.000Z
|
src/esapy/api_growi.py
|
KosukeMizuno/esapy
|
9b4955f3e50a83f8336a5d155cd47c27c3a135ae
|
[
"MIT"
] | 28
|
2020-02-24T10:48:35.000Z
|
2021-07-25T18:01:00.000Z
|
src/esapy/api_growi.py
|
KosukeMizuno/esapy
|
9b4955f3e50a83f8336a5d155cd47c27c3a135ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from pathlib import Path
import mimetypes
import requests
import json
import uuid
from urllib.parse import quote
import hashlib
# logger
from logging import getLogger
logger = getLogger(__name__)
from .loadrc import KEY_GROWI_USERNAME
def _set_proxy(proxy):
if proxy is None:
logger.debug('No proxy is addressed.')
return
logger.info('Addressed proxy: %s' % proxy)
os.environ['HTTP_PROXY'] = proxy
logger.debug('HTTP_PROXY={:s}'.format(os.environ['HTTP_PROXY']))
os.environ['HTTPS_PROXY'] = proxy
logger.debug('HTTPS_PROXY={:s}'.format(os.environ['HTTPS_PROXY']))
def _get_growi_username():
return os.environ[KEY_GROWI_USERNAME]
def get_team_stats(token=None, url=None, proxy=None):
logger.info('Getting team statistics from GROWI')
_set_proxy(proxy)
# get metadata
res = requests.get(url + '/_api/v3/statistics/user',
params=dict(access_token=token))
logger.debug(res)
logger.debug(res.headers)
if res.status_code == 200:
pass
else:
raise RuntimeError('Getting team statistics failed.')
print(res.json())
return res.json()
def upload_binary(filename, token=None, url=None, proxy=None):
path_bin = Path(filename)
logger.info('Uploading binary data, path=%s' % str(path_bin))
logger.info(' filesize: %d' % path_bin.stat().st_size)
_set_proxy(proxy)
page_id = get_post_by_path('/user/' + _get_growi_username(), token, url, proxy)['_id']
# upload file
logger.info('Posting binary...{:}'.format(path_bin.name))
with path_bin.open('rb') as imgfile:
res = requests.post(url + '/_api/attachments.add',
data=dict(page_id=page_id,
access_token=token),
files=dict(file=(path_bin.name,
imgfile,
mimetypes.guess_type(path_bin)[0]))
)
logger.debug(res.headers)
if res.status_code != 200:
logger.warning('Upload failed, %s' % str(path_bin))
raise RuntimeError('Upload failed.')
image_url = res.json()['attachment']['filePathProxied']
logger.info('The file has been uploaded successfully, url: %s' % image_url)
return image_url, res
def get_post(page_id, token=None, url=None, proxy=None):
logger.info('Getting post/{:}'.format(page_id))
# post
_set_proxy(proxy)
payload = {'access_token': token,
'page_id': page_id}
res = requests.get(url + '/_api/pages.get',
params=payload)
logger.debug(res)
if res.status_code != 200:
logger.warning('Getting post failed.')
raise RuntimeError('Getting post failed.')
d = res.json()
logger.debug(d)
return d['page']
def get_post_by_path(pagepath, token=None, url=None, proxy=None):
logger.info('Getting post/{:}'.format(pagepath))
_set_proxy(proxy)
payload = {'access_token': token,
'path': pagepath}
res = requests.get(url + '/_api/pages.get',
params=payload)
logger.debug(res)
logger.debug(res.headers)
# logger.debug(res.text)
if res.status_code != 200:
logger.warning('Getting post failed.')
raise RuntimeError('Getting post failed.')
d = res.json()
logger.debug(d)
return d['page']
def create_post(body_md, token=None, url=None, name=None, proxy=None):
logger.info('Creating new post')
# post
_set_proxy(proxy)
if name is None:
raise RuntimeError('`name` is required.')
payload = {'access_token': token,
'body': body_md,
'path': '/user/' + _get_growi_username() + '/' + name}
res = requests.post(url + '/_api/v3/pages/',
data=payload)
logger.debug(res)
logger.debug(res.headers)
if res.status_code == 409:
raise RuntimeError('Page path already exists. Retry with a different name.')
elif res.status_code != 201:
raise RuntimeError('Create post failed.')
logger.info('New post was successfully created.')
d = res.json()
logger.debug(d['data']['page'])
pageurl = url + '/' + d['data']['page']['path']
logger.info('URL of the created post: %s' % pageurl)
return pageurl, res
def patch_post(page_id, body_md, name, token=None, url=None, proxy=None):
logger.info('Updating post: {:}'.format(page_id))
page_dat = get_post(page_id, token, url, proxy)
# post
_set_proxy(proxy)
payload = {'body': body_md,
'page_id': page_id,
'revision_id': page_dat['revision']['_id']
}
logger.debug(payload)
res = requests.post(url + f'/_api/pages.update?access_token={quote(token)}',
data=payload,
)
logger.debug(res)
if res.status_code != 200:
raise RuntimeError('Updating post failed.')
d = res.json()
logger.debug(d)
logger.debug(d['page'])
pageurl = url + '/' + d['page']['path']
logger.info('The post was successfully updated.')
logger.info('URL of the updated post: %s' % pageurl)
return pageurl, res
| 28.175532
| 90
| 0.603738
|
342f41edd126c7ad0c1dd0e2d5eb76844d9467b7
| 1,430
|
py
|
Python
|
FeatureAssembler/fstrategies/ASTEmbedding/tool_io/readAndCompareResults.py
|
sed-inf-u-szeged/DeepWaterFramework
|
30e9c0145300e24834a3d4a7dde25002c638dd5a
|
[
"Apache-2.0"
] | 6
|
2020-06-18T22:38:09.000Z
|
2022-01-13T14:32:56.000Z
|
FeatureAssembler/fstrategies/ASTEmbedding/tool_io/readAndCompareResults.py
|
sed-inf-u-szeged/DeepWaterFramework
|
30e9c0145300e24834a3d4a7dde25002c638dd5a
|
[
"Apache-2.0"
] | 18
|
2020-01-13T08:32:53.000Z
|
2021-07-23T08:26:02.000Z
|
FeatureAssembler/fstrategies/ASTEmbedding/tool_io/readAndCompareResults.py
|
sed-inf-u-szeged/DeepWaterFramework
|
30e9c0145300e24834a3d4a7dde25002c638dd5a
|
[
"Apache-2.0"
] | 2
|
2020-06-18T09:50:59.000Z
|
2021-03-18T04:52:53.000Z
|
import csv
import sys
import json
def readDbhCsv(directory):
retData = dict()
filename=directory+"/dbh.csv"
with open(filename, mode='r', encoding='utf-8' ) as resultFile:
reader=csv.reader(resultFile, dialect='excel', delimiter = ";")
data=list(reader)
for i in range(len(data)):
retData[data[i][3]]=json.loads(data[i][10].replace('\'','"'))
return retData
def compare(ref, data):
if (data>ref):
return "**"+'{:03.2f}'.format(data)+"** (+"+'{:2.0f}'.format(100*data-100*ref) +")"
elif (data<ref):
return "<u>"+'{:03.2f}'.format(data)+"</u> (-"+'{:2.0f}'.format(100*ref-100*data) +")"
return '{:03.2f}'.format(data)
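# Example: compare(0.80, 0.85) -> '**0.85** (+ 5)' and compare(0.80, 0.72) -> '<u>0.72</u> (- 8)':
# improvements are bolded, regressions underlined, and the delta is reported in percentage points.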
def compareData(refData, data):
for i in data:
print ("|" + i
+ "|" + compare(refData[i]["precision"], data[i]["precision"])
+ "|" + compare(refData[i]["recall"], data[i]["recall"])
+ "|" + compare(refData[i]["accuracy"], data[i]["accuracy"])
+ "|" +compare(refData[i]["fmes"], data[i]["fmes"])
+ "|" + compare(refData[i]["completeness"], data[i]["completeness"]) + "|")
if len(sys.argv) > 2:
refData=readDbhCsv(sys.argv[1])
data=readDbhCsv(sys.argv[2])
#print(refData)
print("|Algorithm | Precision | Recall | Accuracy | Fmes | Completness |")
print("| --------- | --------- | ------ | -------- | ---- | ----------- |")
compareData(refData, data)
| 32.5
| 94
| 0.537762
|
c3994e857ad786403ca45e6ddd0bf13f31707920
| 3,970
|
py
|
Python
|
dcodex_lectionary/migrations/0030_auto_20201119_2131.py
|
rbturnbull/dcodex_lectionary
|
9a4787eb353d09fef023cd82af8859a7ee041aee
|
[
"Apache-2.0"
] | null | null | null |
dcodex_lectionary/migrations/0030_auto_20201119_2131.py
|
rbturnbull/dcodex_lectionary
|
9a4787eb353d09fef023cd82af8859a7ee041aee
|
[
"Apache-2.0"
] | 2
|
2021-08-09T01:11:59.000Z
|
2021-08-09T01:12:49.000Z
|
dcodex_lectionary/migrations/0030_auto_20201119_2131.py
|
rbturnbull/dcodex_lectionary
|
9a4787eb353d09fef023cd82af8859a7ee041aee
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.11 on 2020-11-19 10:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('dcodex_lectionary', '0029_auto_20201116_2119'),
]
operations = [
migrations.CreateModel(
name='LectionaryDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_dcodex_lectionary.lectionaryday_set+', to='contenttypes.ContentType')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
),
migrations.CreateModel(
name='EothinaDay',
fields=[
('lectionaryday_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex_lectionary.LectionaryDay')),
('rank', models.IntegerField()),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('dcodex_lectionary.lectionaryday',),
),
migrations.CreateModel(
name='FixedDay',
fields=[
('lectionaryday_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex_lectionary.LectionaryDay')),
('date', models.DateField(blank=True, default=None, null=True)),
],
options={
'ordering': ('date',),
},
bases=('dcodex_lectionary.lectionaryday',),
),
migrations.CreateModel(
name='MiscDay',
fields=[
('lectionaryday_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex_lectionary.LectionaryDay')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('dcodex_lectionary.lectionaryday',),
),
migrations.CreateModel(
name='MovableDay',
fields=[
('lectionaryday_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex_lectionary.LectionaryDay')),
('day_of_week', models.IntegerField(choices=[(0, 'Sunday'), (1, 'Monday'), (2, 'Tuesday'), (3, 'Wednesday'), (4, 'Thursday'), (5, 'Friday'), (6, 'Saturday')])),
('period', models.CharField(choices=[('E', 'Easter'), ('P', 'Pentecost'), ('F', 'Feast of the Cross'), ('L', 'Lent'), ('G', 'Great Week'), ('T', 'Epiphany')], max_length=1)),
('week', models.CharField(max_length=31)),
('weekday_number', models.CharField(max_length=32)),
('earliest_date', models.CharField(max_length=15)),
('latest_date', models.CharField(max_length=15)),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('dcodex_lectionary.lectionaryday',),
),
migrations.AddField(
model_name='lectioninsystem',
name='lectionary_day',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='dcodex_lectionary.LectionaryDay'),
),
]
| 47.261905
| 225
| 0.58262
|
a15539defe06aaaee59a56bdf879cfcb36c49150
| 2,371
|
py
|
Python
|
07-the-sum-of-its-parts.py
|
gosiqueira/advent-of-code-2018
|
04e784afadd71adcce7c33c6e07ed116b4d0a350
|
[
"MIT"
] | null | null | null |
07-the-sum-of-its-parts.py
|
gosiqueira/advent-of-code-2018
|
04e784afadd71adcce7c33c6e07ed116b4d0a350
|
[
"MIT"
] | null | null | null |
07-the-sum-of-its-parts.py
|
gosiqueira/advent-of-code-2018
|
04e784afadd71adcce7c33c6e07ed116b4d0a350
|
[
"MIT"
] | null | null | null |
"""
Day 07 - The Sum of Its Parts
---
source: https://adventofcode.com/2018/day/7
"""
import argparse
import string
import time
from collections import deque
from utils import get_input
def the_sum_of_its_parts(verbose=False):
instructions = get_input(day=7)
tasks = {}
for ch in string.ascii_uppercase:
tasks[ch] = set()
for inst in instructions:
tasks[inst[36]].add(inst[5])
schedule = ''
while len(schedule) < 26:
for t, dep in tasks.items():
if t in schedule: continue
if dep.issubset(set(schedule)):
schedule += t
break
if verbose:
print(f'The order in which the tasks should be completed: {schedule}')
total_time = 0
workers = [{'remaining': 0, 'task': None} for _ in range(5)]
queue = deque()
done, doing = set(), set()
while len(done) < 26:
for k, v in tasks.items():
if k in done or k in doing or k in queue:
continue
if v.issubset(done):
queue.append(k)
while queue:
avail_worker = None
for i in range(5):
if workers[i]['task'] is None:
avail_worker = i
break
if avail_worker is None:
break
task = queue.popleft()
print(f'Task taken: {task}')
workers[avail_worker]['task'] = task
doing.add(task)  # mark the task as in progress so it is not re-queued and can be removed once finished
workers[avail_worker]['remaining'] = ord(task) - 4
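# ord(task) - 4 above equals 60 + the task's 1-based alphabet position (ord('A') == 65), i.e. the step duration.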
for i in range(5):
if workers[i]['task']:
workers[i]['remaining'] -= 1
if workers[i]['remaining'] == 0:
task = workers[i]['task']
done.add(task)
doing.remove(task)
workers[i]['task'] = None
total_time += 1
print(total_time, workers)
if verbose:
print(f'Time taken to complete all the steps: {total_time}')
return schedule, total_time
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Advent of Code 2018 -- Day 07')
parser.add_argument('--verbose', dest='verbose', action='store_true')
args = parser.parse_args()
tic = time.time()
response = the_sum_of_its_parts(args.verbose)
tac = time.time()
print(*response)
print(f'{tac - tic:.3f} s')
| 25.771739
| 81
| 0.537748
|
762639efb4dcf533016583515f36a965fdd98f1a
| 533
|
py
|
Python
|
djenealog/tables.py
|
nim65s/djenealog
|
3666cd65d22e118294f477b41f8e56b04b56a52d
|
[
"BSD-2-Clause"
] | null | null | null |
djenealog/tables.py
|
nim65s/djenealog
|
3666cd65d22e118294f477b41f8e56b04b56a52d
|
[
"BSD-2-Clause"
] | 2
|
2018-04-25T11:54:34.000Z
|
2020-03-09T07:11:35.000Z
|
djenealog/tables.py
|
nim65s/djenealog
|
3666cd65d22e118294f477b41f8e56b04b56a52d
|
[
"BSD-2-Clause"
] | null | null | null |
import django_tables2 as tables
from . import models
class IndividuTable(tables.Table):
edit = tables.Column(accessor="get_link", orderable=False)
class Meta:
model = models.Individu
fields = ("prenom", "nom", "masculin", "parents", "edit")
def render_masculin(self, value):
return "♂" if value else "♀"
class CoupleTable(tables.Table):
edit = tables.Column(accessor="get_link", orderable=False)
class Meta:
model = models.Couple
fields = ("mari", "femme", "edit")
| 23.173913
| 65
| 0.643527
|
ae244ebc1fd2cdd1307f87bf93cc983ef58bf53a
| 10,060
|
py
|
Python
|
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import redistribute
class af_ipv4_uc_and_vrf_cmds_call_point_holder(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /routing-system/router/router-bgp/address-family/ipv4/ipv4-unicast/af-vrf/af-ipv4-uc-and-vrf-cmds-call-point-holder. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__bgp_redistribute_internal','__redistribute',)
_yang_name = 'af-ipv4-uc-and-vrf-cmds-call-point-holder'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__redistribute = YANGDynClass(base=redistribute.redistribute, is_container='container', presence=False, yang_name="redistribute", rest_name="redistribute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Redistribute information from another routing protocol', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
self.__bgp_redistribute_internal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="bgp-redistribute-internal", rest_name="bgp-redistribute-internal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Allow redistribution of iBGP routes into IGPs'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'routing-system', u'router', u'router-bgp', u'address-family', u'ipv4', u'ipv4-unicast', u'af-vrf', u'af-ipv4-uc-and-vrf-cmds-call-point-holder']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'bgp', u'address-family', u'ipv4', u'unicast', u'vrf']
def _get_bgp_redistribute_internal(self):
"""
Getter method for bgp_redistribute_internal, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/bgp_redistribute_internal (empty)
"""
return self.__bgp_redistribute_internal
def _set_bgp_redistribute_internal(self, v, load=False):
"""
Setter method for bgp_redistribute_internal, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/bgp_redistribute_internal (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_bgp_redistribute_internal is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_bgp_redistribute_internal() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="bgp-redistribute-internal", rest_name="bgp-redistribute-internal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Allow redistribution of iBGP routes into IGPs'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """bgp_redistribute_internal must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="bgp-redistribute-internal", rest_name="bgp-redistribute-internal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Allow redistribution of iBGP routes into IGPs'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__bgp_redistribute_internal = t
if hasattr(self, '_set'):
self._set()
def _unset_bgp_redistribute_internal(self):
self.__bgp_redistribute_internal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="bgp-redistribute-internal", rest_name="bgp-redistribute-internal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Allow redistribution of iBGP routes into IGPs'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
def _get_redistribute(self):
"""
Getter method for redistribute, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/redistribute (container)
"""
return self.__redistribute
def _set_redistribute(self, v, load=False):
"""
Setter method for redistribute, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/af_ipv4_uc_and_vrf_cmds_call_point_holder/redistribute (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_redistribute is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redistribute() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=redistribute.redistribute, is_container='container', presence=False, yang_name="redistribute", rest_name="redistribute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Redistribute information from another routing protocol', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """redistribute must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=redistribute.redistribute, is_container='container', presence=False, yang_name="redistribute", rest_name="redistribute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Redistribute information from another routing protocol', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__redistribute = t
if hasattr(self, '_set'):
self._set()
def _unset_redistribute(self):
self.__redistribute = YANGDynClass(base=redistribute.redistribute, is_container='container', presence=False, yang_name="redistribute", rest_name="redistribute", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Redistribute information from another routing protocol', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
bgp_redistribute_internal = __builtin__.property(_get_bgp_redistribute_internal, _set_bgp_redistribute_internal)
redistribute = __builtin__.property(_get_redistribute, _set_redistribute)
_pyangbind_elements = {'bgp_redistribute_internal': bgp_redistribute_internal, 'redistribute': redistribute, }
| 63.27044
| 544
| 0.74672
|
5fa4c9a3b44213887af00e87cbfca8002d360f0d
| 3,408
|
py
|
Python
|
iommi/page__tests.py
|
yanhuixie/iommi
|
b6ae39c270b10b553a7ef2ab52e8c1c96489db0b
|
[
"BSD-3-Clause"
] | 192
|
2020-01-30T14:29:56.000Z
|
2022-03-28T19:55:30.000Z
|
iommi/page__tests.py
|
yanhuixie/iommi
|
b6ae39c270b10b553a7ef2ab52e8c1c96489db0b
|
[
"BSD-3-Clause"
] | 105
|
2020-03-29T21:59:01.000Z
|
2022-03-24T12:29:09.000Z
|
iommi/page__tests.py
|
yanhuixie/iommi
|
b6ae39c270b10b553a7ef2ab52e8c1c96489db0b
|
[
"BSD-3-Clause"
] | 28
|
2020-02-02T20:51:09.000Z
|
2022-03-08T16:23:42.000Z
|
from platform import python_implementation
import pytest
from django.test import override_settings
from iommi import (
Fragment,
html,
Page,
)
from iommi._web_compat import (
Template,
)
from iommi.member import _force_bind_all
from iommi.part import as_html
from tests.helpers import (
prettify,
req,
user_req,
)
def test_simple_page():
class MyPage(Page):
footer = html.div(
html.hr(),
)
my_page = MyPage()
my_page.bind(request=req('GET')).render_to_response()
my_page.bind(request=req('GET')).render_to_response()
def test_page_constructor():
class MyPage(Page):
h1 = html.h1()
my_page = MyPage(parts__foo=html.div(_name='foo'), parts__bar=html.div()).refine_done()
assert list(my_page.iommi_namespace.parts.keys()) == ['h1', 'foo', 'bar']
my_page = my_page.bind(request=None)
assert list(my_page.parts.keys()) == ['h1', 'foo', 'bar']
@pytest.mark.skipif(python_implementation() == 'PyPy', reason='Intermittently fails on pypy for unknown reasons.')
@override_settings(
MIDDLEWARE_CLASSES=[],
)
def test_page_render():
# Working around some weird issue with pypy3+django3.0
from django.conf import settings
settings.DEBUG = False
# end workaround
class MyPage(Page):
header = html.h1('Foo')
body = html.div('bar bar')
my_page = MyPage(parts__footer=html.div('footer'))
my_page = my_page.bind(request=user_req('get'))
response = my_page.render_to_response()
expected_html = '''
<!DOCTYPE html>
<html>
<head>
<title></title>
</head>
<body>
<h1> Foo </h1>
<div> bar bar </div>
<div> footer </div>
</body>
</html>
'''
prettified_expected = prettify(expected_html)
prettified_actual = prettify(response.content)
assert prettified_expected == prettified_actual
def test_promote_str_to_fragment_for_page():
class MyPage(Page):
foo = 'asd'
page = MyPage().refine_done()
assert isinstance(page.iommi_namespace.parts.foo, Fragment)
def test_as_html_integer():
assert as_html(part=123, context={}) == '123'
def test_page_context():
class MyPage(Page):
part1 = Template('Template: {{foo}}\n')
part2 = html.div(template=Template('Template2: {{foo}}'))
class Meta:
context__foo = 'foo'
assert MyPage().bind(request=req('get')).__html__() == 'Template: foo\nTemplate2: foo'
def test_invalid_context_specified():
class Nested(Page):
class Meta:
context__foo = 1
class Root(Page):
nested = Nested()
with pytest.raises(AssertionError) as e:
root = Root().bind(request=None)
_force_bind_all(root.parts)
assert str(e.value) == 'The context property is only valid on the root page'
def test_as_view():
view = Page(parts__foo='##foo##').as_view()
assert '##foo##' in view(req('get')).content.decode()
def test_title_basic():
assert '<h1>Foo</h1>' == Page(title='foo').bind(request=req('get')).__html__()
def test_title_empty():
assert '' in Page().bind(request=req('get')).__html__()
def test_title_attr():
assert '<h1 class="foo">Foo</h1>' == Page(title='foo', h_tag__attrs__class__foo=True).bind(request=req('get')).__html__()
| 24.875912
| 125
| 0.631455
|
b34a7a2b6f42413f938aa27d7894a8c779efa992
| 339
|
py
|
Python
|
csv_manager/views.py
|
Apfirebolt/CSV-File-Manager-in-Django
|
e708038c1f9951ec593b37bee5dd329268643af0
|
[
"MIT"
] | null | null | null |
csv_manager/views.py
|
Apfirebolt/CSV-File-Manager-in-Django
|
e708038c1f9951ec593b37bee5dd329268643af0
|
[
"MIT"
] | null | null | null |
csv_manager/views.py
|
Apfirebolt/CSV-File-Manager-in-Django
|
e708038c1f9951ec593b37bee5dd329268643af0
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
def handler404(request, exception):
return render(request, '404.html')
def handler403(request, exception):
return render(request, '403.html')
def handler500(request, exception):
return render(request, '500.html')
def handler400(request, exception):
return render(request, '400.html')
| 19.941176
| 37
| 0.740413
|
902da8cbd33808618399125bb013b3cfef957b80
| 4,479
|
py
|
Python
|
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
src/sss/genkey.py
|
foundriesio/plug-and-trust-ssscli
|
f77c65d5b3de649d7db1c023ee41d871f77cd224
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2020 NXP
# SPDX-License-Identifier: Apache-2.0
#
#
"""License text"""
import logging
from . import sss_api as apis
from .keystore import KeyStore
from .keyobject import KeyObject
from .getkey import Get
from .util import get_ecc_cypher_type
log = logging.getLogger(__name__)
class Generate:
"""
Generate key pair/public key of ecc/rsa
"""
def __init__(self, session_obj):
"""
Constructor
:param session_obj: Instance of session
"""
self._session = session_obj
self._ctx_ks = KeyStore(self._session)
self._ctx_key = KeyObject(self._ctx_ks)
self.key_obj_mode = apis.kKeyObject_Mode_Persistent
def gen_ecc_public(self, key_id, curve_type, file_name, policy, encode_format=""): # pylint: disable=too-many-arguments
"""
Generate ecc public key
:param key_id: Key index
:param curve_type: ECC curve type
:param file_name: File name to store public key
:param policy: Policy to be applied
:param encode_format: File format to store public key
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_ecc_pair(key_id, curve_type, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name, encode_format)
return status
def gen_ecc_pair(self, key_id, curve_type, policy):
"""
Generate ecc key pair
:param key_id: Key index
:param curve_type: ECC curve type
:param policy: Policy to be applied
:return: Status
"""
cypher_type, key_size = get_ecc_cypher_type(curve_type)
key_type = apis.kSSS_KeyPart_Pair
if key_size == 0:
log.error("curve type not supported")
return apis.kStatus_SSS_Fail
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def gen_rsa_public(self, key_id, key_size, file_name, policy):
"""
Generate rsa public key
:param key_id: Key index
:param key_size: Key size to generate
:param file_name: File name to store public key
:param policy: Policy to be applied
:return: Status
"""
if file_name[-4:] != '.pem' and file_name[-4:] != '.der':
log.error("Unsupported file type. File type should be in pem or der format")
return apis.kStatus_SSS_Fail
status = self.gen_rsa_pair(key_id, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
get = Get(self._session)
status = get.get_key(key_id, file_name)
return status
def gen_rsa_pair(self, key_id, key_size, policy):
"""
Generate rsa key pair
:param key_id: Key index
:param key_size: RSA key size to generate
:param policy: Policy to be applied
:return: Status
"""
key_type = apis.kSSS_KeyPart_Pair
cypher_type = apis.kSSS_CipherType_RSA_CRT
status = self._gen_key_pair(key_id, key_size, key_type, cypher_type, policy)
return status
def _gen_key_pair(self, key_id, key_size, key_type, cypher_type, policy): # pylint: disable=too-many-arguments
"""
Generate key pair
:param key_id: Key index
:param key_size: Key size
:param key_type: Key type
:param cypher_type: Cypher type
:param policy: Policy to be applied
:return: Status
"""
# Key length calculation based on key bit length
# if modulus of key_bit_len is non zero, then allocate extra byte
if (key_size % 8) != 0:
key_len = (key_size / 8) + 1
else:
key_len = key_size / 8
status = self._ctx_key.allocate_handle(key_id, key_type, cypher_type, int(key_len),
self.key_obj_mode)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.generate_key(self._ctx_key, key_size, policy)
if status != apis.kStatus_SSS_Success:
return status
status = self._ctx_ks.save_key_store()
return status
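# Hypothetical usage sketch (not part of the original module). The session object, key id
# and curve name below are assumptions -- a real session comes from the ssscli session layer,
# and valid curve names depend on the attached secure element.
#
#     gen = Generate(session_obj)
#     status = gen.gen_ecc_pair(0x20181001, 'prime256v1', policy=None)
#     assert status == apis.kStatus_SSS_Success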
| 33.177778
| 124
| 0.621121
|
2272a3280e0c69e50c6002475d88955fc50890e0
| 5,289
|
py
|
Python
|
tests/test_pack.py
|
genenetwork/cwltool
|
15539fba76993f951af9eba913bea6d677c74005
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pack.py
|
genenetwork/cwltool
|
15539fba76993f951af9eba913bea6d677c74005
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pack.py
|
genenetwork/cwltool
|
15539fba76993f951af9eba913bea6d677c74005
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import tempfile
from functools import partial
from six import StringIO
import pytest
import cwltool.pack
import cwltool.workflow
from cwltool import load_tool
from cwltool.load_tool import fetch_document, validate_document
from cwltool.main import main, make_relative, print_pack
from cwltool.pathmapper import adjustDirObjs, adjustFileObjs
from cwltool.resolver import tool_resolver
from .util import get_data, needs_docker
def test_pack():
load_tool.loaders = {}
document_loader, workflowobj, uri = fetch_document(get_data("tests/wf/revsort.cwl"))
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
with open(get_data("tests/wf/expect_packed.cwl")) as packed_file:
expect_packed = json.load(packed_file)
packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
adjustFileObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
assert "$schemas" in packed
assert len(packed["$schemas"]) == len(expect_packed["$schemas"])
del packed["$schemas"]
del expect_packed["$schemas"]
assert packed == expect_packed
def test_pack_single_tool():
load_tool.loaders = {}
document_loader, workflowobj, uri = fetch_document(
get_data("tests/wf/formattest.cwl"))
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
assert "$schemas" in packed
def test_pack_rewrites():
load_tool.loaders = {}
rewrites = {}
document_loader, workflowobj, uri = fetch_document(get_data("tests/wf/default-wf5.cwl"))
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
cwltool.pack.pack(document_loader, processobj, uri, metadata, rewrite_out=rewrites)
assert len(rewrites) == 6
cwl_missing_version_paths = [
"tests/wf/hello_single_tool.cwl",
"tests/wf/hello-workflow.cwl"
]
@pytest.mark.parametrize('cwl_path', cwl_missing_version_paths)
def test_pack_missing_cwlVersion(cwl_path):
"""Test to ensure the generated pack output is not missing
the `cwlVersion` in case of single tool workflow and single step workflow"""
# Testing single tool workflow
document_loader, workflowobj, uri = fetch_document(get_data(cwl_path))
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
# generate pack output dict
packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
assert packed["cwlVersion"] == 'v1.0'
def test_pack_idempotence_tool():
"""Test to ensure that pack produces exactly the same document for
an already packed document"""
# Testing single tool
_pack_idempotently("tests/wf/hello_single_tool.cwl")
def test_pack_idempotence_workflow():
"""Test to ensure that pack produces exactly the same document for
an already packed document"""
# Testing workflow
_pack_idempotently("tests/wf/count-lines1-wf.cwl")
def _pack_idempotently(document):
document_loader, workflowobj, uri = fetch_document(
get_data(document))
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
# generate pack output dict
packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
document_loader, workflowobj, uri2 = fetch_document(packed)
document_loader, _, processobj, metadata, uri2 = validate_document(
document_loader, workflowobj, uri, [], {})
double_packed = json.loads(print_pack(document_loader, processobj, uri2, metadata))
assert packed == double_packed
cwl_to_run = [
("tests/wf/count-lines1-wf.cwl",
"tests/wf/wc-job.json",
False
),
("tests/wf/formattest.cwl",
"tests/wf/formattest-job.json",
True
),
]
@needs_docker
@pytest.mark.parametrize('wf_path,job_path,namespaced', cwl_to_run)
def test_packed_workflow_execution(wf_path, job_path, namespaced):
load_tool.loaders = {}
document_loader, workflowobj, uri = fetch_document(
get_data(wf_path), resolver=tool_resolver)
document_loader, _, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri, [], {})
packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
assert not namespaced or "$namespaces" in packed
wf_packed_handle, wf_packed_path = tempfile.mkstemp()
with open(wf_packed_path, 'w') as temp_file:
json.dump(packed, temp_file)
normal_output = StringIO()
packed_output = StringIO()
normal_params = [get_data(wf_path), get_data(job_path)]
packed_params = ['--debug', get_data(wf_packed_path), get_data(job_path)]
assert main(normal_params, stdout=normal_output) == 0
assert main(packed_params, stdout=packed_output) == 0
assert json.loads(packed_output.getvalue()) == json.loads(normal_output.getvalue())
os.close(wf_packed_handle)
os.remove(wf_packed_path)
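# CLI counterpart of what these tests exercise (a sketch; the input path is the fixture used
# in test_pack above, and the output file name is arbitrary):
#
#     cwltool --pack tests/wf/revsort.cwl > revsort-packed.cwl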
| 34.122581
| 92
| 0.726035
|
46a1d62394f245ebdd51b0b2aee95597abd0c394
| 3,881
|
py
|
Python
|
webCrawler_scrapy/dbhelper.py
|
bobobo0826/scrapy_wallpaper
|
8f2eb9a05d49557feab04d9565be211a73e58184
|
[
"MIT"
] | null | null | null |
webCrawler_scrapy/dbhelper.py
|
bobobo0826/scrapy_wallpaper
|
8f2eb9a05d49557feab04d9565be211a73e58184
|
[
"MIT"
] | null | null | null |
webCrawler_scrapy/dbhelper.py
|
bobobo0826/scrapy_wallpaper
|
8f2eb9a05d49557feab04d9565be211a73e58184
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#import MySQLdb
import scrapy
#import pymysql
import pymysql
#let pymysql masquerade as MySQLdb; this line can be dropped if you use pymysql directly
pymysql.install_as_MySQLdb()
from scrapy.utils.project import get_project_settings #import the settings configuration
class DBHelper():
    '''This class also reads its configuration from settings; adapt the code to your own needs'''
    def __init__(self):
        self.settings=get_project_settings() #load the settings and pick out the values we need
        self.host=self.settings['MYSQL_HOST']
        self.port=self.settings['MYSQL_PORT']
        self.user=self.settings['MYSQL_USER']
        self.passwd=self.settings['MYSQL_PASSWD']
        self.db=self.settings['MYSQL_DBNAME']
    #connect to MySQL (not to a specific database)
    def connectMysql(self):
        conn=pymysql.connect(host=self.host,
                             port=self.port,
                             user=self.user,
                             passwd=self.passwd,
                             db=self.db,#database name, e.g. testdb
                             charset='utf8') #set the charset, otherwise Chinese text may come out garbled
        return conn
    #connect to the specific database (MYSQL_DBNAME configured in settings)
    def connectDatabase(self):
        conn=pymysql.connect(host=self.host,
                             port=self.port,
                             user=self.user,
                             passwd=self.passwd,
                             db=self.db,
                             charset='utf8') #set the charset, otherwise Chinese text may come out garbled
        return conn
    #create the database
    def createDatabase(self):
        '''Creating the database only needs MYSQL_DBNAME from settings, so no SQL statement is passed in'''
        conn=self.connectMysql()#connect to MySQL
        print("1212121"+self.db)
        sql="create database if not exists "+self.db
        cur=conn.cursor()
        cur.execute(sql)#execute the SQL statement
        cur.close()
        conn.close()
    #create a table
    def createTable(self,sql):
        conn=self.connectDatabase()
        cur=conn.cursor()
        cur.execute(sql)
        cur.close()
        conn.close()
    #insert data
    def insert(self,sql,*params):#note the * on params: the caller passes a tuple and * takes a variable number of arguments
        conn=self.connectDatabase()
        cur=conn.cursor()
        cur.execute(sql,params)
        conn.commit()#remember to commit
        cur.close()
        conn.close()
    #update data
    def update(self,sql,*params):
        conn=self.connectDatabase()
        cur=conn.cursor()
        cur.execute(sql,params)
        conn.commit()#remember to commit
        cur.close()
        conn.close()
    #delete data
    def delete(self,sql,*params):
        conn=self.connectDatabase()
        cur=conn.cursor()
        cur.execute(sql,params)
        conn.commit()
        cur.close()
        conn.close()
'''Test class for DBHelper'''
class TestDBHelper():
    def __init__(self):
        self.dbHelper=DBHelper()
    #test creating database testdb (MYSQL_DBNAME in the settings file; just edit settings to change it)
    def testCreateDatebase(self):
        self.dbHelper.createDatabase()
    #test creating table testtable
    def testCreateTable(self):
        sql="create table testtable(id int primary key auto_increment,name varchar(50),url varchar(200))"
        self.dbHelper.createTable(sql)
    #test insert
    def testInsert(self):
        sql="insert into testtable(name,url) values(%s,%s)"
        params=("test","test")
        self.dbHelper.insert(sql,*params) # * unpacks the tuple here; inside insert it is packed back into a tuple
    def testUpdate(self):
        sql="update testtable set name=%s,url=%s where id=%s"
        params=("update","update","1")
        self.dbHelper.update(sql,*params)
    def testDelete(self):
        sql="delete from testtable where id=%s"
        params=("1",)
        self.dbHelper.delete(sql,*params)
if __name__=="__main__":
    testDBHelper=TestDBHelper()
    #testDBHelper.testCreateDatebase()  #run the create-database test
    #testDBHelper.testCreateTable()     #run the create-table test
    #testDBHelper.testInsert()          #run the insert test
    #testDBHelper.testUpdate()          #run the update test
    #testDBHelper.testDelete()          #run the delete test
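#Hypothetical item pipeline sketch (not part of the original file): how DBHelper is typically
#driven from a Scrapy pipeline. The pipeline class name and the item fields ('name', 'url')
#are assumptions; the SQL reuses the testtable schema defined above.
class WallpaperWritePipeline(object):
    def __init__(self):
        self.db = DBHelper()
    def process_item(self, item, spider):
        sql = "insert into testtable(name,url) values(%s,%s)"
        self.db.insert(sql, item['name'], item['url'])
        return item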
| 30.801587
| 105
| 0.581809
|
2350ff6eec4023fcc73d69bdc9ddff611b891a58
| 205
|
py
|
Python
|
app/app.py
|
jieyinybl/immoscore
|
0daf9e455a307aed522e2498804776362d6c17ae
|
[
"MIT"
] | null | null | null |
app/app.py
|
jieyinybl/immoscore
|
0daf9e455a307aed522e2498804776362d6c17ae
|
[
"MIT"
] | null | null | null |
app/app.py
|
jieyinybl/immoscore
|
0daf9e455a307aed522e2498804776362d6c17ae
|
[
"MIT"
] | null | null | null |
from mara_app.app import MaraApp
app = MaraApp()
# from werkzeug.contrib.profiler import ProfilerMiddleware
# app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir='/tmp/')
wsgi_app = app.wsgi_app
| 25.625
| 70
| 0.790244
|
8f77fcd785601a7a20f1d4d0681c03ec6f82b687
| 3,197
|
py
|
Python
|
PhoneProbes/eval.py
|
archiki/ASR-Accent-Analysis
|
da1dcb8e3a4476c392e01ed410cc47691d693882
|
[
"MIT"
] | 12
|
2020-07-02T06:51:22.000Z
|
2022-03-22T19:40:42.000Z
|
PhoneProbes/eval.py
|
csalt-research/ASR-Accent-Analysis
|
da1dcb8e3a4476c392e01ed410cc47691d693882
|
[
"MIT"
] | 1
|
2021-05-12T08:27:30.000Z
|
2021-05-28T21:24:53.000Z
|
PhoneProbes/eval.py
|
csalt-research/ASR-Accent-Analysis
|
da1dcb8e3a4476c392e01ed410cc47691d693882
|
[
"MIT"
] | 3
|
2021-07-26T18:55:05.000Z
|
2022-02-14T15:17:17.000Z
|
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from model import PhoneNet
from data_loader import PhoneDataset
import argparse
import pdb
import time
from tqdm import tqdm
from torch.nn.parallel import DistributedDataParallel
import multiprocessing
from collections import OrderedDict
# print(type(batch[1]))
# return out_batch
def eval(test_path,rep_type, batch_size, num_epochs, inp_dim0, inp_dim1,model_path, hidden_dim = 500, all_gpu = False):
cuda = torch.cuda.is_available()
test_set = PhoneDataset(rep_type, test_path)
inp_dim = (inp_dim0, inp_dim1)
# torch.set_num_threads(32)
net = PhoneNet(inp_dim, hidden_dim)
criterion = nn.CrossEntropyLoss()
if(cuda):
net = net.cuda()
criterion = criterion.cuda()
state_dict = torch.load(model_path)
try:
net.load_state_dict(state_dict)
except:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:]
new_state_dict[name] = v
#net = torch.nn.DataParallel(net)
net.load_state_dict(new_state_dict)
if(not all_gpu):
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True, num_workers = multiprocessing.cpu_count()//4, pin_memory = True)
else:
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)
print('Loading finished')
#
for epoch in range(num_epochs):
train_loss = 0
test_loss = 0
train_total = 0
test_total = 0
train_correct = 0
test_correct = 0
net.eval()
for rep, label in tqdm(test_loader):
rep = Variable(rep)
label = Variable(label)
if(cuda):
rep = rep.cuda()
label = label.cuda()
pred = net(rep)
tloss = criterion(pred, label)
test_loss += tloss.item()
_, predicted = torch.max(pred.data, 1)
test_total += label.size(0)
test_correct += (predicted == label).sum().item()
#calculate loss
#calculate accuracy
print("Test Accuracy {}".format(100*test_correct/test_total))
#print('Epoch: {}, Train Loss: {}, Test Loss: {}, Train Accuracy: {}, Test Accuracy: {}'.format(str(epoch), str(train_loss/train_total), \
# str(test_loss/test_total), str(100*train_correct/train_total), str(100*test_correct/test_total)))
#torch.save(net.state_dict(), 'Weights_{}/Weights_{}.pth'.format(rep_type, str(epoch+1)))
parser = argparse.ArgumentParser(description='Take command line arguments')
#parser.add_argument('--train_path',type=str)
parser.add_argument('--test_path',type=str)
parser.add_argument('--rep_type',type=str)
parser.add_argument('--learning_rate',type=float)
parser.add_argument('--batch_size',type=int)
parser.add_argument('--num_epochs',type=int)
parser.add_argument('--use_model', action='store_true', default = False)
parser.add_argument('--model_path', type= str)
args = parser.parse_args()
dim = {'spec':[161,1], 'conv':[1312,1], 'rnn_0': [1024,1], 'rnn_1': [1024,1], 'rnn_2': [1024, 1], 'rnn_3': [1024, 1], 'rnn_4': [1024,1]}
#pdb.set_trace()
if __name__ == '__main__':
print(args.test_path.split('/')[-2])
print(args.rep_type)
eval( args.test_path, args.rep_type, args.batch_size, 1, dim[args.rep_type][0], dim[args.rep_type][1], args.model_path)
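# Example invocation (a sketch; the paths are hypothetical, the flags match the argparse
# definitions above, and --num_epochs is accepted but eval() is always called with 1 epoch):
#
#     python eval.py --test_path data/phone_probes/indian/test.csv --rep_type rnn_0 \
#                    --batch_size 32 --num_epochs 1 --model_path Weights_rnn_0/Weights_10.pth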
| 34.010638
| 141
| 0.720988
|
4adbeea2b6ac2608aa77e390442913bb60daec86
| 920
|
py
|
Python
|
xlsxwriter/test/comparison/test_object_position17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_object_position17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_object_position17.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('object_position17.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, 5, None, {'hidden': 1})
worksheet.insert_image('A9', self.image_dir + 'red.png', {'x_offset': 192})
workbook.close()
self.assertExcelEqual()
| 24.864865
| 83
| 0.618478
|
1190e0deda18d8ab82e5bdab9249f996eb7e0b9e
| 1,023
|
py
|
Python
|
dns/rdtypes/ANY/SPF.py
|
Ashiq5/dnspython
|
5449af5318d88bada34f661247f3bcb16f58f057
|
[
"ISC"
] | 1,666
|
2015-01-02T17:46:14.000Z
|
2022-03-30T07:27:32.000Z
|
dns/rdtypes/ANY/SPF.py
|
felixonmars/dnspython
|
2691834df42aab74914883fdf26109aeb62ec647
|
[
"ISC"
] | 591
|
2015-01-16T12:19:49.000Z
|
2022-03-30T21:32:11.000Z
|
dns/rdtypes/ANY/SPF.py
|
felixonmars/dnspython
|
2691834df42aab74914883fdf26109aeb62ec647
|
[
"ISC"
] | 481
|
2015-01-14T04:14:43.000Z
|
2022-03-30T19:28:52.000Z
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.txtbase
import dns.immutable
@dns.immutable.immutable
class SPF(dns.rdtypes.txtbase.TXTBase):
"""SPF record"""
# see: RFC 4408
| 36.535714
| 75
| 0.772239
|
879865cf0b47a9e17a6e95d834d9401129ca793b
| 3,821
|
py
|
Python
|
0.16/_downloads/plot_configuration.py
|
drammock/mne-tools.github.io
|
5d3a104d174255644d8d5335f58036e32695e85d
|
[
"BSD-3-Clause"
] | null | null | null |
0.16/_downloads/plot_configuration.py
|
drammock/mne-tools.github.io
|
5d3a104d174255644d8d5335f58036e32695e85d
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:53:46.000Z
|
2020-10-29T19:53:46.000Z
|
0.16/_downloads/plot_configuration.py
|
drammock/mne-tools.github.io
|
5d3a104d174255644d8d5335f58036e32695e85d
|
[
"BSD-3-Clause"
] | 1
|
2021-04-12T12:45:31.000Z
|
2021-04-12T12:45:31.000Z
|
# -*- coding: utf-8 -*-
"""
======================
Configuring MNE python
======================
This tutorial gives a short introduction to MNE configurations.
"""
import os.path as op
import mne
from mne.datasets.sample import data_path
fname = op.join(data_path(), 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(fname).crop(0, 10)
original_level = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
###############################################################################
# MNE-python stores configurations to a folder called `.mne` in the user's
# home directory, or to AppData directory on Windows. The path to the config
# file can be found out by calling :func:`mne.get_config_path`.
print(mne.get_config_path())
###############################################################################
# These configurations include information like sample data paths and plotter
# window sizes. Files inside this folder should never be modified manually.
# Let's see what the configurations contain.
print(mne.get_config())
###############################################################################
# We see fields like "MNE_DATASETS_SAMPLE_PATH". As the name suggests, this is
# the path the sample data is downloaded to. All the fields in the
# configuration file can be modified by calling :func:`mne.set_config`.
###############################################################################
#
# .. _tut_logging:
#
# Logging
# =======
# Configurations also include the default logging level for the functions. This
# field is called "MNE_LOGGING_LEVEL".
mne.set_config('MNE_LOGGING_LEVEL', 'INFO')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))
###############################################################################
# The default value is now set to INFO. This level will now be used by default
# every time we call a function in MNE. We can set the global logging level for
# only this session by calling :func:`mne.set_log_level` function.
mne.set_log_level('WARNING')
print(mne.get_config(key='MNE_LOGGING_LEVEL'))
###############################################################################
# Notice how the value in the config file was not changed. Logging level of
# WARNING only applies for this session. Let's see what logging level of
# WARNING prints for :func:`mne.compute_raw_covariance`.
cov = mne.compute_raw_covariance(raw)
###############################################################################
# Nothing. This means that no warnings were emitted during the computation. If
# you look at the documentation of :func:`mne.compute_raw_covariance`, you
# notice the ``verbose`` keyword. Setting this parameter does not touch the
# configurations, but sets the logging level for just this one function call.
# Let's see what happens with logging level of INFO.
cov = mne.compute_raw_covariance(raw, verbose=True)
###############################################################################
# As you see there is some info about what the function is doing. The logging
# level can be set to 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'. It can
# also be set to an integer or a boolean value. The correspondence to string
# values can be seen in the table below. ``verbose=None`` uses the default
# value from the configuration file.
#
# +----------+---------+---------+
# | String | Integer | Boolean |
# +==========+=========+=========+
# | DEBUG | 10 | |
# +----------+---------+---------+
# | INFO | 20 | True |
# +----------+---------+---------+
# | WARNING | 30 | False |
# +----------+---------+---------+
# | ERROR | 40 | |
# +----------+---------+---------+
# | CRITICAL | 50 | |
# +----------+---------+---------+
mne.set_config('MNE_LOGGING_LEVEL', original_level)
| 43.420455
| 79
| 0.55352
|
9652256664a0856948516ad9450b9edd8f91bab1
| 44,515
|
py
|
Python
|
google_images_download/google_images_download.py
|
buddydvd/google-images-download
|
368de9d4a54d22bd16d2d7b4be7e1a8db833f4f0
|
[
"MIT"
] | 1
|
2021-06-30T06:16:18.000Z
|
2021-06-30T06:16:18.000Z
|
google_images_download/google_images_download.py
|
buddydvd/google-images-download
|
368de9d4a54d22bd16d2d7b4be7e1a8db833f4f0
|
[
"MIT"
] | null | null | null |
google_images_download/google_images_download.py
|
buddydvd/google-images-download
|
368de9d4a54d22bd16d2d7b4be7e1a8db833f4f0
|
[
"MIT"
] | 1
|
2021-03-23T19:21:35.000Z
|
2021-03-23T19:21:35.000Z
|
#!/usr/bin/env python
# In[ ]:
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import sys
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request
from urllib.request import Request, urlopen
from urllib.request import URLError, HTTPError
from urllib.parse import quote
import http.client
from http.client import IncompleteRead
http.client._MAXHEADERS = 1000
else: # If the Current Version of Python is 2.x
import urllib2
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import httplib
from httplib import IncompleteRead
httplib._MAXHEADERS = 1000
import time # Importing the time library to check the time of code execution
import os
import argparse
import ssl
import datetime
import json
import re
import codecs
import socket
args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords",
"limit", "format", "color", "color_type", "usage_rights", "size",
"exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image",
"output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site",
"print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout",
"thumbnail", "language", "prefix", "chromedriver", "related_images", "safe_search", "no_numbering"]
def user_input():
config = argparse.ArgumentParser()
config.add_argument('-cf', '--config_file', help='config file name', default='', type=str, required=False)
config_file_check = config.parse_known_args()
object_check = vars(config_file_check[0])
if object_check['config_file'] != '':
records = []
json_file = json.load(open(config_file_check[0].config_file))
for record in range(0,len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
records_count = len(records)
else:
# Taking command line arguments from users
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False)
parser.add_argument('-kf', '--keywords_from_file', help='extract list of keywords from a text file', type=str, required=False)
parser.add_argument('-sk', '--suffix_keywords', help='comma separated additional words added after to main keyword', type=str, required=False)
parser.add_argument('-pk', '--prefix_keywords', help='comma separated additional words added before main keyword', type=str, required=False)
parser.add_argument('-l', '--limit', help='delimited list input', type=str, required=False)
parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False,
choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico'])
parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False)
parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str, required=False)
parser.add_argument('-o', '--output_directory', help='download images in a specific main directory', type=str, required=False)
parser.add_argument('-i', '--image_directory', help='download images in a specific sub-directory', type=str, required=False)
parser.add_argument('-n', '--no_directory', default=False, help='download images in the main directory but no sub-directory', action="store_true")
parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=int, required=False)
parser.add_argument('-co', '--color', help='filter on color', type=str, required=False,
choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white', 'gray', 'black', 'brown'])
parser.add_argument('-ct', '--color_type', help='filter on color', type=str, required=False,
choices=['full-color', 'black-and-white', 'transparent'])
parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False,
choices=['labeled-for-reuse-with-modifications','labeled-for-reuse','labeled-for-noncommercial-reuse-with-modification','labeled-for-nocommercial-reuse'])
parser.add_argument('-s', '--size', help='image size', type=str, required=False,
choices=['large','medium','icon','>400*300','>640*480','>800*600','>1024*768','>2MP','>4MP','>6MP','>8MP','>10MP','>12MP','>15MP','>20MP','>40MP','>70MP'])
parser.add_argument('-es', '--exact_size', help='exact image resolution "WIDTH,HEIGHT"', type=str, required=False)
parser.add_argument('-t', '--type', help='image type', type=str, required=False,
choices=['face','photo','clip-art','line-drawing','animated'])
parser.add_argument('-w', '--time', help='image age', type=str, required=False,
choices=['past-24-hours','past-7-days'])
parser.add_argument('-wr', '--time_range', help='time range for the age of the image. should be in the format {"time_min":"MM/DD/YYYY","time_max":"MM/DD/YYYY"}', type=str, required=False)
parser.add_argument('-a', '--aspect_ratio', help='comma separated additional words added to keywords', type=str, required=False,
choices=['tall', 'square', 'wide', 'panoramic'])
parser.add_argument('-si', '--similar_images', help='downloads images very similar to the image URL you provide', type=str, required=False)
parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website', type=str, required=False)
parser.add_argument('-p', '--print_urls', default=False, help="Print the URLs of the images", action="store_true")
parser.add_argument('-ps', '--print_size', default=False, help="Print the size of the images on disk", action="store_true")
parser.add_argument('-pp', '--print_paths', default=False, help="Prints the list of absolute paths of the images",action="store_true")
parser.add_argument('-m', '--metadata', default=False, help="Print the metadata of the image", action="store_true")
parser.add_argument('-e', '--extract_metadata', default=False, help="Dumps all the logs into a text file", action="store_true")
parser.add_argument('-st', '--socket_timeout', default=False, help="Connection timeout waiting for the image to download", type=float)
parser.add_argument('-th', '--thumbnail', default=False, help="Downloads image thumbnail along with the actual image", action="store_true")
        parser.add_argument('-la', '--language', default=False, help="Defines the language filter. The search results are automatically returned in that language", type=str, required=False,
choices=['Arabic','Chinese (Simplified)','Chinese (Traditional)','Czech','Danish','Dutch','English','Estonian','Finnish','French','German','Greek','Hebrew','Hungarian','Icelandic','Italian','Japanese','Korean','Latvian','Lithuanian','Norwegian','Portuguese','Polish','Romanian','Russian','Spanish','Swedish','Turkish'])
parser.add_argument('-pr', '--prefix', default=False, help="A word that you would want to prefix in front of each image name", type=str, required=False)
parser.add_argument('-px', '--proxy', help='specify a proxy address and port', type=str, required=False)
parser.add_argument('-cd', '--chromedriver', help='specify the path to chromedriver executable in your local machine', type=str, required=False)
parser.add_argument('-ri', '--related_images', default=False, help="Downloads images that are similar to the keyword provided", action="store_true")
parser.add_argument('-sa', '--safe_search', default=False, help="Turns on the safe search filter while searching for images", action="store_true")
parser.add_argument('-nn', '--no_numbering', default=False, help="Allows you to exclude the default numbering of images", action="store_true")
args = parser.parse_args()
arguments = vars(args)
records = []
records.append(arguments)
return records
class googleimagesdownload:
def __init__(self):
pass
# Downloading entire Web Document (Raw Page Content)
def download_page(self,url):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: # If the Current Version of Python is 2.x
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except URLError: # Handling SSL certificate failed
context = ssl._create_unverified_context()
response = urlopen(req, context=context)
page = response.read()
return page
except:
return "Page Not found"
# Download Page for more than 100 images
def download_extended_page(self,url,chromedriver):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf8')
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument("--headless")
try:
browser = webdriver.Chrome(chromedriver, chrome_options=options)
except Exception as e:
print("Looks like we cannot locate the path the 'chromedriver' (use the '--chromedriver' "
"argument to specify the path to the executable.) or google chrome browser is not "
"installed on your machine (exception: %s)" % e)
sys.exit()
browser.set_window_size(1024, 768)
# Open the link
browser.get(url)
time.sleep(1)
print("Getting you a lot of images. This may take a few moments...")
element = browser.find_element_by_tag_name("body")
# Scroll down
for i in range(30):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
try:
browser.find_element_by_id("smb").click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
except:
for i in range(10):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
print("Reached end of Page.")
time.sleep(0.5)
source = browser.page_source #page source
#close the browser
browser.close()
return source
#Correcting the escape characters for python2
def replace_with_byte(self,match):
return chr(int(match.group(0)[1:], 8))
def repair(self,brokenjson):
invalid_escape = re.compile(r'\\[0-7]{1,3}') # up to 3 digits for byte values up to FF
return invalid_escape.sub(self.replace_with_byte, brokenjson)
# Finding 'Next Image' from the given raw page
def get_next_tab(self,s):
start_line = s.find('class="ZO5Spb"')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_tabs"
return link,'',end_quote
else:
start_line = s.find('class="ZO5Spb"')
start_content = s.find('href="', start_line + 1)
end_content = s.find('">', start_content + 1)
url_item = "https://www.google.com" + str(s[start_content+6:end_content])
url_item = url_item.replace('&', '&')
start_line_2 = s.find('class="ZO5Spb"')
start_content_2 = s.find(':', start_line_2 + 1)
end_content_2 = s.find('"', start_content_2 + 1)
url_item_name = str(s[start_content_2 + 1:end_content_2])
#print(url_item,url_item_name)
return url_item,url_item_name,end_content
# Getting all links with the help of '_images_get_next_image'
def get_all_tabs(self,page):
tabs = {}
while True:
item,item_name,end_content = self.get_next_tab(page)
if item == "no_tabs":
break
else:
tabs[item_name] = item # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return tabs
#Format the object in readable format
def format_object(self,object):
formatted_object = {}
formatted_object['image_format'] = object['ity']
formatted_object['image_height'] = object['oh']
formatted_object['image_width'] = object['ow']
formatted_object['image_link'] = object['ou']
formatted_object['image_description'] = object['pt']
formatted_object['image_host'] = object['rh']
formatted_object['image_source'] = object['ru']
formatted_object['image_thumbnail_url'] = object['tu']
return formatted_object
#function to download single image
def single_image(self,image_url):
main_directory = "downloads"
url = image_url
try:
os.makedirs(main_directory)
except OSError as e:
if e.errno != 17:
raise
pass
req = Request(url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 10)
data = response.read()
response.close()
image_name = str(url[(url.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
file_name = main_directory + "/" + image_name
else:
file_name = main_directory + "/" + image_name + ".jpg"
image_name = image_name + ".jpg"
try:
output_file = open(file_name, 'wb')
output_file.write(data)
output_file.close()
except IOError as e:
raise e
except OSError as e:
raise e
print("completed ====> " + image_name)
return
def similar_images(self,similar_images):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib.request.Request(newurl, headers=headers)
resp2 = urllib.request.urlopen(req2)
# print(resp2.read())
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return urll2
except:
return "Cloud not connect to Google Images endpoint"
else: # If the Current Version of Python is 2.x
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req1 = urllib2.Request(searchUrl, headers=headers)
resp1 = urllib2.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
#print newurl
req2 = urllib2.Request(newurl, headers=headers)
resp2 = urllib2.urlopen(req2)
# print(resp2.read())
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return(urll2)
except:
return "Cloud not connect to Google Images endpoint"
#Building URL parameters
def build_url_parameters(self,arguments):
if arguments['language']:
lang = "&lr="
lang_param = {"Arabic":"lang_ar","Chinese (Simplified)":"lang_zh-CN","Chinese (Traditional)":"lang_zh-TW","Czech":"lang_cs","Danish":"lang_da","Dutch":"lang_nl","English":"lang_en","Estonian":"lang_et","Finnish":"lang_fi","French":"lang_fr","German":"lang_de","Greek":"lang_el","Hebrew":"lang_iw ","Hungarian":"lang_hu","Icelandic":"lang_is","Italian":"lang_it","Japanese":"lang_ja","Korean":"lang_ko","Latvian":"lang_lv","Lithuanian":"lang_lt","Norwegian":"lang_no","Portuguese":"lang_pt","Polish":"lang_pl","Romanian":"lang_ro","Russian":"lang_ru","Spanish":"lang_es","Swedish":"lang_sv","Turkish":"lang_tr"}
lang_url = lang+lang_param[arguments['language']]
else:
lang_url = ''
if arguments['time_range']:
json_acceptable_string = arguments['time_range'].replace("'", "\"")
d = json.loads(json_acceptable_string)
            time_range = ',cdr:1,cd_min:' + d['time_min'] + ',cd_max:' + d['time_max']
else:
time_range = ''
if arguments['exact_size']:
size_array = [x.strip() for x in arguments['exact_size'].split(',')]
exact_size = ",isz:ex,iszw:" + str(size_array[0]) + ",iszh:" + str(size_array[1])
else:
exact_size = ''
built_url = "&tbs="
counter = 0
params = {'color':[arguments['color'],{'red':'ic:specific,isc:red', 'orange':'ic:specific,isc:orange', 'yellow':'ic:specific,isc:yellow', 'green':'ic:specific,isc:green', 'teal':'ic:specific,isc:teel', 'blue':'ic:specific,isc:blue', 'purple':'ic:specific,isc:purple', 'pink':'ic:specific,isc:pink', 'white':'ic:specific,isc:white', 'gray':'ic:specific,isc:gray', 'black':'ic:specific,isc:black', 'brown':'ic:specific,isc:brown'}],
'color_type':[arguments['color_type'],{'full-color':'ic:color', 'black-and-white':'ic:gray','transparent':'ic:trans'}],
'usage_rights':[arguments['usage_rights'],{'labeled-for-reuse-with-modifications':'sur:fmc','labeled-for-reuse':'sur:fc','labeled-for-noncommercial-reuse-with-modification':'sur:fm','labeled-for-nocommercial-reuse':'sur:f'}],
                  'size':[arguments['size'],{'large':'isz:l','medium':'isz:m','icon':'isz:i','>400*300':'isz:lt,islt:qsvga','>640*480':'isz:lt,islt:vga','>800*600':'isz:lt,islt:svga','>1024*768':'isz:lt,islt:xga','>2MP':'isz:lt,islt:2mp','>4MP':'isz:lt,islt:4mp','>6MP':'isz:lt,islt:6mp','>8MP':'isz:lt,islt:8mp','>10MP':'isz:lt,islt:10mp','>12MP':'isz:lt,islt:12mp','>15MP':'isz:lt,islt:15mp','>20MP':'isz:lt,islt:20mp','>40MP':'isz:lt,islt:40mp','>70MP':'isz:lt,islt:70mp'}],
'type':[arguments['type'],{'face':'itp:face','photo':'itp:photo','clip-art':'itp:clip-art','line-drawing':'itp:lineart','animated':'itp:animated'}],
'time':[arguments['time'],{'past-24-hours':'qdr:d','past-7-days':'qdr:w'}],
'aspect_ratio':[arguments['aspect_ratio'],{'tall':'iar:t','square':'iar:s','wide':'iar:w','panoramic':'iar:xw'}],
'format':[arguments['format'],{'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp','svg':'ift:svg','webp':'webp','ico':'ift:ico'}]}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url = built_url + ext_param
counter += 1
else:
built_url = built_url + ',' + ext_param
counter += 1
built_url = lang_url+built_url+exact_size+time_range
return built_url
#building main search URL
def build_search_url(self,search_term,params,url,similar_images,specific_site,safe_search):
#check safe_search
safe_search_string = "&safe=active"
# check the args and choose the URL
if url:
url = url
elif similar_images:
print(similar_images)
keywordem = self.similar_images(similar_images)
url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
elif specific_site:
url = 'https://www.google.com/search?q=' + quote(
search_term) + '&as_sitesearch=' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
else:
url = 'https://www.google.com/search?q=' + quote(
search_term) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
#safe search check
if safe_search:
url = url + safe_search_string
# print(url)
return url
#measures the file size
def file_size(self,file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
size = file_info.st_size
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return size
#keywords from file
def keywords_from_file(self,file_name):
search_keyword = []
with codecs.open(file_name, 'r', encoding='utf-8-sig') as f:
if '.csv' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
elif '.txt' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
else:
print("Invalid file type: Valid file types are either .txt or .csv \n"
"exiting...")
sys.exit()
return search_keyword
# make directories
def create_directories(self,main_directory, dir_name,thumbnail):
dir_name_thumbnail = dir_name + " - thumbnail"
# make a search keyword directory
try:
if not os.path.exists(main_directory):
os.makedirs(main_directory)
time.sleep(0.2)
path = str(dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
else:
path = str(dir_name)
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail:
sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
except OSError as e:
if e.errno != 17:
raise
# time.sleep might help here
pass
return
# Download Images
def download_image_thumbnail(self,image_url,main_directory,dir_name,return_image_name,print_urls,socket_timeout,print_size):
if print_urls:
print("Image URL: " + image_url)
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
path = main_directory + "/" + dir_name + " - thumbnail" + "/" + return_image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
except IOError as e:
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
download_status = 'success'
download_message = "Completed Image Thumbnail ====> " + return_image_name
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return download_status, download_message
# Download Images
def download_image(self,image_url,image_format,main_directory,dir_name,count,print_urls,socket_timeout,prefix,print_size,no_numbering):
if print_urls:
print("Image URL: " + image_url)
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
# keep everything after the last '/'
image_name = str(image_url[(image_url.rfind('/')) + 1:])
image_name = image_name.lower()
# if no extension then add it
# remove everything after the image name
if image_format == "":
image_name = image_name + "." + "jpg"
elif image_format == "jpeg":
image_name = image_name[:image_name.find(image_format) + 4]
else:
image_name = image_name[:image_name.find(image_format) + 3]
# prefix name in image
if prefix:
prefix = prefix + " "
else:
prefix = ''
if no_numbering:
path = main_directory + "/" + dir_name + "/" + prefix + image_name
else:
path = main_directory + "/" + dir_name + "/" + prefix + str(count) + ". " + image_name
try:
output_file = open(path, 'wb')
output_file.write(data)
output_file.close()
absolute_path = os.path.abspath(path)
except OSError as e:
download_status = 'fail'
download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
#return image name back to calling method to use it for thumbnail downloads
download_status = 'success'
download_message = "Completed Image ====> " + prefix + str(count) + ". " + image_name
return_image_name = prefix + str(count) + ". " + image_name
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IncompleteRead as e:
download_status = 'fail'
download_message = "IncompleteReadError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
return download_status,download_message,return_image_name,absolute_path
# Finding 'Next Image' from the given raw page
def _get_next_item(self,s):
start_line = s.find('rg_meta notranslate')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('class="rg_meta notranslate">')
start_object = s.find('{', start_line + 1)
end_object = s.find('</div>', start_object + 1)
object_raw = str(s[start_object:end_object])
#remove escape characters based on python version
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: #python3
try:
object_decode = bytes(object_raw, "utf-8").decode("unicode_escape")
final_object = json.loads(object_decode)
except:
final_object = ""
else: #python2
try:
final_object = (json.loads(self.repair(object_raw)))
except:
final_object = ""
return final_object, end_object
# Getting all links with the help of '_images_get_next_image'
def _get_all_items(self,page,main_directory,dir_name,limit,arguments):
items = []
abs_path = []
errorCount = 0
i = 0
count = 1
while count < limit+1:
object, end_content = self._get_next_item(page)
if object == "no_links":
break
elif object == "":
page = page[end_content:]
else:
#format the item for readability
object = self.format_object(object)
if arguments['metadata']:
print("\nImage Metadata: " + str(object))
items.append(object) # Append all the links in the list named 'Links'
#download the images
download_status,download_message,return_image_name,absolute_path = self.download_image(object['image_link'],object['image_format'],main_directory,dir_name,count,arguments['print_urls'],arguments['socket_timeout'],arguments['prefix'],arguments['print_size'],arguments['no_numbering'])
print(download_message)
if download_status == "success":
# download image_thumbnails
if arguments['thumbnail']:
download_status, download_message_thumbnail = self.download_image_thumbnail(object['image_thumbnail_url'],main_directory,dir_name,return_image_name,arguments['print_urls'],arguments['socket_timeout'],arguments['print_size'])
print(download_message_thumbnail)
count += 1
abs_path.append(absolute_path)
else:
errorCount += 1
#delay param
if arguments['delay']:
time.sleep(int(arguments['delay']))
page = page[end_content:]
i += 1
if count < limit:
print("\n\nUnfortunately all " + str(
limit) + " could not be downloaded because some images were not downloadable. " + str(
count-1) + " is all we got for this search filter!")
return items,errorCount,abs_path
# Bulk Download
def download(self,arguments):
#for input coming from other python files
if __name__ != "__main__":
for arg in args_list:
if arg not in arguments:
arguments[arg] = None
######Initialization and Validation of user arguments
if arguments['keywords']:
search_keyword = [str(item) for item in arguments['keywords'].split(',')]
if arguments['keywords_from_file']:
search_keyword = self.keywords_from_file(arguments['keywords_from_file'])
# both time and time range should not be allowed in the same query
if arguments['time'] and arguments['time_range']:
raise ValueError('Either time or time range should be used in a query. Both cannot be used at the same time.')
# both time and time range should not be allowed in the same query
if arguments['size'] and arguments['exact_size']:
raise ValueError('Either "size" or "exact_size" should be used in a query. Both cannot be used at the same time.')
# both image directory and no image directory should not be allowed in the same query
if arguments['image_directory'] and arguments['no_directory']:
raise ValueError('You can either specify image directory or specify no image directory, not both!')
# Additional words added to keywords
if arguments['suffix_keywords']:
suffix_keywords = [" " + str(sk) for sk in arguments['suffix_keywords'].split(',')]
else:
suffix_keywords = ['']
# Additional words added to keywords
if arguments['prefix_keywords']:
prefix_keywords = [str(sk) + " " for sk in arguments['prefix_keywords'].split(',')]
else:
prefix_keywords = ['']
# Setting limit on number of images to be downloaded
if arguments['limit']:
limit = int(arguments['limit'])
else:
limit = 100
if arguments['url']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
if arguments['similar_images']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
# If single_image or url argument not present then keywords is mandatory argument
if arguments['single_image'] is None and arguments['url'] is None and arguments['similar_images'] is None and \
arguments['keywords'] is None and arguments['keywords_from_file'] is None:
print('-------------------------------\n'
'Uh oh! Keywords is a required argument \n\n'
'Please refer to the documentation on guide to writing queries \n'
'https://github.com/hardikvasa/google-images-download#examples'
'\n\nexiting!\n'
'-------------------------------')
sys.exit()
# If this argument is present, set the custom output directory
if arguments['output_directory']:
main_directory = arguments['output_directory']
else:
main_directory = "downloads"
# Proxy settings
if arguments['proxy']:
os.environ["http_proxy"] = arguments['proxy']
os.environ["https_proxy"] = arguments['proxy']
######Initialization Complete
paths = {}
for pky in prefix_keywords:
for sky in suffix_keywords: # 1.for every suffix keywords
i = 0
while i < len(search_keyword): # 2.for every main keyword
iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + str(pky) + str(search_keyword[i] + str(sky))
print(iteration)
print("Evaluating...")
search_term = pky + search_keyword[i] + sky
if arguments['image_directory']:
dir_name = arguments['image_directory']
elif arguments['no_directory']:
dir_name = ''
else:
dir_name = search_term + ('-' + arguments['color'] if arguments['color'] else '') #sub-directory
self.create_directories(main_directory,dir_name,arguments['thumbnail']) #create directories in OS
params = self.build_url_parameters(arguments) #building URL with params
url = self.build_search_url(search_term,params,arguments['url'],arguments['similar_images'],arguments['specific_site'],arguments['safe_search']) #building main search url
if limit < 101:
raw_html = self.download_page(url) # download page
else:
raw_html = self.download_extended_page(url,arguments['chromedriver'])
print("Starting Download...")
items,errorCount,abs_path = self._get_all_items(raw_html,main_directory,dir_name,limit,arguments) #get all image items and download images
paths[pky + search_keyword[i] + sky] = abs_path
#dumps into a text file
if arguments['extract_metadata']:
try:
if not os.path.exists("logs"):
os.makedirs("logs")
except OSError as e:
print(e)
text_file = open("logs/"+search_keyword[i]+".txt", "w")
text_file.write(json.dumps(items, indent=4, sort_keys=True))
text_file.close()
#Related images
if arguments['related_images']:
print("\nGetting list of related keywords...this may take a few moments")
tabs = self.get_all_tabs(raw_html)
for key, value in tabs.items():
final_search_term = (search_term + " - " + key)
print("\nNow Downloading - " + final_search_term)
if limit < 101:
new_raw_html = self.download_page(value) # download page
else:
new_raw_html = self.download_extended_page(value,arguments['chromedriver'])
self.create_directories(main_directory, final_search_term,arguments['thumbnail'])
self._get_all_items(new_raw_html, main_directory, search_term + " - " + key, limit,arguments)
i += 1
print("\nErrors: " + str(errorCount) + "\n")
if arguments['print_paths']:
print(paths)
return paths
#------------- Main Program -------------#
def main():
records = user_input()
for arguments in records:
if arguments['single_image']: # Download Single Image using a URL
response = googleimagesdownload()
response.single_image(arguments['single_image'])
else: # or download multiple images based on keywords/keyphrase search
t0 = time.time() # start the timer
response = googleimagesdownload()
paths = response.download(arguments) #wrapping response in a variable just for consistency
print("\nEverything downloaded!")
t1 = time.time() # stop the timer
total_time = t1 - t0  # total time taken to crawl, find and download all the images
print("Total time taken: " + str(total_time) + " Seconds")
if __name__ == "__main__":
main()
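# --- Hedged usage sketch (not part of the original module) --------------------
# In the published package the arguments dict comes from the argparse-based
# user_input(), so every recognised key ('keywords', 'limit', 'prefix_keywords',
# 'proxy', ...) is present and defaults to None/False when unset. A programmatic
# call then reduces to the same two lines main() uses:
#     response = googleimagesdownload()
#     paths = response.download(arguments)   # {search term: absolute image dir}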
# In[ ]:
| 49.516129
| 622
| 0.572841
|
3d478f4b46989fe06239d903f6be61f4d471af15
| 1,431
|
py
|
Python
|
Framework/SpriteText.py
|
EpicTofuu/Froggers
|
0395ef801fe11a7881fd32fd570bf3135a4a761f
|
[
"MIT"
] | 1
|
2020-11-17T04:32:55.000Z
|
2020-11-17T04:32:55.000Z
|
Framework/SpriteText.py
|
EpicTofuu/Froggers
|
0395ef801fe11a7881fd32fd570bf3135a4a761f
|
[
"MIT"
] | null | null | null |
Framework/SpriteText.py
|
EpicTofuu/Froggers
|
0395ef801fe11a7881fd32fd570bf3135a4a761f
|
[
"MIT"
] | null | null | null |
import pygame
from Sprite import *
# all text is drawn as a sprite
class SpriteText (Sprite):
def __init__(self, text, size = 48, fontPath = None, font = None, colour = [255,255,255], Background = None):
self.Background = Background # text background
self._colour = colour # colour of text
self._text = text
# if there isn't any font specified, use the system default
if fontPath is None:
if font is None:
self.Font = pygame.font.SysFont (None, size)
else:
self.Font = font
else:
self.Font = pygame.font.Font (fontPath, size)
# draw the text through the constructor
super().__init__(img = self.Font.render (self._text, True, self._colour, self.Background))
def update (self):
super().update()
# change the text
def SetText (self, text):
self._text = text
self.UpdateText()
# change the colour
def SetColour (self, col):
self._colour = col
self.UpdateText()
# call this to rerender the text
def UpdateText (self):
self.image = self.Font.render (self._text, True, self._colour, self.Background)
self.rect.width = self.image.get_rect().width
self.rect.height = self.image.get_rect().height
# removes all instances from all groups
def kill (self):
super().kill()
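# --- Hedged usage sketch (illustrative; assumes the Sprite base class imported
# from this repo's Sprite.py exposes the usual pygame .image/.rect attributes):
#     import pygame
#     pygame.init()
#     screen = pygame.display.set_mode((320, 240))
#     score_label = SpriteText("Score: 0", size=32, colour=[255, 255, 0])
#     score_label.SetText("Score: 10")           # re-renders the text surface
#     screen.blit(score_label.image, (10, 10))   # draw it like any other sprite
#     pygame.display.flip()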
| 31.8
| 113
| 0.599581
|
547f00581059eb0796a07be4063e8622c2a75c6f
| 335
|
py
|
Python
|
plugins/labels/kivy.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
plugins/labels/kivy.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
plugins/labels/kivy.py
|
lionzeye/reddelectrum
|
e39497aee08b08bed89efa10072d17fb1e37920c
|
[
"MIT"
] | null | null | null |
from labels import LabelsPlugin
from reddelectrum.plugins import hook
class Plugin(LabelsPlugin):
@hook
def load_wallet(self, wallet, window):
self.window = window
self.start_wallet(wallet)
def on_pulled(self, wallet):
self.print_error('on pulled')
self.window._trigger_update_history()
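# Hedged note: Electrum's plugin loader instantiates this class automatically;
# the @hook-decorated load_wallet() then runs when a wallet window opens, and
# on_pulled() refreshes the Kivy history view once labels have been synced.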
| 22.333333
| 45
| 0.698507
|
f009e8b7b040f8c20036b9ea9497e2b4fcfeb8db
| 7,664
|
py
|
Python
|
apps/beeswax/src/beeswax/management/commands/beeswax_install_examples.py
|
yongshengwang/builthue
|
c63af09d8284cc5a6f613bffd621ccdcac265ffc
|
[
"Apache-2.0"
] | 1
|
2020-04-23T21:08:43.000Z
|
2020-04-23T21:08:43.000Z
|
apps/beeswax/src/beeswax/management/commands/beeswax_install_examples.py
|
yongshengwang/builthue
|
c63af09d8284cc5a6f613bffd621ccdcac265ffc
|
[
"Apache-2.0"
] | null | null | null |
apps/beeswax/src/beeswax/management/commands/beeswax_install_examples.py
|
yongshengwang/builthue
|
c63af09d8284cc5a6f613bffd621ccdcac265ffc
|
[
"Apache-2.0"
] | 1
|
2019-01-04T08:08:11.000Z
|
2019-01-04T08:08:11.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import simplejson
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from desktop.models import Document
from hadoop import cluster
import beeswax.conf
from beeswax.models import SavedQuery, IMPALA
from beeswax.design import hql_query
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config, QueryServerException
from useradmin.models import install_sample_user
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
class InstallException(Exception):
pass
class Command(NoArgsCommand):
"""
Install examples but do not overwrite them.
"""
def handle_noargs(self, **options):
exception = None
# Documents will belong to this user but we run the install as the current user
try:
sample_user = install_sample_user()
self._install_tables(options['user'], options['app_name'])
except Exception, ex:
exception = ex
try:
self._install_queries(sample_user, options['app_name'])
except Exception, ex:
exception = ex
Document.objects.sync()
if exception is not None:
pretty_msg = None
if "AlreadyExistsException" in exception.message:
pretty_msg = _("SQL table examples already installed.")
if "Permission denied" in exception.message:
pretty_msg = _("Permission denied. Please check with your system administrator.")
if pretty_msg is not None:
raise PopupException(pretty_msg)
else:
raise exception
def _install_tables(self, django_user, app_name):
data_dir = beeswax.conf.LOCAL_EXAMPLES_DATA_DIR.get()
table_file = file(os.path.join(data_dir, 'tables.json'))
table_list = simplejson.load(table_file)
table_file.close()
for table_dict in table_list:
table = SampleTable(table_dict, app_name)
try:
table.install(django_user)
except Exception, ex:
raise InstallException(_('Could not install table: %s') % ex)
def _install_queries(self, django_user, app_name):
design_file = file(os.path.join(beeswax.conf.LOCAL_EXAMPLES_DATA_DIR.get(), 'designs.json'))
design_list = simplejson.load(design_file)
design_file.close()
for design_dict in design_list:
if app_name == 'impala':
design_dict['type'] = IMPALA
design = SampleDesign(design_dict)
try:
design.install(django_user)
except Exception, ex:
raise InstallException(_('Could not install query: %s') % ex)
class SampleTable(object):
"""
Represents a table loaded from the tables.json file
"""
def __init__(self, data_dict, app_name):
self.name = data_dict['table_name']
self.filename = data_dict['data_file']
self.hql = data_dict['create_hql']
self.query_server = get_query_server_config(app_name)
self.app_name = app_name
# Sanity check
self._data_dir = beeswax.conf.LOCAL_EXAMPLES_DATA_DIR.get()
self._contents_file = os.path.join(self._data_dir, self.filename)
if not os.path.isfile(self._contents_file):
msg = _('Cannot find table data in "%(file)s".') % {'file': self._contents_file}
LOG.error(msg)
raise ValueError(msg)
def install(self, django_user):
self.create(django_user)
self.load(django_user)
def create(self, django_user):
"""
Create table in the Hive Metastore.
"""
LOG.info('Creating table "%s"' % (self.name,))
db = dbms.get(django_user, self.query_server)
try:
# Already exists?
db.get_table('default', self.name)
msg = _('Table "%(table)s" already exists.') % {'table': self.name}
LOG.error(msg)
except Exception:
query = hql_query(self.hql)
try:
results = db.execute_and_wait(query)
if not results:
msg = _('Error creating table %(table)s: Operation timeout.') % {'table': self.name}
LOG.error(msg)
raise InstallException(msg)
except Exception, ex:
msg = _('Error creating table %(table)s: %(error)s.') % {'table': self.name, 'error': ex}
LOG.error(msg)
raise InstallException(msg)
def load(self, django_user):
"""
Upload the data file to the user's HDFS home directory, then LOAD (i.e. move) it into the Hive table (inside the Hive warehouse on HDFS).
"""
LOAD_HQL = \
"""
LOAD DATA INPATH
'%(filename)s' OVERWRITE INTO TABLE %(tablename)s
"""
fs = cluster.get_hdfs()
if self.app_name == 'impala':
# Because Impala does not have impersonation on by default, we use a public destination for the upload.
from impala.conf import IMPERSONATION_ENABLED
if not IMPERSONATION_ENABLED.get():
tmp_public = '/tmp/public_hue_examples'
fs.do_as_user(django_user, fs.mkdir, tmp_public, '0777')
hdfs_root_destination = tmp_public
else:
hdfs_root_destination = fs.do_as_user(django_user, fs.get_home_dir)
hdfs_destination = os.path.join(hdfs_root_destination, self.name)
LOG.info('Uploading local data %s to HDFS table "%s"' % (self.name, hdfs_destination))
fs.do_as_user(django_user, fs.copyFromLocal, self._contents_file, hdfs_destination)
LOG.info('Loading data into table "%s"' % (self.name,))
hql = LOAD_HQL % {'tablename': self.name, 'filename': hdfs_destination}
query = hql_query(hql)
try:
results = dbms.get(django_user, self.query_server).execute_and_wait(query)
if not results:
msg = _('Error loading table %(table)s: Operation timeout.') % {'table': self.name}
LOG.error(msg)
raise InstallException(msg)
except QueryServerException, ex:
msg = _('Error loading table %(table)s: %(error)s.') % {'table': self.name, 'error': ex}
LOG.error(msg)
raise InstallException(msg)
class SampleDesign(object):
"""Represents a query loaded from the designs.json file"""
def __init__(self, data_dict):
self.name = data_dict['name']
self.desc = data_dict['desc']
self.type = int(data_dict['type'])
self.data = data_dict['data']
def install(self, django_user):
"""
Install queries. Raise InstallException on failure.
"""
LOG.info('Installing sample query: %s' % (self.name,))
try:
# Don't overwrite
model = SavedQuery.objects.get(owner=django_user, name=self.name, type=self.type)
except SavedQuery.DoesNotExist:
model = SavedQuery(owner=django_user, name=self.name)
model.type = self.type
# The data field needs to be a string. The sample file writes it
# as json (without encoding into a string) for readability.
model.data = simplejson.dumps(self.data)
model.desc = self.desc
model.save()
LOG.info('Successfully installed sample design: %s' % (self.name,))
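# Hedged usage note (invocation path is illustrative): as a Django management
# command this is normally run through Hue's manage entry point, e.g.
#     build/env/bin/hue beeswax_install_examples
# which creates the sample tables, loads their data files into HDFS/Hive and
# installs the sample saved queries for the sample user.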
| 34.367713
| 113
| 0.689849
|
dbe483066088aff4c7af263e1da1e6dc7ac70ab1
| 4,169
|
py
|
Python
|
Hangman/Hangman_Game/py/entry2.py
|
mthavaf/hangman
|
eb86a23fc00fe73139a6fe74c978b3eb14cd909d
|
[
"MIT"
] | null | null | null |
Hangman/Hangman_Game/py/entry2.py
|
mthavaf/hangman
|
eb86a23fc00fe73139a6fe74c978b3eb14cd909d
|
[
"MIT"
] | null | null | null |
Hangman/Hangman_Game/py/entry2.py
|
mthavaf/hangman
|
eb86a23fc00fe73139a6fe74c978b3eb14cd909d
|
[
"MIT"
] | null | null | null |
from tkinter import *
#import pygame
import pymysql
def getWord(num):
global guessedWord,wrongAttempts,gameOver,level,data
level = num
gameOver = 0
guessedWord = []
wrongAttempts = 8
conn = pymysql.connect(host="localhost",user="root",passwd="cout<<password;",db="hangman")
cur = conn.cursor()
if level == 1:
cur.execute("SELECT word FROM easy ORDER BY RAND() LIMIT 1")
data = cur.fetchone()
data = data[0]
elif level == 2:
cur.execute("SELECT word FROM moderate ORDER BY RAND() LIMIT 1")
data = cur.fetchone()
data = data[0]
elif level == 3:
cur.execute("SELECT word FROM difficult ORDER BY RAND() LIMIT 1")
data = cur.fetchone()
data = data[0]
else:
pass
for letter in data:
guessedWord.append("-")
del array[:]  # reset drawing-state flags so a new game starts with a blank gallows
for index in range(7):
array.append(0)
main('-',array,data,level)
def callback(event):
try:
ch = chr(ord(event.char))
if gameOver == 0 and ch.isalpha():
main(chr(ord(event.char)),array,data,level)
except:
#pygame.init()
#pygame.mixer.music.load("beep.wav")
#pygame.mixer.music.play()
pass
def main(char,array,data,level):
global wrongAttempts,gameOver
w.delete(ALL)
while (wrongAttempts != 0 and "-" in guessedWord):
for index in range(len(data)):
if char == data[index]:
guessedWord[index] = char
if char not in data:
wrongAttempts = wrongAttempts - 1
guessedWordStr = ''.join(guessedWord)  # joined display form (currently unused)
drawCount = 7 - wrongAttempts
for index in range(drawCount):
array[index] = 1
w.create_text(250,50,font=("times roman", 30),text="HANGMAN",fill="#B0171F")
w.create_text(750,150,text = "wrong attempts remaining : ")
text = Label(master,text=wrongAttempts)
text.place(x=835,y=142)
w.create_text(750,170,text = "guess a character : ")
if(array[0]):
w.create_line(150,450,150,150, fill="#000000", width=4)
w.create_line(300,150,150,150, fill="#000000", width=4)
w.create_line(100,450,200,450, fill="#000000", width=4)
w.create_line(300,150,300,200, fill="#000000", width=4)
w.create_line(150,400,175,450, fill="#000000", width=4)
w.create_line(150,400,125,450, fill="#000000", width=4)
if(array[1]):
img = PhotoImage(file="sadface.png")
w.create_image(268,192, anchor=NW, image=img)
if(array[2]):
w.create_line(300,250,300,350, fill="#000000", width=4)
if(array[3]):
w.create_line(300,300,250,250, fill="#000000", width=4)
if(array[4]):
w.create_line(300,300,350,250, fill="#000000", width=4)
if(array[5]):
w.create_line(300,350,250,400, fill="#000000", width=4)
if(array[6]):
w.create_line(300,350,350,400, fill="#000000", width=4)
w.create_text(750,200,text=guessedWord)
if(wrongAttempts == 0 ):
w.create_text(750,350,text = "the word was : "+str(data))
w.create_text(750,370,text = "you lost")
gameOver = 1
if "-" not in guessedWord :
w.create_text(750,350,text = "the word was : "+str(data))
w.create_text(750,370,text = "Congratulation!!!! YOU WON!!!!")
gameOver = 1
Button(master, text='newgame', command = lambda:getWord(level)).place(x=830,y=300)
Button(master, text='exit',command = lambda:exit(0)).place(x=900,y=400)
mainloop()
if __name__ == "__main__":
array = []
master = Tk()
w = Canvas(master, width=1000, height=500)
w.pack()
w.create_text(250,50,font=("times roman", 30),text="HANGMAN",fill="#B0171F")
Button(master, text='simple',command = lambda:getWord(1)).place(x=700,y=50)
Button(master, text='moderate',command = lambda:getWord(2)).place(x=800,y=50)
Button(master, text='difficult',command = lambda:getWord(3)).place(x=920,y=50)
w.focus_set()
w.bind("<Key>",callback)
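# Hedged setup note: getWord() assumes a local MySQL database named `hangman`
# with tables easy / moderate / difficult, each storing candidate words in a
# `word` column, e.g. (illustrative schema):
#     CREATE TABLE easy (word VARCHAR(32));
# The hard-coded credentials in the connect() call must match that database.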
| 37.9
| 94
| 0.576157
|
08fa384a4080078af0c52e7feed410e81071c905
| 7,620
|
py
|
Python
|
kubernetes/client/models/v1_subject_access_review.py
|
fsduser/python
|
2b20069ebc05283352fbdc95bbdca2b6133a4175
|
[
"Apache-2.0"
] | 1
|
2021-10-15T13:05:45.000Z
|
2021-10-15T13:05:45.000Z
|
kubernetes/client/models/v1_subject_access_review.py
|
belajarqywok/python
|
b15bea16a87ad03136a4627941ac437582ea4657
|
[
"Apache-2.0"
] | 10
|
2020-10-01T03:15:01.000Z
|
2022-03-01T03:06:31.000Z
|
kubernetes/client/models/v1_subject_access_review.py
|
belajarqywok/python
|
b15bea16a87ad03136a4627941ac437582ea4657
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.19
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1SubjectAccessReview(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1SubjectAccessReviewSpec',
'status': 'V1SubjectAccessReviewStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1SubjectAccessReview - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1SubjectAccessReview. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1SubjectAccessReview. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1SubjectAccessReview.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1SubjectAccessReview. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1SubjectAccessReview. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1SubjectAccessReview. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1SubjectAccessReview.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1SubjectAccessReview. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1SubjectAccessReview. # noqa: E501
:return: The metadata of this V1SubjectAccessReview. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1SubjectAccessReview.
:param metadata: The metadata of this V1SubjectAccessReview. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1SubjectAccessReview. # noqa: E501
:return: The spec of this V1SubjectAccessReview. # noqa: E501
:rtype: V1SubjectAccessReviewSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1SubjectAccessReview.
:param spec: The spec of this V1SubjectAccessReview. # noqa: E501
:type: V1SubjectAccessReviewSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1SubjectAccessReview. # noqa: E501
:return: The status of this V1SubjectAccessReview. # noqa: E501
:rtype: V1SubjectAccessReviewStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1SubjectAccessReview.
:param status: The status of this V1SubjectAccessReview. # noqa: E501
:type: V1SubjectAccessReviewStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SubjectAccessReview):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SubjectAccessReview):
return True
return self.to_dict() != other.to_dict()
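# --- Hedged usage sketch (assumes the sibling generated models, e.g.
# V1SubjectAccessReviewSpec and V1ResourceAttributes, from the same client):
#     from kubernetes import client
#     review = client.V1SubjectAccessReview(
#         spec=client.V1SubjectAccessReviewSpec(
#             user="jane",
#             resource_attributes=client.V1ResourceAttributes(verb="get", resource="pods"),
#         )
#     )
#     review.to_dict()   # plain-dict form, ready for JSON serialisation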
| 33.130435
| 312
| 0.629134
|
22d85e1b2c909be8b15f080d08373d47a2e99883
| 1,784
|
py
|
Python
|
lyman/tools/fileutils.py
|
sgagnon/lyman
|
f2ca8c7e40a4f090e5ed8aaf47f87b71a0bc2cff
|
[
"BSD-3-Clause"
] | null | null | null |
lyman/tools/fileutils.py
|
sgagnon/lyman
|
f2ca8c7e40a4f090e5ed8aaf47f87b71a0bc2cff
|
[
"BSD-3-Clause"
] | null | null | null |
lyman/tools/fileutils.py
|
sgagnon/lyman
|
f2ca8c7e40a4f090e5ed8aaf47f87b71a0bc2cff
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import json
import os.path as op
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec,
TraitedSpec, InputMultiPath, File, traits)
class SaveParametersInput(BaseInterfaceInputSpec):
exp_info = traits.Dict()
in_file = traits.Either(InputMultiPath(File(exists=True)),
File(exists=True))
class SaveParametersOutput(TraitedSpec):
json_file = File(exists=True)
class SaveParameters(BaseInterface):
input_spec = SaveParametersInput
output_spec = SaveParametersOutput
_always_run = True
def _run_interface(self, runtime):
with open("experiment_info.json", "w") as fp:
json.dump(self.inputs.exp_info, fp, sort_keys=True, indent=2)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["json_file"] = op.abspath("experiment_info.json")
return outputs
def dump_exp_info(exp_info, timeseries):
"""Dump the exp_info dict into a json file."""
json_file = op.abspath("experiment_info.json")
with open(json_file, "w") as fp:
json.dump(exp_info, fp, sort_keys=True, indent=2)
return json_file
def add_suffix(fname, suffix):
"""Insert a suffix into a filename before the extension."""
out_fname = fname_presuffix(fname, suffix="_" + suffix,
use_ext=True)
return out_fname
def nii_to_png(fname, suffix=""):
"""Return a path to write a local png based on an image."""
out_fname = fname_presuffix(fname, suffix=suffix + ".png",
newpath=os.getcwd(),
use_ext=False)
return out_fname
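# --- Hedged demonstration of the path helpers above (paths are illustrative) --
if __name__ == "__main__":
    print(add_suffix("/data/sub01/func.nii.gz", "smoothed"))
    # -> /data/sub01/func_smoothed.nii.gz
    print(nii_to_png("/data/sub01/func.nii.gz", suffix="_mean"))
    # -> <current working directory>/func_mean.png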
| 28.774194
| 78
| 0.647422
|
7f55d5076d31f420b5b039d80d39cbc69365c80f
| 1,851
|
py
|
Python
|
day5/part1.py
|
dcramer/aoc19
|
01581292773de71f1e4f1abfa02c2ada37e8857d
|
[
"BSD-3-Clause"
] | null | null | null |
day5/part1.py
|
dcramer/aoc19
|
01581292773de71f1e4f1abfa02c2ada37e8857d
|
[
"BSD-3-Clause"
] | null | null | null |
day5/part1.py
|
dcramer/aoc19
|
01581292773de71f1e4f1abfa02c2ada37e8857d
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import math
import sys
valid_ops = sorted([1, 2, 3, 4, 99], reverse=True)
def splitop(op):
op = str(op)
return int(''.join(op[-2:])), [int(o) for o in op[-3::-1]]
def execute(data, input):
def instr(offset, params):
try:
inst = params[offset - 1]
except IndexError:
inst = 0
return inst
def param(data, pos, offset, params):
value = data[pos + offset]
inst = instr(offset, params)
if inst == 0:
try:
return data[value]
except IndexError as exc:
raise Exception(f'Cannot find reference {value}') from exc
if inst == 1:
return value
raise NotImplementedError(f'Invalid instruction: {inst}')
output = None
pos = 0
data_len = len(data)
while pos < data_len:
op, params = splitop(data[pos])
if op == 99:
pos += 1
break
elif op == 1:
data[data[pos + 3] if instr(3, params) == 0 else pos + 3] = param(data, pos, 1, params) + param(data, pos, 2, params)
pos += 4
elif op == 2:
data[data[pos + 3] if instr(3, params) == 0 else pos + 3] = param(data, pos, 1, params) * param(data, pos, 2, params)
pos += 4
elif op == 3:
data[data[pos + 1] if instr(1, params) == 0 else pos + 1] = input
pos += 2
elif op == 4:
output = param(data, pos, 1, params)
print('output', output)
pos += 2
else:
raise Exception(f'invalid opcode: {op}')
return data, output
def main(infile):
with open(infile, 'r') as fp:
data = [int(x) for x in fp.read().split(',')]
input = 1
_, output = execute(data, input)
print(output)
main(sys.argv[1])
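# --- Hedged worked example of the opcode split used above ---------------------
# splitop() separates the two-digit opcode from the parameter modes, reading the
# modes right to left so that index 0 corresponds to the first parameter:
#     splitop(1002) -> (2, [0, 1])   # multiply: param 1 positional, param 2 immediate
#     splitop(3)    -> (3, [])       # input: no explicit parameter modes encoded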
| 26.442857
| 129
| 0.508374
|
6ae5616d2d089ac5d4fdfddfce21b9a6a81109c1
| 4,844
|
py
|
Python
|
src/domainClient/models/domain_listing_admin_service_v1_model_inspection_details.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 5
|
2021-04-12T04:10:42.000Z
|
2021-04-28T05:54:22.000Z
|
src/domainClient/models/domain_listing_admin_service_v1_model_inspection_details.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 35
|
2020-05-26T14:21:37.000Z
|
2022-03-29T16:14:42.000Z
|
src/domainClient/models/domain_listing_admin_service_v1_model_inspection_details.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 2
|
2020-05-26T14:02:12.000Z
|
2022-01-10T08:19:49.000Z
|
# coding: utf-8
"""
Domain Group API V1
Provides public access to Domain's microservices # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DomainListingAdminServiceV1ModelInspectionDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'inspection_description': 'str',
'inspections': 'list[DomainListingAdminServiceV1ModelInspection]'
}
attribute_map = {
'inspection_description': 'inspectionDescription',
'inspections': 'inspections'
}
def __init__(self, inspection_description=None, inspections=None): # noqa: E501
"""DomainListingAdminServiceV1ModelInspectionDetails - a model defined in Swagger""" # noqa: E501
self._inspection_description = None
self._inspections = None
self.discriminator = None
if inspection_description is not None:
self.inspection_description = inspection_description
if inspections is not None:
self.inspections = inspections
@property
def inspection_description(self):
"""Gets the inspection_description of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
Free text field for inspections # noqa: E501
:return: The inspection_description of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
:rtype: str
"""
return self._inspection_description
@inspection_description.setter
def inspection_description(self, inspection_description):
"""Sets the inspection_description of this DomainListingAdminServiceV1ModelInspectionDetails.
Free text field for inspections # noqa: E501
:param inspection_description: The inspection_description of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
:type: str
"""
self._inspection_description = inspection_description
@property
def inspections(self):
"""Gets the inspections of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
Inspection times of the listing # noqa: E501
:return: The inspections of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
:rtype: list[DomainListingAdminServiceV1ModelInspection]
"""
return self._inspections
@inspections.setter
def inspections(self, inspections):
"""Sets the inspections of this DomainListingAdminServiceV1ModelInspectionDetails.
Inspection times of the listing # noqa: E501
:param inspections: The inspections of this DomainListingAdminServiceV1ModelInspectionDetails. # noqa: E501
:type: list[DomainListingAdminServiceV1ModelInspection]
"""
self._inspections = inspections
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DomainListingAdminServiceV1ModelInspectionDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DomainListingAdminServiceV1ModelInspectionDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
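# --- Hedged usage sketch (illustrative values only) ---------------------------
#     details = DomainListingAdminServiceV1ModelInspectionDetails(
#         inspection_description="Open for inspection Saturday 10:00-10:30am"
#     )
#     details.to_dict()
#     # -> {'inspection_description': 'Open for inspection Saturday 10:00-10:30am',
#     #     'inspections': None}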
| 33.178082
| 138
| 0.643683
|
42d92b7d9de1aeb5b9b59852244f18a5de4bc9b9
| 984
|
py
|
Python
|
Task2/IGNORE_miscellaneousScripts/setup_ensemble.py
|
Jacob-L-Vincent/project-athena
|
d1d300e375941399f116cbaa4678a9ed7c6652db
|
[
"MIT"
] | 1
|
2020-11-11T19:22:25.000Z
|
2020-11-11T19:22:25.000Z
|
Task2/IGNORE_miscellaneousScripts/setup_ensemble.py
|
Jacob-L-Vincent/project-athena
|
d1d300e375941399f116cbaa4678a9ed7c6652db
|
[
"MIT"
] | null | null | null |
Task2/IGNORE_miscellaneousScripts/setup_ensemble.py
|
Jacob-L-Vincent/project-athena
|
d1d300e375941399f116cbaa4678a9ed7c6652db
|
[
"MIT"
] | null | null | null |
"""
Helpers for assembling an Athena ensemble from a pool of weak defenses (WDs).
@author: Isaac Keohane isackeohane95@gmail.com
adapted from: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
import sys
sys.path.append("../")
from utils.model import load_pool
from models.athena import Ensemble, ENSEMBLE_STRATEGY
def setup_ensemble(trans_configs, model_configs, use_logits=False,
useActiveList=True, customList=False, wdList=[]):
# load the pool and create the ensemble
pool, _ = load_pool(trans_configs=trans_configs,
model_configs=model_configs,
active_list=useActiveList,
use_logits=use_logits,
wrap=True,
customList = customList,
custom_wds = wdList
)
athena = Ensemble(classifiers=list(pool.values()),
strategy=ENSEMBLE_STRATEGY.MV.value)
return athena
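# --- Hedged usage sketch (the config dicts come from this repo's JSON files;
# names below are illustrative) ------------------------------------------------
#     athena = setup_ensemble(trans_configs, model_configs, use_logits=False)
#     # athena wraps the loaded weak defenses in an Ensemble using majority
#     # voting (ENSEMBLE_STRATEGY.MV); pass customList=True with wdList=[...]
#     # to select a specific subset of WDs instead of the active list.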
| 29.818182
| 68
| 0.606707
|
3ffec33bc5e5573cb59f993460f4847230e33c77
| 2,236
|
py
|
Python
|
config.py
|
darwinharianto/pvnet-rendering
|
8af589f9208fb2bad9bca3595f68ad7ca2563849
|
[
"Apache-2.0"
] | 143
|
2019-04-12T09:09:43.000Z
|
2022-03-31T08:12:52.000Z
|
config.py
|
darwinharianto/pvnet-rendering
|
8af589f9208fb2bad9bca3595f68ad7ca2563849
|
[
"Apache-2.0"
] | 66
|
2019-04-01T08:33:24.000Z
|
2022-02-14T07:11:14.000Z
|
config.py
|
darwinharianto/pvnet-rendering
|
8af589f9208fb2bad9bca3595f68ad7ca2563849
|
[
"Apache-2.0"
] | 46
|
2019-04-07T06:53:40.000Z
|
2022-01-02T13:11:16.000Z
|
from easydict import EasyDict
import os
import sys
import numpy as np
cfg = EasyDict()
"""
Path settings
"""
cfg.ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
cfg.DATA_DIR = os.path.join(cfg.ROOT_DIR, 'data')
cfg.MODEL_DIR = os.path.join(cfg.DATA_DIR, 'model')
cfg.REC_DIR = os.path.join(cfg.DATA_DIR, 'record')
cfg.FIGURE_DIR = os.path.join(cfg.ROOT_DIR, 'figure')
cfg.BLENDER_DIR = os.path.join(cfg.ROOT_DIR, "blender")
def add_path():
for key, value in cfg.items():
if 'DIR' in key:
sys.path.insert(0, value)
add_path()
sys.path.extend([".", ".."])
"""
Data settings
"""
cfg.LINEMOD = os.path.join(cfg.DATA_DIR, 'LINEMOD')
cfg.LINEMOD_ORIG = os.path.join(cfg.DATA_DIR, 'LINEMOD_ORIG')
cfg.OCCLUSION_LINEMOD = os.path.join(cfg.DATA_DIR, 'OCCLUSION_LINEMOD')
cfg.YCB = os.path.join(cfg.DATA_DIR, 'YCB')
cfg.SUN = os.path.join(cfg.DATA_DIR, "SUN")
"""
Rendering setting
"""
cfg.BLENDER_PATH = '/home/pengsida/Software/blender-2.79a-linux-glibc219-x86_64/blender'
cfg.NUM_SYN = 10
cfg.WIDTH = 640
cfg.HEIGHT = 480
cfg.low_azi = 0
cfg.high_azi = 360
cfg.low_ele = -15
cfg.high_ele = 40
cfg.low_theta = 10
cfg.high_theta = 40
cfg.cam_dist = 0.5
cfg.MIN_DEPTH = 0
cfg.MAX_DEPTH = 2
cfg.render_K=np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]],np.float32)
cfg.linemod_K=np.array([[572.41140,0. ,325.26110],
[0. ,573.57043,242.04899],
[0. ,0. ,1. ]],np.float32)
cfg.linemod_cls_names=['ape','cam','cat','duck','glue','iron','phone',
'benchvise','can','driller','eggbox','holepuncher','lamp']
cfg.occ_linemod_cls_names=['ape','can','cat','driller','duck','eggbox','glue','holepuncher']
cfg.linemod_plane=['can']
cfg.symmetry_linemod_cls_names=['glue','eggbox']
'''
pascal 3d +
'''
cfg.PASCAL = os.path.join(cfg.DATA_DIR, 'PASCAL3D')
cfg.pascal_cls_names=['aeroplane','bicycle','boat','bottle','bus','car',
'chair','diningtable','motorbike','sofa','train','tvmonitor']
cfg.pascal_size=128
'''
YCB
'''
cfg.ycb_sym_cls=[21,20,19,16,13] # foam_brick extra_large_clamp large_clamp wood_block bowl
cfg.ycb_class_num=21
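# Hedged usage note: downstream modules import this shared EasyDict, e.g.
#     from config import cfg
#     cfg.WIDTH, cfg.HEIGHT   # (640, 480) render resolution
#     cfg.LINEMOD             # <repo>/data/LINEMOD dataset root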
| 26.305882
| 92
| 0.639535
|
07d024b60eaeec927203a504b8347a00cb76e9cc
| 990
|
py
|
Python
|
pyasdf/tags/time/tests/test_time.py
|
mdboom/pyasdf
|
ac4e9f85bf96206fdd6bc3d0708875c953c66dc5
|
[
"BSD-3-Clause"
] | null | null | null |
pyasdf/tags/time/tests/test_time.py
|
mdboom/pyasdf
|
ac4e9f85bf96206fdd6bc3d0708875c953c66dc5
|
[
"BSD-3-Clause"
] | null | null | null |
pyasdf/tags/time/tests/test_time.py
|
mdboom/pyasdf
|
ac4e9f85bf96206fdd6bc3d0708875c953c66dc5
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import six
try:
import astropy
except ImportError:
HAS_ASTROPY = False
else:
HAS_ASTROPY = True
from astropy import time
import numpy as np
import pytest
from .... import asdf
from .... import yamlutil
from ....tests import helpers
@pytest.mark.skipif('not HAS_ASTROPY')
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skipif('not HAS_ASTROPY')
def test_isot(tmpdir):
tree = {
'time': time.Time('2000-01-01T00:00:00.000')
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
assert isinstance(tree['time'], six.text_type)
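# Hedged usage note: these round-trip tests are collected by pytest, e.g.
#     pytest pyasdf/tags/time/tests/test_time.py
# and are skipped automatically when astropy cannot be imported (HAS_ASTROPY).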
| 20.625
| 82
| 0.692929
|
4373e7eccb2a9f1a599ac7a4de289863fde3dea2
| 26,619
|
py
|
Python
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/__init__.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/__init__.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/__init__.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ARMProxyResource
from ._models_py3 import ARMResourceProperties
from ._models_py3 import ApiProperties
from ._models_py3 import AutoUpgradePolicyResource
from ._models_py3 import AutoscaleSettings
from ._models_py3 import AutoscaleSettingsResource
from ._models_py3 import BackupPolicy
from ._models_py3 import Capability
from ._models_py3 import CassandraKeyspaceCreateUpdateParameters
from ._models_py3 import CassandraKeyspaceGetPropertiesOptions
from ._models_py3 import CassandraKeyspaceGetPropertiesResource
from ._models_py3 import CassandraKeyspaceGetResults
from ._models_py3 import CassandraKeyspaceListResult
from ._models_py3 import CassandraKeyspaceResource
from ._models_py3 import CassandraPartitionKey
from ._models_py3 import CassandraSchema
from ._models_py3 import CassandraTableCreateUpdateParameters
from ._models_py3 import CassandraTableGetPropertiesOptions
from ._models_py3 import CassandraTableGetPropertiesResource
from ._models_py3 import CassandraTableGetResults
from ._models_py3 import CassandraTableListResult
from ._models_py3 import CassandraTableResource
from ._models_py3 import ClusterKey
from ._models_py3 import Column
from ._models_py3 import Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import CompositePath
from ._models_py3 import ConflictResolutionPolicy
from ._models_py3 import ConsistencyPolicy
from ._models_py3 import ContainerPartitionKey
from ._models_py3 import ContinuousModeBackupPolicy
from ._models_py3 import CorsPolicy
from ._models_py3 import CreateUpdateOptions
from ._models_py3 import DatabaseAccountConnectionString
from ._models_py3 import DatabaseAccountCreateUpdateParameters
from ._models_py3 import DatabaseAccountGetResults
from ._models_py3 import DatabaseAccountListConnectionStringsResult
from ._models_py3 import DatabaseAccountListKeysResult
from ._models_py3 import DatabaseAccountListReadOnlyKeysResult
from ._models_py3 import DatabaseAccountRegenerateKeyParameters
from ._models_py3 import DatabaseAccountUpdateParameters
from ._models_py3 import DatabaseAccountsListResult
from ._models_py3 import ErrorResponse
from ._models_py3 import ExcludedPath
from ._models_py3 import ExtendedResourceProperties
from ._models_py3 import FailoverPolicies
from ._models_py3 import FailoverPolicy
from ._models_py3 import GremlinDatabaseCreateUpdateParameters
from ._models_py3 import GremlinDatabaseGetPropertiesOptions
from ._models_py3 import GremlinDatabaseGetPropertiesResource
from ._models_py3 import GremlinDatabaseGetResults
from ._models_py3 import GremlinDatabaseListResult
from ._models_py3 import GremlinDatabaseResource
from ._models_py3 import GremlinGraphCreateUpdateParameters
from ._models_py3 import GremlinGraphGetPropertiesOptions
from ._models_py3 import GremlinGraphGetPropertiesResource
from ._models_py3 import GremlinGraphGetResults
from ._models_py3 import GremlinGraphListResult
from ._models_py3 import GremlinGraphResource
from ._models_py3 import IncludedPath
from ._models_py3 import Indexes
from ._models_py3 import IndexingPolicy
from ._models_py3 import IpAddressOrRange
from ._models_py3 import Location
from ._models_py3 import ManagedServiceIdentity
from ._models_py3 import Metric
from ._models_py3 import MetricAvailability
from ._models_py3 import MetricDefinition
from ._models_py3 import MetricDefinitionsListResult
from ._models_py3 import MetricListResult
from ._models_py3 import MetricName
from ._models_py3 import MetricValue
from ._models_py3 import MongoDBCollectionCreateUpdateParameters
from ._models_py3 import MongoDBCollectionGetPropertiesOptions
from ._models_py3 import MongoDBCollectionGetPropertiesResource
from ._models_py3 import MongoDBCollectionGetResults
from ._models_py3 import MongoDBCollectionListResult
from ._models_py3 import MongoDBCollectionResource
from ._models_py3 import MongoDBDatabaseCreateUpdateParameters
from ._models_py3 import MongoDBDatabaseGetPropertiesOptions
from ._models_py3 import MongoDBDatabaseGetPropertiesResource
from ._models_py3 import MongoDBDatabaseGetResults
from ._models_py3 import MongoDBDatabaseListResult
from ._models_py3 import MongoDBDatabaseResource
from ._models_py3 import MongoIndex
from ._models_py3 import MongoIndexKeys
from ._models_py3 import MongoIndexOptions
from ._models_py3 import NotebookWorkspace
from ._models_py3 import NotebookWorkspaceConnectionInfoResult
from ._models_py3 import NotebookWorkspaceCreateUpdateParameters
from ._models_py3 import NotebookWorkspaceListResult
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OptionsResource
from ._models_py3 import PartitionMetric
from ._models_py3 import PartitionMetricListResult
from ._models_py3 import PartitionUsage
from ._models_py3 import PartitionUsagesResult
from ._models_py3 import PercentileMetric
from ._models_py3 import PercentileMetricListResult
from ._models_py3 import PercentileMetricValue
from ._models_py3 import PeriodicModeBackupPolicy
from ._models_py3 import PeriodicModeProperties
from ._models_py3 import Permission
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateEndpointProperty
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionStateProperty
from ._models_py3 import ProxyResource
from ._models_py3 import RegionForOnlineOffline
from ._models_py3 import Resource
from ._models_py3 import SpatialSpec
from ._models_py3 import SqlContainerCreateUpdateParameters
from ._models_py3 import SqlContainerGetPropertiesOptions
from ._models_py3 import SqlContainerGetPropertiesResource
from ._models_py3 import SqlContainerGetResults
from ._models_py3 import SqlContainerListResult
from ._models_py3 import SqlContainerResource
from ._models_py3 import SqlDatabaseCreateUpdateParameters
from ._models_py3 import SqlDatabaseGetPropertiesOptions
from ._models_py3 import SqlDatabaseGetPropertiesResource
from ._models_py3 import SqlDatabaseGetResults
from ._models_py3 import SqlDatabaseListResult
from ._models_py3 import SqlDatabaseResource
from ._models_py3 import SqlRoleAssignmentCreateUpdateParameters
from ._models_py3 import SqlRoleAssignmentGetResults
from ._models_py3 import SqlRoleAssignmentListResult
from ._models_py3 import SqlRoleDefinitionCreateUpdateParameters
from ._models_py3 import SqlRoleDefinitionGetResults
from ._models_py3 import SqlRoleDefinitionListResult
from ._models_py3 import SqlStoredProcedureCreateUpdateParameters
from ._models_py3 import SqlStoredProcedureGetPropertiesResource
from ._models_py3 import SqlStoredProcedureGetResults
from ._models_py3 import SqlStoredProcedureListResult
from ._models_py3 import SqlStoredProcedureResource
from ._models_py3 import SqlTriggerCreateUpdateParameters
from ._models_py3 import SqlTriggerGetPropertiesResource
from ._models_py3 import SqlTriggerGetResults
from ._models_py3 import SqlTriggerListResult
from ._models_py3 import SqlTriggerResource
from ._models_py3 import SqlUserDefinedFunctionCreateUpdateParameters
from ._models_py3 import SqlUserDefinedFunctionGetPropertiesResource
from ._models_py3 import SqlUserDefinedFunctionGetResults
from ._models_py3 import SqlUserDefinedFunctionListResult
from ._models_py3 import SqlUserDefinedFunctionResource
from ._models_py3 import TableCreateUpdateParameters
from ._models_py3 import TableGetPropertiesOptions
from ._models_py3 import TableGetPropertiesResource
from ._models_py3 import TableGetResults
from ._models_py3 import TableListResult
from ._models_py3 import TableResource
from ._models_py3 import ThroughputPolicyResource
from ._models_py3 import ThroughputSettingsGetPropertiesResource
from ._models_py3 import ThroughputSettingsGetResults
from ._models_py3 import ThroughputSettingsResource
from ._models_py3 import ThroughputSettingsUpdateParameters
from ._models_py3 import UniqueKey
from ._models_py3 import UniqueKeyPolicy
from ._models_py3 import Usage
from ._models_py3 import UsagesResult
from ._models_py3 import VirtualNetworkRule
except (SyntaxError, ImportError):
from ._models import ARMProxyResource # type: ignore
from ._models import ARMResourceProperties # type: ignore
from ._models import ApiProperties # type: ignore
from ._models import AutoUpgradePolicyResource # type: ignore
from ._models import AutoscaleSettings # type: ignore
from ._models import AutoscaleSettingsResource # type: ignore
from ._models import BackupPolicy # type: ignore
from ._models import Capability # type: ignore
from ._models import CassandraKeyspaceCreateUpdateParameters # type: ignore
from ._models import CassandraKeyspaceGetPropertiesOptions # type: ignore
from ._models import CassandraKeyspaceGetPropertiesResource # type: ignore
from ._models import CassandraKeyspaceGetResults # type: ignore
from ._models import CassandraKeyspaceListResult # type: ignore
from ._models import CassandraKeyspaceResource # type: ignore
from ._models import CassandraPartitionKey # type: ignore
from ._models import CassandraSchema # type: ignore
from ._models import CassandraTableCreateUpdateParameters # type: ignore
from ._models import CassandraTableGetPropertiesOptions # type: ignore
from ._models import CassandraTableGetPropertiesResource # type: ignore
from ._models import CassandraTableGetResults # type: ignore
from ._models import CassandraTableListResult # type: ignore
from ._models import CassandraTableResource # type: ignore
from ._models import ClusterKey # type: ignore
from ._models import Column # type: ignore
from ._models import Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties # type: ignore
from ._models import CompositePath # type: ignore
from ._models import ConflictResolutionPolicy # type: ignore
from ._models import ConsistencyPolicy # type: ignore
from ._models import ContainerPartitionKey # type: ignore
from ._models import ContinuousModeBackupPolicy # type: ignore
from ._models import CorsPolicy # type: ignore
from ._models import CreateUpdateOptions # type: ignore
from ._models import DatabaseAccountConnectionString # type: ignore
from ._models import DatabaseAccountCreateUpdateParameters # type: ignore
from ._models import DatabaseAccountGetResults # type: ignore
from ._models import DatabaseAccountListConnectionStringsResult # type: ignore
from ._models import DatabaseAccountListKeysResult # type: ignore
from ._models import DatabaseAccountListReadOnlyKeysResult # type: ignore
from ._models import DatabaseAccountRegenerateKeyParameters # type: ignore
from ._models import DatabaseAccountUpdateParameters # type: ignore
from ._models import DatabaseAccountsListResult # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ExcludedPath # type: ignore
from ._models import ExtendedResourceProperties # type: ignore
from ._models import FailoverPolicies # type: ignore
from ._models import FailoverPolicy # type: ignore
from ._models import GremlinDatabaseCreateUpdateParameters # type: ignore
from ._models import GremlinDatabaseGetPropertiesOptions # type: ignore
from ._models import GremlinDatabaseGetPropertiesResource # type: ignore
from ._models import GremlinDatabaseGetResults # type: ignore
from ._models import GremlinDatabaseListResult # type: ignore
from ._models import GremlinDatabaseResource # type: ignore
from ._models import GremlinGraphCreateUpdateParameters # type: ignore
from ._models import GremlinGraphGetPropertiesOptions # type: ignore
from ._models import GremlinGraphGetPropertiesResource # type: ignore
from ._models import GremlinGraphGetResults # type: ignore
from ._models import GremlinGraphListResult # type: ignore
from ._models import GremlinGraphResource # type: ignore
from ._models import IncludedPath # type: ignore
from ._models import Indexes # type: ignore
from ._models import IndexingPolicy # type: ignore
from ._models import IpAddressOrRange # type: ignore
from ._models import Location # type: ignore
from ._models import ManagedServiceIdentity # type: ignore
from ._models import Metric # type: ignore
from ._models import MetricAvailability # type: ignore
from ._models import MetricDefinition # type: ignore
from ._models import MetricDefinitionsListResult # type: ignore
from ._models import MetricListResult # type: ignore
from ._models import MetricName # type: ignore
from ._models import MetricValue # type: ignore
from ._models import MongoDBCollectionCreateUpdateParameters # type: ignore
from ._models import MongoDBCollectionGetPropertiesOptions # type: ignore
from ._models import MongoDBCollectionGetPropertiesResource # type: ignore
from ._models import MongoDBCollectionGetResults # type: ignore
from ._models import MongoDBCollectionListResult # type: ignore
from ._models import MongoDBCollectionResource # type: ignore
from ._models import MongoDBDatabaseCreateUpdateParameters # type: ignore
from ._models import MongoDBDatabaseGetPropertiesOptions # type: ignore
from ._models import MongoDBDatabaseGetPropertiesResource # type: ignore
from ._models import MongoDBDatabaseGetResults # type: ignore
from ._models import MongoDBDatabaseListResult # type: ignore
from ._models import MongoDBDatabaseResource # type: ignore
from ._models import MongoIndex # type: ignore
from ._models import MongoIndexKeys # type: ignore
from ._models import MongoIndexOptions # type: ignore
from ._models import NotebookWorkspace # type: ignore
from ._models import NotebookWorkspaceConnectionInfoResult # type: ignore
from ._models import NotebookWorkspaceCreateUpdateParameters # type: ignore
from ._models import NotebookWorkspaceListResult # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OptionsResource # type: ignore
from ._models import PartitionMetric # type: ignore
from ._models import PartitionMetricListResult # type: ignore
from ._models import PartitionUsage # type: ignore
from ._models import PartitionUsagesResult # type: ignore
from ._models import PercentileMetric # type: ignore
from ._models import PercentileMetricListResult # type: ignore
from ._models import PercentileMetricValue # type: ignore
from ._models import PeriodicModeBackupPolicy # type: ignore
from ._models import PeriodicModeProperties # type: ignore
from ._models import Permission # type: ignore
from ._models import PrivateEndpointConnection # type: ignore
from ._models import PrivateEndpointConnectionListResult # type: ignore
from ._models import PrivateEndpointProperty # type: ignore
from ._models import PrivateLinkResource # type: ignore
from ._models import PrivateLinkResourceListResult # type: ignore
from ._models import PrivateLinkServiceConnectionStateProperty # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import RegionForOnlineOffline # type: ignore
from ._models import Resource # type: ignore
from ._models import SpatialSpec # type: ignore
from ._models import SqlContainerCreateUpdateParameters # type: ignore
from ._models import SqlContainerGetPropertiesOptions # type: ignore
from ._models import SqlContainerGetPropertiesResource # type: ignore
from ._models import SqlContainerGetResults # type: ignore
from ._models import SqlContainerListResult # type: ignore
from ._models import SqlContainerResource # type: ignore
from ._models import SqlDatabaseCreateUpdateParameters # type: ignore
from ._models import SqlDatabaseGetPropertiesOptions # type: ignore
from ._models import SqlDatabaseGetPropertiesResource # type: ignore
from ._models import SqlDatabaseGetResults # type: ignore
from ._models import SqlDatabaseListResult # type: ignore
from ._models import SqlDatabaseResource # type: ignore
from ._models import SqlRoleAssignmentCreateUpdateParameters # type: ignore
from ._models import SqlRoleAssignmentGetResults # type: ignore
from ._models import SqlRoleAssignmentListResult # type: ignore
from ._models import SqlRoleDefinitionCreateUpdateParameters # type: ignore
from ._models import SqlRoleDefinitionGetResults # type: ignore
from ._models import SqlRoleDefinitionListResult # type: ignore
from ._models import SqlStoredProcedureCreateUpdateParameters # type: ignore
from ._models import SqlStoredProcedureGetPropertiesResource # type: ignore
from ._models import SqlStoredProcedureGetResults # type: ignore
from ._models import SqlStoredProcedureListResult # type: ignore
from ._models import SqlStoredProcedureResource # type: ignore
from ._models import SqlTriggerCreateUpdateParameters # type: ignore
from ._models import SqlTriggerGetPropertiesResource # type: ignore
from ._models import SqlTriggerGetResults # type: ignore
from ._models import SqlTriggerListResult # type: ignore
from ._models import SqlTriggerResource # type: ignore
from ._models import SqlUserDefinedFunctionCreateUpdateParameters # type: ignore
from ._models import SqlUserDefinedFunctionGetPropertiesResource # type: ignore
from ._models import SqlUserDefinedFunctionGetResults # type: ignore
from ._models import SqlUserDefinedFunctionListResult # type: ignore
from ._models import SqlUserDefinedFunctionResource # type: ignore
from ._models import TableCreateUpdateParameters # type: ignore
from ._models import TableGetPropertiesOptions # type: ignore
from ._models import TableGetPropertiesResource # type: ignore
from ._models import TableGetResults # type: ignore
from ._models import TableListResult # type: ignore
from ._models import TableResource # type: ignore
from ._models import ThroughputPolicyResource # type: ignore
from ._models import ThroughputSettingsGetPropertiesResource # type: ignore
from ._models import ThroughputSettingsGetResults # type: ignore
from ._models import ThroughputSettingsResource # type: ignore
from ._models import ThroughputSettingsUpdateParameters # type: ignore
from ._models import UniqueKey # type: ignore
from ._models import UniqueKeyPolicy # type: ignore
from ._models import Usage # type: ignore
from ._models import UsagesResult # type: ignore
from ._models import VirtualNetworkRule # type: ignore
from ._cosmos_db_management_client_enums import (
BackupPolicyType,
CompositePathSortOrder,
ConflictResolutionMode,
ConnectorOffer,
DataType,
DatabaseAccountKind,
DefaultConsistencyLevel,
IndexKind,
IndexingMode,
KeyKind,
NetworkAclBypass,
NotebookWorkspaceName,
PartitionKind,
PrimaryAggregationType,
PublicNetworkAccess,
ResourceIdentityType,
RoleDefinitionType,
ServerVersion,
SpatialType,
TriggerOperation,
TriggerType,
UnitType,
)
__all__ = [
'ARMProxyResource',
'ARMResourceProperties',
'ApiProperties',
'AutoUpgradePolicyResource',
'AutoscaleSettings',
'AutoscaleSettingsResource',
'BackupPolicy',
'Capability',
'CassandraKeyspaceCreateUpdateParameters',
'CassandraKeyspaceGetPropertiesOptions',
'CassandraKeyspaceGetPropertiesResource',
'CassandraKeyspaceGetResults',
'CassandraKeyspaceListResult',
'CassandraKeyspaceResource',
'CassandraPartitionKey',
'CassandraSchema',
'CassandraTableCreateUpdateParameters',
'CassandraTableGetPropertiesOptions',
'CassandraTableGetPropertiesResource',
'CassandraTableGetResults',
'CassandraTableListResult',
'CassandraTableResource',
'ClusterKey',
'Column',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties',
'CompositePath',
'ConflictResolutionPolicy',
'ConsistencyPolicy',
'ContainerPartitionKey',
'ContinuousModeBackupPolicy',
'CorsPolicy',
'CreateUpdateOptions',
'DatabaseAccountConnectionString',
'DatabaseAccountCreateUpdateParameters',
'DatabaseAccountGetResults',
'DatabaseAccountListConnectionStringsResult',
'DatabaseAccountListKeysResult',
'DatabaseAccountListReadOnlyKeysResult',
'DatabaseAccountRegenerateKeyParameters',
'DatabaseAccountUpdateParameters',
'DatabaseAccountsListResult',
'ErrorResponse',
'ExcludedPath',
'ExtendedResourceProperties',
'FailoverPolicies',
'FailoverPolicy',
'GremlinDatabaseCreateUpdateParameters',
'GremlinDatabaseGetPropertiesOptions',
'GremlinDatabaseGetPropertiesResource',
'GremlinDatabaseGetResults',
'GremlinDatabaseListResult',
'GremlinDatabaseResource',
'GremlinGraphCreateUpdateParameters',
'GremlinGraphGetPropertiesOptions',
'GremlinGraphGetPropertiesResource',
'GremlinGraphGetResults',
'GremlinGraphListResult',
'GremlinGraphResource',
'IncludedPath',
'Indexes',
'IndexingPolicy',
'IpAddressOrRange',
'Location',
'ManagedServiceIdentity',
'Metric',
'MetricAvailability',
'MetricDefinition',
'MetricDefinitionsListResult',
'MetricListResult',
'MetricName',
'MetricValue',
'MongoDBCollectionCreateUpdateParameters',
'MongoDBCollectionGetPropertiesOptions',
'MongoDBCollectionGetPropertiesResource',
'MongoDBCollectionGetResults',
'MongoDBCollectionListResult',
'MongoDBCollectionResource',
'MongoDBDatabaseCreateUpdateParameters',
'MongoDBDatabaseGetPropertiesOptions',
'MongoDBDatabaseGetPropertiesResource',
'MongoDBDatabaseGetResults',
'MongoDBDatabaseListResult',
'MongoDBDatabaseResource',
'MongoIndex',
'MongoIndexKeys',
'MongoIndexOptions',
'NotebookWorkspace',
'NotebookWorkspaceConnectionInfoResult',
'NotebookWorkspaceCreateUpdateParameters',
'NotebookWorkspaceListResult',
'Operation',
'OperationDisplay',
'OperationListResult',
'OptionsResource',
'PartitionMetric',
'PartitionMetricListResult',
'PartitionUsage',
'PartitionUsagesResult',
'PercentileMetric',
'PercentileMetricListResult',
'PercentileMetricValue',
'PeriodicModeBackupPolicy',
'PeriodicModeProperties',
'Permission',
'PrivateEndpointConnection',
'PrivateEndpointConnectionListResult',
'PrivateEndpointProperty',
'PrivateLinkResource',
'PrivateLinkResourceListResult',
'PrivateLinkServiceConnectionStateProperty',
'ProxyResource',
'RegionForOnlineOffline',
'Resource',
'SpatialSpec',
'SqlContainerCreateUpdateParameters',
'SqlContainerGetPropertiesOptions',
'SqlContainerGetPropertiesResource',
'SqlContainerGetResults',
'SqlContainerListResult',
'SqlContainerResource',
'SqlDatabaseCreateUpdateParameters',
'SqlDatabaseGetPropertiesOptions',
'SqlDatabaseGetPropertiesResource',
'SqlDatabaseGetResults',
'SqlDatabaseListResult',
'SqlDatabaseResource',
'SqlRoleAssignmentCreateUpdateParameters',
'SqlRoleAssignmentGetResults',
'SqlRoleAssignmentListResult',
'SqlRoleDefinitionCreateUpdateParameters',
'SqlRoleDefinitionGetResults',
'SqlRoleDefinitionListResult',
'SqlStoredProcedureCreateUpdateParameters',
'SqlStoredProcedureGetPropertiesResource',
'SqlStoredProcedureGetResults',
'SqlStoredProcedureListResult',
'SqlStoredProcedureResource',
'SqlTriggerCreateUpdateParameters',
'SqlTriggerGetPropertiesResource',
'SqlTriggerGetResults',
'SqlTriggerListResult',
'SqlTriggerResource',
'SqlUserDefinedFunctionCreateUpdateParameters',
'SqlUserDefinedFunctionGetPropertiesResource',
'SqlUserDefinedFunctionGetResults',
'SqlUserDefinedFunctionListResult',
'SqlUserDefinedFunctionResource',
'TableCreateUpdateParameters',
'TableGetPropertiesOptions',
'TableGetPropertiesResource',
'TableGetResults',
'TableListResult',
'TableResource',
'ThroughputPolicyResource',
'ThroughputSettingsGetPropertiesResource',
'ThroughputSettingsGetResults',
'ThroughputSettingsResource',
'ThroughputSettingsUpdateParameters',
'UniqueKey',
'UniqueKeyPolicy',
'Usage',
'UsagesResult',
'VirtualNetworkRule',
'BackupPolicyType',
'CompositePathSortOrder',
'ConflictResolutionMode',
'ConnectorOffer',
'DataType',
'DatabaseAccountKind',
'DefaultConsistencyLevel',
'IndexKind',
'IndexingMode',
'KeyKind',
'NetworkAclBypass',
'NotebookWorkspaceName',
'PartitionKind',
'PrimaryAggregationType',
'PublicNetworkAccess',
'ResourceIdentityType',
'RoleDefinitionType',
'ServerVersion',
'SpatialType',
'TriggerOperation',
'TriggerType',
'UnitType',
]
| 48.398182
| 139
| 0.783162
|
a8998afc3d4d92d142fe13c4efd8453b9ca58b45
| 1,161
|
py
|
Python
|
http_request_builder.py
|
armiantos/CMPUT404-assignment-web-client
|
f3e2e685aaae4fda1ed399bcc73eb014a1ec9273
|
[
"Apache-2.0"
] | null | null | null |
http_request_builder.py
|
armiantos/CMPUT404-assignment-web-client
|
f3e2e685aaae4fda1ed399bcc73eb014a1ec9273
|
[
"Apache-2.0"
] | null | null | null |
http_request_builder.py
|
armiantos/CMPUT404-assignment-web-client
|
f3e2e685aaae4fda1ed399bcc73eb014a1ec9273
|
[
"Apache-2.0"
] | null | null | null |
def build_http_request(method: str, path: str, host: str, extra_headers=[], body: str = "") -> str:
"""
Returns a valid HTTP request from the given parameters.
Parameters:
- `method` - valid HTTP methods (e.g. "POST" or "GET")
- `path` - the path part of a URL (e.g. "/" or "/index.html")
- `host` - the host of the endpoint (e.g. "google.com" or "ualberta.ca")
- `extra_headers` - an optional list of strings to be included as part
    of the request headers (e.g. ["Content-Type: application/json"])
- `body` - the optional body of the request (if any)
Returns:
A string representation of a valid HTTP request
"""
status_line = f"{method} {path} HTTP/1.1"
headers = [
f"Host: {host}",
"Connection: close",
"User-Agent: sumitro-client/1.0"
]
if len(extra_headers) > 0:
headers.extend(extra_headers)
payload = "\r\n"
if len(body) > 0 or method == "POST":
payload += body
headers.append(f"Content-Length: {len(body)}")
request_body = "\r\n".join([status_line, "\r\n".join(headers), payload])
return request_body
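# --- Hedged usage sketch (not part of the original module) ---
# Shows one way build_http_request might be called; the host, path, header and
# body values below are illustrative assumptions only.
if __name__ == "__main__":
    example_get = build_http_request("GET", "/index.html", "example.com",
                                     extra_headers=["Accept: text/html"])
    example_post = build_http_request("POST", "/submit", "example.com",
                                      extra_headers=["Content-Type: application/json"],
                                      body='{"key": "value"}')
    print(example_get)
    print(example_post)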
| 36.28125
| 99
| 0.596038
|
3e44ce3ad007c9a9079e2a432648dcd4ccaebe52
| 5,734
|
py
|
Python
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 3
|
2020-03-07T21:54:09.000Z
|
2020-06-11T02:23:58.000Z
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 2
|
2020-03-08T00:35:51.000Z
|
2020-07-19T23:42:13.000Z
|
ljdata/ds/JPEGBaseline/__init__.py
|
scaramallion/pylibjpeg-data
|
2ab4b8a65b070656eca2582bd23197a3d01cdccd
|
[
"MIT"
] | 1
|
2020-03-07T21:49:07.000Z
|
2020-03-07T21:49:07.000Z
|
"""1.2.840.10008.1.2.4.50 - JPEG Baseline (Process 1)"""
INDEX = {
"color3d_jpeg_baseline.dcm" : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL_422'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '120'),
'Rows' : ('US', 480),
'Columns' : ('US', 640),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
"JPEGBaseline_1s_1f_u_08_08.dcm" : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 1),
'PhotometricInterpretation' : ('CS', 'MONOCHROME2'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
'ImageComments' : (
'LT',
(
"Created from SC_rgb_jpeg_dcmtk.dcm using IJG's cjpeg with "
"-grayscale and -baseline flags"
)
),
},
'SC_rgb_dcmtk_+eb+cr.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'RGB'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_dcmtk_+eb+cy+n1.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_dcmtk_+eb+cy+n2.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_dcmtk_+eb+cy+np.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL_422'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_dcmtk_+eb+cy+s2.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL_422'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_dcmtk_+eb+cy+s4.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_jpeg_dcmtk.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_jpeg_lossy_gdcm.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'RGB'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 100),
'Columns' : ('US', 100),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
'SC_rgb_small_odd_jpeg.dcm' : {
'TransferSyntaxUID' : ('UI', '1.2.840.10008.1.2.4.50'),
'SamplesPerPixel' : ('US', 3),
'PhotometricInterpretation' : ('CS', 'YBR_FULL'),
'PlanarConfiguration' : ('US', 0),
'NumberOfFrames' : ('IS', '1'),
'Rows' : ('US', 3),
'Columns' : ('US', 3),
'BitsAllocated' : ('US', 8),
'BitsStored' : ('US', 8),
'HighBit' : ('US', 7),
'PixelRepresentation' : ('US', 0),
},
}
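# --- Hedged usage sketch (not part of the original index) ---
# Illustrates reading one entry from INDEX; interpreting each value as a
# (VR, value) pair is an assumption based on the layout above.
def _example_lookup():
    entry = INDEX["color3d_jpeg_baseline.dcm"]
    _, rows = entry['Rows']              # ('US', 480)
    _, columns = entry['Columns']        # ('US', 640)
    _, frames = entry['NumberOfFrames']  # ('IS', '120')
    print("{} frame(s) of {} x {} pixels".format(frames, rows, columns))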
| 36.993548
| 76
| 0.465992
|
cdcd6ef1d58f41937328ea7500a51a4923057b26
| 9,945
|
py
|
Python
|
tests/unit/cloud/clouds/test_openstack.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
tests/unit/cloud/clouds/test_openstack.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
tests/unit/cloud/clouds/test_openstack.py
|
babs/salt
|
c536ea716d5308880b244e7980f4b659d86fc104
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
"""
:codeauthor: `Tyler Johnson <tjohnson@saltstack.com>`
tests.unit.cloud.clouds.openstack_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from salt.cloud.clouds import openstack
from salt.utils import dictupdate
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
# pylint: disable=confusing-with-statement
class MockImage:
name = "image name"
id = "image id"
class MockNode:
name = "node name"
id = "node id"
flavor = MockImage()
status = "node status"
def __init__(self, image):
self.image = image
def __iter__(self):
return iter(())
class MockConn:
def __init__(self, image):
self.node = MockNode(image)
def get_image(self, *args, **kwargs):
return self.node.image
def get_flavor(self, *args, **kwargs):
return self.node.flavor
def get_server(self, *args, **kwargs):
return self.node
def list_servers(self, *args, **kwargs):
return [self.node]
class OpenstackTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.openstack module.
"""
def setup_loader_modules(self):
return {
openstack: {
"__active_provider_name__": "",
"__opts__": {
"providers": {
"my-openstack-cloud": {
"openstack": {
"auth": "daenerys",
"region_name": "westeros",
"cloud": "openstack",
}
}
}
},
}
}
def test_get_configured_provider_bad(self):
with patch.dict(openstack.__opts__, {"providers": {}}):
result = openstack.get_configured_provider()
self.assertEqual(result, False)
def test_get_configured_provider_auth(self):
config = {
"region_name": "westeros",
"auth": "daenerys",
}
with patch.dict(
openstack.__opts__,
{"providers": {"my-openstack-cloud": {"openstack": config}}},
):
result = openstack.get_configured_provider()
self.assertEqual(config, result)
def test_get_configured_provider_cloud(self):
config = {
"region_name": "westeros",
"cloud": "foo",
}
with patch.dict(
openstack.__opts__,
{"providers": {"my-openstack-cloud": {"openstack": config}}},
):
result = openstack.get_configured_provider()
self.assertEqual(config, result)
def test_get_dependencies(self):
HAS_SHADE = (True, "Please install newer version of shade: >= 1.19.0")
with patch("salt.cloud.clouds.openstack.HAS_SHADE", HAS_SHADE):
result = openstack.get_dependencies()
self.assertEqual(result, True)
def test_get_dependencies_no_shade(self):
HAS_SHADE = (False, "Install pypi module shade >= 1.19.0")
with patch("salt.cloud.clouds.openstack.HAS_SHADE", HAS_SHADE):
result = openstack.get_dependencies()
self.assertEqual(result, False)
def test_list_nodes_full_image_str(self):
node_image = "node image"
conn = MockConn(node_image)
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.list_nodes_full(conn=conn)
self.assertEqual(ret[conn.node.name]["image"], node_image)
def test_list_nodes_full_image_obj(self):
conn = MockConn(MockImage())
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.list_nodes_full(conn=conn)
self.assertEqual(ret[conn.node.name]["image"], MockImage.name)
def test_show_instance(self):
conn = MockConn(MockImage())
with patch("salt.cloud.clouds.openstack._get_ips", return_value=[]):
ret = openstack.show_instance(conn.node.name, conn=conn, call="action")
self.assertEqual(ret["image"], MockImage.name)
def test_request_instance_should_use_provided_connection_if_not_None(self):
fake_conn = MagicMock()
patch_get_conn = patch("salt.cloud.clouds.openstack.get_conn", autospec=True)
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
patch_shade = patch.object(
openstack, "shade.exc.OpenStackCloudException", Exception, create=True
)
with patch_get_conn as fake_get_conn, patch_utils, patch_shade:
openstack.request_instance(
vm_={"name": "fnord", "driver": "fnord"}, conn=fake_conn
)
fake_get_conn.assert_not_called()
def test_request_instance_should_create_conn_if_provided_is_None(self):
none_conn = None
patch_get_conn = patch("salt.cloud.clouds.openstack.get_conn", autospec=True)
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
patch_shade = patch.object(
openstack, "shade.exc.OpenStackCloudException", Exception, create=True
)
with patch_get_conn as fake_get_conn, patch_utils, patch_shade:
openstack.request_instance(
vm_={"name": "fnord", "driver": "fnord"}, conn=none_conn
)
fake_get_conn.assert_called_once_with()
# According to
# https://docs.openstack.org/shade/latest/user/usage.html#shade.OpenStackCloud.create_server
# the `network` parameter can be:
# (optional) Network dict or name or ID to attach the server to.
# Mutually exclusive with the nics parameter. Can also be be a list of
# network names or IDs or network dicts.
#
# Here we're testing a normal dictionary
def test_request_instance_should_be_able_to_provide_a_dictionary_for_network(self):
fake_conn = MagicMock()
expected_network = {"foo": "bar"}
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
self.assertDictEqual(call_kwargs["network"], expected_network)
# Here we're testing the list of dictionaries
def test_request_instance_should_be_able_to_provide_a_list_of_dictionaries_for_network(
self,
):
fake_conn = MagicMock()
expected_network = [{"foo": "bar"}, {"bang": "quux"}]
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
assert call_kwargs["network"] == expected_network
# Here we're testing for names/IDs
def test_request_instance_should_be_able_to_provide_a_list_of_single_ids_or_names_for_network(
self,
):
fake_conn = MagicMock()
expected_network = ["foo", "bar", "bang", "fnord1", "fnord2"]
vm_ = {"name": "fnord", "driver": "fnord", "network": expected_network}
patch_utils = patch.dict(
openstack.__utils__,
{"cloud.check_name": MagicMock(), "dictupdate.update": dictupdate.update},
)
with patch_utils:
openstack.request_instance(vm_=vm_, conn=fake_conn)
call_kwargs = fake_conn.create_server.mock_calls[0][-1]
assert call_kwargs["network"] == expected_network
# Testing that we get a dict that we expect for create_server
def test__clean_create_kwargs(self):
params = {
"name": "elmer",
"image": "mirrormirror",
"flavor": "chocolate",
"auto_ip": True,
"ips": ["hihicats"],
"ip_pool": "olympic",
"root_volume": "iamgroot",
"boot_volume": "pussnboots",
"terminate_volume": False,
"volumes": ["lots", "of", "books"],
"meta": {"full": "meta"},
"files": {"shred": "this"},
"reservation_id": "licenseandregistration",
"security_groups": ["wanna", "play", "repeat"],
"key_name": "clortho",
"availability_zone": "callmemaybe",
"block_device_mapping": [{"listof": "dicts"}],
"block_device_mapping_v2": [{"listof": "dicts"}],
"nics": ["thats", "me"],
"scheduler_hints": {"so": "many"},
"config_drive": True,
"disk_config": "donkey",
"admin_pass": "password",
"wait": False,
"timeout": 30,
"reuse_ips": True,
"network": ["also", "a", "dict"],
"boot_from_volume": True,
"volume_size": 30,
"nat_destination": "albuquerque",
"group": "ledzeppelin",
"userdata": "needmoreinput",
"thisgetsdropped": "yup",
}
patch_utils = patch.dict(
openstack.__utils__, {"dictupdate.update": dictupdate.update},
)
with patch_utils:
ret = openstack._clean_create_kwargs(**params)
params.pop("thisgetsdropped")
self.assertDictEqual(params, ret)
| 36.29562
| 98
| 0.591252
|
1fe1cf7a83f186a26294a432f7012ef585af5e08
| 274
|
py
|
Python
|
pyibge/__init__.py
|
renanbirck/pyibge
|
f70ad060ab7d491fa61afa2b01181a2dd176e319
|
[
"MIT"
] | 6
|
2019-06-15T03:40:11.000Z
|
2021-02-27T08:59:04.000Z
|
pyibge/__init__.py
|
renanbirck/pyibge
|
f70ad060ab7d491fa61afa2b01181a2dd176e319
|
[
"MIT"
] | null | null | null |
pyibge/__init__.py
|
renanbirck/pyibge
|
f70ad060ab7d491fa61afa2b01181a2dd176e319
|
[
"MIT"
] | 4
|
2017-10-09T21:30:06.000Z
|
2019-12-18T04:56:53.000Z
|
#!/usr/bin/env python3
#
# pyIBGE: A module to access data from the Brazilian Institute of Geography and Statistics (IBGE)
# (c) 2016 Renan Birck Pinheiro [renan.birck.pinheiro@gmail.com]
from .extra_routines import state_to_id, period_to_date
from .query import IBGEQuery
| 34.25
| 97
| 0.788321
|
4c1d921925d8409fd85e0126d81a5a21a1b9424a
| 186
|
py
|
Python
|
dikedata_api/tests.py
|
ddsc/dikedata-api
|
74776575a848863975793a9fde106161c655ee44
|
[
"MIT"
] | null | null | null |
dikedata_api/tests.py
|
ddsc/dikedata-api
|
74776575a848863975793a9fde106161c655ee44
|
[
"MIT"
] | null | null | null |
dikedata_api/tests.py
|
ddsc/dikedata-api
|
74776575a848863975793a9fde106161c655ee44
|
[
"MIT"
] | null | null | null |
# (c) Nelen & Schuurmans. MIT licensed, see LICENSE.rst.
from django.test import TestCase
class ExampleTest(TestCase):
def test_something(self):
        self.assertEqual(1, 1)
| 18.6
| 57
| 0.704301
|
e69892fd895e0852d48fed48e20638d6fd32f61a
| 2,145
|
py
|
Python
|
esp32_ds1307.py
|
tnoumar/esp32-libraries
|
98a7c9ba90ddb65c9eece2849f255142931e3e9a
|
[
"MIT"
] | 1
|
2021-07-29T09:04:32.000Z
|
2021-07-29T09:04:32.000Z
|
esp32_ds1307.py
|
vittascience/esp32-libraries
|
98a7c9ba90ddb65c9eece2849f255142931e3e9a
|
[
"MIT"
] | null | null | null |
esp32_ds1307.py
|
vittascience/esp32-libraries
|
98a7c9ba90ddb65c9eece2849f255142931e3e9a
|
[
"MIT"
] | null | null | null |
from micropython import const
# I2C-Address - DS1307
RTC_V1_ADDRESS = const(0x68)
# registar overview - crtl & status reg
RTC_CTRL_1 = const(0x00)
RTC_CTRL_2 = const(0x01)
# registar overview - time & data reg
RTC_SECOND_ADDR = const(0x00)
RTC_MINUTE_ADDR = const(0x01)
RTC_HOUR_ADDR = const(0x02)
RTC_WDAY_ADDR = const(0x03)
RTC_DAY_ADDR = const(0x04)
RTC_MONTH_ADDR = const(0x05)
RTC_YEAR_ADDR = const(0x06)
DAY_OF_WEEK = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]
class DS1307:
def __init__(self, i2c, addr=RTC_V1_ADDRESS):
self._addr = addr
self._i2c = i2c
def decToBcd(self, val):
return (val // 10) << 4 | (val % 10)
def bcdToDec(self, val):
return ((val >> 4) * 10) + (val & 0x0F)
    def reset(self):
        # Write the reset pattern to the first register (as bytes, matching the other writes)
        self._i2c.writeto_mem(self._addr, RTC_CTRL_1, b'\x58')
def fillByHMS(self, hour, minute, second):
self._i2c.writeto_mem(self._addr, RTC_SECOND_ADDR, bytearray([self.decToBcd(second)]))
self._i2c.writeto_mem(self._addr, RTC_MINUTE_ADDR, bytearray([self.decToBcd(minute)]))
self._i2c.writeto_mem(self._addr, RTC_HOUR_ADDR, bytearray([self.decToBcd(hour)]))
def fillByYMD(self, year, month, day):
self._i2c.writeto_mem(self._addr, RTC_DAY_ADDR, bytearray([self.decToBcd(day)]))
self._i2c.writeto_mem(self._addr, RTC_MONTH_ADDR, bytearray([self.decToBcd(month)]))
self._i2c.writeto_mem(self._addr, RTC_YEAR_ADDR, bytearray([self.decToBcd(year-2000)]))
def fillDayOfWeek(self, dayOfWeek):
self._i2c.writeto_mem(self._addr, RTC_WDAY_ADDR, bytearray([self.decToBcd(DAY_OF_WEEK.index(dayOfWeek))]))
    def startClock(self):
        # Clear the CH (clock halt) bit of the seconds register so the oscillator runs;
        # read the register explicitly rather than relying on the current register pointer
        second = self._i2c.readfrom_mem(self._addr, RTC_SECOND_ADDR, 1)[0] & 0x7f
        self._i2c.writeto_mem(self._addr, RTC_SECOND_ADDR, bytearray([second]))
def readTime(self):
rdata = self._i2c.readfrom_mem(self._addr, RTC_SECOND_ADDR, 7)
return (
self.bcdToDec(rdata[6]) + 2000, # year
self.bcdToDec(rdata[5]), # month
self.bcdToDec(rdata[4]), # day
DAY_OF_WEEK[self.bcdToDec(rdata[3])], # weekday
self.bcdToDec(rdata[2]), # hour
self.bcdToDec(rdata[1]), # minute
self.bcdToDec(rdata[0] & 0x7F), # second
)
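# --- Hedged usage sketch (not part of the original driver) ---
# Illustrative only: assumes an ESP32 with SCL on GPIO22 and SDA on GPIO21;
# adjust the pins and I2C bus id for your own board.
def _example_usage():
    from machine import I2C, Pin  # MicroPython-only import
    i2c = I2C(0, scl=Pin(22), sda=Pin(21))
    rtc = DS1307(i2c)
    rtc.fillByYMD(2021, 7, 29)   # year, month, day
    rtc.fillByHMS(12, 30, 0)     # hour, minute, second
    rtc.fillDayOfWeek("THU")
    rtc.startClock()
    print(rtc.readTime())        # e.g. (2021, 7, 29, 'THU', 12, 30, 5)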
| 34.047619
| 110
| 0.69324
|
85ca6cf9633d0895a007f833038fc88ba0402d92
| 1,430
|
py
|
Python
|
torch2trt/converters/__init__.py
|
nuhpiskin/torch2trt
|
ce39d035d067cd6596dbed60aa05b37051915c74
|
[
"MIT"
] | null | null | null |
torch2trt/converters/__init__.py
|
nuhpiskin/torch2trt
|
ce39d035d067cd6596dbed60aa05b37051915c74
|
[
"MIT"
] | null | null | null |
torch2trt/converters/__init__.py
|
nuhpiskin/torch2trt
|
ce39d035d067cd6596dbed60aa05b37051915c74
|
[
"MIT"
] | null | null | null |
# dummy converters throw warnings when an unsupported method is encountered
import tensorrt as trt
from .dummy_converters import *
# supported converters will override dummy converters
from .AdaptiveAvgPool2d import *
from .BatchNorm1d import *
from .BatchNorm2d import *
from .Conv import *
from .Conv1d import *
from .Conv2d import *
from .ConvTranspose import *
from .ConvTranspose2d import *
from .Linear import *
from .LogSoftmax import *
from .activation import *
from .adaptive_avg_pool2d import *
from .adaptive_max_pool2d import *
from .add import *
from .avg_pool import *
from .batch_norm import *
from .cat import *
from .chunk import *
from .clamp import *
from .compare import *
from .div import *
from .expand import *
from .floordiv import *
from .getitem import *
from .identity import *
from .instance_norm import *
from .interpolate import *
from .group_norm import *
from .max import *
from .max_pool2d import *
from .mean import *
from .min import *
from .mod import *
from .mul import *
from .normalize import *
from .ne import *
from .narrow import *
from .pad import *
from .permute import *
from .pow import *
from .prelu import *
from .prod import *
from .relu import *
from .relu6 import *
from .sigmoid import *
from .softmax import *
from .split import *
from .stack import *
from .sub import *
from .sum import *
from .tanh import *
from .tensor import *
from .transpose import *
from .unary import *
from .view import *
| 23.064516
| 53
| 0.74965
|
1d07c1ee206e1db1c91d6b827c3c492d1f7b65e5
| 735
|
py
|
Python
|
techgig.py
|
heykush/Coding-Challenge-Scaper
|
da4c88d93faa21b3562b937d70575cb777010f5a
|
[
"Apache-2.0"
] | null | null | null |
techgig.py
|
heykush/Coding-Challenge-Scaper
|
da4c88d93faa21b3562b937d70575cb777010f5a
|
[
"Apache-2.0"
] | null | null | null |
techgig.py
|
heykush/Coding-Challenge-Scaper
|
da4c88d93faa21b3562b937d70575cb777010f5a
|
[
"Apache-2.0"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep
# Path to a local chromedriver binary; update this for your own machine.
driver = webdriver.Chrome(r"C:\Users\gkush\Downloads\chromedriver.exe")
url = "https://www.techgig.com/challenge"
r = requests.get(url)  # plain GET kept from the original; the listing itself is rendered client-side
driver.get(url)
sleep(2)  # give the JavaScript-rendered listing time to load before reading the page
content = driver.page_source.encode('utf-8').strip()
soup = BeautifulSoup(content, "html.parser")
# Each contest card on the listing page uses one of two CSS classes.
ksite = driver.find_elements_by_xpath('//*[@class="contest-listing"]')
liv = []
for i in ksite:
    lin = i.find_elements_by_xpath('//*[@class="contest-box prize-hiring-1"]')
    liv.extend(lin)
    lin1 = i.find_elements_by_xpath('//*[@class="contest-box prize-hiring-2"]')
    liv.extend(lin1)
for j in liv:
    print("\n=======================\n", j.text, end="\n")
| 27.222222
| 75
| 0.682993
|
cee0741bc2e3be8859b9ae592096c3453fd46ce9
| 4,359
|
py
|
Python
|
texar/modules/networks/conv_networks_test.py
|
Holmeswww/Text_Infilling
|
f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3
|
[
"Apache-2.0"
] | 87
|
2019-09-11T04:41:46.000Z
|
2022-03-23T02:37:52.000Z
|
texar/modules/networks/conv_networks_test.py
|
ysglh/texar
|
9c699e8143fd8ecb5d65a41ceef09c45832b9258
|
[
"Apache-2.0"
] | 10
|
2019-10-01T16:09:17.000Z
|
2021-10-19T21:20:11.000Z
|
texar/modules/networks/conv_networks_test.py
|
ysglh/texar
|
9c699e8143fd8ecb5d65a41ceef09c45832b9258
|
[
"Apache-2.0"
] | 20
|
2019-09-13T16:32:37.000Z
|
2021-06-03T07:14:11.000Z
|
#
"""
Unit tests for conv networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import texar as tx
from texar.modules.networks.conv_networks import Conv1DNetwork
class Conv1DNetworkTest(tf.test.TestCase):
"""Tests :class:`~texar.modules.Conv1DNetwork` class.
"""
def test_feedforward(self):
"""Tests feed forward.
"""
network_1 = Conv1DNetwork()
self.assertEqual(len(network_1.layers), 4)
self.assertTrue(isinstance(network_1.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in network_1.layers[0].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_1 = tf.ones([64, 16, 300], tf.float32)
outputs_1 = network_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
"other_conv_kwargs": {"padding": "same"},
# Pooling layers
"pooling": "AveragePooling",
"pool_size": 2,
"pool_strides": 1,
# Dense layers
"num_dense_layers": 3,
"dense_size": [128, 128, 10],
"dense_activation": "relu",
"other_dense_kwargs": {"use_bias": False},
# Dropout
"dropout_conv": [0, 1, 2],
"dropout_dense": 2
}
network_2 = Conv1DNetwork(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(network_2.layers), 1+1+1+3+4+1)
self.assertTrue(isinstance(network_2.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in network_2.layers[1].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_2 = tf.ones([64, 16, 300], tf.float32)
outputs_2 = network_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
def test_unknown_seq_length(self):
"""Tests use of pooling layer when the seq_length dimension of inputs
is `None`.
"""
network_1 = Conv1DNetwork()
inputs_1 = tf.placeholder(tf.float32, [64, None, 300])
outputs_1 = network_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
# Pooling layers
"pooling": "AveragePooling",
"pool_size": [2, None],
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
network = Conv1DNetwork(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(network.layers), 1+1+1+1+1+1)
self.assertTrue(isinstance(network.layer_by_name('pool_2'),
tx.core.AverageReducePooling1D))
inputs = tf.placeholder(tf.float32, [64, None, 300])
outputs = network(inputs)
self.assertEqual(outputs.shape, [64, 10])
hparams_2 = {
# Conv layers
"num_conv_layers": 1,
"filters": 128,
"kernel_size": 4,
"other_conv_kwargs": {'data_format': 'channels_first'},
# Pooling layers
"pooling": "MaxPooling",
"other_pool_kwargs": {'data_format': 'channels_first'},
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
network_2 = Conv1DNetwork(hparams_2)
inputs_2 = tf.placeholder(tf.float32, [64, 300, None])
outputs_2 = network_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
def test_mask_input(self):
"""Tests masked inputs.
"""
network_1 = Conv1DNetwork()
inputs_1 = tf.ones([3, 16, 300], tf.float32)
seq_length = [10, 15, 1]
outputs_1 = network_1(inputs_1, sequence_length=seq_length)
self.assertEqual(outputs_1.shape, [3, 128])
if __name__ == "__main__":
tf.test.main()
| 34.322835
| 77
| 0.571691
|
c97227d163fc11631da08a508114cfc008987e3f
| 1,929
|
py
|
Python
|
tests/ipc_test.py
|
atikur/mythril
|
d0a3858400b727bdc5c927487e0dcc69ca1ef14d
|
[
"MIT"
] | 2
|
2018-04-13T00:07:28.000Z
|
2022-02-15T23:18:14.000Z
|
tests/ipc_test.py
|
vanessabridge/mythril
|
fb28d30f880057a735f0a932ad4c9bff4d50c2c3
|
[
"MIT"
] | null | null | null |
tests/ipc_test.py
|
vanessabridge/mythril
|
fb28d30f880057a735f0a932ad4c9bff4d50c2c3
|
[
"MIT"
] | 1
|
2020-10-11T21:49:05.000Z
|
2020-10-11T21:49:05.000Z
|
from unittest import TestCase
from mythril.ipc.client import EthIpc
class IpcTest(TestCase):
client = None
def setUp(self):
self.client = EthIpc()
def test_eth_coinbase(self):
coinbase = self.client.eth_coinbase()
self.assertTrue(coinbase.startswith("0x"), "coinbase should be a hex string")
self.assertEqual(len(coinbase), 42, "coinbase is a string with length of 42")
def test_eth_blockNumber(self):
block_number = self.client.eth_blockNumber()
self.assertGreater(block_number, 0, "we have made sure the blockNumber is > 0 for testing")
def test_eth_getBalance(self):
balance = self.client.eth_getBalance(address="0x0000000000000000000000000000000000000000")
self.assertGreater(balance, 10000000, "specified address should have a lot of balance")
def test_eth_getStorageAt(self):
storage = self.client.eth_getStorageAt(address="0x0000000000000000000000000000000000000000")
self.assertEqual(storage, '0x0000000000000000000000000000000000000000000000000000000000000000')
def test_eth_getBlockByNumber(self):
block = self.client.eth_getBlockByNumber(0)
self.assertEqual(block["extraData"], "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa", "the data of the first block should be right")
def test_eth_getCode(self):
# TODO: can't find a proper address for getting code
code = self.client.eth_getCode(address="0x0000000000000000000000000000000000000001")
self.assertEqual(code, "0x")
def test_eth_getTransactionReceipt(self):
transaction = self.client.eth_getTransactionReceipt(tx_hash="0xe363505adc6b2996111f8bd774f8653a61d244cc6567b5372c2e781c6c63b737")
self.assertEqual(transaction["from"], "0x22f2dcff5ad78c3eb6850b5cb951127b659522e6")
self.assertEqual(transaction["to"], "0x0000000000000000000000000000000000000000")
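# --- Hedged usage sketch (not part of the original tests) ---
# Mirrors the calls exercised above; requires a running node with an IPC
# endpoint, so this is illustrative only.
def _example_queries():
    client = EthIpc()
    print(client.eth_blockNumber())
    print(client.eth_getBalance(address="0x0000000000000000000000000000000000000000"))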
| 47.04878
| 161
| 0.75324
|
9f00ed665a3d7e3d865a1f146ea9ed4c15687557
| 11,833
|
py
|
Python
|
safe_transaction_service/notifications/tests/test_views.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 2
|
2021-09-29T23:54:40.000Z
|
2021-11-26T15:22:15.000Z
|
safe_transaction_service/notifications/tests/test_views.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 8
|
2022-03-15T18:39:45.000Z
|
2022-03-28T01:28:13.000Z
|
safe_transaction_service/notifications/tests/test_views.py
|
kanhirun/safe-transaction-service
|
9bd6103be7d77469a337b6f02c8e0693e7951e4c
|
[
"MIT"
] | 1
|
2022-03-29T12:19:51.000Z
|
2022-03-29T12:19:51.000Z
|
import time
import uuid
from unittest import mock
from django.urls import reverse
from eth_account import Account
from rest_framework import status
from rest_framework.test import APITestCase
from gnosis.safe.tests.safe_test_case import SafeTestCaseMixin
from safe_transaction_service.history.tests.factories import \
SafeContractFactory
from safe_transaction_service.notifications.models import (FirebaseDevice,
FirebaseDeviceOwner)
from ..utils import calculate_device_registration_hash
from .factories import FirebaseDeviceFactory, FirebaseDeviceOwnerFactory
class TestNotificationViews(SafeTestCaseMixin, APITestCase):
def test_notifications_devices_create_view(self):
response = self.client.post(reverse('v1:notifications:devices'))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
safe_address = Account.create().address
safe_contract = SafeContractFactory(address=safe_address)
self.assertEqual(FirebaseDevice.objects.count(), 0)
data = {
'safes': [safe_address],
'cloudMessagingToken': 'A' * 163,
'buildNumber': 0,
'bundle': 'company.package.app',
'deviceType': 'WEB',
'version': '2.0.1',
}
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(FirebaseDevice.objects.count(), 1)
device_uuid = response.data['uuid']
self.assertTrue(uuid.UUID(device_uuid))
# Same request should return a 400 because a new device with same push token cannot be created
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Request with `uuid` should not create a new object
data['uuid'] = device_uuid
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(FirebaseDevice.objects.count(), 1)
# Changing the token and using the uuid will change the cloud messaging token
data['cloudMessagingToken'] = 'B' * 163
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(FirebaseDevice.objects.count(), 1)
self.assertEqual(FirebaseDevice.objects.first().cloud_messaging_token, data['cloudMessagingToken'])
# Add the same FirebaseDevice to another Safe
safe_contract_2 = SafeContractFactory()
data['safes'].append(safe_contract_2.address)
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(safe_contract.firebase_devices.count(), 1)
self.assertEqual(safe_contract_2.firebase_devices.count(), 1)
self.assertEqual(FirebaseDevice.objects.count(), 1)
self.assertEqual(FirebaseDevice.objects.first().safes.count(), 2)
# Use not valid deviceType
previous_device_type = data['deviceType']
data['deviceType'] = 'RANGER-MORPHER'
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertIn('is not a valid choice', response.content.decode())
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(safe_contract.firebase_devices.count(), 1)
data['deviceType'] = previous_device_type
# Use not valid version
previous_version = data['version']
data['version'] = 'Megazord'
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertIn('Semantic version was expected', response.content.decode())
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(safe_contract.firebase_devices.count(), 1)
data['version'] = previous_version
# Remove one of the Safes
data['safes'] = [safe_contract_2.address]
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(safe_contract.firebase_devices.count(), 0)
self.assertEqual(safe_contract_2.firebase_devices.count(), 1)
def test_notifications_devices_create_with_signatures_view(self):
safe_address = Account.create().address
safe_contract = SafeContractFactory(address=safe_address)
owner_account = Account.create()
owner_account_2 = Account.create()
self.assertEqual(FirebaseDevice.objects.count(), 0)
unique_id = uuid.uuid4()
timestamp = int(time.time())
cloud_messaging_token = 'A' * 163
safes = [safe_address]
hash_to_sign = calculate_device_registration_hash(timestamp,
unique_id,
cloud_messaging_token,
safes)
signatures = [owner_account.signHash(hash_to_sign)['signature'].hex()]
data = {
'uuid': unique_id,
'safes': [safe_address],
'cloudMessagingToken': cloud_messaging_token,
'buildNumber': 0,
'bundle': 'company.package.app',
'deviceType': 'WEB',
'version': '2.0.1',
'timestamp': timestamp,
'signatures': signatures,
}
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
# self.assertIn('is not an owner of any of the safes', str(response.data['non_field_errors']))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['owners_registered'], [])
self.assertEqual(response.data['owners_not_registered'], [owner_account.address])
with mock.patch('safe_transaction_service.notifications.serializers.get_safe_owners',
return_value=[owner_account.address]):
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['uuid'], str(unique_id))
self.assertEqual(FirebaseDevice.objects.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.first().owner, owner_account.address)
# Add another signature
signatures.append(owner_account_2.signHash(hash_to_sign)['signature'].hex())
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
# self.assertIn('is not an owner of any of the safes', str(response.data['non_field_errors']))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['owners_registered'], [owner_account.address])
self.assertEqual(response.data['owners_not_registered'], [owner_account_2.address])
with mock.patch('safe_transaction_service.notifications.serializers.get_safe_owners',
return_value=[owner_account.address, owner_account_2.address]):
response = self.client.post(reverse('v1:notifications:devices'), format='json', data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['owners_registered'], [owner_account.address, owner_account_2.address])
self.assertEqual(response.data['owners_not_registered'], [])
self.assertEqual(FirebaseDevice.objects.count(), 1)
self.assertCountEqual(FirebaseDeviceOwner.objects.values_list('owner', flat=True),
[owner_account.address, owner_account_2.address])
def test_notifications_devices_delete_view(self):
safe_contract = SafeContractFactory()
firebase_device = FirebaseDeviceFactory()
firebase_device.safes.add(safe_contract)
device_id = firebase_device.uuid
FirebaseDeviceOwnerFactory(firebase_device=firebase_device)
self.assertEqual(FirebaseDevice.objects.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 1)
response = self.client.delete(reverse('v1:notifications:devices-delete', args=(device_id,)), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(FirebaseDevice.objects.count(), 0)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 0)
# Try to delete again if not exists
response = self.client.delete(reverse('v1:notifications:devices-delete', args=(device_id,)), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_notifications_devices_safe_delete_view(self):
safe_contract = SafeContractFactory()
firebase_device = FirebaseDeviceFactory()
firebase_device_owner = FirebaseDeviceOwnerFactory(firebase_device=firebase_device)
not_related_firebase_device_owner = FirebaseDeviceOwnerFactory()
firebase_device.safes.add(safe_contract)
device_id = firebase_device.uuid
# Test not existing `safe_contract`, even if `device_id` is correct
random_safe_address = Account.create().address
self.assertEqual(firebase_device.safes.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 2)
response = self.client.delete(reverse('v1:notifications:devices-safes-delete',
args=(device_id, random_safe_address)), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(firebase_device.safes.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 2)
# Happy path
response = self.client.delete(reverse('v1:notifications:devices-safes-delete',
args=(device_id, safe_contract.address)), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(firebase_device.safes.count(), 0)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 1)
self.assertEqual(FirebaseDeviceOwner.objects.get(), not_related_firebase_device_owner)
# Try to delete again and get the same result even if the Safe is not linked
response = self.client.delete(reverse('v1:notifications:devices-safes-delete',
args=(device_id, safe_contract.address)), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(firebase_device.safes.count(), 0)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 1)
# Remove not existing Safe should not trigger an error
response = self.client.delete(reverse('v1:notifications:devices-safes-delete',
args=(device_id, Account.create().address)), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(firebase_device.safes.count(), 0)
self.assertEqual(FirebaseDeviceOwner.objects.count(), 1)
| 55.294393
| 115
| 0.681653
|
ef652e2d3a91ac44dcf5abe6fc8889ab26a28b6c
| 8,954
|
py
|
Python
|
src/sasctl/_services/microanalytic_score.py
|
InvalidPointer/python-sasctl
|
bc40918fcde7439b9c2eff0e286469354b2e9fc9
|
[
"Apache-2.0"
] | null | null | null |
src/sasctl/_services/microanalytic_score.py
|
InvalidPointer/python-sasctl
|
bc40918fcde7439b9c2eff0e286469354b2e9fc9
|
[
"Apache-2.0"
] | null | null | null |
src/sasctl/_services/microanalytic_score.py
|
InvalidPointer/python-sasctl
|
bc40918fcde7439b9c2eff0e286469354b2e9fc9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""A stateless, memory-resident, high-performance program execution service."""
import re
from collections import OrderedDict
from math import isnan
import six
from .service import Service
class MicroAnalyticScore(Service):
"""Micro Analytic Service (MAS) client."""
_SERVICE_ROOT = '/microanalyticScore'
@classmethod
def is_uuid(cls, id):
"""Check if the ID appears to be a valid MAS id.
Indicates whether `id` appears to be a correctly formatted ID. Does
**not** check whether a module with `id` actually exists.
Parameters
----------
id : str
Returns
-------
bool
Notes
-----
Overrides the :meth:`Service.is_uuid` method since MAS modules do
not currently use IDs that are actually UUIDs.
"""
# Anything that consists of only numbers, lowercase letters,
# and underscores, and does not start with a number, looks like a
# MAS id.
return re.match('^[_a-z][_a-z0-9]+$', id) is not None
list_modules, get_module, update_module, \
delete_module = Service._crud_funcs('/modules', 'module')
def get_module_step(self, module, step):
"""Details of a single step in a given module.
Parameters
----------
module : str or dict
Name, id, or dictionary representation of a module
step : str
Name of the step
Returns
-------
RestObj
"""
module = self.get_module(module)
r = self.get('/modules/{}/steps/{}'.format(module.id, step))
return r
def list_module_steps(self, module):
"""List all steps defined for a module.
Parameters
----------
module : str or dict
Name, id, or dictionary representation of a module
Returns
-------
list
List of :class:`.RestObj` instances representing each step.
"""
module = self.get_module(module)
steps = self.get('/modules/{}/steps'.format(module.id))
return steps if isinstance(steps, list) else [steps]
def execute_module_step(self, module, step, return_dict=True, **kwargs):
"""Call a module step with the given parameters.
Parameters
----------
module : str or dict
Name, id, or dictionary representation of a module
step : str
Name of the step
return_dict : bool, optional
Whether the results should be returned as a dictionary instead
of a tuple
kwargs : any
Passed as arguments to the module step
Returns
-------
any
Results of the step execution. Returned as a dictionary if
`return_dict` is True, otherwise returned as a tuple if more
than one value is returned, otherwise the single value.
"""
module_name = module.name if hasattr(module, 'name') else str(module)
module = self.get_module(module)
if module is None:
raise ValueError("Module '{}' was not found.".format(module_name))
module = module.id
step = step.id if hasattr(step, 'id') else step
# Make sure all inputs are JSON serializable
# Common types such as numpy.int64 and numpy.float64 are NOT serializable
for k in kwargs.keys():
type_name = type(kwargs[k]).__name__
if type_name == 'float64':
kwargs[k] = float(kwargs[k])
elif type_name == 'int64':
kwargs[k] = int(kwargs[k])
body = {'inputs': [{'name': k, 'value': v}
for k, v in six.iteritems(kwargs)]}
# Convert NaN to None (null) before calling MAS
for input in body['inputs']:
try:
if isnan(input['value']):
input['value'] = None
except TypeError:
pass
r = self.post('/modules/{}/steps/{}'.format(module, step), json=body)
# Convert list of name/value pair dictionaries to single dict
outputs = OrderedDict()
for output in r.get('outputs', []):
k, v = output['name'], output.get('value')
# Remove padding from CHAR columns
if isinstance(v, str):
v = v.strip()
outputs[k] = v
if return_dict:
# Return results as k=v pairs
return outputs
else:
# Return only the values, as if calling another Python function.
outputs = tuple(outputs.values())
if len(outputs) == 1:
return outputs[0]
else:
return outputs
def create_module(self, name=None, description=None, source=None,
language='python', scope='public'):
"""Create a new module in MAS.
Parameters
----------
name : str
description : str
source : str
language : str { 'python', 'ds2' }
scope : str { 'public', 'private' }
Returns
-------
RestObj
"""
if source is None:
raise ValueError('The `source` parameter is required.')
else:
source = str(source)
if language == 'python':
t = 'text/x-python'
elif language == 'ds2':
t = 'text/vnd.sas.source.ds2'
else:
raise ValueError('Unrecognized source code language `%s`.'
% language)
data = {'id': name,
'type': t,
'description': description,
'source': source,
'scope': scope}
r = self.post('/modules', json=data)
return r
def define_steps(self, module):
"""Map MAS steps to Python methods.
Defines python methods on a module that automatically call the
corresponding MAS steps.
Parameters
----------
module : str or dict
Name, id, or dictionary representation of a module
Returns
-------
module
"""
import types
module = self.get_module(module)
# Define a method for each step of the module
for id in module.get('stepIds', []):
step = self.get_module_step(module, id)
# Method should have an argument for each parameter of the step
arguments = [k['name'] for k in step.inputs]
arg_types = [k['type'] for k in step.inputs]
# Format call to execute_module_step()
call_params = ['{}={}'.format(i, i) for i in arguments]
# Set type hints for the function
type_string = ' # type: ({})'.format(', '.join(arg_types))
# Method signature
signature = 'def _%s_%s(%s, **kwargs):' \
% (module.name,
step.id,
', '.join(a for a in arguments))
# MAS always lower-cases variable names
# Since the original Python variables may have a different case,
# allow kwargs to be used to input alternative caps
arg_checks = ['for k in kwargs.keys():']
for arg in arguments:
arg_checks.append(" if k.lower() == '%s':" % arg.lower())
arg_checks.append(" %s = kwargs[k]" % arg)
arg_checks.append(" continue")
# Full method source code
# Drops 'rc' and 'msg' from return values
code = (signature,
type_string,
' """Execute step %s of module %s."""' % (step, module),
'\n'.join([' %s' % a for a in arg_checks]),
' r = execute_module_step(module, step, {})'.format(
', '.join(call_params)),
' r.pop("rc", None)',
' r.pop("msg", None)',
' if len(r) == 1:',
' return r.popitem()[1]',
' return tuple(v for v in r.values())'
)
code = '\n'.join(code)
compiled = compile(code, '<string>', 'exec')
env = globals().copy()
env.update({'execute_module_step': self.execute_module_step,
'module': module,
'step': step})
func = types.FunctionType(compiled.co_consts[0],
env,
argdefs=tuple(None for x in arguments))
setattr(module, step.id, func)
return module
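# --- Hedged usage sketch (not part of the original service) ---
# Illustrative only: the host, credentials, module name and step inputs below
# are assumptions; real names depend on what has been published to MAS.
def _example_usage():
    from sasctl import Session  # assumes the package's top-level Session class

    with Session('example.sas.com', 'username', 'password'):
        mas = MicroAnalyticScore()
        module = mas.get_module('my_model')                    # hypothetical module name
        outputs = mas.execute_module_step(module, 'score',     # hypothetical step name
                                          var1=1.5, var2='A')  # hypothetical inputs
        print(outputs)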
| 31.528169
| 81
| 0.517199
|
e57e35683a98e760ffc2ccbea67e42947c9c3092
| 62,079
|
py
|
Python
|
torchbenchmark/models/hf_Bert/modeling.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/models/hf_Bert/modeling.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/models/hf_Bert/modeling.py
|
puririshi98/benchmark
|
79f554f1e1cf36f62994c78e0e6e5b360f554022
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils import checkpoint
cached_path = lambda x: x
from torch.nn import Module
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
TF_WEIGHTS_NAME = 'model.ckpt'
def load_tf_weights_in_bert(model, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
        print("Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
print("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m"] for n in name):
print("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.ascontiguousarray(np.transpose(array))
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
def gelu(x):
    # exact GELU: x/2 * (1 + erf(x / sqrt(2))); 1.41421 approximates sqrt(2)
    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
# used only for Triton inference
def bias_gelu(bias, y):
    x = bias + y
    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
# used specifically for training since torch.nn.functional.gelu breaks ONNX export
def bias_gelu_training(bias, y):
x = bias + y
return torch.nn.functional.gelu(x) # Breaks ONNX export
def bias_tanh(bias, y):
x = bias + y
return torch.tanh(x)
def swish(x):
return x * torch.sigmoid(x)
#torch.nn.functional.gelu(x) # Breaks ONNX export
ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish}
class LinearActivation(Module):
r"""Fused Linear and activation Module.
"""
__constants__ = ['bias']
def __init__(self, in_features, out_features, act='gelu', bias=True):
super(LinearActivation, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.act_fn = nn.Identity() #
self.biased_act_fn = None #
self.bias = None #
if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript
if bias and not 'bias' in act: # compatibility
act = 'bias_' + act #
self.biased_act_fn = ACT2FN[act] #
else:
self.act_fn = ACT2FN[act]
else:
self.act_fn = act
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
        if self.bias is not None:
return self.biased_act_fn(self.bias, F.linear(input, self.weight, None))
else:
return self.act_fn(F.linear(input, self.weight, self.bias))
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
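# --- Hedged usage sketch (not part of the original model code) ---
# LinearActivation fuses a Linear layer with its activation: with bias=True and
# act='gelu' it applies bias_gelu to the un-biased matmul. Shapes are illustrative.
def _example_linear_activation():
    layer = LinearActivation(16, 32, act='gelu', bias=True)
    out = layer(torch.randn(4, 16))
    print(out.shape)  # torch.Size([4, 32])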
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
output_all_encoded_layers=False):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.output_all_encoded_layers = output_all_encoded_layers
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
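# --- Illustrative example (editor's addition) ---
# A BertConfig can be built from an int vocabulary size plus keyword overrides, or
# round-tripped through a dict / JSON string. The sizes below are made-up example values.
def _demo_bert_config_roundtrip():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    clone = BertConfig.from_dict(config.to_dict())   # reconstructs an equivalent config
    return clone.to_json_string()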
class BertNonFusedLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertNonFusedLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u)
s = s * s
s = s.mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
try:
import apex
#apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
import apex.normalization
from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
#apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
#BertLayerNorm = apex.normalization.FusedLayerNorm
APEX_IS_AVAILABLE = True
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
#BertLayerNorm = BertNonFusedLayerNorm
APEX_IS_AVAILABLE = False
# class BertLayerNorm(Module):
# def __init__(self, hidden_size, eps=1e-12):
# super(BertLayerNorm, self).__init__()
# self.shape = torch.Size((hidden_size,))
# self.eps = eps
# self.weight = nn.Parameter(torch.ones(hidden_size))
# self.bias = nn.Parameter(torch.zeros(hidden_size))
# self.apex_enabled = APEX_IS_AVAILABLE
# @torch.jit.unused
# def fused_layer_norm(self, x):
# return FusedLayerNormAffineFunction.apply(
# x, self.weight, self.bias, self.shape, self.eps)
# def forward(self, x):
# if self.apex_enabled and not torch.jit.is_scripting():
# x = self.fused_layer_norm(x)
# else:
# u = x.mean(-1, keepdim=True)
# s = (x - u)
# s = s * s
# s = s.mean(-1, keepdim=True)
# x = (x - u) / torch.sqrt(s + self.eps)
# x = self.weight * x + self.bias
# return x
BertLayerNorm = nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
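# --- Illustrative example (editor's addition) ---
# The embedding layer sums word, position and token-type embeddings and then applies
# LayerNorm and dropout. The tiny config below uses made-up sizes purely for illustration.
def _demo_bert_embeddings():
    cfg = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                     num_hidden_layers=1, num_attention_heads=2, intermediate_size=32,
                     max_position_embeddings=32, type_vocab_size=2)
    emb = BertEmbeddings(cfg)
    input_ids = torch.LongTensor([[1, 2, 3], [4, 5, 0]])
    token_type_ids = torch.zeros_like(input_ids)
    return emb(input_ids, token_type_ids).shape      # torch.Size([2, 3, 16])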
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 1, 3)
def transpose_key_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = torch.reshape(x, new_x_shape)
return x.permute(0, 2, 3, 1)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_key_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer)
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = F.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = torch.reshape(context_layer, new_context_layer_shape)
return context_layer
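# --- Illustrative example (editor's addition) ---
# transpose_for_scores reshapes [batch, seq, hidden] to [batch, heads, seq, head_size] and
# transpose_key_for_scores to [batch, heads, head_size, seq], so the matmul in forward()
# yields [batch, heads, seq, seq] attention scores directly. Made-up sizes for illustration.
def _demo_self_attention_shapes():
    cfg = BertConfig(vocab_size_or_config_json_file=100, hidden_size=16,
                     num_hidden_layers=1, num_attention_heads=2, intermediate_size=32)
    attn = BertSelfAttention(cfg)
    hidden = torch.randn(2, 5, 16)
    mask = torch.zeros(2, 1, 1, 5)    # additive mask: 0.0 keeps a position, -10000.0 masks it
    return attn(hidden, mask).shape   # torch.Size([2, 5, 16])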
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.output_all_encoded_layers = config.output_all_encoded_layers
self._checkpoint_activations = False
@torch.jit.unused
def checkpointed_forward(self, hidden_states, attention_mask):
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
l = 0
num_layers = len(self.layer)
chunk_length = math.ceil(math.sqrt(num_layers))
while l < num_layers:
hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1)
l += chunk_length
return hidden_states
def forward(self, hidden_states, attention_mask):
all_encoder_layers = []
if self._checkpoint_activations:
hidden_states = self.checkpointed_forward(hidden_states, attention_mask)
else:
for i,layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask)
if self.output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not self.output_all_encoded_layers or self._checkpoint_activations:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
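# Editor's note (illustrative): checkpointed_forward splits the stack into chunks of
# ceil(sqrt(num_layers)) layers and re-computes activations chunk by chunk to save memory.
# For a 12-layer encoder, chunk_length = 4, so three checkpointed chunks of 4 layers each.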
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh")
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense_act(first_token_tensor)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense_act(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def checkpoint_activations(self, val):
def _apply_flag(module):
if hasattr(module, "_checkpoint_activations"):
module._checkpoint_activations=val
self.apply(_apply_flag)
def enable_apex(self, val):
def _apply_flag(module):
if hasattr(module, "apex_enabled"):
module.apex_enabled=val
self.apply(_apply_flag)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
                state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None and not from_tf:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
if from_tf:
# Directly load from a TensorFlow checkpoint
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
return model
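# Editor's note (illustrative): before loading, from_pretrained() renames legacy
# TensorFlow-style parameter names in the state dict, e.g.
#   'bert.embeddings.LayerNorm.gamma' -> 'bert.embeddings.LayerNorm.weight'
#   'bert.embeddings.LayerNorm.beta'  -> 'bert.embeddings.LayerNorm.bias'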
class BertModel(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
self.output_all_encoded_layers = config.output_all_encoded_layers
def forward(self, input_ids, token_type_ids, attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not self.output_all_encoded_layers:
encoded_layers = encoded_layers[-1:]
return encoded_layers, pooled_output
class BertForPreTraining(BertPreTrainedModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask):
encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
return prediction_scores, seq_relationship_score
class BertForMaskedLM(BertPreTrainedModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(BertPreTrainedModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(BertPreTrainedModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask)
pooled_output = self.dropout(pooled_output)
return self.classifier(pooled_output)
class BertForMultipleChoice(BertPreTrainedModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(BertPreTrainedModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(BertPreTrainedModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
Outputs:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids, attention_mask):
encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
sequence_output = encoded_layers[-1]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
return start_logits, end_logits
| 48.423557 | 134 | 0.65378 |
2624c148bae85093e9ddb4a311244a475455bd01 | 5,497 | py | Python | ci/build.py | TheButlah/SlimeVR-Tracker-ESP | 0ec39b41eb59e4dd0b411f6407a34a3ffded3aea | ["MIT"] | 4 | 2022-01-06T14:41:10.000Z | 2022-03-11T02:27:47.000Z | ci/build.py | TheButlah/SlimeVR-Tracker-ESP | 0ec39b41eb59e4dd0b411f6407a34a3ffded3aea | ["MIT"] | null | null | null | ci/build.py | TheButlah/SlimeVR-Tracker-ESP | 0ec39b41eb59e4dd0b411f6407a34a3ffded3aea | ["MIT"] | null | null | null |
import json
import os
import shutil
from enum import Enum
from textwrap import dedent
from typing import List
COLOR_ESC = '\033['
COLOR_RESET = f'{COLOR_ESC}0m'
COLOR_GREEN = f'{COLOR_ESC}32m'
COLOR_RED = f'{COLOR_ESC}31m'
COLOR_CYAN = f'{COLOR_ESC}36m'
COLOR_GRAY = f'{COLOR_ESC}30;1m'
class Board(Enum):
SLIMEVR = "BOARD_SLIMEVR"
WROOM32 = "BOARD_WROOM32"
class DeviceConfiguration:
def __init__(self, platform: str, board: Board, platformio_board: str) -> None:
self.platform = platform
self.board = board
self.platformio_board = platformio_board
def get_platformio_section(self) -> str:
section = dedent(f"""
[env:{self.platformio_board}]
platform = {self.platform}
board = {self.platformio_board}""")
if self.platform == "espressif32":
section += dedent("""
lib_deps =
${env.lib_deps}
lorol/LittleFS_esp32 @ 1.0.6
""")
return section
def filename(self) -> str:
return f"{self.platformio_board}.bin"
def build_header(self) -> str:
sda = ""
scl = ""
imu_int = ""
imu_int2 = ""
battery_level = ""
leds = True
if self.board == Board.SLIMEVR:
sda = "4"
scl = "5"
imu_int = "10"
imu_int2 = "13"
battery_level = "17"
elif self.board == Board.WROOM32:
sda = "21"
scl = "22"
imu_int = "23"
imu_int2 = "25"
battery_level = "36"
else:
raise Exception(f"Unknown board: {self.board.value}")
return f"""
#define IMU IMU_BNO085
#define SECOND_IMU IMU
#define BOARD {self.board.value}
#define BATTERY_MONITOR BAT_EXTERNAL
#define PIN_IMU_SDA {sda}
#define PIN_IMU_SCL {scl}
#define PIN_IMU_INT {imu_int}
#define PIN_IMU_INT_2 {imu_int2}
#define PIN_BATTERY_LEVEL {battery_level}
#define ENABLE_LEDS {str(leds).lower()}
#define BATTERY_SHIELD_RESISTANCE 180
#define IMU_ROTATION DEG_90
#define SECOND_IMU_ROTATION DEG_90
"""
def __str__(self) -> str:
return f"{self.platform}@{self.board.value}"
def get_matrix() -> List[DeviceConfiguration]:
matrix: List[DeviceConfiguration] = []
    with open("./ci/devices.json", "r") as configFile:
        config = json.load(configFile)
for deviceConfig in config:
matrix.append(DeviceConfiguration(
deviceConfig["platform"], Board[deviceConfig["board"]], deviceConfig["platformio_board"]))
return matrix
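# Editor's note (illustrative): get_matrix() expects ./ci/devices.json to be a JSON list of
# objects with the three keys read above; a minimal, made-up file could look like:
#   [
#     {"platform": "espressif8266", "board": "SLIMEVR", "platformio_board": "esp12e"},
#     {"platform": "espressif32", "board": "WROOM32", "platformio_board": "esp32dev"}
#   ]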
def prepare() -> None:
print(f"🡢 {COLOR_CYAN}Preparation{COLOR_RESET}")
print(f" 🡢 {COLOR_GRAY}Backing up src/defines.h{COLOR_RESET}")
shutil.copy("src/defines.h", "src/defines.h.bak")
print(f" 🡢 {COLOR_GRAY}Backing up platformio.ini{COLOR_RESET}")
shutil.copy("./platformio.ini", "platformio.ini.bak")
print(f" 🡢 {COLOR_GRAY}Copying over build/platformio.ini{COLOR_RESET}")
shutil.copy("./ci/platformio.ini", "platformio.ini")
if os.path.exists("./build"):
print(f" 🡢 {COLOR_GRAY}Removing existing build folder...{COLOR_RESET}")
shutil.rmtree("./build")
print(f" 🡢 {COLOR_GRAY}Creating build folder...{COLOR_RESET}")
os.mkdir("./build")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
def cleanup() -> None:
print(f"🡢 {COLOR_CYAN}Cleanup{COLOR_RESET}")
print(f" 🡢 {COLOR_GRAY}Restoring src/defines.h...{COLOR_RESET}")
shutil.copy("src/defines.h.bak", "src/defines.h")
print(f" 🡢 {COLOR_GRAY}Removing src/defines.h.bak...{COLOR_RESET}")
os.remove("src/defines.h.bak")
print(f" 🡢 {COLOR_GRAY}Restoring platformio.ini...{COLOR_RESET}")
shutil.copy("platformio.ini.bak", "platformio.ini")
print(f" 🡢 {COLOR_GRAY}Removing platformio.ini.bak...{COLOR_RESET}")
os.remove("platformio.ini.bak")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
def build() -> int:
print(f"🡢 {COLOR_CYAN}Build{COLOR_RESET}")
failed_builds: List[str] = []
code = 0
matrix = get_matrix()
with open("./platformio.ini", "a") as f1:
for device in matrix:
f1.write(device.get_platformio_section())
for device in matrix:
print(f" 🡢 {COLOR_CYAN}Building for {device.platform}{COLOR_RESET}")
status = build_for_device(device)
        if not status:
failed_builds.append(device.platformio_board)
if len(failed_builds) > 0:
print(f" 🡢 {COLOR_RED}Failed!{COLOR_RESET}")
for failed_build in failed_builds:
print(f" 🡢 {COLOR_RED}{failed_build}{COLOR_RESET}")
code = 1
else:
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
return code
def build_for_device(device: DeviceConfiguration) -> bool:
success = True
print(f"::group::Build {device}")
with open("src/defines.h", "wt") as f:
f.write(device.build_header())
code = os.system(
f"platformio run -e {device.platformio_board}")
if code == 0:
shutil.copy(f".pio/build/{device.platformio_board}/firmware.bin",
f"build/{device.filename()}")
print(f" 🡢 {COLOR_GREEN}Success!{COLOR_RESET}")
else:
success = False
print(f" 🡢 {COLOR_RED}Failed!{COLOR_RESET}")
print(f"::endgroup::")
return success
def main() -> None:
prepare()
code = build()
cleanup()
os._exit(code)
if __name__ == "__main__":
main()
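# Editor's note (illustrative usage): the script resolves paths relative to the repository
# root ("./ci/devices.json", "src/defines.h"), so it is presumably invoked from there:
#   $ python ci/build.py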
| 25.929245 | 102 | 0.617973 |
73448355aedee533025f0389d1b1b517535a68f3 | 232 | py | Python | application.py | vanveele/docker-demo | eafd5dec9da5b103718749feaaecf4e6d3defba3 | ["MIT"] | null | null | null | application.py | vanveele/docker-demo | eafd5dec9da5b103718749feaaecf4e6d3defba3 | ["MIT"] | null | null | null | application.py | vanveele/docker-demo | eafd5dec9da5b103718749feaaecf4e6d3defba3 | ["MIT"] | null | null | null |
from flask import Flask
import os
app = Flask(__name__)
@app.route("/")
def hello():
ver = os.getenv('VERSION', 'undef')
return "Hello World! Version: {}".format(ver)
if __name__ == "__main__":
app.run(host='0.0.0.0')
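# Editor's note (illustrative usage): run the server and query it, e.g.
#   $ VERSION=1.2.3 python application.py
#   $ curl http://localhost:5000/      # Flask's default port
#   Hello World! Version: 1.2.3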
| 19.333333 | 49 | 0.633621 |
41ee78b6c443d34f7d49666d8b4e6e171ab614d4 | 412 | py | Python | detagger.py | CrysthelAparicio/html-xml-detagger | 5951d17062c2e301a37b127e5a1731d4acfe0c5a | ["MIT"] | null | null | null | detagger.py | CrysthelAparicio/html-xml-detagger | 5951d17062c2e301a37b127e5a1731d4acfe0c5a | ["MIT"] | null | null | null | detagger.py | CrysthelAparicio/html-xml-detagger | 5951d17062c2e301a37b127e5a1731d4acfe0c5a | ["MIT"] | null | null | null |
import sys
import re
r = open(sys.argv[1], 'r')
w = open(sys.argv[1] + '_detagged.txt', 'w')
regex = re.compile(r'<.*?>')                       # a complete tag contained in one line
regex_tag_to_end_of_line = re.compile(r'<.*?\n')   # a tag opened but not closed on this line
regex_start_of_line_to_tag = re.compile(r'^.*?>')  # the tail of a tag begun on an earlier line
for line in r:
line = regex.sub(' ', line)
line = regex_tag_to_end_of_line.sub('', line)
line = regex_start_of_line_to_tag.sub('',line)
w.write(line)
w.close()
r.close()
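# Editor's note (illustrative usage): pass the file to strip as the only argument, e.g.
#   $ python detagger.py page.html
# which writes the tag-free text to page.html_detagged.txt next to the input file.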
| 20.6 | 50 | 0.63835 |
4a27916aaed4e9d577d8960eb3a0d3ceb20db678 | 18,424 | py | Python | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 2 | 2019-09-26T20:56:06.000Z | 2019-11-18T21:03:27.000Z | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 6 | 2018-03-19T20:36:49.000Z | 2018-04-13T15:27:29.000Z | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 5 | 2018-10-04T18:51:02.000Z | 2020-07-02T15:31:35.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from pyomo.core.base import ConcreteModel, Set, Constraint, Var,\
Param, Objective, minimize, sqrt, exp, Suffix, Expression, value
from nmpc_mhe.aux.cpoinsc import collptsgen
from nmpc_mhe.aux.lagrange_f import lgr, lgry, lgrdot, lgrydot
from dist_col_mod import *
from six import itervalues, iterkeys, iteritems
from pyomo.opt import ProblemFormat, SolverFactory
import re, os
"""
Version 03.
Need a reference model that can initialize the reference steady-state model.
"""
__author__ = 'David M Thierry @dthierry'
class DistDiehlNegrete(ConcreteModel):
def __init__(self, nfe_t, ncp_t, **kwargs):
ConcreteModel.__init__(self)
steady = kwargs.pop('steady', False)
_t = kwargs.pop('_t', 1.0)
Ntray = kwargs.pop('Ntray', 42)
# --------------------------------------------------------------------------------------------------------------
# Orthogonal Collocation Parameters section
# Radau
self._alp_gauB_t = 1
self._bet_gauB_t = 0
if steady:
print("[I] " + str(self.__class__.__name__) + " NFE and NCP Overriden - Steady state mode")
self.nfe_t = 1
self.ncp_t = 1
else:
self.nfe_t = nfe_t
self.ncp_t = ncp_t
self.tau_t = collptsgen(self.ncp_t, self._alp_gauB_t, self._bet_gauB_t)
# start at zero
self.tau_i_t = {0: 0.}
# create a list
for ii in range(1, self.ncp_t + 1):
self.tau_i_t[ii] = self.tau_t[ii - 1]
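        # Editor's note (illustrative): with alpha=1, beta=0 collptsgen is assumed to return
        # Radau collocation points on (0, 1]; e.g. for ncp_t = 3 these are approximately
        # 0.155051, 0.644949 and 1.0, so tau_i_t = {0: 0.0, 1: 0.155051, 2: 0.644949, 3: 1.0}.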
# ======= SETS ======= #
# For finite element = 1 .. NFE
# This has to be > 0
self.fe_t = Set(initialize=[ii for ii in range(1, self.nfe_t + 1)])
# collocation points
# collocation points for differential variables
self.cp_t = Set(initialize=[ii for ii in range(0, self.ncp_t + 1)])
# collocation points for algebraic variables
self.cp_ta = Set(within=self.cp_t, initialize=[ii for ii in range(1, self.ncp_t + 1)])
# create collocation param
self.taucp_t = Param(self.cp_t, initialize=self.tau_i_t)
self.ldot_t = Param(self.cp_t, self.cp_t, initialize=
(lambda m, j, k: lgrdot(k, m.taucp_t[j], self.ncp_t, self._alp_gauB_t, self._bet_gauB_t))) #: watch out for this!
self.l1_t = Param(self.cp_t, initialize=
(lambda m, j: lgr(j, 1, self.ncp_t, self._alp_gauB_t, self._bet_gauB_t)))
# --------------------------------------------------------------------------------------------------------------
# Model parameters
self.Ntray = Ntray
self.tray = Set(initialize=[i for i in range(1, Ntray + 1)])
self.feed = Param(self.tray,
initialize=lambda m, t: 57.5294 if t == 21 else 0.0,
mutable=True)
self.xf = Param(initialize=0.32, mutable=True) # feed mole fraction
self.hf = Param(initialize=9081.3) # feed enthalpy
self.hlm0 = Param(initialize=2.6786e-04)
self.hlma = Param(initialize=-0.14779)
self.hlmb = Param(initialize=97.4289)
self.hlmc = Param(initialize=-2.1045e04)
self.hln0 = Param(initialize=4.0449e-04)
self.hlna = Param(initialize=-0.1435)
self.hlnb = Param(initialize=121.7981)
self.hlnc = Param(initialize=-3.0718e04)
self.r = Param(initialize=8.3147)
self.a = Param(initialize=6.09648)
self.b = Param(initialize=1.28862)
self.c1 = Param(initialize=1.016)
self.d = Param(initialize=15.6875)
self.l = Param(initialize=13.4721)
self.f = Param(initialize=2.615)
self.gm = Param(initialize=0.557)
self.Tkm = Param(initialize=512.6)
self.Pkm = Param(initialize=8.096e06)
self.gn = Param(initialize=0.612)
self.Tkn = Param(initialize=536.7)
self.Pkn = Param(initialize=5.166e06)
self.CapAm = Param(initialize=23.48)
self.CapBm = Param(initialize=3626.6)
self.CapCm = Param(initialize=-34.29)
self.CapAn = Param(initialize=22.437)
self.CapBn = Param(initialize=3166.64)
self.CapCn = Param(initialize=-80.15)
self.pstrip = Param(initialize=250)
self.prect = Param(initialize=190)
def _p_init(m, t):
ptray = 9.39e04
if t <= 20:
return _p_init(m, 21) + m.pstrip * (21 - t)
elif 20 < t < m.Ntray:
return ptray + m.prect * (m.Ntray - t)
elif t == m.Ntray:
return 9.39e04
self.p = Param(self.tray, initialize=_p_init)
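        # Editor's note (illustrative): the resulting pressure profile increases down the column,
        # e.g. p[42] = 9.39e4 at the top (condenser), p[21] = 9.39e4 + 190*21 = 97890, and
        # p[1] = 97890 + 250*20 = 102890 at the bottom (reboiler), in the same units as ptray.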
self.T29_des = Param(initialize=343.15)
self.T15_des = Param(initialize=361.15)
self.Dset = Param(initialize=1.83728)
self.Qcset = Param(initialize=1.618890)
self.Qrset = Param(initialize=1.786050)
# self.Recset = Param()
self.alpha_T29 = Param(initialize=1)
self.alpha_T15 = Param(initialize=1)
self.alpha_D = Param(initialize=1)
self.alpha_Qc = Param(initialize=1)
self.alpha_Qr = Param(initialize=1)
self.alpha_Rec = Param(initialize=1)
def _alpha_init(m, i):
if i <= 21:
return 0.62
else:
return 0.35
self.alpha = Param(self.tray,
initialize=lambda m, t: 0.62 if t <= 21 else 0.35)
# --------------------------------------------------------------------------------------------------------------
#: First define differential state variables (state: x, ic-Param: x_ic, derivative-Var:dx_dt
#: States (differential) section
zero_tray = dict.fromkeys(self.tray)
zero3 = dict.fromkeys(self.fe_t * self.cp_t * self.tray)
for key in zero3.keys():
zero3[key] = 0.0
def __m_init(m, i, j, t):
    if t == 1:
        return 104340.
    elif t == m.Ntray:
        return 5000.
    else:
        return 4000.
#: Liquid hold-up
self.M = Var(self.fe_t, self.cp_t, self.tray,
initialize=__m_init)
#: Mole-fraction
self.x = Var(self.fe_t, self.cp_t, self.tray, initialize=lambda m, i, j, t: 0.999 * t / m.Ntray)
#: Initial state-Param
self.M_ic = zero_tray if steady else Param(self.tray, initialize=0.0, mutable=True)
self.x_ic = zero_tray if steady else Param(self.tray, initialize=0.0, mutable=True)
#: Derivative-var
self.dM_dt = zero3 if steady else Var(self.fe_t, self.cp_t, self.tray, initialize=0.0)
self.dx_dt = zero3 if steady else Var(self.fe_t, self.cp_t, self.tray, initialize=0.0)
# --------------------------------------------------------------------------------------------------------------
# States (algebraic) section
# Tray temperature
self.T = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: ((370.781 - 335.753) / m.Ntray) * t + 370.781)
self.Tdot = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e-05) #: Not really a der_var
# saturation pressures
self.pm = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e4)
self.pn = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e4)
# Vapor mole flowrate
self.V = Var(self.fe_t, self.cp_ta, self.tray, initialize=44.0)
def _l_init(m, i, j, t):
if 2 <= t <= 21:
return 83.
elif 22 <= t <= 42:
return 23
elif t == 1:
return 40
# Liquid mole flowrate
self.L = Var(self.fe_t, self.cp_ta, self.tray, initialize=_l_init)
# Vapor mole frac & diff var
self.y = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: ((0.99 - 0.005) / m.Ntray) * t + 0.005)
# Liquid enthalpy # enthalpy
self.hl = Var(self.fe_t, self.cp_ta, self.tray, initialize=10000.)
# Liquid enthalpy # enthalpy
self.hv = Var(self.fe_t, self.cp_ta, self.tray, initialize=5e+04)
# Re-boiler & condenser heat
self.Qc = Var(self.fe_t, self.cp_ta, initialize=1.6e06)
self.D = Var(self.fe_t, self.cp_ta, initialize=18.33)
# vol holdups
self.Vm = Var(self.fe_t, self.cp_ta, self.tray, initialize=6e-05)
self.Mv = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: 0.23 if 1 < t < m.Ntray else 0.0)
self.Mv1 = Var(self.fe_t, self.cp_ta, initialize=8.57)
self.Mvn = Var(self.fe_t, self.cp_ta, initialize=0.203)
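#: Element lengths for the time discretization. Note (added, hedged): ``_t`` below is
#: assumed to be the total time horizon passed to the model's constructor earlier in
#: this file; with ``steady=True`` every element length is simply 1.0.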
hi_t = dict.fromkeys(self.fe_t)
for key in hi_t.keys():
hi_t[key] = 1.0 if steady else _t/self.nfe_t
self.hi_t = hi_t if steady else Param(self.fe_t, initialize=hi_t)
# --------------------------------------------------------------------------------------------------------------
#: Controls
self.u1 = Param(self.fe_t, initialize=7.72700925775773761472464684629813E-01, mutable=True) #: Dummy
self.u2 = Param(self.fe_t, initialize=1.78604740940007800236344337463379E+06, mutable=True) #: Dummy
self.Rec = Var(self.fe_t, initialize=7.72700925775773761472464684629813E-01)
self.Qr = Var(self.fe_t, initialize=1.78604740940007800236344337463379E+06)
# --------------------------------------------------------------------------------------------------------------
#: Constraints for the differential states
#: Then the ode-Con:de_x, collocation-Con:dvar_t_x, noisy-Expr: noisy_x, cp-Constraint: cp_x, initial-Con: x_icc
#: Differential equations
self.de_M = Constraint(self.fe_t, self.cp_ta, self.tray, rule=m_ode)
self.de_x = Constraint(self.fe_t, self.cp_ta, self.tray, rule=x_ode)
#: Collocation equations
self.dvar_t_M = None if steady else Constraint(self.fe_t, self.cp_ta, self.tray, rule=M_COLL)
self.dvar_t_x = None if steady else Constraint(self.fe_t, self.cp_ta, self.tray, rule=x_coll)
#: Continuation equations (redundancy here)
if self.nfe_t > 1:
#: Noisy expressions
self.noisy_M = None if steady else Expression(self.fe_t, self.tray, rule=M_CONT)
self.noisy_x = None if steady else Expression(self.fe_t, self.tray, rule=x_cont)
#: Continuation equations
self.cp_M = None if steady else \
Constraint(self.fe_t, self.tray,
rule=lambda m, i, t: self.noisy_M[i, t] == 0.0 if i < self.nfe_t else Constraint.Skip)
self.cp_x = None if steady else \
Constraint(self.fe_t, self.tray,
rule=lambda m, i, t: self.noisy_x[i, t] == 0.0 if i < self.nfe_t else Constraint.Skip)
#: Initial condition-Constraints
self.M_icc = None if steady else Constraint(self.tray, rule=acm)
self.x_icc = None if steady else Constraint(self.tray, rule=acx)
# --------------------------------------------------------------------------------------------------------------
#: Constraint section (algebraic equations)
self.hrc = Constraint(self.fe_t, self.cp_ta, rule=hrc)
self.gh = Constraint(self.fe_t, self.cp_ta, self.tray, rule=gh)
self.ghb = Constraint(self.fe_t, self.cp_ta, rule=ghb)
self.ghc = Constraint(self.fe_t, self.cp_ta, rule=ghc)
self.hkl = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hkl)
self.hkv = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hkv)
self.lpself = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lpm)
self.lpn = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lpn)
self.dp = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dp)
self.lTdot = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lTdot)
self.gy0 = Constraint(self.fe_t, self.cp_ta, rule=gy0)
self.gy = Constraint(self.fe_t, self.cp_ta, self.tray, rule=gy)
self.dMV = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dMV)
self.dMv1 = Constraint(self.fe_t, self.cp_ta, rule=dMv1)
self.dMvn = Constraint(self.fe_t, self.cp_ta, rule=dMvn)
self.hyd = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hyd)
self.hyd1 = Constraint(self.fe_t, self.cp_ta, rule=hyd1)
self.hydN = Constraint(self.fe_t, self.cp_ta, rule=hydN)
self.dvself = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dvm)
# --------------------------------------------------------------------------------------------------------------
#: Control constraint
self.u1_e = Expression(self.fe_t, rule=lambda m, i: self.Rec[i])
self.u2_e = Expression(self.fe_t, rule=lambda m, i: self.Qr[i])
self.u1_c = Constraint(self.fe_t, rule=lambda m, i: self.u1[i] == self.u1_e[i])
self.u2_c = Constraint(self.fe_t, rule=lambda m, i: self.u2[i] == self.u2_e[i])
# --------------------------------------------------------------------------------------------------------------
#: Suffixes
self.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
self.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)
self.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)
self.ipopt_zL_in = Suffix(direction=Suffix.EXPORT)
self.ipopt_zU_in = Suffix(direction=Suffix.EXPORT)
def write_nl(self):
"""Writes the nl file and the respective row & col"""
name = str(self.__class__.__name__) + ".nl"
self.write(filename=name,
format=ProblemFormat.nl,
io_options={"symbolic_solver_labels": True})
def create_bounds(self):
"""Creates bounds for the variables"""
for value in itervalues(self.M):
value.setlb(1.0)
for value in itervalues(self.T):
value.setlb(200)
for value in itervalues(self.pm):
value.setlb(1.0)
for value in itervalues(self.pn):
value.setlb(1.0)
for value in itervalues(self.L):
value.setlb(0.0)
for value in itervalues(self.V):
value.setlb(0.0)
for value in itervalues(self.x):
value.setlb(0.0)
for value in itervalues(self.y):
value.setlb(0.0)
for value in itervalues(self.hl):
value.setlb(1.0)
for value in itervalues(self.hv):
value.setlb(1.0)
for value in itervalues(self.Qc):
value.setlb(0.0)
for value in itervalues(self.D):
value.setlb(0.0)
for value in itervalues(self.Vm):
value.setlb(0.0)
for value in itervalues(self.Mv):
value.setlb(0.155 + 1e-06)
for value in itervalues(self.Mv1):
value.setlb(8.5 + 1e-06)
for value in itervalues(self.Mvn):
value.setlb(0.17 + 1e-06)
for value in itervalues(self.M):
value.setub(1e+07)
for value in itervalues(self.T):
value.setub(500)
for value in itervalues(self.pm):
value.setub(5e+07)
for value in itervalues(self.pn):
value.setub(5e+07)
for value in itervalues(self.L):
value.setub(1e+03)
for value in itervalues(self.V):
value.setub(1e+03)
for value in itervalues(self.x):
value.setub(1.0)
for value in itervalues(self.y):
value.setub(1.0)
for value in itervalues(self.hl):
value.setub(1e+07)
for value in itervalues(self.hv):
value.setub(1e+07)
for value in itervalues(self.Qc):
value.setub(1e+08)
for value in itervalues(self.D):
value.setub(1e+04)
for value in itervalues(self.Vm):
value.setub(1e+04)
for value in itervalues(self.Mv):
value.setub(1e+04)
for value in itervalues(self.Mv1):
value.setub(1e+04)
for value in itervalues(self.Mvn):
value.setub(1e+04)
@staticmethod
def parse_ig_ampl(file_i):
lines = file_i.readlines()
ig_dict = {}
for line in lines:
kk = re.split('(?:let)|[:=\s\[\]]', line)
try:
var = kk[2]
# print(var)
key = kk[3]
key = re.split(',', key)
actual_key = []
for k in key:
actual_key.append(int(k))
actual_key.append(actual_key.pop(0))
actual_key = tuple(actual_key)
value = kk[8]
value = float(value)
ig_dict[var, actual_key] = value
except IndexError:
continue
file_i.close()
return ig_dict
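# Hedged note (added, not in the original file): each initial-guess line is assumed to
# look like ``let x[1,2,3] := 4.5`` (whitespace-terminated value), which the parser
# turns into {('x', (2, 3, 1)): 4.5}; the first index is rotated to the end of the
# key tuple before it is stored.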
def init_steady_ref(self):
"""If the model is steady, we try to initialize it with an initial guess from ampl"""
cur_dir = os.path.dirname(__file__)
ampl_ig = os.path.join(cur_dir, "iv_ss.txt")
file_tst = open(ampl_ig, "r")
if self.nfe_t == 1 and self.ncp_t == 1:
somedict = self.parse_ig_ampl(file_tst)
for var in self.component_objects(Var, active=True):
vx = getattr(self, str(var))
for v, k in var.iteritems():
try:
vx[v] = somedict[str(var), v]
except KeyError:
continue
solver = SolverFactory('ipopt')
someresults = solver.solve(self, tee=True)
def equalize_u(self, direction="u_to_r"):
"""set current controls to the values of their respective dummies"""
if direction == "u_to_r":
for i in iterkeys(self.Rec):
self.Rec[i].set_value(value(self.u1[i]))
for i in iterkeys(self.Qr):
self.Qr[i].set_value(value(self.u2[i]))
elif direction == "r_to_u":
for i in iterkeys(self.u1):
self.u1[i].value = value(self.Rec[i])
for i in iterkeys(self.u2):
self.u2[i].value = value(self.Qr[i])
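# Hedged usage sketch (added; not part of the original file). Assuming ``m`` is an
# instance of the surrounding distillation-column model and 1 indexes the first
# finite element:
#
#     m.u1[1] = 0.80                       # update the dummy control parameters
#     m.u2[1] = 1.8e+06
#     m.equalize_u(direction="u_to_r")     # copy the dummies into the Rec/Qr variables
#     m.create_bounds()
#     m.write_nl()                         # dump the .nl file for an external solver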
| 40.942222
| 122
| 0.547981
|
7749968593dca61f8932979a24a68864c24e4e6e
| 18,297
|
py
|
Python
|
chainer/links/model/vision/googlenet.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | 2
|
2018-02-05T07:25:48.000Z
|
2018-08-28T20:29:45.000Z
|
chainer/links/model/vision/googlenet.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | null | null | null |
chainer/links/model/vision/googlenet.py
|
takeratta/chainer
|
02686e98cd6dc8f20979a1f3a79130f076cbfc6c
|
[
"MIT"
] | 1
|
2018-08-23T01:34:57.000Z
|
2018-08-23T01:34:57.000Z
|
from __future__ import print_function
import collections
import os
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.average import average
from chainer.functions.noise.dropout import dropout
from chainer.functions.normalization.local_response_normalization import (
local_response_normalization)
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import uniform
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.inception import Inception
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class GoogLeNet(link.Chain):
"""A pre-trained GoogLeNet model provided by BVLC.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
GoogLeNet, which is also called Inception-v1, is an architecture of
convolutional neural network proposed in 2014. This model is relatively
lightweight and requires small memory footprint during training compared
with modern architectures such as ResNet. Therefore, if you fine-tune your
network based on a model pre-trained by Imagenet and need to train it with
large batch size, GoogLeNet may be useful. On the other hand, if you just
want an off-the-shelf classifier, we recommend you to use ResNet50 or other
models since they are more accurate than GoogLeNet.
The original model is provided here:
`<https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, the parameters are
not initialized by a pre-trained model; instead, the default
initializer used in BVLC is applied, i.e.,
``chainer.initializers.LeCunUniform(scale=1.0)``.
Note that, in Caffe, when weight_filler is specified as the
"xavier" type without the variance_norm parameter, the weights are
initialized by Uniform(-s, s), where
:math:`s = \\sqrt{\\frac{3}{fan_{in}}}` and :math:`fan_{in}` is the
number of input units. This corresponds to LeCunUniform in Chainer
but not GlorotUniform.
Attributes:
~GoogLeNet.available_layers (list of str): The list of available layer
names used by ``__call__`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
kwargs = {'initialW': constant.Zero()}
else:
# employ default initializers used in BVLC. For more detail, see
# https://github.com/chainer/chainer/pull/2424#discussion_r109642209
kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}
super(GoogLeNet, self).__init__(
conv1=Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs),
conv2_reduce=Convolution2D(64, 64, 1, **kwargs),
conv2=Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs),
inc3a=Inception(192, 64, 96, 128, 16, 32, 32),
inc3b=Inception(256, 128, 128, 192, 32, 96, 64),
inc4a=Inception(480, 192, 96, 208, 16, 48, 64),
inc4b=Inception(512, 160, 112, 224, 24, 64, 64),
inc4c=Inception(512, 128, 128, 256, 24, 64, 64),
inc4d=Inception(512, 112, 144, 288, 32, 64, 64),
inc4e=Inception(528, 256, 160, 320, 32, 128, 128),
inc5a=Inception(832, 256, 160, 320, 32, 128, 128),
inc5b=Inception(832, 384, 192, 384, 48, 128, 128),
loss3_fc=Linear(1024, 1000, **kwargs),
loss1_conv=Convolution2D(512, 128, 1, **kwargs),
loss1_fc1=Linear(2048, 1024, **kwargs),
loss1_fc2=Linear(1024, 1000, **kwargs),
loss2_conv=Convolution2D(528, 128, 1, **kwargs),
loss2_fc1=Linear(2048, 1024, **kwargs),
loss2_fc2=Linear(1024, 1000, **kwargs)
)
if pretrained_model == 'auto':
_retrieve(
'bvlc_googlenet.npz',
'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1', [self.conv1, relu]),
('pool1', [_max_pooling_2d, _local_response_normalization]),
('conv2_reduce', [self.conv2_reduce, relu]),
('conv2', [self.conv2, relu, _local_response_normalization]),
('pool2', [_max_pooling_2d]),
('inception_3a', [self.inc3a]),
('inception_3b', [self.inc3b]),
('pool3', [_max_pooling_2d]),
('inception_4a', [self.inc4a]),
('inception_4b', [self.inc4b]),
('inception_4c', [self.inc4c]),
('inception_4d', [self.inc4d]),
('inception_4e', [self.inc4e]),
('pool4', [_max_pooling_2d]),
('inception_5a', [self.inc5a]),
('inception_5b', [self.inc5b]),
('pool5', [_average_pooling_2d_k7]),
('loss3_fc', [_dropout, self.loss3_fc]),
('prob', [softmax]),
# Since the following outputs are usually not used, they are placed
# after 'prob' so they can be skipped for efficiency.
('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
self.loss1_fc1, relu, self.loss1_fc2]),
('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
self.loss2_fc1, relu, self.loss2_fc2])
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
chainermodel = cls(pretrained_model=None)
_transfer_googlenet(caffemodel, chainermodel)
npz.save_npz(path_npz, chainermodel, compression=False)
def __call__(self, x, layers=['prob'], **kwargs):
"""__call__(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
Returns:
Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
inception_4a_cache = None
inception_4d_cache = None
target_layers = set(layers)
for key, funcs in self.functions.items():
if len(target_layers) == 0:
break
if key == 'loss1_fc2':
h = inception_4a_cache
elif key == 'loss2_fc2':
h = inception_4d_cache
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
if key == 'inception_4a':
inception_4a_cache = h
elif key == 'inception_4d':
inception_4d_cache = h
return activations
def extract(self, images, layers=['pool5'], size=(224, 224), **kwargs):
"""extract(self, images, layers=['pool5'], size=(224, 224))
Extracts all the feature maps of given images.
The difference from directly executing ``__call__`` is that
it directly accepts images as an input and automatically
transforms them to a proper variable. That is,
it is also interpreted as a shortcut method that implicitly calls
``prepare`` and ``__call__`` functions.
.. warning::
``train`` and ``volatile`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('enable_backprop', not volatile)``
respectively.
See :func:`chainer.using_config`.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
size (pair of ints): The resolution of resized images used as
an input of the CNN. The given images are not resized
if this argument is ``None``, but in that case the resolutions of
all the images must be the same.
Returns:
Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = y.data.shape[0] // 10
y_shape = y.data.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = average(y, axis=1)
return y
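# Hedged usage sketch (added; not part of the original module). It assumes Pillow is
# installed, a local image file exists, and the BVLC weights are fetched with
# ``pretrained_model='auto'``:
#
#     from PIL import Image
#     model = GoogLeNet(pretrained_model='auto')
#     img = Image.open('example.jpg')                   # hypothetical file
#     probs = model.predict([img])                      # (1, 1000) class probabilities
#     feats = model.extract([img], layers=['pool5'])['pool5']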
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for GoogLeNet.
Note that you have to call this method before ``__call__``
because the pre-trained GoogLeNet model requires the given image
to be resized, converted from RGB to BGR, mean-subtracted,
and dimension-permuted before calling.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=numpy.float32)
image = image[:, :, ::-1]
image -= numpy.array([104.0, 117.0, 123.0], dtype=numpy.float32) # BGR
image = image.transpose((2, 0, 1))
return image
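# Hedged example (added; not in the original module): a 200x300 RGB uint8 array
#
#     arr = numpy.zeros((200, 300, 3), dtype=numpy.uint8)
#     x = prepare(arr)   # float32 array of shape (3, 224, 224), BGR, mean-subtracted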
def _transfer_inception(src, dst, names):
for name in names:
chain = getattr(dst, 'inc{}'.format(name))
src_prefix = 'inception_{}/'.format(name)
chain.conv1.W.data[:] = src[src_prefix + '1x1'].W.data
chain.conv1.b.data[:] = src[src_prefix + '1x1'].b.data
chain.proj3.W.data[:] = src[src_prefix + '3x3_reduce'].W.data
chain.proj3.b.data[:] = src[src_prefix + '3x3_reduce'].b.data
chain.conv3.W.data[:] = src[src_prefix + '3x3'].W.data
chain.conv3.b.data[:] = src[src_prefix + '3x3'].b.data
chain.proj5.W.data[:] = src[src_prefix + '5x5_reduce'].W.data
chain.proj5.b.data[:] = src[src_prefix + '5x5_reduce'].b.data
chain.conv5.W.data[:] = src[src_prefix + '5x5'].W.data
chain.conv5.b.data[:] = src[src_prefix + '5x5'].b.data
chain.projp.W.data[:] = src[src_prefix + 'pool_proj'].W.data
chain.projp.b.data[:] = src[src_prefix + 'pool_proj'].b.data
def _transfer_googlenet(src, dst):
# 1 #################################################################
dst.conv1.W.data[:] = src['conv1/7x7_s2'].W.data
dst.conv1.b.data[:] = src['conv1/7x7_s2'].b.data
# 2 #################################################################
dst.conv2_reduce.W.data[:] = src['conv2/3x3_reduce'].W.data
dst.conv2_reduce.b.data[:] = src['conv2/3x3_reduce'].b.data
dst.conv2.W.data[:] = src['conv2/3x3'].W.data
dst.conv2.b.data[:] = src['conv2/3x3'].b.data
# 3, 4, 5 ###########################################################
_transfer_inception(src, dst, ['3a', '3b',
'4a', '4b', '4c', '4d', '4e',
'5a', '5b'])
# outputs ############################################################
dst.loss1_conv.W.data[:] = src['loss1/conv'].W.data
dst.loss1_conv.b.data[:] = src['loss1/conv'].b.data
dst.loss1_fc1.W.data[:] = src['loss1/fc'].W.data
dst.loss1_fc1.b.data[:] = src['loss1/fc'].b.data
dst.loss1_fc2.W.data[:] = src['loss1/classifier'].W.data
dst.loss1_fc2.b.data[:] = src['loss1/classifier'].b.data
dst.loss2_conv.W.data[:] = src['loss2/conv'].W.data
dst.loss2_conv.b.data[:] = src['loss2/conv'].b.data
dst.loss2_fc1.W.data[:] = src['loss2/fc'].W.data
dst.loss2_fc1.b.data[:] = src['loss2/fc'].b.data
dst.loss2_fc2.W.data[:] = src['loss2/classifier'].W.data
dst.loss2_fc2.b.data[:] = src['loss2/classifier'].b.data
dst.loss3_fc.W.data[:] = src['loss3/classifier'].W.data
dst.loss3_fc.b.data[:] = src['loss3/classifier'].b.data
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=3, stride=2)
def _local_response_normalization(x):
return local_response_normalization(x, n=5, k=1, alpha=1e-4 / 5)
def _average_pooling_2d_k5(x):
return average_pooling_2d(x, ksize=5, stride=3)
def _average_pooling_2d_k7(x):
return average_pooling_2d(x, ksize=7, stride=1)
def _dropout(x):
return dropout(x, ratio=0.4)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
print('Now loading caffemodel (usually it takes a few minutes)')
GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name_npz, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name_npz)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
| 41.024664
| 80
| 0.615183
|
e01d7f2fb3d7dea2928271a9ce27690cf02144dd
| 10,156
|
py
|
Python
|
AdminServer/appscale/admin/push_worker_manager.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
AdminServer/appscale/admin/push_worker_manager.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
AdminServer/appscale/admin/push_worker_manager.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
""" Keeps track of queue configuration details for push workers. """
import logging
import json
import os
from datetime import timedelta
from kazoo.exceptions import ZookeeperError
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.options import options
from appscale.common.async_retrying import (
retry_children_watch_coroutine, retry_coroutine, retry_data_watch_coroutine
)
from appscale.common.constants import (CONFIG_DIR, LOG_DIR, MonitStates,
VAR_DIR)
from appscale.common.monit_app_configuration import create_config_file
from appscale.common.monit_app_configuration import MONIT_CONFIG_DIR
from .utils import ensure_path
# The number of tasks the Celery worker can handle at a time.
CELERY_CONCURRENCY = 1000
# The directory where Celery configuration files are stored.
CELERY_CONFIG_DIR = os.path.join(CONFIG_DIR, 'celery', 'configuration')
# The safe memory in MB per Celery worker.
CELERY_SAFE_MEMORY = 1000
# The directory where Celery persists state.
CELERY_STATE_DIR = os.path.join('/', 'opt', 'appscale', 'celery')
# The working directory for Celery workers.
CELERY_WORKER_DIR = os.path.join(CONFIG_DIR, 'celery', 'workers')
# The directory that workers use for logging.
CELERY_WORKER_LOG_DIR = os.path.join(LOG_DIR, 'celery_workers')
# The time limit of a running task in seconds. Extra time over the soft limit
# allows it to catch up to interrupts.
HARD_TIME_LIMIT = 610
# The soft time limit of a running task.
TASK_SOFT_TIME_LIMIT = 600
# The worker script for Celery to use.
WORKER_MODULE = 'appscale.taskqueue.push_worker'
logger = logging.getLogger('appscale-admin')
class ProjectPushWorkerManager(object):
""" Manages the Celery worker for a single project. """
def __init__(self, zk_client, monit_operator, project_id):
""" Creates a new ProjectPushWorkerManager.
Args:
zk_client: A KazooClient.
monit_operator: A MonitOperator.
project_id: A string specifying a project ID.
"""
self.zk_client = zk_client
self.project_id = project_id
self.monit_operator = monit_operator
self.queues_node = '/appscale/projects/{}/queues'.format(project_id)
self.watch = zk_client.DataWatch(self.queues_node, self._update_worker)
self.monit_watch = 'celery-{}'.format(project_id)
self._stopped = False
@gen.coroutine
def update_worker(self, queue_config):
""" Updates a worker's configuration and restarts it.
Args:
queue_config: A JSON string specifying queue configuration.
"""
self._write_worker_configuration(queue_config)
status = yield self._wait_for_stable_state()
pid_location = os.path.join(VAR_DIR, 'celery-{}.pid'.format(self.project_id))
try:
with open(pid_location) as pidfile:
old_pid = int(pidfile.read().strip())
except IOError:
old_pid = None
# Start the worker if it doesn't exist. Restart it if it does.
if status == MonitStates.MISSING:
command = self.celery_command()
env_vars = {'APP_ID': self.project_id, 'HOST': options.load_balancers[0],
'C_FORCE_ROOT': True}
create_config_file(self.monit_watch, command, pid_location,
env_vars=env_vars, max_memory=CELERY_SAFE_MEMORY)
logger.info('Starting push worker for {}'.format(self.project_id))
yield self.monit_operator.reload()
else:
logger.info('Restarting push worker for {}'.format(self.project_id))
yield self.monit_operator.send_command(self.monit_watch, 'restart')
start_future = self.monit_operator.ensure_running(self.monit_watch)
yield gen.with_timeout(timedelta(seconds=60), start_future,
IOLoop.current())
try:
yield self.ensure_pid_changed(old_pid, pid_location)
except AssertionError:
# Occasionally, Monit will get interrupted during a restart. Retry the
# restart if the Celery worker PID is the same.
logger.warning(
'{} worker PID did not change. Restarting it.'.format(self.project_id))
yield self.update_worker(queue_config)
@staticmethod
@retry_coroutine(retrying_timeout=10, retry_on_exception=[AssertionError])
def ensure_pid_changed(old_pid, pid_location):
try:
with open(pid_location) as pidfile:
new_pid = int(pidfile.read().strip())
except IOError:
new_pid = None
if new_pid == old_pid:
raise AssertionError
@gen.coroutine
def stop_worker(self):
""" Removes the monit configuration for the project's push worker. """
status = yield self._wait_for_stable_state()
if status == MonitStates.RUNNING:
logger.info('Stopping push worker for {}.'.format(self.project_id))
yield self.monit_operator.send_command(self.monit_watch, 'stop')
watch_file = '{}/appscale-{}.cfg'.format(MONIT_CONFIG_DIR, self.monit_watch)
os.remove(watch_file)
else:
logger.debug('Not stopping push worker for {} since it is not running.'.format(self.project_id))
def celery_command(self):
""" Generates the Celery command for a project's push worker. """
log_file = os.path.join(CELERY_WORKER_LOG_DIR,
'{}.log'.format(self.project_id))
pidfile = os.path.join(VAR_DIR, 'celery-{}.pid'.format(self.project_id))
state_db = os.path.join(CELERY_STATE_DIR,
'worker___{}.db'.format(self.project_id))
return ' '.join([
'celery', 'worker',
'--app', WORKER_MODULE,
'--pool=eventlet',
'--concurrency={}'.format(CELERY_CONCURRENCY),
'--hostname', self.project_id,
'--workdir', CELERY_WORKER_DIR,
'--logfile', log_file,
'--pidfile', pidfile,
'--time-limit', str(HARD_TIME_LIMIT),
'--soft-time-limit', str(TASK_SOFT_TIME_LIMIT),
'--statedb', state_db,
'-Ofair'
])
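# Hedged example (added; not in the original module): for a hypothetical project id
# 'guestbook', celery_command() returns a single string along the lines of
#   celery worker --app appscale.taskqueue.push_worker --pool=eventlet
#   --concurrency=1000 --hostname guestbook --workdir <CELERY_WORKER_DIR>
#   --logfile <CELERY_WORKER_LOG_DIR>/guestbook.log --pidfile <VAR_DIR>/celery-guestbook.pid
#   --time-limit 610 --soft-time-limit 600 --statedb <CELERY_STATE_DIR>/worker___guestbook.db -Ofair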
def ensure_watch(self):
""" Restart the watch if it has been cancelled. """
if self._stopped:
self._stopped = False
self.watch = self.zk_client.DataWatch(self.queues_node,
self._update_worker)
@gen.coroutine
def _wait_for_stable_state(self):
""" Waits until the worker's state is not pending. """
stable_states = (MonitStates.MISSING, MonitStates.RUNNING,
MonitStates.UNMONITORED)
status_future = self.monit_operator.wait_for_status(
self.monit_watch, stable_states)
status = yield gen.with_timeout(timedelta(seconds=60), status_future,
IOLoop.current())
raise gen.Return(status)
def _write_worker_configuration(self, queue_config):
""" Writes a worker's configuration file.
Args:
queue_config: A JSON string specifying queue configuration.
"""
if queue_config is None:
rates = {'default': '5/s'}
else:
queues = json.loads(queue_config)['queue']
rates = {
queue_name: queue['rate'] for queue_name, queue in queues.items()
if 'mode' not in queue or queue['mode'] == 'push'}
config_location = os.path.join(CELERY_CONFIG_DIR,
'{}.json'.format(self.project_id))
with open(config_location, 'w') as config_file:
json.dump(rates, config_file)
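# Hedged example (added; not in the original module): a queue_config of
#   '{"queue": {"default": {"rate": "5/s"}, "pull-q": {"rate": "1/s", "mode": "pull"}}}'
# yields a <CELERY_CONFIG_DIR>/<project_id>.json file containing {"default": "5/s"};
# pull-mode queues are filtered out, and a missing config falls back to {"default": "5/s"}.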
def _update_worker(self, queue_config, _):
""" Handles updates to a queue configuration node.
Since this runs in a separate thread, it doesn't change any state directly.
Instead, it just acts as a bridge back to the main IO loop.
Args:
queue_config: A JSON string specifying queue configuration.
"""
# Prevent further watches if they are no longer needed.
if queue_config is None:
try:
project_exists = self.zk_client.exists(
'/appscale/projects/{}'.format(self.project_id)) is not None
except ZookeeperError:
# If the project has been deleted, an extra "exists" watch will remain.
project_exists = True
if not project_exists:
self._stopped = True
return False
persistent_update_worker = retry_data_watch_coroutine(
self.queues_node, self.update_worker
)
main_io_loop = IOLoop.instance()
main_io_loop.add_callback(persistent_update_worker, queue_config)
class GlobalPushWorkerManager(object):
""" Manages the Celery workers for all projects. """
def __init__(self, zk_client, monit_operator):
""" Creates a new GlobalPushWorkerManager. """
self.zk_client = zk_client
self.monit_operator = monit_operator
self.projects = {}
ensure_path(CELERY_CONFIG_DIR)
ensure_path(CELERY_WORKER_DIR)
ensure_path(CELERY_WORKER_LOG_DIR)
ensure_path(CELERY_STATE_DIR)
zk_client.ensure_path('/appscale/projects')
zk_client.ChildrenWatch('/appscale/projects', self._update_projects)
@gen.coroutine
def update_projects(self, new_project_list):
""" Establishes watches for each project's queue configuration.
Args:
new_project_list: A fresh list of strings specifying existing
project IDs.
"""
to_stop = [project for project in self.projects
if project not in new_project_list]
for project_id in to_stop:
yield self.projects[project_id].stop_worker()
del self.projects[project_id]
for new_project_id in new_project_list:
if new_project_id not in self.projects:
self.projects[new_project_id] = ProjectPushWorkerManager(
self.zk_client, self.monit_operator, new_project_id)
# Handle changes that happen between watches.
self.projects[new_project_id].ensure_watch()
def _update_projects(self, new_projects):
""" Handles creation and deletion of projects.
Since this runs in a separate thread, it doesn't change any state directly.
Instead, it just acts as a bridge back to the main IO loop.
Args:
new_projects: A list of strings specifying all existing project IDs.
"""
persistent_update_project = retry_children_watch_coroutine(
'/appscale/projects', self.update_projects
)
main_io_loop = IOLoop.instance()
main_io_loop.add_callback(persistent_update_project, new_projects)
| 36.532374
| 102
| 0.697026
|
1a3216d5de8a1019665eaddf0dfdc2e7d2377833
| 7,102
|
py
|
Python
|
FP_Growth/FPGrowth.py
|
Vyzrala/ZTED_Algorithms
|
bc4c70d4d350d0bb644e2973b6c6994b336d94c6
|
[
"MIT"
] | null | null | null |
FP_Growth/FPGrowth.py
|
Vyzrala/ZTED_Algorithms
|
bc4c70d4d350d0bb644e2973b6c6994b336d94c6
|
[
"MIT"
] | null | null | null |
FP_Growth/FPGrowth.py
|
Vyzrala/ZTED_Algorithms
|
bc4c70d4d350d0bb644e2973b6c6994b336d94c6
|
[
"MIT"
] | null | null | null |
from typing import List, Dict, Tuple
from collections import Counter
from datasets import translation
class Node:
def __init__(self, key: str, counter: int, parent_node) -> None:
self.key = key
self.counter = counter
self.parent = parent_node
self.childs: Dict[str, Node] = {}
self.link = None
def increment_counter(self):
    self.counter += 1
def display(self, index: int=0) -> None:
# print("{} [{}: {}]\n".format(" -"*(index), translation.get(self.key, self.key), self.counter))
print("{} [{}: {}]\n".format(" -"*(index), self.key, self.counter))
for child in self.childs.values():
child.display(index+1)
def display_linked(self):
current_node = self
while current_node != None:
print("[Key = {}]".format(current_node.key), end='')
if current_node.link: print(" => ", end='')
current_node = current_node.link
print()
class FPG:
def __init__(self, min_support: int=2) -> None:
self.minimum_support = min_support
self.root_node = None
self.support = None
self.clean_dataset = None
self.header_table: Dict[str, list] = {}
self.conditional_pattern_base = {}
self.fis = None
def run(self, dataset: List[list]) -> List[list]:
self.initial_dataset = dataset
wset = self.initial_dataset
wset = [list(set(transaction)) for transaction in wset] # Make sure that items in a transaction are unique
ui = self.get_unique_items(wset)
self.support = self.get_support(wset, ui)
self.clean_dataset = self.preprocess_dataset(wset)
return self.clean_dataset
def display_info(self) -> None:
# print("Initial dataset (minimum support = {}):".format(self.minimum_support), *self.initial_dataset, sep='\n')
# print("Support:", *{list(k)[0]:v for k,v in self.support.items()}.items(), sep='\n')
print("Cleaned and sorted dataset:", *self.clean_dataset, sep='\n')
# print("Support table:")
# print(*self.support.items(), sep='\n')
print("\nTree:")
self.print_tree()
if self.header_table != {}:
print("Header Table:")
print(*self.header_table.items(), sep='\n')
# print("Linked nodes:")
# for v in self.header_table.values():
# v['nodes'][0].display_linked()
if self.conditional_pattern_base != {}:
print("Conditional pattern base:")
print(*self.conditional_pattern_base.items(), sep='\n')
if self.fis:
print("Frequent item sets:", len(self.fis))
print(*self.fis, sep='\n')
def print_tree(self) -> None:
try:
self.root_node.display()
except:
print("\tNo root node.\n")
def get_unique_items(self, wset: List[list]) -> List[set]:
unique_items = list(set(sum(wset, [])))
return [frozenset([x]) for x in unique_items]
def get_support(self, dataset: List[list], candidates: List[frozenset]) -> Dict[frozenset, int]:
# support = {}
# for transaction in dataset:
# for item in candidates:
# if item.issubset(transaction):
# sub = frozenset(item)
# if sub in support.keys():
# support[sub] += 1
# else:
# support[sub] = 1
# support = sorted(support.items(), key=lambda x: x[1], reverse=True) # Sorting by value
# support = {k:v for k, v in support if v >= self.minimum_support} # Filtering by minimum support value
support = Counter(item for item in sum(dataset, []))
support = filter(lambda item: item[1]>=self.minimum_support, support.items())
support = sorted(support, key=lambda x:x[0])
support = sorted(support, key=lambda x:x[1], reverse=True)
# support = {frozenset([k]):v for k,v in support}
support = dict(support)
return support
def preprocess_dataset(self, dataset: List[list]) -> List[list]:
# Cleaning and sorting dataset
clean_dataset = []
# mask = [x for x in list(self.support)]
mask = list(self.support.keys())
for transaction in dataset:
clean_dataset.append(list(filter(lambda item: item in mask, transaction)))
clean_dataset[-1].sort(key=lambda i: mask.index(i))
return clean_dataset
def build_tree(self, dataset: List[list]) -> None:
for k in self.support:
self.header_table[k] = {'support': self.support[k], 'nodes': []}
self.root_node = Node('NULL', 0, None)
for transaction in dataset:
self.insert_transaction(transaction, self.root_node)
# Linking nodes
for v in self.header_table.values():
if len(v['nodes']) > 1:
for i in range(len(v['nodes'])-1):
v['nodes'][i].link = v['nodes'][i+1]
def insert_transaction(self, transaction: List[str], node: Node) -> None:
if len(transaction) < 1: return
key = transaction[0]
if key in node.childs.keys():
node.childs[key].counter += 1  # TODO: increment by the transaction's support rather than 1
else:
node.childs[key] = Node(key, 1, node)
self.header_table[key]['nodes'].append(node.childs[key])
if len(transaction) > 1:
self.insert_transaction(transaction[1:], node.childs[key])
def get_prefix(self, node: Node):
paths = []
while node:
path = self.traverse_root(node)
if len(path) > 1:
paths.append([path[1:], node.counter])
node = node.link
return paths
def traverse_root(self, node: Node) -> list:
tmp = node
path = []
while tmp is not self.root_node:
path.append(tmp.key)
tmp = tmp.parent
return path
def get_CPB(self, key:str) -> List[list]:
start_node = self.header_table[key]['nodes'][0]
paths = self.get_prefix(start_node)
dataset = []
for item in paths:
dataset.append(item[0])
self.conditional_pattern_base[key] = dataset
return dataset
def mine_fis(self, header_parent, prefix, fis):
reverse_header_keys = list(header_parent.keys())[::-1]
for key in reverse_header_keys:
new_fis = prefix.copy()
new_fis.add(key)
fis.append(new_fis)
CPB = self.get_CPB(key)
# Generate sub-tree
tmp_fpg = FPG(self.minimum_support)
tmp_clean_dataset = tmp_fpg.run(CPB)
tmp_fpg.build_tree(tmp_clean_dataset)
if tmp_fpg.header_table != {}:
self.mine_fis(tmp_fpg.header_table, new_fis, fis)
self.fis = fis
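# Hedged usage sketch (added; not part of the original file). The transactions below
# are made up for illustration:
#
#     dataset = [['a', 'b', 'c'], ['b', 'c'], ['a', 'c'], ['a', 'b']]
#     fpg = FPG(min_support=2)
#     clean = fpg.run(dataset)                    # cleaned, support-sorted transactions
#     fpg.build_tree(clean)                       # FP-tree plus linked header table
#     fpg.mine_fis(fpg.header_table, set(), [])   # fills fpg.fis with frequent itemsets
#     fpg.display_info()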
| 36.797927
| 120
| 0.558434
|
c5a71e1a26ed5ac11bfe58e7492eee40ff45f762
| 186
|
py
|
Python
|
POO/EX010/EX10B.py
|
0Fernando0/CursoPython
|
1dcfdb6556e41c6dedcba2857aa4382b2f81aa59
|
[
"MIT"
] | null | null | null |
POO/EX010/EX10B.py
|
0Fernando0/CursoPython
|
1dcfdb6556e41c6dedcba2857aa4382b2f81aa59
|
[
"MIT"
] | null | null | null |
POO/EX010/EX10B.py
|
0Fernando0/CursoPython
|
1dcfdb6556e41c6dedcba2857aa4382b2f81aa59
|
[
"MIT"
] | null | null | null |
class A:
def __new__(cls,*args,**kwargs):
cls.nome = 'Fernando'
return super().__new__(cls)
def __init__(self):
print('inicio...')
| 18.6
| 36
| 0.489247
|
b5d65e6553b5d8232ba618fef57743de51392303
| 2,665
|
py
|
Python
|
tests/unit/cli/test_ranged_argument.py
|
adrianfusco/cibyl
|
421fb537e21abddeecdde42c18f4de60425cc804
|
[
"Apache-2.0"
] | 3
|
2022-02-17T18:07:07.000Z
|
2022-03-19T10:22:38.000Z
|
tests/unit/cli/test_ranged_argument.py
|
adrianfusco/cibyl
|
421fb537e21abddeecdde42c18f4de60425cc804
|
[
"Apache-2.0"
] | 58
|
2022-02-14T14:41:22.000Z
|
2022-03-31T10:54:28.000Z
|
tests/unit/cli/test_ranged_argument.py
|
adrianfusco/cibyl
|
421fb537e21abddeecdde42c18f4de60425cc804
|
[
"Apache-2.0"
] | 6
|
2022-02-14T19:21:26.000Z
|
2022-03-29T09:31:31.000Z
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from cibyl.cli.ranged_argument import (EXPRESSION_PATTERN, RANGE_OPERATORS,
VALID_OPS, Range)
def is_valid_regex(match_obj, operator, operand):
"""Check that a string is matched correctly by the regex."""
operator_str = match_obj.group(1)
operand_str = match_obj.group(2)
return operand_str == operand and operator == operator_str
class TestRange(TestCase):
"""Test helper module for ranged Arguments."""
def test_regex(self):
"""Check that the range regex works as intended."""
self.assertIsNone(EXPRESSION_PATTERN.search("abcd"))
matched_str = EXPRESSION_PATTERN.search(">>4")
self.assertTrue(is_valid_regex(matched_str, ">>", "4"))
matched_str = EXPRESSION_PATTERN.search("!=4")
self.assertTrue(is_valid_regex(matched_str, "!=", "4"))
matched_str = EXPRESSION_PATTERN.search("<=45")
self.assertTrue(is_valid_regex(matched_str, "<=", "45"))
def test_Range(self):
"""Test that Range namedtuple works as intended."""
range1 = Range(">", "2")
range2 = Range("<", "2")
self.assertEqual(range1.operator, ">")
self.assertEqual(range1.operand, "2")
self.assertEqual(range2.operator, "<")
self.assertEqual(range2.operand, "2")
self.assertNotEqual(range1, range2)
def test_range_operators(self):
"""Test that range operator dictionary works as intended."""
self.assertTrue(RANGE_OPERATORS["<"](2, 3))
self.assertTrue(RANGE_OPERATORS[">"](5, 3))
self.assertTrue(RANGE_OPERATORS[">="](3, 3))
self.assertTrue(RANGE_OPERATORS["<="](3, 3))
self.assertTrue(RANGE_OPERATORS["=="](3, 3))
self.assertTrue(RANGE_OPERATORS["="](3, 3))
self.assertTrue(RANGE_OPERATORS["!="](2, 3))
def test_valid_operators_str(self):
"""Test that VALID_OPS string contains all supported operators."""
for op in RANGE_OPERATORS:
self.assertIn(op, VALID_OPS)
| 40.378788
| 78
| 0.657786
|
5093a8930e14019680620baa6093d3207f2e29ec
| 9,010
|
py
|
Python
|
experiments/tests/faker_populator.py
|
neuromat/portal
|
78c1f8c9e20c46d283b693d54d009bd19bda90de
|
[
"CC-BY-4.0"
] | 3
|
2017-12-14T12:09:28.000Z
|
2021-07-02T02:26:55.000Z
|
experiments/tests/faker_populator.py
|
neuromat/portal
|
78c1f8c9e20c46d283b693d54d009bd19bda90de
|
[
"CC-BY-4.0"
] | null | null | null |
experiments/tests/faker_populator.py
|
neuromat/portal
|
78c1f8c9e20c46d283b693d54d009bd19bda90de
|
[
"CC-BY-4.0"
] | 1
|
2018-06-13T19:55:33.000Z
|
2018-06-13T19:55:33.000Z
|
from datetime import datetime
from random import randint, choice
from subprocess import call
from django.contrib.auth import models
from faker import Factory
from experiments.models import Experiment, Study, Group, Researcher, \
Questionnaire
# python manage.py shell < experiments/tests/faker_populator.py
# TODO: when executing from bash command line, final line identifier breaks
# imports. Keeping the Collaborator import on the same line.
from experiments.models import Gender, ClassificationOfDiseases, Keyword, \
Step, TMSSetting, TMSDevice, CoilModel, TMSDeviceSetting
from experiments.tests.tests_helper import create_classification_of_diseases, \
create_questionnaire, create_researcher
from experiments.tests.tests_helper import create_experimental_protocol
from experiments.tests.tests_helper import create_group, \
create_ethics_committee_info, create_step, create_tms_setting, \
create_tms_device, create_coil_model, create_tms_device_setting, \
create_tmsdata_objects_to_test_search, create_questionnaire_language
from experiments.tests.tests_helper import create_keyword
from experiments.tests.tests_helper import create_participant
from nep import settings
from nep.local_settings import BASE_DIR
# Clear sqlite database and run migrate
call(['rm', BASE_DIR + '/db.sqlite3'])
call([BASE_DIR + '/manage.py', 'migrate'])
fake = Factory.create()
# Create api clients users (experiment owners)
owner1 = models.User.objects.create_user(username='lab1', password='nep-lab1')
owner2 = models.User.objects.create_user(username='lab2', password='nep-lab2')
# Create group trustees
group = models.Group.objects.create(name='trustees')
# Create 2 trustee users and add them to trustees group
trustee1 = models.User.objects.create_user(
username='claudia', first_name='Claudia', last_name='Vargas',
password='passwd'
)
trustee2 = models.User.objects.create_user(
username='roque', first_name='Antonio', last_name='Roque',
password='passwd'
)
group.user_set.add(trustee1)
group.user_set.add(trustee2)
for i in range(1, 4):
experiment_owner1 = Experiment.objects.create(
title=fake.word().title(),
description=fake.text(),
nes_id=i,
owner=owner1, version=1,
sent_date=datetime.utcnow(),
status=Experiment.TO_BE_ANALYSED,
data_acquisition_done=choice([True, False])
)
Study.objects.create(
title=fake.word().title(),
description=fake.text(),
start_date=datetime.utcnow(), experiment=experiment_owner1
)
# to test search (necessary to approve experiment(s) in front-end or
# directly in database)
if i == 1:
study = Study.objects.last()
study.description = 'The brachial artery is the major blood vessel ' \
'of the (upper) arm. It\'s correlated with ' \
'plexus.'
# We put a keyword with the string 'brachial plexus' in the study to
# also be found by search test
study.keywords.add('brachial plexus')
study.save()
create_ethics_committee_info(experiment_owner1)
create_group(randint(1, 3), experiment_owner1)
for i in range(4, 6):
experiment_owner2 = Experiment.objects.create(
title=fake.word().title(),
description=fake.text(),
nes_id=i,
owner=owner2, version=1,
sent_date=datetime.utcnow(),
status=Experiment.TO_BE_ANALYSED
)
# to test search (necessary to approve experiment(s) in front-end or
# directly in database)
if i == 4:
experiment_owner2.title = 'Brachial Plexus'
experiment_owner2.save()
if i == 5:
experiment_owner2.description = \
'Brachial plexus repair by peripheral nerve ' \
'grafts directly into the spinal cord in rats ' \
'Behavioral and anatomical evidence of ' \
'functional recovery'
experiment_owner2.save()
Study.objects.create(
title=fake.word().title(),
description=fake.text(),
start_date=datetime.utcnow(), experiment=experiment_owner2
)
create_group(randint(1, 3), experiment_owner2)
# to test search (necessary to approve experiment(s) in front-end or
# directly in database)
group = Group.objects.first()
group.description = 'Plexus brachial (com EMG) is written in wrong order. ' \
'Correct is Brachial plexus.'
ic = ClassificationOfDiseases.objects.create(
code='BP', description='brachial Plexus',
abbreviated_description='brachial Plexus',
parent=None
)
group.inclusion_criteria.add(ic)
group.save()
# to test search with filter (necessary to approve experiment(s) in
# front-end or directly in database)
create_step(1, group, Step.EMG)
# to test search with filter (necessary to approve experiment(s) in
# front-end or directly in database)
group = Group.objects.last()
group.title = 'Brachial Plexus com EEG'
group.save()
create_step(1, group, Step.EEG)
# to test search with filter (necessary to approve experiment(s) in
# front-end or directly in database)
group = Group.objects.get(
id=(Group.objects.last().id + Group.objects.first().id) // 2
)
group.title = 'Brachial Plexus com EEG e EMG'
group.save()
create_step(1, group, Step.EEG)
create_step(1, group, Step.EMG)
# Create researchers associated to studies created above
for study in Study.objects.all():
Researcher.objects.create(name=fake.name(),
email='claudia.portal.neuromat@gmail.com',
study=study)
# to test search (necessary to approve experiment(s) in front-end or
# directly in database)
researcher = Researcher.objects.last()
researcher.name = 'Yolanda Fuentes'
researcher.save()
# Create study collaborators (requires creating studies before)
for study in Study.objects.all():
create_researcher(study, 'Pero', 'Vaz')
# To test search (necessary to approve experiment(s) in front-end or
# directly in database)
# Create some keywords to associate with studies
create_keyword(10)
# Associate keywords with studies
for study in Study.objects.all():
kw1 = choice(Keyword.objects.all())
kw2 = choice(Keyword.objects.all())
kw3 = choice(Keyword.objects.all())
study.keywords.add(kw1, kw2, kw3)
# Create some entries for ClassificationOfDiseases
create_classification_of_diseases(10)
# Create genders
gender1 = Gender.objects.create(name='male')
gender2 = Gender.objects.create(name='female')
# Create groups' experimental protocols and participants
for group in Group.objects.all():
create_experimental_protocol(group)
create_participant(
randint(3, 7), group,
gender1 if randint(1, 2) == 1 else gender2
)
ic1 = choice(ClassificationOfDiseases.objects.all())
ic2 = choice(ClassificationOfDiseases.objects.all())
group.inclusion_criteria.add(ic1, ic2)
# Create TMSSetting from an experiment Approved, to test search.
# Obs.: TO VERIFY SEARCH TMS things, change Experiment status to APPROVED
# after run this faker populator
experiment = Experiment.objects.first()
create_tms_setting(1, experiment)
tms_setting = TMSSetting.objects.last()
tms_setting.name = 'tmssettingname'
tms_setting.save()
# Create TMSDeviceSetting from a TMSSetting to test search
# Required creating TMSSetting from experiment Approved, first
create_tms_device(1)
tms_device = TMSDevice.objects.last()
create_coil_model(1)
coil_model = CoilModel.objects.last()
create_tms_device_setting(1, tms_setting, tms_device, coil_model)
tms_device_setting = TMSDeviceSetting.objects.last()
tms_device_setting.pulse_stimulus_type = 'single_pulse'
tms_device_setting.save()
# Create TMSDevice to test search
tms_device.manufacturer_name = 'Siemens'
tms_device.save()
# Create another TMSSetting and associate with same TMSDeviceSetting
# created above to test searching
create_tms_setting(1, experiment)
tms_setting = TMSSetting.objects.last()
tmsds = create_tms_device_setting(1, tms_setting, tms_device, coil_model)
# Create TMSData to test search
create_tmsdata_objects_to_test_search()
##
# Create questionnaires
##
experiment = Experiment.objects.last()
group = experiment.groups.first()
create_questionnaire(1, 'q1', group)
questionnaire1 = Questionnaire.objects.last()
# create valid questionnaire in English
create_questionnaire_language(
questionnaire1,
settings.BASE_DIR + '/experiments/tests/questionnaire1.csv',
'en'
)
# create valid questionnaire in Portuguese
create_questionnaire_language(
questionnaire1,
settings.BASE_DIR + '/experiments/tests/questionnaire1_pt-br.csv',
'pt_br'
)
# create valid questionnaire in French
create_questionnaire_language(
questionnaire1,
settings.BASE_DIR + '/experiments/tests/questionnaire1_fr.csv',
'fr'
)
# TODO: After populating models we call 'manage.py rebuild_index --noinput' to
# TODO: rebuild haystack search index - to manually test searching.
# TODO: why is necessary to keep two blank lines for script run until the end?
| 35.753968
| 79
| 0.734628
|
3a465ee1345fd954066261bdfb000a18869e6742
| 1,472
|
py
|
Python
|
Coding-interview-problems/Programs/prog03.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | 3
|
2021-12-17T17:12:23.000Z
|
2022-03-29T13:41:21.000Z
|
Coding-interview-problems/Programs/prog03.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | null | null | null |
Coding-interview-problems/Programs/prog03.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | null | null | null |
''' programs on fibonacci series '''
def fibonacci(size):
a = 0
b = 1
print(a)
print(b)
for i in range(size-1):
c = a + b
a = b
b = c
print(c)
def fibonacci_nth(n):
a = 0
b = 1
if n < 0:
print('invalid')
return
elif n == 0:
return a
elif n == 1:
return b
else:
for i in range(2, n + 1):
    c = a + b
    a = b
    b = c
return b
def fibonacci_nth_rec(n):
if n < 0:
print('invalid number')
if n == 0:
return 0
if n == 1 :
return 1
return fibonacci_nth_rec(n-2) + fibonacci_nth_rec(n-1)
fibstore = [0, 1]
def fibonacci_memo(n):
if n < 0:
print('invalid')
if n <= len(fibstore):
return fibstore[n-1]
else:
fib = fibonacci_memo(n-2) + fibonacci_memo(n-1)
fibstore.append(fib)
return fib
fibdict = {0: 0, 1: 1}
def fibonacci_memo_2(n):
if n < 0:
print('invalid')
return
if n in fibdict.keys():
print('fib already in dict')
return fibdict[n]
else:
fib = fibonacci_memo_2(n-2) + fibonacci_memo_2(n-1)
fibdict[n] = fib
return fib
if __name__ == '__main__':
fibonacci(8)
print('*' * 20)
print(fibonacci_nth(9))
print('*' * 20)
print(fibonacci_nth_rec(9))
# print(fibonacci_memo(10))
#print(fibonacci_memo_2(100))
| 18.871795
| 59
| 0.501359
|
6091cf291ef2a7972a015fb0bc955004a5dfdf74
| 2,336
|
py
|
Python
|
eng/tox/import_all.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
eng/tox/import_all.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
eng/tox/import_all.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# This script is used to verify package dependency by importing all modules
import sys
import argparse
import logging
import os
from tox_helper_tasks import get_package_details
from subprocess import check_call
logging.getLogger().setLevel(logging.INFO)
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
# keyvault has dependency issue when loading private module _BearerTokenCredentialPolicyBase from azure.core.pipeline.policies
# azure.core.tracing.opencensus and azure.eventhub.checkpointstoreblob.aio are skipped due to a known issue in loading azure.core.tracing.opencensus
excluded_packages = [
"azure",
"azure-mgmt",
]
def should_run_import_all(package_name):
return not (package_name in excluded_packages or "nspkg" in package_name)
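# Illustrative behaviour of the filter above (package names are hypothetical):
#   should_run_import_all("azure")              -> False  (explicitly excluded)
#   should_run_import_all("azure-nspkg")        -> False  (namespace packages are skipped)
#   should_run_import_all("azure-storage-blob") -> True   (regular package, gets the import smoke test)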
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Import all modules in package")
parser.add_argument(
"-t",
"--target",
dest="target_package",
help="The target package directory on disk.",
required=True,
)
args = parser.parse_args()
# get target package name from target package path
pkg_dir = os.path.abspath(args.target_package)
package_name, namespace, _, _, _ = get_package_details(os.path.join(pkg_dir, 'setup.py'))
if should_run_import_all(package_name):
# import all modules from current package
logging.info(
"Importing all modules from namespace [{0}] to verify dependency".format(
namespace
)
)
import_script_all = "from {0} import *".format(namespace)
commands = [
sys.executable,
"-c",
import_script_all
]
check_call(commands, cwd= root_dir)
logging.info("Verified module dependency, no issues found")
else:
        logging.error("Package {} is excluded from dependency check".format(package_name))
| 35.938462
| 148
| 0.636558
|
104399b42b3d6c624bc52fbaa571560bd48b4104
| 2,057
|
py
|
Python
|
plugins/lastlog.py
|
Perdu/poezio-1
|
ed645d9403ecf7bebfe2406f914868c3bbd1ef34
|
[
"Zlib"
] | null | null | null |
plugins/lastlog.py
|
Perdu/poezio-1
|
ed645d9403ecf7bebfe2406f914868c3bbd1ef34
|
[
"Zlib"
] | null | null | null |
plugins/lastlog.py
|
Perdu/poezio-1
|
ed645d9403ecf7bebfe2406f914868c3bbd1ef34
|
[
"Zlib"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Maxime “pep” Buquet
# Copyright © 2019 Madhur Garg
#
# Distributed under terms of the zlib license. See the COPYING file.
"""
Search provided string in the buffer and return all results on the screen
"""
import re
from typing import Optional
from datetime import datetime
from poezio.plugin import BasePlugin
from poezio import tabs
from poezio.text_buffer import Message, TextBuffer
def add_line(
text_buffer: TextBuffer,
text: str,
datetime: Optional[datetime] = None,
) -> None:
"""Adds a textual entry in the TextBuffer"""
text_buffer.add_message(
text,
datetime, # Time
None, # Nickname
None, # Nick Color
False, # History
None, # User
False, # Highlight
None, # Identifier
None, # str_time
None, # Jid
)
class Plugin(BasePlugin):
"""Lastlog Plugin"""
def init(self):
for tab in tabs.DynamicConversationTab, tabs.StaticConversationTab, tabs.PrivateTab, tabs.MucTab:
self.api.add_tab_command(
tab,
'lastlog',
self.command_lastlog,
usage='<keyword>',
                help='Search <keyword> in the buffer and return results '
                     'on the screen')
def command_lastlog(self, input_):
"""Define lastlog command"""
text_buffer = self.api.current_tab()._text_buffer
search_re = re.compile(input_, re.I)
res = []
add_line(text_buffer, "Lastlog:")
for message in text_buffer.messages:
if message.nickname is not None and \
search_re.search(message.txt) is not None:
res.append(message)
add_line(text_buffer, "%s> %s" % (message.nickname, message.txt), message.time)
add_line(text_buffer, "End of Lastlog")
self.api.current_tab().text_win.pos = 0
self.api.current_tab().core.refresh_window()
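# Minimal sketch of the filtering idea used in command_lastlog above, outside
# of poezio (message list and pattern below are made up for illustration):
#   pattern = re.compile("lastlog", re.I)
#   hits = [m for m in ["hello", "Lastlog demo"] if pattern.search(m)]
#   # hits == ["Lastlog demo"]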
| 28.569444
| 105
| 0.605737
|
03bc36335c57a7145118276da7b0c913c57dfb06
| 930
|
py
|
Python
|
tests/test_tradehub_get_username_check.py
|
Mai-Te-Pora/tradehub-python
|
8355b862f5cabeb9f5ee3d17682941116c95d08c
|
[
"MIT"
] | 7
|
2021-01-18T07:50:22.000Z
|
2022-01-16T15:14:08.000Z
|
tests/test_tradehub_get_username_check.py
|
Mai-Te-Pora/tradehub-python
|
8355b862f5cabeb9f5ee3d17682941116c95d08c
|
[
"MIT"
] | 13
|
2021-01-22T12:26:56.000Z
|
2021-03-07T09:24:49.000Z
|
tests/test_tradehub_get_username_check.py
|
Mai-Te-Pora/tradehub-python
|
8355b862f5cabeb9f5ee3d17682941116c95d08c
|
[
"MIT"
] | 1
|
2022-01-16T15:14:13.000Z
|
2022-01-16T15:14:13.000Z
|
from tests import APITestCase, mainnet_client, USERNAME_DEVEL
class TestTradeHubGetUsernameCheck(APITestCase):
def setUp(self) -> None:
self._client = mainnet_client
def test_get_username_check_structure(self):
"""
        Check that the response has the expected type and value.
:return:
"""
result: bool = self._client.get_username_check(USERNAME_DEVEL)
self.assertIsInstance(result, bool, msg=f"Expected result to be type bool, got {type(result)} instead.")
# TODO need test wallet
self.assertTrue(result, msg=f"Expected username {USERNAME_DEVEL} to be taken.")
result: bool = self._client.get_username_check(USERNAME_DEVEL.upper())
self.assertIsInstance(result, bool, msg=f"Expected result to be type bool, got {type(result)} instead.")
self.assertFalse(result, msg=f"Expected username {USERNAME_DEVEL.upper()} to be not taken.")
| 37.2
| 112
| 0.698925
|
5d364aaffe15cdc117d31ab482f13f23b3b560c7
| 895
|
py
|
Python
|
utils/utils.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 3
|
2022-03-15T05:22:29.000Z
|
2022-03-21T18:32:04.000Z
|
utils/utils.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 3
|
2022-02-20T04:43:18.000Z
|
2022-03-19T12:19:56.000Z
|
utils/utils.py
|
LuChenLab/SCAPE
|
49c4063f1ec3ac2d72f935b61de4a18c66db754d
|
[
"MIT"
] | 1
|
2022-03-21T18:32:15.000Z
|
2022-03-21T18:32:15.000Z
|
from collections import defaultdict
from itertools import islice
# These two functions are used to build the nested defaultdict instead of a lambda
def dict_list():
return defaultdict(list)
def wrapdict():
return defaultdict(dict_list)
def window(seq, n=2):
"""
Return a sliding window over a list
"""
it = iter(seq)
result = list(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + list((elem,))
yield result
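# Example (illustrative):
#   list(window([1, 2, 3, 4], n=2))  -> [[1, 2], [2, 3], [3, 4]]
#   list(window("ACGT", n=3))        -> [['A', 'C', 'G'], ['C', 'G', 'T']]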
class AttrDict(dict):
def __init__(self):
dict.__init__(self)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
return self[name]
class dotdict(dict):
"""
dot.notation access to dictionary attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
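# Minimal usage sketch (illustrative, names are made up):
#   d = dotdict({'gene': 'ACTB', 'count': 3})
#   d.count += 1                      # attribute-style read/write backed by the dict
#   a = AttrDict(); a.sample = 's1'   # a['sample'] -> 's1'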
| 19.456522
| 76
| 0.642458
|
b207694baa808f6e49262a4ebd284a4c21dda7a2
| 8,635
|
py
|
Python
|
art/attacks/evasion/targeted_universal_perturbation.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-01-31T15:17:20.000Z
|
2022-01-31T15:17:20.000Z
|
art/attacks/evasion/targeted_universal_perturbation.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-03-18T00:41:02.000Z
|
2022-03-18T00:41:02.000Z
|
art/attacks/evasion/targeted_universal_perturbation.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-03-22T05:30:31.000Z
|
2022-03-22T05:30:31.000Z
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the universal adversarial perturbations attack `TargetedUniversalPerturbation`.
| Paper link: https://arxiv.org/abs/1911.06502
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import random
import types
from typing import Any, Dict, Optional, Union, TYPE_CHECKING
import numpy as np
from art.attacks.attack import EvasionAttack
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from art.utils import projection
if TYPE_CHECKING:
from art.utils import CLASSIFIER_TYPE
logger = logging.getLogger(__name__)
class TargetedUniversalPerturbation(EvasionAttack):
"""
Implementation of the attack from Hirano and Takemoto (2019). Computes a fixed perturbation to be applied to all
future inputs. To this end, it can use any adversarial attack method.
| Paper link: https://arxiv.org/abs/1911.06502
"""
attacks_dict = {
"fgsm": "art.attacks.evasion.fast_gradient.FastGradientMethod",
"simba": "art.attacks.evasion.simba.SimBA",
}
attack_params = EvasionAttack.attack_params + ["attacker", "attacker_params", "delta", "max_iter", "eps", "norm"]
_estimator_requirements = (BaseEstimator, ClassifierMixin)
def __init__(
self,
classifier: "CLASSIFIER_TYPE",
attacker: str = "fgsm",
attacker_params: Optional[Dict[str, Any]] = None,
delta: float = 0.2,
max_iter: int = 20,
eps: float = 10.0,
norm: Union[int, float, str] = np.inf,
):
"""
:param classifier: A trained classifier.
        :param attacker: Adversarial attack name. Default is 'fgsm'. Supported names: 'fgsm', 'simba'.
:param attacker_params: Parameters specific to the adversarial attack. If this parameter is not specified,
the default parameters of the chosen attack will be used.
        :param delta: desired accuracy; the attack iterates until the targeted success rate reaches 1 - delta.
:param max_iter: The maximum number of iterations for computing universal perturbation.
:param eps: Attack step size (input variation)
:param norm: The norm of the adversarial perturbation. Possible values: "inf", np.inf, 2
"""
super().__init__(estimator=classifier)
self.attacker = attacker
self.attacker_params = attacker_params
self.delta = delta
self.max_iter = max_iter
self.eps = eps
self.norm = norm
self._targeted = True
self._check_params()
def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs.
:param y: An array with the targeted labels.
:return: An array holding the adversarial examples.
"""
if y is None:
raise ValueError("Labels `y` cannot be None.")
if self.estimator.nb_classes == 2 and y.shape[1] == 1:
raise ValueError(
"This attack has not yet been tested for binary classification with a single output classifier."
)
logger.info("Computing targeted universal perturbation based on %s attack.", self.attacker)
# Init universal perturbation
noise = np.zeros_like(x[[0]])
fooling_rate = 0.0
targeted_success_rate = 0.0
nb_instances = len(x)
# Instantiate the middle attacker and get the predicted labels
attacker = self._get_attack(self.attacker, self.attacker_params)
pred_y = self.estimator.predict(x, batch_size=1)
pred_y_max = np.argmax(pred_y, axis=1)
# Start to generate the adversarial examples
nb_iter = 0
while targeted_success_rate < 1.0 - self.delta and nb_iter < self.max_iter:
# Go through all the examples randomly
rnd_idx = random.sample(range(nb_instances), nb_instances)
# Go through the data set and compute the perturbation increments sequentially
for _, (e_x, e_y) in enumerate(zip(x[rnd_idx], y[rnd_idx])):
x_i = e_x[None, ...]
y_i = e_y[None, ...]
current_label = np.argmax(self.estimator.predict(x_i + noise)[0])
target_label = np.argmax(y_i)
if current_label != target_label:
# Compute adversarial perturbation
adv_xi = attacker.generate(x_i + noise, y=y_i)
new_label = np.argmax(self.estimator.predict(adv_xi)[0])
# If the class has changed, update v
if new_label == target_label:
noise = adv_xi - x_i
# Project on L_p ball
noise = projection(noise, self.eps, self.norm)
nb_iter += 1
# Apply attack and clip
x_adv = x + noise
if hasattr(self.estimator, "clip_values") and self.estimator.clip_values is not None:
clip_min, clip_max = self.estimator.clip_values
x_adv = np.clip(x_adv, clip_min, clip_max)
# Compute the error rate
y_adv = np.argmax(self.estimator.predict(x_adv, batch_size=1), axis=1)
fooling_rate = np.sum(pred_y_max != y_adv) / nb_instances
targeted_success_rate = np.sum(y_adv == np.argmax(y, axis=1)) / nb_instances
self.fooling_rate = fooling_rate
self.targeted_success_rate = targeted_success_rate
self.converged = nb_iter < self.max_iter
self.noise = noise
logger.info("Fooling rate of universal perturbation attack: %.2f%%", 100 * fooling_rate)
logger.info("Targeted success rate of universal perturbation attack: %.2f%%", 100 * targeted_success_rate)
return x_adv
def _check_params(self) -> None:
if not isinstance(self.delta, (float, int)) or self.delta < 0 or self.delta > 1:
raise ValueError("The desired accuracy must be in the range [0, 1].")
if not isinstance(self.max_iter, int) or self.max_iter <= 0:
raise ValueError("The number of iterations must be a positive integer.")
if not isinstance(self.eps, (float, int)) or self.eps <= 0:
raise ValueError("The eps coefficient must be a positive float.")
def _get_attack(self, a_name: str, params: Optional[Dict[str, Any]] = None) -> EvasionAttack:
"""
Get an attack object from its name.
:param a_name: attack name.
:param params: attack params.
:return: attack object
"""
try:
attack_class = self._get_class(self.attacks_dict[a_name])
a_instance = attack_class(self.estimator) # type: ignore
if params:
a_instance.set_params(**params)
return a_instance
except KeyError:
raise NotImplementedError(f"{a_name} attack not supported") from KeyError
@staticmethod
def _get_class(class_name: str) -> types.ModuleType:
"""
Get a class module from its name.
:param class_name: Full name of a class.
:return: The class `module`.
"""
sub_mods = class_name.split(".")
module_ = __import__(".".join(sub_mods[:-1]), fromlist=sub_mods[-1])
class_module = getattr(module_, sub_mods[-1])
return class_module
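# Hedged usage sketch (illustrative; `clf`, `x` and `y_target` are placeholders
# for an already-wrapped ART classifier, inputs, and one-hot target labels):
#
#   attack = TargetedUniversalPerturbation(clf, attacker="fgsm",
#                                          attacker_params={"eps": 0.3},
#                                          delta=0.2, max_iter=20)
#   x_adv = attack.generate(x, y=y_target)
#   print(attack.targeted_success_rate, attack.fooling_rate, attack.noise.shape)
#
# Here "eps" inside attacker_params is the step size forwarded to the inner
# FGSM attack; it is distinct from this class's own `eps` projection radius.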
| 40.924171
| 120
| 0.651882
|
25aa5db4427e94d5a28b3047c44378aab71003d6
| 736
|
py
|
Python
|
data/products.py
|
IlyaLyamin/buynet
|
8cef028cbe06af8ce25bd2c9980a9a4cf94aac79
|
[
"MIT"
] | null | null | null |
data/products.py
|
IlyaLyamin/buynet
|
8cef028cbe06af8ce25bd2c9980a9a4cf94aac79
|
[
"MIT"
] | null | null | null |
data/products.py
|
IlyaLyamin/buynet
|
8cef028cbe06af8ce25bd2c9980a9a4cf94aac79
|
[
"MIT"
] | null | null | null |
import sqlalchemy
from .db_session import SqlAlchemyBase
from sqlalchemy import orm
from flask_login import UserMixin
from sqlalchemy_serializer import SerializerMixin
class Products(SqlAlchemyBase, UserMixin, SerializerMixin):
__tablename__ = 'products'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
user_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('users.id'))
    product = sqlalchemy.Column(sqlalchemy.String, default='без названия')  # Russian: "untitled"
    price = sqlalchemy.Column(sqlalchemy.Integer)
    photo = sqlalchemy.Column(sqlalchemy.String, default='default.jpg')
    about = sqlalchemy.Column(sqlalchemy.Text, default='Без описания')  # Russian: "No description"
author = orm.relation('User')
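# Illustrative usage (assumes a SQLAlchemy session created via .db_session;
# the values are placeholders):
#   p = Products(user_id=1, product='notebook', price=500)
#   session.add(p)
#   session.commit()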
| 40.888889
| 86
| 0.785326
|
3e6c902c81dee9cfcceb72611973d4e85842a12b
| 14,125
|
py
|
Python
|
imagenet/augment/augment_ops.py
|
sinemmy/fixmatch
|
3386d275fe59782936570b047dc66821ff96d5de
|
[
"Apache-2.0"
] | null | null | null |
imagenet/augment/augment_ops.py
|
sinemmy/fixmatch
|
3386d275fe59782936570b047dc66821ff96d5de
|
[
"Apache-2.0"
] | null | null | null |
imagenet/augment/augment_ops.py
|
sinemmy/fixmatch
|
3386d275fe59782936570b047dc66821ff96d5de
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for augmentation."""
import math
import tensorflow.compat.v1 as tf
import tensorflow_addons as tfa
# Default replace value
REPLACE_VALUE = 128
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor.
"""
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
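# Quick illustration of blend() (shapes and values are made up): blending an
# all-black and an all-white uint8 image with factor=0.5 yields mid-gray.
#   a = tf.zeros([2, 2, 3], tf.uint8)
#   b = tf.fill([2, 2, 3], tf.constant(255, tf.uint8))
#   blend(a, b, 0.5)   # every pixel ends up around 127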
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
image.dtype)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(
image,
[0, 0, 0],
[image_shape[0], image_shape[1], image_shape[2] - 1])
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
threshold = tf.saturate_cast(threshold, image.dtype)
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128
threshold = tf.saturate_cast(threshold, image.dtype)
added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
added_im = tf.saturate_cast(added_im, tf.uint8)
return tf.where(image < threshold, added_im, image)
def invert(image):
"""Inverts the image pixels."""
return 255 - tf.convert_to_tensor(image)
def invert_blend(image, factor):
"""Implements blend of invert with original image."""
return blend(invert(image), image, factor)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast(mean + 0.5, tf.uint8)
degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = tf.cast(8 - bits, image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees):
"""Equivalent of PIL Rotation."""
# Convert from degrees to radians
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = tfa.image.transform_ops.rotate(wrap(image), radians)
return unwrap(image)
def translate_x(image, pixels):
"""Equivalent of PIL Translate in X dimension."""
image = tfa.image.translate_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image)
def translate_y(image, pixels):
"""Equivalent of PIL Translate in Y dimension."""
image = tfa.image.translate_ops.translate(wrap(image), [0, -pixels])
return unwrap(image)
def shear_x(image, level):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1]
image = tfa.image.transform_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image)
def shear_y(image, level):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1]
image = tfa.image.transform_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops."""
def scale_channel(channel):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image.
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(channel), tf.float32)
hi = tf.cast(tf.reduce_max(channel), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
return tf.saturate_cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def autocontrast_blend(image, factor):
"""Implements blend of autocontrast with original image."""
return blend(autocontrast(image), image, factor)
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_im = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
# Blend the final result
return blend(result, orig_im, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def equalize_blend(image, factor):
"""Implements blend of equalize with original image."""
return blend(equalize(image), image, factor)
def _convolve_image_with_kernel(image, kernel):
num_channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, num_channels, 1])
image = tf.expand_dims(image, axis=0)
convolved_im = tf.nn.depthwise_conv2d(
tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
# adding 0.5 for future rounding, same as in PIL:
# https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
convolved_im = convolved_im + 0.5
return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
"""Blur with the same kernel as ImageFilter.BLUR."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class BLUR(BuiltinFilter):
# name = "Blur"
# # fmt: off
# filterargs = (5, 5), 16, 0, (
# 1, 1, 1, 1, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
blur_kernel = tf.constant([[1., 1., 1., 1., 1.],
[1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.],
[1., 1., 1., 1., 1.]],
dtype=tf.float32,
shape=[5, 5, 1, 1]) / 16.0
blurred_im = _convolve_image_with_kernel(image, blur_kernel)
return blend(image, blurred_im, factor)
def smooth(image, factor):
"""Smooth with the same kernel as ImageFilter.SMOOTH."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class SMOOTH(BuiltinFilter):
# name = "Smooth"
# # fmt: off
# filterargs = (3, 3), 13, 0, (
# 1, 1, 1,
# 1, 5, 1,
# 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
smooth_kernel = tf.constant([[1., 1., 1.],
[1., 5., 1.],
[1., 1., 1.]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.0
smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
return blend(image, smoothed_im, factor)
def rescale(image, level):
"""Rescales image and enlarged cornet."""
# TODO(kurakin): should we do center crop instead?
# TODO(kurakin): add support of other resize methods
# See tf.image.ResizeMethod for full list
size = image.shape[:2]
scale = level * 0.25
scale_height = tf.cast(scale * size[0], tf.int32)
scale_width = tf.cast(scale * size[1], tf.int32)
cropped_image = tf.image.crop_to_bounding_box(
image,
offset_height=scale_height,
offset_width=scale_width,
target_height=size[0] - scale_height,
target_width=size[1] - scale_width)
rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
return tf.saturate_cast(rescaled, tf.uint8)
NAME_TO_FUNC = {
'Identity': tf.identity,
'AutoContrast': autocontrast,
'AutoContrastBlend': autocontrast_blend,
'Equalize': equalize,
'EqualizeBlend': equalize_blend,
'Invert': invert,
'InvertBlend': invert_blend,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Blur': blur,
'Smooth': smooth,
'Rescale': rescale,
}
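# Hedged dispatch example (argument values are illustrative; each op expects a
# uint8 HWC image tensor):
#   img = tf.zeros([32, 32, 3], dtype=tf.uint8)
#   img = NAME_TO_FUNC['Brightness'](img, 1.2)
#   img = NAME_TO_FUNC['Rotate'](img, 15.0)   # rotation angle in degrees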
| 33.630952
| 151
| 0.661027
|
53504463a5832d9541809f18f90330f3b3262a82
| 44,269
|
py
|
Python
|
rl_server/myAlgo_1_server.py
|
888yzbt888/Pensieve-multiagent
|
b5409c949a4855afedc910de5dd6eabe076567cc
|
[
"MIT"
] | 2
|
2018-06-20T07:23:15.000Z
|
2019-05-17T08:13:57.000Z
|
rl_server/myAlgo_1_server.py
|
888yzbt888/Pensieve-multiagent
|
b5409c949a4855afedc910de5dd6eabe076567cc
|
[
"MIT"
] | null | null | null |
rl_server/myAlgo_1_server.py
|
888yzbt888/Pensieve-multiagent
|
b5409c949a4855afedc910de5dd6eabe076567cc
|
[
"MIT"
] | null | null | null |
"""
post_data:lastRequest(the order number of last chunk)
lastquality(0-5,0 is the lowest quality)
lastChunkSize(bit,kb/1024)
lastChunkStartTime(ms from 1970.1.1)
lastChunkFinishTime(ms from 1970.1.1)
buffer(s)
bufferAdjust(buffer time except the chunk played now)
RebufferTime(s)
bandwidthEst(kb/s)
type(???)
pastThroughput
heartbeat
input_dict:'log_file'
'last_bit_rate'
'last_total_rebuf'
'video_chunk_coount'
's_batch'
"""
#!/usr/bin/env python
import multiprocessing
import time
import copy
import socket
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import base64
import urllib
import sys
import os
import json
import time
import fcntl
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import time
import itertools
################## ROBUST MPC ###################
S_INFO = 5 # bit_rate, buffer_size, rebuffering_time, bandwidth_measurement, chunk_til_video_end
S_LEN = 8 # take how many frames in the past
MPC_FUTURE_CHUNK_COUNT = 5
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 448
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
NN_MODEL = None
CHUNK_COMBO_OPTIONS = []
# past errors in bandwidth
past_errors = []
past_bandwidth_ests = []
################################
# multiprocessing shared variables
manager=multiprocessing.Manager()
Que1=manager.list()
Que2=manager.list()
Dict1=manager.dict()
Dict2=manager.dict()
begin_time=time.time()
QueOnline=manager.list()
DictOnline={}#remember last quality for each IP
MultiClientState={}# format:{"IP":[(int)heartbeat_time_not_request_time,(int)quality]}
################################
# video chunk sizes
'''
size_video1 = [2354772, 2123065, 2177073, 2160877, 2233056, 1941625, 2157535, 2290172, 2055469, 2169201, 2173522, 2102452, 2209463, 2275376, 2005399, 2152483, 2289689, 2059512, 2220726, 2156729, 2039773, 2176469, 2221506, 2044075, 2186790, 2105231, 2395588, 1972048, 2134614, 2164140, 2113193, 2147852, 2191074, 2286761, 2307787, 2143948, 1919781, 2147467, 2133870, 2146120, 2108491, 2184571, 2121928, 2219102, 2124950, 2246506, 1961140, 2155012, 1433658]
size_video2 = [1728879, 1431809, 1300868, 1520281, 1472558, 1224260, 1388403, 1638769, 1348011, 1429765, 1354548, 1519951, 1422919, 1578343, 1231445, 1471065, 1491626, 1358801, 1537156, 1336050, 1415116, 1468126, 1505760, 1323990, 1383735, 1480464, 1547572, 1141971, 1498470, 1561263, 1341201, 1497683, 1358081, 1587293, 1492672, 1439896, 1139291, 1499009, 1427478, 1402287, 1339500, 1527299, 1343002, 1587250, 1464921, 1483527, 1231456, 1364537, 889412]
size_video3 = [1034108, 957685, 877771, 933276, 996749, 801058, 905515, 1060487, 852833, 913888, 939819, 917428, 946851, 1036454, 821631, 923170, 966699, 885714, 987708, 923755, 891604, 955231, 968026, 874175, 897976, 905935, 1076599, 758197, 972798, 975811, 873429, 954453, 885062, 1035329, 1026056, 943942, 728962, 938587, 908665, 930577, 858450, 1025005, 886255, 973972, 958994, 982064, 830730, 846370, 598850]
size_video4 = [668286, 611087, 571051, 617681, 652874, 520315, 561791, 709534, 584846, 560821, 607410, 594078, 624282, 687371, 526950, 587876, 617242, 581493, 639204, 586839, 601738, 616206, 656471, 536667, 587236, 590335, 696376, 487160, 622896, 641447, 570392, 620283, 584349, 670129, 690253, 598727, 487812, 575591, 605884, 587506, 566904, 641452, 599477, 634861, 630203, 638661, 538612, 550906, 391450]
size_video5 = [450283, 398865, 350812, 382355, 411561, 318564, 352642, 437162, 374758, 362795, 353220, 405134, 386351, 434409, 337059, 366214, 360831, 372963, 405596, 350713, 386472, 399894, 401853, 343800, 359903, 379700, 425781, 277716, 400396, 400508, 358218, 400322, 369834, 412837, 401088, 365161, 321064, 361565, 378327, 390680, 345516, 384505, 372093, 438281, 398987, 393804, 331053, 314107, 255954]
size_video6 = [181801, 155580, 139857, 155432, 163442, 126289, 153295, 173849, 150710, 139105, 141840, 156148, 160746, 179801, 140051, 138313, 143509, 150616, 165384, 140881, 157671, 157812, 163927, 137654, 146754, 153938, 181901, 111155, 153605, 149029, 157421, 157488, 143881, 163444, 179328, 159914, 131610, 124011, 144254, 149991, 147968, 161857, 145210, 172312, 167025, 160064, 137507, 118421, 112270]
'''
size_video1=[1756806,3091206,2210154,1845731,1817275,2069902,2117640,2061264,2237179,2132441,2044975,3789197,3250223,2487213,2149619,1765477,
2505293,2673223,2084351,2069989,1855189,2478422,2580412,2065841,2585352,1351167,1398486,1725385,2897186,4738096,1670320,1756062,
3048206,4866144,1843384,1584205,1884317,1858789,1038538,798577,2117675,2528940,1398909,3205655,2983891,2201743,2366969,2553838,
1501437,1267022,1644497,1367567,1203298,3427696,1968656,3096706,2066317,2634682,1694746,1434945,3173242,1693021,1682424,2113373,
3103217,2462552,2256905,2226073,1980055,2037901,2470135,2128194,2434345,1714265,1330462,2102803,1015863,865084,1634635,1229781,
1227461,1383375,1572941,1624467,1260343,2100804,1782111,3028204,1845902,1283138,1529032,1782594,1613129,1621860,1702228,1935782,
1908470,1820040,1542276,2025509,1672002,1681633,3771816,5057431,3537995,2803543,3831917,2709325,3627028,2349666,2466424,2215131,
2249004,1704399,1689433,1362005,1565350,2242939,2378241,2021904,2019269,1054954,2328052,2211591,2104177,2280895,1991117,1857563,
2209717,1711273,1293338,1289551,1976534,2284536,1925431,2869117,2469558,1435620,1240014,1811217,2988838,2552354,2365918,2065200,
2555376,1779921,2281000,2856816,2252510,1331370,1565606,1548717,3429310,1957226,1744946,1736383,2170169,2128049,2573352,2262965,
2877128,2632416,2110319,2309152,2087447,2303868,3110829,4470951,4276187,2646215,2596715,1701057,2932345,2622505,2362883,2360631,
3172401,3599259,2951048,1968506,2345232,1739989,1303134,1273197,1463247,1841675,2594747,3307177,1289034,2849319,2067334,1658999,
1451249,2074198,1510216,2665640,2975156,1903333,3534501,4269578,4256969,4212087,983135,527732,391690,1403108,1438881,1452557,
1917879,1613934,1871217,1188694,2512090,2858958,1531701,1008854,1003379,1815850,1586467,980482,1439500,2289989,2449550,3404941,
3365845,2830877,4573090,2648680,4028108,5273438,3649905,3386154,2446726,2965108,2245612,1832510,2071735,1755973,2019058,1360561,
1039489,1894295,1999107,1666014,2242594,1746466,2870885,2284279,1714119,2282746,1999381,2436148,1828361,2833893,2132959,1585105,
2275927,2131090,2951419,2197713,2049883,1657043,2195265,2978021,2007940,1712613,1729774,1533013,3056849,3034214,3327704,3120601,
2265234,1983515,2468537,2171814,1750435,1885298,2056222,2409637,1384308,1073859,1993041,2524543,2684433,2749667,1487433,2299203,
1711371,1882897,1979814,2600016,2829907,2024223,2435121,1745680,1733204,2311748,2360093,2962846,2530685,2333345,2573975,2688803,
1674837,2328829,2654846,2177220,1983637,1826992,1554600,1742047,1015182,1327517,1392909,1997961,2777906,2151277,1385355,1841831,
2576036,2248077,1670266,1921688,2513568,2592109,1866077,2254994,3076104,2892882,2637278,2258700,1223635,905654,900966,532695,
678430,1684441,1272715,1174559,1071726,1261171,1574531,1726304,1393375,1612197,1577541,1178594,1331352,1471475,1258708,1417142,
1337069,1753784,3098761,1712958,1487216,1749591,2094655,1655374,1838915,1632130,4455112,1103313,4325538,4260027,3363232,1966800,
2387229,2734086,2389536,2457011,2795839,2917015,2516264,2127460,2593348,3241121,3966814,3003788,1984507,2589085,2196063,1610600,
1378770,2396778,1976157,1717434,669393,1027820,1375132,1464032,1326640,1729066,1534541,1787945,2596315,3393474,2786962,3161567,
2753054,2801599,3086005,2440861,3156653,4016406,3399126,3785131,4186971,3408842,2612351,2792930,2184320,1364863,1291497,958698,
1640227,1815859,1795500,2069010,2016002,1406199,1373710,1718790,980021,862871,990244,1247321,1934872,1727416,1281950,1283997,
2167162,1437622,911988,1208836,1855819,1746139,2142901,3077141,2097075,1667617,2375729,1176383,1534788,2019092,1649060,1119606,
2066820]
size_video2=[1248166,1909948,1437354,1206293,1202036,1374260,1394562,1352039,1499553,1420399,1360662,2352325,2206383,1618768,
1455386,1204706,1713574,1864652,1448970,1398569,1293903,1662378,1778570,1405415,1767145,754576,789631,1047145,1830919,3283497,
1110502,1143921,2082236,3252018,1219923,1071692,1295207,1266141,656576,503078,1354183,1699087,927720,2208172,2011759,1494987,
1602807,1716281,996382,808047,975928,884332,755695,2258444,1301747,2091230,1441438,1791927,1142314,948784,2118602,1134808,1088077,
1419506,2094634,1666971,1573121,1445975,1315146,1393944,1676874,1438847,1587400,1082750,855365,1309596,616101,522811,1009092,
755328,744447,856311,990560,994122,741287,1350804,1149553,2095051,1184299,762583,968586,1179001,1003173,998496,1057590,1243591,
1237504,1117387,937314,1261624,1166183,1171457,2696482,3460711,2432287,1831251,2639863,1888769,2576440,1610171,1708230,1492094,
1538209,1132001,1123038,874553,1004636,1426699,1544177,1349606,1360880,645082,1354293,1398892,1451433,1504901,1328553,1263252,
1509891,1153670,855640,864167,1392355,1511324,1301036,1948238,1647259,955411,816968,1185012,2007860,1648783,1522896,1335718,
1707248,1085428,1457959,1994052,1475727,828972,948348,933982,2382507,1225258,1097507,1118835,1448416,1390061,1695141,1496810,
1954410,1774003,1366911,1524592,1368957,1501570,2095420,3114760,2838416,1502515,1694876,1053663,2100929,1903225,1667629,1663218,
2248474,2551140,2051397,1347603,1626107,1164880,871909,857484,973494,1264289,1741906,2304449,845899,1950152,1361535,1096620,
956379,1374366,979791,1713882,1980346,1253742,2331705,2782848,2771738,2807548,644361,352430,247261,924748,983983,978337,1273457,
1072491,1233180,753303,1719760,1976297,1020941,643472,632199,1212648,1033471,622503,954344,1418860,1581120,2280953,2273723,
1722839,3004290,1786110,2762113,3508086,2471169,2290623,1631933,2022588,1501694,1221686,1392053,1162530,1350142,916630,692591,
1272848,1376995,1130650,1511110,1188451,1956043,1553905,1190117,1536041,1334153,1620445,1229638,1904189,1437879,1043343,1484736,
1389038,1962114,1379569,1348907,1083199,1464620,1986660,1331590,1086919,1129684,1020726,2049670,2077307,2244912,2092287,1502555,
1329093,1638317,1432601,1186820,1259056,1378272,1592067,894118,702494,1328338,1707818,1858005,1814721,965118,1491287,1130946,
1245095,1297373,1761282,1887826,1337368,1614799,1121034,1145238,1497043,1606601,2025110,1710529,1583480,1723662,1810776,1113208,
1547386,1774950,1421925,1206322,1187183,1004007,1147471,676151,894621,880733,1266385,1848743,1457129,887321,1185256,1683346,
1454053,1091702,1298560,1702106,1712364,1162421,1518078,2105991,1963481,1783520,1462072,721990,579786,589643,344866,427515,
1117244,806288,741042,675112,787869,1011434,1126209,885267,1055611,1018506,773227,870077,912214,776772,883886,862865,1150468,
2067548,1099289,945530,1150026,1362064,1050127,1197301,1075450,2836687,702922,2875327,2778004,2245324,1287876,1575207,1779274,
1563888,1703575,1879597,1981220,1706876,1336949,1679947,2160617,2693480,2009306,1332161,1758489,1457012,1054975,926778,1589787,
1315164,1139932,406770,664625,936523,928176,835472,1167407,994739,1185573,1740000,2319760,1837859,2103152,1854032,1873751,2125146,
1614715,2116308,2777412,2292582,2515009,2837060,2395144,1790486,1913686,1448776,902340,828891,617586,1081453,1195033,1179707,
1339413,1300244,935908,880962,1098413,618451,537171,620261,773863,1240249,1093356,802481,790748,1415323,837047,545014,773276,
1225405,1133886,1437142,2045825,1351366,1027020,1495764,704275,989618,1287214,1087634,718747,1318691]
size_video3=[846285,1168830,924155,782361,776921,896171,904410,867529,987852,931970,884019,1388977,1386547,1061921,985293,802234,
1169255,1286193,995130,938950,889120,1097258,1199443,945496,1179962,430164,436726,624519,1113671,2138958,731588,732163,1371730,
2110792,788301,712212,865112,846544,419881,323168,854227,1103578,603737,1462476,1328702,995325,1062304,1130531,658362,515203,
564263,576357,481669,1439148,831514,1345162,986175,1206557,761735,621067,1371358,739751,691765,948480,1381127,1093177,1075045,
921199,858138,939164,1113522,952278,1002220,678313,545154,794368,368560,322602,627633,465516,454419,527661,617205,601121,418400,
838302,720424,1421671,743692,444613,590983,767637,605102,586040,629895,773906,770305,679673,553179,767895,798336,717269,1749944,
2232941,1630935,1191422,1750938,1335785,1831757,1108036,1190875,1006044,1040709,746704,736186,559337,646623,884342,996032,902282,
898520,388061,706020,837590,997771,984903,869629,841845,1003621,765322,549112,567129,962434,983686,849944,1297068,1068550,630926,
534534,753751,1297143,1033674,972729,860044,1146757,643290,916479,1371688,950221,503853,565079,558122,1579179,764904,684818,714375,
958026,897292,1095530,976392,1284670,1157384,849960,983202,885117,949242,1378747,2093615,1794015,892920,1070196,636000,1427417,
1358293,1161687,1148764,1556485,1755196,1391857,901239,1101441,767029,575457,571960,640246,852139,1153342,1551623,552146,1303983,
884697,728329,631483,890909,629541,1057592,1264644,812359,1495774,1802682,1794299,1809999,421592,234510,162002,598631,660455,
650412,831883,704816,796782,469916,1141450,1332339,673944,405808,393579,790772,668101,391316,620897,855778,987162,1437210,1494618,
1000189,1977624,1160710,1853267,2272158,1620476,1512714,1065616,1349832,985649,800298,916009,747151,878787,611733,458891,824552,
936781,763908,1005463,805397,1309198,1027202,824776,1018133,878999,1059264,816116,1245755,950480,675165,934743,881605,1262539,
836769,868241,689535,960324,1290799,875221,677750,719309,673009,1332185,1381609,1467929,1364835,972063,879023,1062308,925128,
796868,822789,917077,1038227,572879,460030,870647,1135715,1267450,1170787,608866,932475,718075,794316,835131,1173614,1226376,
873792,1039123,698256,744176,962960,1076340,1357311,1134278,1063750,1129502,1193512,731147,1008405,1172782,916351,714909,746975,
628955,733798,452985,599131,547008,788141,1187992,947166,556402,745185,1072325,919245,703608,867170,1130427,1110818,720520,1007762,
1397415,1311440,1185457,919927,415043,381670,384138,221070,272611,721164,508382,463087,418721,476494,636457,721220,555097,676089,
633209,496792,565895,553631,472079,531680,549381,738800,1333841,682133,579828,733952,859037,656064,756593,693793,1828137,431863,
1810452,1836670,1447052,837477,1007940,1130632,997037,1164277,1231827,1316193,1135411,817342,1051188,1391898,1762282,1306967,
877949,1172156,944666,677181,614653,1029902,861520,751279,251924,434194,637408,585673,517743,779377,624265,767662,1141932,1552512,
1182714,1350835,1216575,1221492,1437167,1047801,1352884,1866550,1498852,1594916,1933364,1666636,1216493,1299406,946556,587152,
523357,398282,698490,768546,747186,839672,816283,609526,551500,685818,385510,332617,384081,472836,784876,681576,495325,478054,
910864,486727,327909,490384,787676,714464,934579,1322102,836378,608941,898288,419176,631361,777189,710660,463377,848825]
size_video4=[547035,706404,596043,524098,504228,582524,590858,552807,649725,609806,581924,835167,856359,720885,648993,549888,798544,
890208,680375,621228,612247,714936,801526,636640,781633,258480,256981,381833,668878,1316285,483083,470324,887319,1346096,488576,
476883,574255,560775,275294,213942,544631,721930,394905,956401,866807,668112,707053,752293,439005,328990,332676,381240,315599,
905000,536920,856841,676222,814761,511744,406110,872426,478738,441067,638082,902857,705191,735017,597647,564153,640146,744700,
634426,622405,429916,348023,473333,223233,207060,398798,297699,289124,338019,386894,376068,247323,529278,458771,954008,469848,
268451,367008,510493,384351,336696,365757,469230,466878,397890,306208,392038,480889,427503,1061331,1462570,1107397,788212,1201905,
958934,1296354,764232,834022,684159,703462,494688,476757,358278,421053,547982,651712,605673,604769,247633,362988,500301,679289,
636811,569262,554524,657393,500344,353603,370888,654913,640820,555403,854536,682544,425652,353977,482904,831613,646249,623250,
570778,781138,395629,591756,919672,608636,315279,348908,341251,1028395,493213,433388,461614,633669,582445,710571,635445,829185,
740760,520948,625161,572429,587024,885619,1366909,1096009,549068,693014,384613,967739,961765,802806,786390,1063204,1193221,938432,
594814,738128,514183,385394,386211,419937,569630,759702,1035614,363332,867267,584199,495296,418710,579747,407271,643695,793432,
532780,953519,1181184,1173164,1150240,278260,158326,109243,391560,447495,432372,541903,462974,514903,297437,746687,889772,446977,
261064,245091,514842,433432,248997,401709,510992,623671,875583,954252,565854,1282428,760254,1230934,1471145,1041466,1007408,
700685,908906,647372,531923,604648,480567,571680,415481,311725,528791,648577,526915,676767,544984,877852,681274,584479,682400,
587249,697584,541523,819236,635454,439248,575534,558134,795960,507237,560309,435884,630696,842280,584377,418701,452008,447495,
855620,910486,955619,874290,634816,588917,688253,601008,545601,546370,622967,696809,377403,307085,582646,767567,881993,759744,
380057,569142,450995,500151,533009,787180,796757,579408,665424,428991,486141,634709,724968,910350,755342,723301,744499,791097,
486696,650661,775896,589564,417632,460207,386577,461058,309090,401728,335814,488570,758867,599018,354581,449831,677583,583268,
452635,579431,752699,725899,457825,661835,924337,879308,792148,572914,236078,252664,248583,143285,173576,464535,323435,290071,
259483,286196,396866,459208,346403,429612,379429,317461,373328,334657,285622,316216,347387,474325,846736,421261,358587,460670,
540837,418151,473605,443747,1142146,266099,1139106,1226865,912006,544488,637168,726559,633507,783324,803464,874546,749552,490660,
644883,880869,1134430,839081,575502,778336,608858,437231,411106,666015,563343,500243,160495,290749,441946,380307,327141,528851,
386873,499151,742431,1004036,756402,854695,798836,797035,965829,672367,837390,1234139,962167,972983,1314591,1183313,847271,900132,
623507,383196,331639,259707,448397,491216,470078,535948,506772,404948,343057,429095,241972,208979,237532,289286,502020,428997,
308660,291778,588501,298147,204497,313212,504692,445722,619353,831848,511452,357941,535866,252048,403999,477594,454970,301303,551953]
size_video5=[323113,418441,382004,337817,318822,366200,363903,346976,404249,383861,369141,500281,492772,467460,412406,364336,530546,
595068,453373,400416,406242,447605,508492,416723,492336,153985,149450,221825,389137,790219,302059,288733,540456,825815,285915,
304614,354511,356853,174974,139405,344879,446520,249322,594647,540016,434577,456950,491623,284629,206793,194787,245465,201172,
543139,328951,533104,446793,532154,333255,259306,513006,294784,273182,414589,562032,426081,491024,375053,356030,434816,485000,
415484,363173,267232,217152,268349,130234,129844,244414,183197,181289,211852,230048,232458,147458,323339,286466,621150,292710,
157388,224852,330448,244658,189794,208443,272864,272767,219585,160716,199810,281265,234643,623111,905443,715137,496016,757193,
653100,866715,509267,565709,439095,458179,317013,299723,220237,265702,326004,406891,398108,396428,161148,189747,289152,438311,
391808,350823,342642,404291,312421,215746,231048,419638,401633,350467,540680,413555,274948,226952,298374,504645,399332,385815,
376112,518000,240102,380381,592007,379115,193082,217973,203101,629581,312102,266984,289355,406154,364723,444534,405512,503590,
445920,301669,381944,350196,336701,533864,849909,638562,325653,440403,227952,636997,657734,525502,518535,689114,782104,610917,
376978,476526,340219,251135,252753,267845,366877,470621,673027,231795,555250,369340,329086,269267,364173,255834,373785,469492,
336207,598436,747522,744086,688734,182335,102883,71090,251738,294267,277818,338245,291138,317642,182073,467537,572660,290618,
163813,145742,323299,269998,151414,247136,299386,379185,511734,583799,309771,794744,474007,777870,916062,639704,663002,444759,
596148,410568,350269,389119,296238,363553,277452,211307,324543,445667,365955,459618,364370,580715,438804,412688,454548,384954,
449872,351636,532810,418362,278056,331408,337389,468421,287027,339677,265929,405248,543069,387402,240196,265294,288498,506694,
574841,596509,526249,403785,389295,440901,377555,376321,360924,424678,470015,246729,206103,391925,514724,604960,481393,227540,
320553,265336,292954,332903,526009,509974,379518,402580,247420,307887,399296,490999,599427,493224,485382,474936,511692,327348,
403054,509642,370452,220414,268766,223824,273431,210187,260215,194459,282781,458374,361910,222321,254376,398627,353268,277424,
373952,485170,458908,283968,415847,594244,581598,513771,336212,123056,159415,152039,84419,96964,296357,197550,174412,150205,
163490,234384,276420,206155,251134,207262,189865,234699,190492,162133,172192,208515,294919,506806,243271,208423,266189,317494,
252397,272579,266038,626921,160573,687288,805076,516668,334312,382256,432601,382803,509989,497589,559731,472280,271315,372954,
517170,690202,505692,358051,497198,379108,274271,264254,417412,356246,329139,100180,192502,302659,248706,201499,350511,223655,
308401,454270,637270,464928,511545,498959,503850,626394,410515,466441,761200,580059,554024,831652,823388,590577,625131,404481,
242549,205265,168423,277268,309949,278503,325049,292610,262838,201999,257126,143254,124497,137758,167697,308527,256226,182915,
168765,363172,179420,124656,188561,300983,262333,396335,493415,295359,207622,306053,145571,246429,285851,275563,186508,346649]
size_video6=[122566,141690,156437,151455,131958,141687,134848,143568,169611,155749,144962,187567,189741,191607,169931,160854,236280,
279955,203736,174786,193874,187167,207081,196253,203820,58306,52004,70463,114188,248094,120126,105738,187819,288450,107224,132126,
132775,150099,72040,63120,142264,179063,113063,237672,222641,210179,206024,226841,125166,86270,76277,111752,86103,186977,124488,
195494,209856,232665,151864,114023,181418,107522,111914,191996,231947,145572,228523,165245,154746,217987,232697,199480,132247,
114355,92243,101533,44432,46959,92051,64667,69881,82966,70706,91967,52126,115033,106804,257487,110329,52198,86248,137809,98071,
59563,67579,89812,87619,65049,51508,66553,97090,69339,218786,350602,282395,196655,294150,274147,350502,229885,264751,188592,194004,
138597,129254,90055,113934,119577,163598,176947,176958,64953,63686,94317,174842,133878,119038,116797,143402,114567,79187,85619,
158887,158149,136588,211814,149475,111228,90166,110685,182666,164383,153601,193728,240841,89363,172541,249048,155912,72714,96738,
76146,210967,138516,104483,112952,166011,143486,173754,163990,184907,157542,102142,138713,132187,103266,186551,302474,233690,114527,
183684,86990,275527,303484,247110,243197,306068,333494,259092,161551,219694,163689,115479,115867,110157,165717,206413,316094,106605,
258595,167706,161871,126251,164223,106360,140197,171683,142022,226802,274115,317194,289925,80931,38396,28340,124143,139033,128434,
145168,122302,127194,68553,208520,246036,119157,62046,49114,123744,104524,56056,81724,107806,129717,178197,219082,87764,309996,
175234,291302,381763,260114,311747,197184,285496,184984,171407,180922,127859,167708,142347,108401,127627,229023,194597,231589,
188967,293808,207290,225385,222372,182989,208632,165647,262519,198122,119059,136057,151258,207737,126195,142675,116189,196934,
273298,169687,80087,89952,116953,203808,258544,276055,251654,191358,176143,185613,174725,183381,183890,208329,222059,115871,103659,
194619,263618,323870,232819,101175,148358,120409,137639,169775,286516,266060,186239,185178,111048,131835,191865,248460,308506,
263337,268120,252697,279984,174154,193877,250368,165544,97614,128553,106663,133692,98249,131557,84157,120094,191725,157144,106115,
103896,189100,153325,105096,185534,243798,242423,135512,204760,313395,292357,286477,158682,36035,72722,58693,21160,29201,149424,
93095,73211,52395,60533,84569,100012,78060,95461,63814,66318,90387,64036,46982,48426,64363,108625,183411,70708,64343,82518,105266,
82540,70162,71644,64605,51629,207652,169915,122208,106258,133986,162789,140802,190933,160253,206255,174223,70660,113933,173128,
261541,173884,115544,179952,131746,92096,84877,151907,131972,127129,27791,55798,115167,97179,63504,113963,41194,72340,149359,
210948,145277,142456,148052,171092,235134,102985,129884,278803,214629,183098,306658,352088,282790,309863,185129,100329,81350,
64536,120000,135855,104350,136764,97760,99442,67417,84531,36782,30662,33807,40182,96727,72553,43191,38019,107349,45983,30115,
45931,84315,65096,123915,152798,77492,43261,76665,36196,69589,62195,61628,33154,80528]
def get_chunk_size(quality, index):
if ( index < 0 or index > 448 ):
return 0
    # note that the quality and video labels are inverted (i.e., quality 5 is highest and this pertains to video1)
sizes = {5: size_video1[index], 4: size_video2[index], 3: size_video3[index], 2: size_video4[index], 1: size_video5[index], 0: size_video6[index]}
return sizes[quality]
class my_socketserver(SocketServer.ThreadingTCPServer):
allow_reuse_address=True
daemon_threads=True
def __init__(self,server_address,RequestHandlerClass):
SocketServer.ThreadingTCPServer.__init__(self,server_address,RequestHandlerClass)
def make_request_handler(input_dict):
print('make req hndlr')
class Request_Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):#args: <socketobject>,('IP',port),<BaseHTTPServer>
#print('init')
self.input_dict = input_dict
self.log_file = input_dict['log_file']
#self.saver = input_dict['saver']
self.s_batch = input_dict['s_batch']
#self.a_batch = input_dict['a_batch']
#self.r_batch = input_dict['r_batch']
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
#print('init base class')
########### the broken pipe error ######## not debug yet
def handle(self):
try:
BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
############################################
def do_POST(self):
#print('do_post')
global Que1
content_length = int(self.headers['Content-Length'])
post_data = json.loads(self.rfile.read(content_length))
#print post_data
if ( 'pastThroughput' in post_data ):
# @Hongzi: this is just the summary of throughput/quality at the end of the load
# so we don't want to use this information to send back a new quality
print "Summary: ", post_data
elif('heartbeat' in post_data):
if self.client_address[0] not in Que1:
Que1.append(self.client_address[0])
#print('Que1',Que1[:])
#print self.client_address
send_data="receive hb"
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
else:
########## Algorithm write here! Now you get all info! #########
global begin_time
t = float(time.time() - begin_time)
q = int(post_data['lastquality'])
global Dict1,Dict2
if self.client_address[0] in Dict1.keys():
tmp=Dict1[self.client_address[0]]
tmp.append(t)
Dict1[self.client_address[0]]=tmp
tmp=Dict2[self.client_address[0]]
tmp.append(q)
Dict2[self.client_address[0]]=tmp
else:
Dict1[self.client_address[0]]=[t]
Dict2[self.client_address[0]]=[q]
#print(Dict1[self.client_address[0]],Dict2[self.client_address[0]])
global DictOnline,QueOnline
                for k in DictOnline.keys():  # keys() returns a list in Python 2, so popping while iterating is safe
if k not in QueOnline[:]:
DictOnline.pop(k)
DictOnline[self.client_address[0]]=q
# option 1. reward for just quality
# reward = post_data['lastquality']
# option 2. combine reward for quality and rebuffer time
# tune up the knob on rebuf to prevent it more
# reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
# option 3. give a fixed penalty if video is stalled
# this can reduce the variance in reward signal
# reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)
# option 4. use the metric in SIGCOMM MPC paper
rebuffer_time = float(post_data['RebufferTime'] -self.input_dict['last_total_rebuf'])
# --linear reward--
reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
- REBUF_PENALTY * rebuffer_time / M_IN_K \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
self.input_dict['last_bit_rate']) / M_IN_K
# --log reward--
# log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))
# log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))
# reward = log_bit_rate \
# - 4.3 * rebuffer_time / M_IN_K \
# - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)
# --hd reward--
# reward = BITRATE_REWARD[post_data['lastquality']] \
# - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])
self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
self.input_dict['last_total_rebuf'] = post_data['RebufferTime']
# retrieve previous state
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# compute bandwidth measurement
video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
video_chunk_size = post_data['lastChunkSize']
# compute number of video chunks left
video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_coount']
self.input_dict['video_chunk_coount'] += 1
# dequeue history record
state = np.roll(state, -1, axis=1)
# this should be S_INFO number of terms
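# State columns filled in below (one entry per history step):
#   state[0]: last chosen bitrate, normalized by the highest available bitrate
#   state[1]: current playback buffer, normalized by BUFFER_NORM_FACTOR
#   state[2]: rebuffering time of the last chunk, in seconds
#   state[3]: measured throughput of the last chunk (kilobytes per ms)
#   state[4]: remaining chunks, capped and normalized by CHUNK_TIL_VIDEO_END_CAP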
try:
state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
state[2, -1] = rebuffer_time / M_IN_K
state[3, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K # kilo byte / ms
state[4, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
curr_error = 0 # default: assume this is the first request, so the error is 0 since we have never predicted bandwidth
if ( len(past_bandwidth_ests) > 0 ):
curr_error = abs(past_bandwidth_ests[-1]-state[3,-1])/float(state[3,-1])
past_errors.append(curr_error)
except ZeroDivisionError:
# this should occur VERY rarely (about 1 out of 3000 chunks); it is likely a DASH issue
# in this case we ignore the observation and roll back to an earlier one
past_errors.append(0)
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
self.log_file.write(str(time.time()) + '\t' +
str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
str(post_data['buffer']) + '\t' +
str(rebuffer_time / M_IN_K) + '\t' +
str(video_chunk_size) + '\t' +
str(video_chunk_fetch_time) + '\t' +
str(reward) + '\n')
self.log_file.flush()
# pick bitrate according to MPC
# first get harmonic mean of last 5 bandwidths
past_bandwidths = state[3,-5:]
while past_bandwidths[0] == 0.0:
past_bandwidths = past_bandwidths[1:]
#if ( len(state) < 5 ):
# past_bandwidths = state[3,-len(state):]
#else:
# past_bandwidths = state[3,-5:]
bandwidth_sum = 0
for past_val in past_bandwidths:
bandwidth_sum += (1/float(past_val))
harmonic_bandwidth = 1.0/(bandwidth_sum/len(past_bandwidths))
# future bandwidth prediction
# divide by 1 + max of last 5 (or up to 5) errors
max_error = 0
error_pos = -5
if ( len(past_errors) < 5 ):
error_pos = -len(past_errors)
max_error = float(max(past_errors[error_pos:]))
future_bandwidth = harmonic_bandwidth/(1+max_error)
past_bandwidth_ests.append(harmonic_bandwidth)
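# Illustrative example (hypothetical numbers, not from a real trace):
# past_bandwidths = [2.0, 1.0, 4.0]  ->  harmonic mean = 3 / (1/2 + 1/1 + 1/4) ~= 1.71
# with a maximum past prediction error of 0.25, future_bandwidth ~= 1.71 / 1.25 ~= 1.37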
# future chunks length (try 4 if that many remaining)
last_index = int(post_data['lastRequest'])
future_chunk_length = MPC_FUTURE_CHUNK_COUNT
if ( TOTAL_VIDEO_CHUNKS - last_index < 5 ):
future_chunk_length = TOTAL_VIDEO_CHUNKS - last_index
# all possible combinations of 5 chunk bitrates (6^5 options)
# iterate over list and for each, compute reward and store max reward combination
max_reward = -100000000
best_combo = ()
start_buffer = float(post_data['buffer'])
#start = time.time()
for full_combo in CHUNK_COMBO_OPTIONS:
combo = full_combo[0:future_chunk_length]
# calculate total rebuffer time for this combination (start with start_buffer and subtract
# each download time, then add the 4-second chunk duration, in that order)
curr_rebuffer_time = 0
curr_buffer = start_buffer
bitrate_sum = 0
smoothness_diffs = 0
last_quality = int(post_data['lastquality'])
for position in range(0, len(combo)):
chunk_quality = combo[position]
index = last_index + position + 1 # e.g., if last chunk is 3, then first iter is 3+0+1=4
download_time = (get_chunk_size(chunk_quality, index)/1000000.)/future_bandwidth # this is MB/MB/s --> seconds
if ( curr_buffer < download_time ):
curr_rebuffer_time += (download_time - curr_buffer)
curr_buffer = 0
else:
curr_buffer -= download_time
curr_buffer += 4
# linear reward
bitrate_sum += VIDEO_BIT_RATE[chunk_quality]
smoothness_diffs += abs(VIDEO_BIT_RATE[chunk_quality] - VIDEO_BIT_RATE[last_quality])
# log reward
# log_bit_rate = np.log(VIDEO_BIT_RATE[chunk_quality] / float(VIDEO_BIT_RATE[0]))
# log_last_bit_rate = np.log(VIDEO_BIT_RATE[last_quality] / float(VIDEO_BIT_RATE[0]))
# bitrate_sum += log_bit_rate
# smoothness_diffs += abs(log_bit_rate - log_last_bit_rate)
# hd reward
#bitrate_sum += BITRATE_REWARD[chunk_quality]
#smoothness_diffs += abs(BITRATE_REWARD[chunk_quality] - BITRATE_REWARD[last_quality])
last_quality = chunk_quality
# compute reward for this combination (one reward per 5-chunk combo)
# bitrates are in Mbits/s, rebuffer in seconds, and smoothness_diffs in Mbits/s
# linear reward
reward = (bitrate_sum/1000.) - (4.3*curr_rebuffer_time) - (smoothness_diffs/1000.)
# log reward
# reward = (bitrate_sum) - (4.3*curr_rebuffer_time) - (smoothness_diffs)
# hd reward
#reward = bitrate_sum - (8*curr_rebuffer_time) - (smoothness_diffs)
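# Illustrative example (hypothetical numbers): for a combo with bitrate_sum = 8500 kbps,
# curr_rebuffer_time = 0.5 s and smoothness_diffs = 1700 kbps, the linear reward is
# 8500/1000 - 4.3*0.5 - 1700/1000 = 8.5 - 2.15 - 1.7 = 4.65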
# Additional portion from multiagent mechanism
# avg_btr=0
# adjust_weight=0.02
# for (k,v) in DictOnline.items():
# avg_btr+=VIDEO_BIT_RATE[v]
# if len(DictOnline)!=0:
# avg_btr/=len(DictOnline)
# r_=combo[0]-avg_btr # + when hd
# reward-=adjust_weight*r_
# #################
if ( reward > max_reward ):
max_reward = reward
best_combo = combo
#print (best_combo[0],max_reward)
# send data to html side (first chunk of best combo)
send_data = 0 # no combo beat the initial max_reward (error case), so send 0
if ( best_combo != () ): # some combo was good
send_data = str(best_combo[0])#combo:(5,4,3,2,5),combo[0]:5
end = time.time()
#print "TOOK: " + str(end-start)
end_of_video = False
if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
send_data = "REFRESH"
end_of_video = True
self.input_dict['last_total_rebuf'] = 0
self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
self.input_dict['video_chunk_coount'] = 0
self.log_file.write('\n') # so that in the log we know where video ends
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
# record [state, action, reward]
# put it here after training, notice there is a shift in reward storage
if end_of_video:
self.s_batch = [np.zeros((S_INFO, S_LEN))]
else:
self.s_batch.append(state)
def do_GET(self):
print('do_get')
print >> sys.stderr, 'GOT REQ'
self.send_response(200)
#self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
self.send_header('Cache-Control', 'max-age=3000')
self.send_header('Content-Length', 20)
self.end_headers()
self.wfile.write("console.log('here');")
def log_message(self, format, *args):
return
return Request_Handler
###### onlineCheck #######
def onlineCheck(Que1_,Que2_,QueOL):
while True:
#print('updateQue')
updateQue(Que1_,Que2_,QueOL)
global Dict1,Dict2,MultiClientState,begin_time
f=open("OLlist.json",'r')
fcntl.flock(f,fcntl.LOCK_EX)
try:
MultiClientState=json.load(f)
print(MultiClientState)
except:
MultiClientState={}
for ip in MultiClientState.keys():
if int(time.time())-MultiClientState[ip][0]-10>0:
MultiClientState.pop(ip)
tmp={}
try:
tmp[QueOL[:][0]]=[time.time(),max(max(Dict2.values()))]
except:
pass
MultiClientState.update(tmp)
print(MultiClientState)
fcntl.flock(f,fcntl.LOCK_UN)
f.close()
f=open("OLlist.json",'w')
fcntl.flock(f,fcntl.LOCK_EX)
json.dump(MultiClientState,f)
fcntl.flock(f,fcntl.LOCK_UN)
f.close()
plot(Dict1,Dict2)
time.sleep(5)
def updateQue(Que1_,Que2_,QueOL):
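# A client counts as online if it sent a heartbeat in either of the last two polling
# windows: QueOL becomes the union of the current window (Que1_) and the previous one
# (Que2_); the current window is then rotated into Que2_ and cleared.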
#print('_Que1',Que1_[:])
#print('_Que2',Que2_[:])
#print('_QueOnline',QueOL[:])
QueOL[:]=Que1_[:]+[item for item in Que2_[:] if item not in Que1_[:]]
Que2_[:]=copy.copy(Que1_[:])
Que1_[:]=[]
#print('Que1_',Que1_[:])
#print('Que2_',Que2_[:])
print('QueOnline_',QueOL[:])
##########################
###########plot###########
def plot(Dictt,Dictq):
color_ = ['black', 'red', 'blue', 'green', 'gold', 'm']
c=0
for k in Dictt.keys():
plt.plot(Dictt[k], Dictq[k], color=color_[c%6])
#print(Dictt[k],Dictq[k])
plt.scatter(Dictt[k], Dictq[k], color=color_[c%6])
plt.title("MPC_1")
plt.axis([-1,max(Dictt[k])*1.1,0,6])
c=c+1
plt.pause(1)
##########################
def run(server_class=HTTPServer, port=8333, log_file_path=LOG_FILE):#port=8333 also needs to change in dash.all.min.js
np.random.seed(RANDOM_SEED)
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
# make chunk combination options
for combo in itertools.product([0,1,2,3,4,5], repeat=5):#produce 6^5 samples like (5,5,3,3,1)
CHUNK_COMBO_OPTIONS.append(combo)
with open(log_file_path, 'wb') as log_file:
s_batch = [np.zeros((S_INFO, S_LEN))]
last_bit_rate = DEFAULT_QUALITY
last_total_rebuf = 0
# need this storage, because observation only contains total rebuffering time
# we compute the difference to get the per-chunk rebuffering time
video_chunk_count = 0
input_dict = {'log_file': log_file,
'last_bit_rate': last_bit_rate,
'last_total_rebuf': last_total_rebuf,
'video_chunk_coount': video_chunk_count,
's_batch': s_batch}
# interface to abr_rl server
handler_class = make_request_handler(input_dict=input_dict)
server_address = ('', port)#'localhost'
#httpd = server_class(server_address, handler_class)
httpd = my_socketserver(server_address, handler_class)
print 'Listening on port ' + str(port)
####### onlineCheck ######
global Que1
global Que2
global QueOnline
p = multiprocessing.Process(target=onlineCheck,args=(Que1,Que2,QueOnline))
p.daemon = True  # set the daemon flag (must be done before start) so the checker exits with the server
p.start()
##########################
httpd.serve_forever()
def main():
if len(sys.argv) == 2:
trace_file = sys.argv[1]
run(log_file_path=LOG_FILE + '_robustMPC_' + trace_file)
else:
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Keyboard interrupted."
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 64.626277
| 455
| 0.702794
|
8d4cb18ce2bee1b8d97e6219c898212a80f7b2e2
| 4,263
|
py
|
Python
|
plasma/cli/cli.py
|
bakaoh/plasma-mvp
|
91e04bd4e3a618fde009b27afaef9024b0515c6d
|
[
"Apache-2.0"
] | null | null | null |
plasma/cli/cli.py
|
bakaoh/plasma-mvp
|
91e04bd4e3a618fde009b27afaef9024b0515c6d
|
[
"Apache-2.0"
] | null | null | null |
plasma/cli/cli.py
|
bakaoh/plasma-mvp
|
91e04bd4e3a618fde009b27afaef9024b0515c6d
|
[
"Apache-2.0"
] | null | null | null |
import click
from ethereum import utils
from plasma_core.constants import NULL_ADDRESS
from plasma_core.transaction import Transaction
from plasma_core.utils.utils import confirm_tx
from plasma.client.client import Client
from plasma.client.exceptions import ChildChainServiceError
CONTEXT_SETTINGS = dict(
help_option_names=['-h', '--help']
)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.pass_context
def cli(ctx):
ctx.obj = Client()
def client_call(fn, argz=(), successmessage=""):
try:
output = fn(*argz)
if successmessage:
print(successmessage)
return output
except ChildChainServiceError as err:
print("Error:", err)
print("additional details can be found from the child chain's server output")
@cli.command()
@click.argument('amount', required=True, type=int)
@click.argument('address', required=True)
@click.pass_obj
def deposit(client, amount, address):
client.deposit(amount, address)
print("Deposited {0} to {1}".format(amount, address))
@cli.command()
@click.argument('blknum1', type=int)
@click.argument('txindex1', type=int)
@click.argument('oindex1', type=int)
@click.argument('blknum2', type=int)
@click.argument('txindex2', type=int)
@click.argument('oindex2', type=int)
@click.argument('cur12', default="0x0")
@click.argument('newowner1')
@click.argument('amount1', type=int)
@click.argument('newowner2')
@click.argument('amount2', type=int)
@click.argument('key1')
@click.argument('key2', required=False)
@click.pass_obj
def sendtx(client,
blknum1, txindex1, oindex1,
blknum2, txindex2, oindex2,
cur12,
amount1, newowner1,
amount2, newowner2,
key1, key2):
if cur12 == "0x0":
cur12 = NULL_ADDRESS
if newowner1 == "0x0":
newowner1 = NULL_ADDRESS
if newowner2 == "0x0":
newowner2 = NULL_ADDRESS
# Form a transaction
tx = Transaction(blknum1, txindex1, oindex1,
blknum2, txindex2, oindex2,
utils.normalize_address(cur12),
utils.normalize_address(newowner1), amount1,
utils.normalize_address(newowner2), amount2)
# Sign it
if key1:
tx.sign1(utils.normalize_key(key1))
if key2:
tx.sign2(utils.normalize_key(key2))
client_call(client.apply_transaction, [tx], "Sent transaction")
@cli.command()
@click.argument('key', required=True)
@click.pass_obj
def submitblock(client, key):
# Get the current block, already decoded by client
block = client_call(client.get_current_block)
# Sign the block
block.make_mutable()
normalized_key = utils.normalize_key(key)
block.sign(normalized_key)
client_call(client.submit_block, [block], "Submitted current block")
@cli.command()
@click.argument('blknum', required=True, type=int)
@click.argument('txindex', required=True, type=int)
@click.argument('oindex', required=True, type=int)
@click.argument('key1')
@click.argument('key2', required=False)
@click.pass_obj
def withdraw(client,
blknum, txindex, oindex,
key1, key2):
# Get the transaction's block, already decoded by client
block = client_call(client.get_block, [blknum])
# Create a Merkle proof
tx = block.transaction_set[txindex]
block.merklize_transaction_set()
proof = block.merkle.create_membership_proof(tx.merkle_hash)
# Create the confirmation signatures
confirmSig1, confirmSig2 = b'', b''
if key1:
confirmSig1 = confirm_tx(tx, block.merkle.root, utils.normalize_key(key1))
if key2:
confirmSig2 = confirm_tx(tx, block.merkle.root, utils.normalize_key(key2))
sigs = tx.sig1 + tx.sig2 + confirmSig1 + confirmSig2
client.withdraw(blknum, txindex, oindex, tx, proof, sigs)
print("Submitted withdrawal")
@cli.command()
@click.argument('owner', required=True)
@click.argument('blknum', required=True, type=int)
@click.argument('amount', required=True, type=int)
@click.pass_obj
def withdrawdeposit(client, owner, blknum, amount):
deposit_pos = blknum * 1000000000
client.withdraw_deposit(owner, deposit_pos, amount)
print("Submitted withdrawal")
if __name__ == '__main__':
cli()
| 29.19863
| 85
| 0.688717
|
3a110b86aed7d9bbecbfadfa07d87fc44967fdb0
| 4,098
|
py
|
Python
|
application/app.py
|
mortbauer/webapp
|
e8d70bcf7e20f50a8e5ce7e6132f8b48dc7d150e
|
[
"MIT"
] | null | null | null |
application/app.py
|
mortbauer/webapp
|
e8d70bcf7e20f50a8e5ce7e6132f8b48dc7d150e
|
[
"MIT"
] | null | null | null |
application/app.py
|
mortbauer/webapp
|
e8d70bcf7e20f50a8e5ce7e6132f8b48dc7d150e
|
[
"MIT"
] | null | null | null |
import json
from flask import request, render_template, jsonify, url_for, redirect, g, Response
from sqlalchemy.exc import IntegrityError
from .utils.auth import generate_token, requires_auth, verify_token
from . import models
from .schemas import UserSchema, GroupSchema
from . import constants
from . import app, db, sockets
user_schema = UserSchema()
users_schema = UserSchema(many=True)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/<path:path>', methods=['GET'])
def any_root_path(path):
return render_template('index.html')
@app.route('/api/current_user')
@requires_auth
def get_current_user():
return jsonify(result=g.current_user)
@app.route("/api/users", methods=["GET"])
@requires_auth
def get_users():
users = models.User.query.filter().all()
result = users_schema.dump(users)
return jsonify(result=result.data)
@app.route("/api/user/<int:user_id>", methods=["GET"])
@requires_auth
def get_user(user_id):
user = models.User.query.filter_by(id=user_id).first()
result = user_schema.dump(user)
return jsonify(result=result.data)
@app.route("/api/user/<int:user_id>", methods=["PUT"])
@requires_auth
def set_usermeta(user_id):
incoming = request.get_json()
user = models.User.query.filter_by(id=user_id).first()
user.username = incoming['username']
user.surname = incoming['surname']
db.session.commit()
return jsonify({'msg':'updated usermeta %s'%user_id})
@app.route("/api/user", methods=["POST"])
def create_user():
incoming = request.get_json()
if incoming and 'email' in incoming and 'password' in incoming:
user = models.User(
email=incoming["email"],
password=incoming["password"]
)
db.session.add(user)
try:
db.session.commit()
except IntegrityError:
return jsonify(message="User with that email already exists"), 409
new_user = models.User.query.filter_by(email=incoming["email"]).first()
return jsonify(id=user.id,token=generate_token(new_user))
else:
return jsonify(error=True), 400
@app.route("/api/get_token", methods=["POST"])
def get_token():
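# Expects a JSON body like {"email": "user@example.com", "password": "..."} (illustrative
# values); returns {"token": ...} on success, otherwise 403 with {"error": true}.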
incoming = request.get_json()
if incoming and 'email' in incoming and 'password' in incoming:
user = models.User.get_user_with_email_and_password(
incoming["email"], incoming["password"])
else:
user = None
if user:
return jsonify(token=generate_token(user))
return jsonify(error=True), 403
@app.route("/api/is_token_valid", methods=["POST"])
def is_token_valid():
incoming = request.get_json()
is_valid = verify_token(incoming["token"])
if is_valid:
return jsonify(token_is_valid=True)
else:
return jsonify(token_is_valid=False), 403
@app.route("/api/transactions", methods=["GET"])
@requires_auth
def get_transactions():
serialized = []
for tr in models.Transaction.query.filter().all():
serialized.append({
'id':tr.id,
'amount':tr.amount,
'bic_blz':tr.bic_blz,
'iban_knr':tr.iban_knr,
'our_iban':tr.our_iban,
'date':tr.date.isoformat(),
'comment':tr.comment,
'transaction_number':tr.transaction_number,
})
return jsonify({'result':serialized})
@sockets.route('/')
def test_socket(ws):
print('connected to {:}'.format(ws))
tr = {
'id':1,
'amount':1,
'bic_blz':1,
'iban_knr':1,
'our_iban':1,
'date':1,
'comment':'heeeello',
'transaction_number':1,
}
#ws.send(json.dumps({'action':{'type':constants.PUT_TRANSACTION,'payload':tr}}))
i = 0
payload = {'action':{'type':constants.TRANSACTIONS_PUT,'payload':tr}}
while not ws.closed:
message = ws.receive()
print('got %s'%message)
if i == 0:
msg = json.dumps(payload)
print('sending: %s'%msg)
ws.send(msg)
i += 1
| 28.859155
| 84
| 0.630796
|
5b5beb06456c4202a95b04d36af44ff311cc420f
| 6,770
|
py
|
Python
|
test/util/bitcoin-util-test.py
|
im818/litecoin
|
c95cea081e28a1bfd6aae4a96d1f336ddffac08d
|
[
"MIT"
] | null | null | null |
test/util/bitcoin-util-test.py
|
im818/litecoin
|
c95cea081e28a1bfd6aae4a96d1f336ddffac08d
|
[
"MIT"
] | null | null | null |
test/util/bitcoin-util-test.py
|
im818/litecoin
|
c95cea081e28a1bfd6aae4a96d1f336ddffac08d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for litecoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, bitcoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| 38.465909
| 126
| 0.617134
|
cba923fa5a4f8ed9c2e8e052e7a4116d87e45d5b
| 1,760
|
py
|
Python
|
demos/demo_1_alignment/faces/make_collages.py
|
SourCherries/auto-face-align
|
365bd01c22da6f3a44190261786fcc585687ea50
|
[
"Apache-2.0"
] | 13
|
2021-11-11T04:36:14.000Z
|
2022-03-08T00:20:54.000Z
|
demos/demo_1_alignment/faces/make_collages.py
|
SourCherries/auto-face-align
|
365bd01c22da6f3a44190261786fcc585687ea50
|
[
"Apache-2.0"
] | null | null | null |
demos/demo_1_alignment/faces/make_collages.py
|
SourCherries/auto-face-align
|
365bd01c22da6f3a44190261786fcc585687ea50
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import random
from collage_maker import make_collage
# ------------------------------------------------------------------------
# Helper function
def getAllFiles(dirName):
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk(dirName):
listOfFiles += [os.path.join(dirpath, file) for file in filenames]
return listOfFiles
# ------------------------------------------------------------------------
# Temporarily copy images from all subfolders into a single folder
temp_folder = "all-together"
os.mkdir(temp_folder)
for sub_folder in ["premium", "popular"]:
listOfFiles = getAllFiles(sub_folder)
images_A = [fn for fn in listOfFiles if os.path.splitext(fn)[1].lower() in ('.jpg', '.jpeg', '.png')]
for fn in images_A:
shutil.copy(fn, temp_folder)
# listOfFiles = getAllFiles("popular")
# images_B = [fn for fn in listOfFiles if os.path.splitext(fn)[1].lower() in ('.jpg', '.jpeg', '.png')]
# for fn in images_B:
# shutil.copy(fn, temp_folder)
# ------------------------------------------------------------------------
# Create collage
output = "collage_originals.png"
width = 1024*2 #800
init_height = 256 #250
files = [os.path.join(temp_folder, fn) for fn in os.listdir(temp_folder)]
images = [fn for fn in files if os.path.splitext(fn)[1].lower() in ('.jpg', '.jpeg', '.png')]
random.shuffle(images)
make_collage(images, output, width, init_height)
# ------------------------------------------------------------------------
# Remove temporary folder
for fn in images:
os.remove(fn)
os.rmdir(temp_folder)
# END
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
| 32.592593
| 105
| 0.522159
|
bc142b60a66fe3f76ccf4328b376b87e2f44a338
| 179
|
py
|
Python
|
xpulumi/pulumi_cli/__init__.py
|
sammck/xpulumi
|
1e25229cc5de113c6d4655ba81e391c29014aa5f
|
[
"MIT"
] | null | null | null |
xpulumi/pulumi_cli/__init__.py
|
sammck/xpulumi
|
1e25229cc5de113c6d4655ba81e391c29014aa5f
|
[
"MIT"
] | null | null | null |
xpulumi/pulumi_cli/__init__.py
|
sammck/xpulumi
|
1e25229cc5de113c6d4655ba81e391c29014aa5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2022 Samuel J. McKelvie
#
# MIT License - See LICENSE file accompanying this package.
#
"""Tools for working with standard Pulumi CLI"""
| 19.888889
| 59
| 0.715084
|
8e7d54568d44a3f6b2edb1725362f6de7e2aea13
| 775
|
py
|
Python
|
src/tests/part2/q234_test_linked_palin.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
src/tests/part2/q234_test_linked_palin.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
src/tests/part2/q234_test_linked_palin.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
from src.base.test_cases import TestCases
from src.mappers.list2linkedlist import to_linkedlist
class LinkedPalinTestCases(TestCases):
def __init__(self):
super(LinkedPalinTestCases, self).__init__()
self.__add_test_case__('Test 1', to_linkedlist(['a', 'a', 'b', 'a', 'a']), True)
self.__add_test_case__('Test 2', to_linkedlist(['a', 'a', 'b', 'b', 'a', 'a']), True)
self.__add_test_case__('Test 3', to_linkedlist(['a', 'a', 'b', 'a', 'c']), False)
self.__add_test_case__('Test 4', to_linkedlist(['a', 'a', 'b']), False)
self.__add_test_case__('Test 5', to_linkedlist(['a', 'a']), True)
self.__add_test_case__('Test 6', to_linkedlist(['a']), True)
self.__add_test_case__('Test 7', to_linkedlist([]), True)
| 55.357143
| 93
| 0.63871
|
ce769dd2b588b49967e95c7511bae175dfc0462b
| 5,580
|
py
|
Python
|
headpose_estimation_pairs.py
|
MitchellX/deep-head-pose
|
b26ca3a437cf8fb201d35184ea14be681c0a8231
|
[
"Apache-2.0"
] | 9
|
2021-05-10T11:42:58.000Z
|
2022-03-17T05:51:41.000Z
|
headpose_estimation_pairs.py
|
MitchellX/deep-head-pose
|
b26ca3a437cf8fb201d35184ea14be681c0a8231
|
[
"Apache-2.0"
] | null | null | null |
headpose_estimation_pairs.py
|
MitchellX/deep-head-pose
|
b26ca3a437cf8fb201d35184ea14be681c0a8231
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import dlib
import sys, os, argparse
import numpy as np
import cv2
import torch
from torch.autograd import Variable
from torchvision import transforms
import torch.backends.cudnn as cudnn
import torchvision
import torch.nn.functional as F
from PIL import Image
sys.path.append('code/')
import datasets, hopenet, utils
join = os.path.join
class HeadPose:
def __init__(self):
cudnn.enabled = True
batch_size = 1
self.gpu = 0
snapshot_path = '/home/xiangmingcan/notespace/deep-head-pose/hopenet_robust_alpha1.pkl'
input_path = '/home/xiangmingcan/notespace/cvpr_data/celeba/'
output = 'output/celeba.txt'
face_model = '/home/xiangmingcan/notespace/deep-head-pose/mmod_human_face_detector.dat'
out_dir = os.path.split(output)[0]
name = os.path.split(output)[1]
write_path = join(out_dir, "images_" + name[:-4])
if not os.path.exists(write_path):
os.makedirs(write_path)
if not os.path.exists(input_path):
sys.exit('Folder does not exist')
# ResNet50 structure
self.model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
# Dlib face detection model
self.cnn_face_detector = dlib.cnn_face_detection_model_v1(face_model)
print 'Loading snapshot.'
# Load snapshot
saved_state_dict = torch.load(snapshot_path)
self.model.load_state_dict(saved_state_dict)
print 'Loading data.'
self.transformations = transforms.Compose([transforms.Scale(224),
transforms.CenterCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.model.cuda(self.gpu)
print 'Ready to test network.'
# Test the Model
self.model.eval() # Change model to 'eval' mode (BN uses moving mean/var).
total = 0
self.idx_tensor = [idx for idx in range(66)]
self.idx_tensor = torch.FloatTensor(self.idx_tensor).cuda(self.gpu)
# -------------- for image operation ------------------
def estimate(self, image):
# image is the full path to the input image file
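# returns [yaw, pitch, roll] in degrees for the last face detected by dlib,
# or [None, None, None] if no face is found in the image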
image = cv2.imread(image)
cv2_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Dlib detect
dets = self.cnn_face_detector(cv2_frame, 1)
yaw_predicted, pitch_predicted, roll_predicted = None, None, None
for idx, det in enumerate(dets):
# Get x_min, y_min, x_max, y_max, conf
x_min = det.rect.left()
y_min = det.rect.top()
x_max = det.rect.right()
y_max = det.rect.bottom()
conf = det.confidence
bbox_width = abs(x_max - x_min)
bbox_height = abs(y_max - y_min)
x_min -= 2 * bbox_width / 4
x_max += 2 * bbox_width / 4
y_min -= 3 * bbox_height / 4
y_max += bbox_height / 4
x_min = max(x_min, 0); y_min = max(y_min, 0)
x_max = min(image.shape[1], x_max); y_max = min(image.shape[0], y_max)
# Crop image
img = cv2_frame[y_min:y_max,x_min:x_max]
img = Image.fromarray(img)
# Transform
img = self.transformations(img)
img_shape = img.size()
img = img.view(1, img_shape[0], img_shape[1], img_shape[2])
img = Variable(img).cuda(self.gpu)
yaw, pitch, roll = self.model(img)
yaw_predicted = F.softmax(yaw)
pitch_predicted = F.softmax(pitch)
roll_predicted = F.softmax(roll)
# Get continuous predictions in degrees.
yaw_predicted = torch.sum(yaw_predicted.data[0] * self.idx_tensor) * 3 - 99
pitch_predicted = torch.sum(pitch_predicted.data[0] * self.idx_tensor) * 3 - 99
roll_predicted = torch.sum(roll_predicted.data[0] * self.idx_tensor) * 3 - 99
# # Print new frame with cube and axis
# drawed_img = utils.draw_axis(image, yaw_predicted, pitch_predicted, roll_predicted, tdx =(x_min + x_max) / 2, tdy=(y_min + y_max) / 2, size =bbox_height / 2)
return [yaw_predicted, pitch_predicted, roll_predicted]
if __name__ == '__main__':
dataset = sys.argv[1]
method = sys.argv[2]
flag = 0
if dataset == "ffhq":
flag = 1
src = "/home/xiangmingcan/notespace/cvpr_data/" + dataset
tgt = "/home/xiangmingcan/notespace/cvpr_result/" + dataset + '/' + method
save_log = os.path.join("headPose/", dataset, method + ".txt")
path = os.path.join("headPose/", dataset)
if not os.path.exists(path):
os.makedirs(path)
logFile = open(save_log, 'w')
img_list = os.listdir(tgt)
img_list.sort()  # sort in place; a bare sorted() call would discard its result
headpose = HeadPose()
for input_img in img_list:
if '_mask' in input_img:
continue
eular_angles_result = headpose.estimate(os.path.join(tgt, input_img))
print(eular_angles_result)
# Euler angles of the reference image
refer_img = input_img.split('-')[1]
if flag:
refer_img = refer_img[:-3] + 'png'
eular_angles_refer = headpose.estimate(os.path.join(src, refer_img))
print(eular_angles_refer)
vec1 = np.array(eular_angles_result)
vec2 = np.array(eular_angles_refer)
if (None in vec1) or (None in vec2):
continue
distance = np.linalg.norm(vec1 - vec2)
print(distance)
print('\n')
logFile.write(str(distance))
logFile.write('\n')
logFile.close()
| 32.631579
| 171
| 0.608781
|
3ad912f405c96df8a68213ac2052d7e82fe4870e
| 42,007
|
py
|
Python
|
numpy/core/setup.py
|
udaygp/numpy
|
1f35a1d6320fc4b30b62a619dd64d20aa7911334
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/setup.py
|
udaygp/numpy
|
1f35a1d6320fc4b30b62a619dd64d20aa7911334
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/setup.py
|
udaygp/numpy
|
1f35a1d6320fc4b30b62a619dd64d20aa7911334
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
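# Typical use (as in configuration() below): a single shared instance caches the
# expensive checks so that config.h and _numpyconfig.h generation reuse the results, e.g.
#   cocache = CallOnceOnly()
#   moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)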
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Python 2.3 causes a segfault when
# trying to re-acquire the thread-state
# which is done in error-handling
# ufunc code. NPY_ALLOW_C_API and friends
# cause the segfault. So, we disable threading
# for now.
if sys.version[:5] < '2.4.2':
nosmp = 1
else:
# Perhaps a fancier check is in order here.
# so that threads are only enabled if there
# are actually multiple CPUS? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
try:
nosmp = os.environ['NPY_NOSMP']
nosmp = 1
except KeyError:
nosmp = 0
return nosmp == 1
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
# Normally, isnan and isinf are macro (C99), but some platforms only have
# func, or both func and macro version. Check for macro only, and define
# replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [8, 12, 16]
expected['Py_intptr_t'] = [4, 8]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [4, 8]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
# in which case there is no libraries attributes (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
# allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
# Don't install fenv unless we need them.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
# (don't ask). Because clib are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src')]
        # numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'templ_common.h.src'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources as distutils does not consider libraries
# dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'templ_common.h.src'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
]
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
multiarray_src.append(join('src', 'multiarray', 'cblasfuncs.c'))
else:
extra_info = {}
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
multiarray_src.append(join('src', 'multiarray', 'templ_common.h.src'))
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends=deps + multiarray_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
        # numpy.distutils generates .c from .c.src in weird directories; we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# scalarmath module #
#######################################################################
config.add_extension('scalarmath',
sources = [join('src', 'scalarmathmodule.c.src'),
join('src', 'private', 'scalarmathmodule.h.src'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
generate_ufunc_api],
depends = deps + npymath_sources,
libraries = ['npymath'],
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| 41.79801
| 97
| 0.549408
|
862f5f3c9eea4790effc4a258bf761e618a4b6a9
| 1,220
|
py
|
Python
|
LeetCode/incomplete/522-Longest-Uncommon-Subsequence-II.py
|
PyroGenesis/Comprehensive-Coding-Solutions
|
46684c07f16e35a779c075946b1b96956f2797e8
|
[
"MIT"
] | null | null | null |
LeetCode/incomplete/522-Longest-Uncommon-Subsequence-II.py
|
PyroGenesis/Comprehensive-Coding-Solutions
|
46684c07f16e35a779c075946b1b96956f2797e8
|
[
"MIT"
] | null | null | null |
LeetCode/incomplete/522-Longest-Uncommon-Subsequence-II.py
|
PyroGenesis/Comprehensive-Coding-Solutions
|
46684c07f16e35a779c075946b1b96956f2797e8
|
[
"MIT"
] | null | null | null |
# LeetCode imports
from typing import List
class Solution:
def findLUSlength(self, strs: List[str]) -> int:
n = len(strs)
strs.sort(key=len)
strs.reverse()
def isSubSeq(string, parent):
i, j = 0, 0
while i < len(string) and j < len(parent):
if string[i] == parent[j]:
i += 1
j += 1
# print(f'isSubSeq - string: {string}, parent: {parent}, {i == len(string)}')
return i == len(string)
for string_idx in range(n):
string = strs[string_idx]
is_subseq = False
for potential_parent_idx in range(n):
if string_idx == potential_parent_idx:
continue
potential_parent = strs[potential_parent_idx]
if len(potential_parent) < len(string):
break
if isSubSeq(string, potential_parent):
is_subseq = True
break
if not is_subseq:
return len(string)
return -1
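# Usage sketch (illustrative only; the inputs below are example values, not from the
# original file). The method returns the length of the longest string that is not a
# subsequence of any other string, or -1 if no such string exists.
if __name__ == "__main__":
    # every string fails to be a subsequence of the others, so the longest (length 3) wins
    assert Solution().findLUSlength(["aba", "cdc", "eae"]) == 3
    # every string is a subsequence of another one, so there is no uncommon subsequence
    assert Solution().findLUSlength(["aaa", "aaa", "aa"]) == -1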
| 30.5
| 89
| 0.443443
|
3733ced1a00ae45b2e08c01b962c2ae436fea55e
| 7,814
|
py
|
Python
|
openapi_spec_validator/validators.py
|
gcallaghan/openapi-spec-validator
|
3ab3411936faaee91246627f957ba6108cd47d44
|
[
"Apache-2.0"
] | null | null | null |
openapi_spec_validator/validators.py
|
gcallaghan/openapi-spec-validator
|
3ab3411936faaee91246627f957ba6108cd47d44
|
[
"Apache-2.0"
] | null | null | null |
openapi_spec_validator/validators.py
|
gcallaghan/openapi-spec-validator
|
3ab3411936faaee91246627f957ba6108cd47d44
|
[
"Apache-2.0"
] | null | null | null |
import logging
import string
from jsonschema.validators import RefResolver
from six import iteritems
from openapi_spec_validator.exceptions import (
ParameterDuplicateError, ExtraParametersError, UnresolvableParameterError,
)
from openapi_spec_validator.managers import ResolverManager
log = logging.getLogger(__name__)
def is_ref(spec):
return isinstance(spec, dict) and '$ref' in spec
class Dereferencer(object):
def __init__(self, spec_resolver):
self.resolver_manager = ResolverManager(spec_resolver)
def dereference(self, item):
log.debug("Dereferencing %s", item)
if item is None or not is_ref(item):
return item
ref = item['$ref']
with self.resolver_manager.in_scope(item) as resolver:
with resolver.resolving(ref) as target:
return target
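# Illustrative note on the class above (a sketch, not wording from the original source):
# dereference() resolves a single {"$ref": ...} mapping against the wrapped resolver, e.g.
# {"$ref": "#/components/schemas/Pet"} becomes the "Pet" schema dict, while any value that
# is not a $ref mapping is returned unchanged.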
class SpecValidator(object):
def __init__(self, validator_factory, resolver_handlers):
self.validator_factory = validator_factory
self.resolver_handlers = resolver_handlers
def validate(self, spec, spec_url=''):
for error in self.iter_errors(spec, spec_url=spec_url):
raise error
def iter_errors(self, spec, spec_url=''):
spec_resolver = self._get_resolver(spec_url, spec)
dereferencer = self._get_dereferencer(spec_resolver)
validator = self._get_validator(spec_resolver)
yield from validator.iter_errors(spec)
paths = spec.get('paths', {})
yield from self._iter_paths_errors(paths, dereferencer)
components = spec.get('components', {})
yield from self._iter_components_errors(components, dereferencer)
def _get_resolver(self, base_uri, referrer):
return RefResolver(
base_uri, referrer, handlers=self.resolver_handlers)
def _get_dereferencer(self, spec_resolver):
return Dereferencer(spec_resolver)
def _get_validator(self, spec_resolver):
return self.validator_factory.create(spec_resolver)
def _iter_paths_errors(self, paths, dereferencer):
return PathsValidator(dereferencer).iter_errors(paths)
def _iter_components_errors(self, components, dereferencer):
return ComponentsValidator(dereferencer).iter_errors(components)
class ComponentsValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, components):
components_deref = self.dereferencer.dereference(components)
schemas = components_deref.get('schemas', {})
yield from self._iter_schemas_errors(schemas)
def _iter_schemas_errors(self, schemas):
return SchemasValidator(self.dereferencer).iter_errors(schemas)
class SchemasValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, schemas):
schemas_deref = self.dereferencer.dereference(schemas)
for name, schema in iteritems(schemas_deref):
yield from self._iter_schem_errors(schema)
def _iter_schem_errors(self, schema):
return SchemaValidator(self.dereferencer).iter_errors(schema)
class SchemaValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, schema):
schema_deref = self.dereferencer.dereference(schema)
if 'allOf' in schema_deref:
for inner_schema in schema_deref['allOf']:
yield from self.iter_errors(inner_schema)
required = schema_deref.get('required', [])
properties = schema_deref.get('properties', {}).keys()
extra_properties = list(set(required) - set(properties))
if extra_properties:
yield ExtraParametersError(
"Required list has not defined properties: {0}".format(
extra_properties
)
)
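# Illustrative example for the check above (example schema, not from the original source):
# {"required": ["id", "name"], "properties": {"id": {"type": "integer"}}} yields
# ExtraParametersError, because "name" is listed as required but never defined under
# "properties".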
class PathsValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, paths):
paths_deref = self.dereferencer.dereference(paths)
for url, path_item in iteritems(paths_deref):
yield from self._iter_path_errors(url, path_item)
def _iter_path_errors(self, url, path_item):
return PathValidator(self.dereferencer).iter_errors(url, path_item)
class PathValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, url, path_item):
path_item_deref = self.dereferencer.dereference(path_item)
yield from self._iter_path_item_errors(url, path_item_deref)
def _iter_path_item_errors(self, url, path_item):
return PathItemValidator(self.dereferencer).iter_errors(url, path_item)
class PathItemValidator(object):
OPERATIONS = [
'get', 'put', 'post', 'delete', 'options', 'head', 'patch', 'trace',
]
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, url, path_item):
path_item_deref = self.dereferencer.dereference(path_item)
parameters = path_item_deref.get('parameters', [])
yield from self._iter_parameters_errors(parameters)
for field_name, operation in iteritems(path_item):
if field_name not in self.OPERATIONS:
continue
yield from self._iter_operation_errors(
url, field_name, operation, parameters)
def _iter_operation_errors(self, url, name, operation, path_parameters):
return OperationValidator(self.dereferencer).iter_errors(
url, name, operation, path_parameters)
def _iter_parameters_errors(self, parameters):
return ParametersValidator(self.dereferencer).iter_errors(parameters)
class OperationValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, url, name, operation, path_parameters=None):
path_parameters = path_parameters or []
operation_deref = self.dereferencer.dereference(operation)
parameters = operation_deref.get('parameters', [])
yield from self._iter_parameters_errors(parameters)
all_params = list(set(
list(self._get_path_param_names(path_parameters)) +
list(self._get_path_param_names(parameters))
))
for path in self._get_path_params_from_url(url):
if path not in all_params:
yield UnresolvableParameterError(
"Path parameter '{0}' for '{1}' operation in '{2}' "
"was not resolved".format(path, name, url)
)
return []
def _get_path_param_names(self, params):
for param in params:
param_deref = self.dereferencer.dereference(param)
if param_deref['in'] == 'path':
yield param_deref['name']
def _get_path_params_from_url(self, url):
formatter = string.Formatter()
path_params = [item[1] for item in formatter.parse(url)]
return filter(None, path_params)
def _iter_parameters_errors(self, parameters):
return ParametersValidator(self.dereferencer).iter_errors(parameters)
class ParametersValidator(object):
def __init__(self, dereferencer):
self.dereferencer = dereferencer
def iter_errors(self, parameters):
seen = set()
for parameter in parameters:
parameter_deref = self.dereferencer.dereference(parameter)
key = (parameter_deref['name'], parameter_deref['in'])
if key in seen:
yield ParameterDuplicateError(
"Duplicate parameter `{0}`".format(parameter_deref['name'])
)
seen.add(key)
| 32.423237
| 79
| 0.67891
|
100b4eff6789424351f91453e5bc1cf4269b60bd
| 3,562
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/ralstoniaeutrophajmp134.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/ralstoniaeutrophajmp134.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/ralstoniaeutrophajmp134.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Ralstonia eutropha JMP134.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RalstoniaEutrophaJmp134(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Ralstonia eutropha JMP134 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Ralstonia eutropha JMP134 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RalstoniaEutrophaJmp134",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
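# Usage sketch (illustrative only; the import path is assumed from this file's location in
# the repository):
#     from ensmallen.datasets.string import RalstoniaEutrophaJmp134
#     graph = RalstoniaEutrophaJmp134(version="links.v11.5")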
| 32.981481
| 223
| 0.67799
|
645c1d0d08a8951d121ad71750179ecdef0a53ce
| 1,568
|
py
|
Python
|
backend/env/lib/python3.8/site-packages/gevent/testing/hub.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 10
|
2021-03-23T03:46:19.000Z
|
2022-03-08T07:20:25.000Z
|
backend/env/lib/python3.8/site-packages/gevent/testing/hub.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 7
|
2021-05-21T16:51:48.000Z
|
2022-03-12T00:50:26.000Z
|
backend/env/lib/python3.8/site-packages/gevent/testing/hub.py
|
lubitelpospat/CFM-source
|
4e6af33ee68c6f2f05b6952b64a6b3f0591d5b03
|
[
"MIT"
] | 4
|
2021-04-21T00:49:34.000Z
|
2021-11-21T09:18:29.000Z
|
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
from gevent.hub import Hub
from .exception import ExpectedException
class QuietHub(Hub):
EXPECTED_TEST_ERROR = (ExpectedException,)
def handle_error(self, context, type, value, tb):
if issubclass(type, self.EXPECTED_TEST_ERROR):
# Don't print these to cut down on the noise in the test logs
return
return Hub.handle_error(self, context, type, value, tb)
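# Illustrative note (a sketch, not from the original module): a test that expects some other
# exception type could widen the silenced set by rebinding the class attribute, e.g.
#     QuietHub.EXPECTED_TEST_ERROR = QuietHub.EXPECTED_TEST_ERROR + (MyExpectedError,)
# where MyExpectedError stands for a hypothetical test-specific exception.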
| 43.555556
| 79
| 0.760842
|
0aa1ab7b4279c6c9ca678196684aac82b267c34c
| 1,240
|
py
|
Python
|
code/cfgs/TSUNAMIconfig.py
|
kensakurada/SceneChangeDet
|
0530e0162863fec0c5296188526f0d27e0109814
|
[
"MIT"
] | null | null | null |
code/cfgs/TSUNAMIconfig.py
|
kensakurada/SceneChangeDet
|
0530e0162863fec0c5296188526f0d27e0109814
|
[
"MIT"
] | null | null | null |
code/cfgs/TSUNAMIconfig.py
|
kensakurada/SceneChangeDet
|
0530e0162863fec0c5296188526f0d27e0109814
|
[
"MIT"
] | null | null | null |
import os
BASE_PATH = '/home/sakurada/work/src/SceneChangeDet/code'
PRETRAIN_MODEL_PATH = os.path.join(BASE_PATH,'pretrain')
DATA_PATH = '/projects/g-nedo-geospatial/work/sakurada/work/data/pcd/pcd_2018_0906_r4s4/set0'
TRAIN_DATA_PATH = os.path.join(DATA_PATH+'/train')
TRAIN_LABEL_PATH = os.path.join(DATA_PATH+'/train')
TRAIN_TXT_PATH = os.path.join(DATA_PATH,'train.txt')
VAL_DATA_PATH = os.path.join(DATA_PATH+'/test')
VAL_LABEL_PATH = os.path.join(DATA_PATH+'/test')
VAL_TXT_PATH = os.path.join(DATA_PATH,'test.txt')
SAVE_PATH = '/projects/g-nedo-geospatial/work/sakurada/work/data/pcd_precut/SceneChangeDet/checkpoint/set0'
SAVE_CKPT_PATH = os.path.join(SAVE_PATH,'ckpt_final')
if not os.path.exists(SAVE_CKPT_PATH):
os.makedirs(SAVE_CKPT_PATH)
SAVE_PRED_PATH = os.path.join(SAVE_PATH,'prediction_final')
if not os.path.exists(SAVE_PRED_PATH):
os.makedirs(SAVE_PRED_PATH)
TRAINED_BEST_PERFORMANCE_CKPT = os.path.join(SAVE_CKPT_PATH,'model_best.pth')
INIT_LEARNING_RATE = 1e-7
DECAY = 5e-5
MOMENTUM = 0.90
MAX_ITER = 40000
BATCH_SIZE = 1
THRESHS = [0.1,0.3,0.5]
THRESH = 0.1
LOSS_PARAM_CONV = 1
LOSS_PARAM_FC = 1
TRANSFROM_SCALES= (1024,224)
T0_MEAN_VALUE = (128.793,108.267,98.685)
T1_MEAN_VALUE = (166.814,136.916,122.396)
| 37.575758
| 107
| 0.778226
|
cbdc20894eb541af242719db73af364ed38396f7
| 923
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Tools/compiler/demo.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Tools/compiler/demo.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Tools/compiler/demo.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
#! /usr/bin/env python
"""Print names of all methods defined in module
This script demonstrates use of the visitor interface of the compiler
package.
"""
import compiler
class MethodFinder:
"""Print the names of all the methods
Each visit method takes two arguments, the node and its current
scope. The scope is the name of the current class or None.
"""
def visitClass(self, node, scope=None):
self.visit(node.code, node.name)
def visitFunction(self, node, scope=None):
if scope is not None:
print "%s.%s" % (scope, node.name)
self.visit(node.code, None)
def main(files):
mf = MethodFinder()
for file in files:
f = open(file)
buf = f.read()
f.close()
ast = compiler.parse(buf)
compiler.walk(ast, mf)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
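# Illustrative example (assumed invocation, not from the original script): running
#     python demo.py some_module.py
# on a file containing
#     class Greeter:
#         def hello(self): pass
# prints "Greeter.hello"; module-level functions are skipped because their scope is None.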
| 23.666667
| 70
| 0.603467
|
0f7c15fc9ea65031ebad1c7efad966887a1be49e
| 4,063
|
py
|
Python
|
venv/lib/python2.7/site-packages/tensorflow/contrib/seq2seq/ops/gen_beam_search_ops.py
|
nainkunal933/subspace-clustering
|
17bae7cb632a9ca786f03dcd002848ca0a4058d1
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/tensorflow/contrib/seq2seq/ops/gen_beam_search_ops.py
|
nainkunal933/subspace-clustering
|
17bae7cb632a9ca786f03dcd002848ca0a4058d1
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/tensorflow/contrib/seq2seq/ops/gen_beam_search_ops.py
|
nainkunal933/subspace-clustering
|
17bae7cb632a9ca786f03dcd002848ca0a4058d1
|
[
"MIT"
] | 1
|
2022-03-20T08:11:49.000Z
|
2022-03-20T08:11:49.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: beam_search_ops.cc
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('GatherTree')
def gather_tree(step_ids, parent_ids, max_sequence_lengths, end_token, name=None):
r"""Calculates the full beams from the per-step ids and parent beam ids.
On CPU, if an out of bound parent id is found, an error is returned.
On GPU, if an out of bound parent id is found, a -1 is stored in the
corresponding output value and the execution for that beam returns early.
For a given beam, past the time step containing the first decoded `end_token`
all values are filled in with `end_token`.
TODO(ebrevdo): fill in the remainder of this docstring.
Args:
step_ids: A `Tensor`. Must be one of the following types: `int32`.
`[max_time, batch_size, beam_width]`.
parent_ids: A `Tensor`. Must have the same type as `step_ids`.
`[max_time, batch_size, beam_width]`.
max_sequence_lengths: A `Tensor` of type `int32`. `[batch_size]`.
end_token: A `Tensor`. Must have the same type as `step_ids`. `[]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `step_ids`.
`[max_time, batch_size, beam_width]`.
"""
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"GatherTree", step_ids=step_ids, parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths, end_token=end_token,
name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"))
else:
_attr_T, _inputs_T = _execute.args_to_matching_eager([step_ids, parent_ids, end_token], _ctx)
(step_ids, parent_ids, end_token) = _inputs_T
max_sequence_lengths = _ops.convert_to_tensor(max_sequence_lengths, _dtypes.int32)
_inputs_flat = [step_ids, parent_ids, max_sequence_lengths, end_token]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"GatherTree", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"GatherTree", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
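# Illustrative note (a sketch of the semantics described in the docstring above, not part of
# the generated file): each beam is reconstructed by taking the id at its final decoded step
# and walking parent_ids backwards through time; once the first end_token in a beam is
# reached, the remaining positions of that beam are filled with end_token.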
_ops.RegisterShape("GatherTree")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "GatherTree"
# input_arg {
# name: "step_ids"
# type_attr: "T"
# }
# input_arg {
# name: "parent_ids"
# type_attr: "T"
# }
# input_arg {
# name: "max_sequence_lengths"
# type: DT_INT32
# }
# input_arg {
# name: "end_token"
# type_attr: "T"
# }
# output_arg {
# name: "beams"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_INT32
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\nt\n\nGatherTree\022\r\n\010step_ids\"\001T\022\017\n\nparent_ids\"\001T\022\030\n\024max_sequence_lengths\030\003\022\016\n\tend_token\"\001T\032\n\n\005beams\"\001T\"\020\n\001T\022\004type:\005\n\0032\001\003")
| 35.640351
| 248
| 0.71745
|
1e85d01327935714e4a049c0001f26e6ba4f90ec
| 6,325
|
py
|
Python
|
python/http_client/v1/polyaxon_sdk/models/v1_list_agents_response.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_list_agents_response.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | null | null | null |
python/http_client/v1/polyaxon_sdk/models/v1_list_agents_response.py
|
mouradmourafiq/polyaxon-client
|
5fc32b9decc7305161561d404b0127f3e900c64a
|
[
"Apache-2.0"
] | 1
|
2021-12-03T07:12:03.000Z
|
2021-12-03T07:12:03.000Z
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.18.2
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ListAgentsResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'count': 'int',
'results': 'list[V1Agent]',
'previous': 'str',
'next': 'str'
}
attribute_map = {
'count': 'count',
'results': 'results',
'previous': 'previous',
'next': 'next'
}
def __init__(self, count=None, results=None, previous=None, next=None, local_vars_configuration=None): # noqa: E501
"""V1ListAgentsResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._count = None
self._results = None
self._previous = None
self._next = None
self.discriminator = None
if count is not None:
self.count = count
if results is not None:
self.results = results
if previous is not None:
self.previous = previous
if next is not None:
self.next = next
@property
def count(self):
"""Gets the count of this V1ListAgentsResponse. # noqa: E501
:return: The count of this V1ListAgentsResponse. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this V1ListAgentsResponse.
:param count: The count of this V1ListAgentsResponse. # noqa: E501
:type count: int
"""
self._count = count
@property
def results(self):
"""Gets the results of this V1ListAgentsResponse. # noqa: E501
:return: The results of this V1ListAgentsResponse. # noqa: E501
:rtype: list[V1Agent]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this V1ListAgentsResponse.
:param results: The results of this V1ListAgentsResponse. # noqa: E501
:type results: list[V1Agent]
"""
self._results = results
@property
def previous(self):
"""Gets the previous of this V1ListAgentsResponse. # noqa: E501
:return: The previous of this V1ListAgentsResponse. # noqa: E501
:rtype: str
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this V1ListAgentsResponse.
:param previous: The previous of this V1ListAgentsResponse. # noqa: E501
:type previous: str
"""
self._previous = previous
@property
def next(self):
"""Gets the next of this V1ListAgentsResponse. # noqa: E501
:return: The next of this V1ListAgentsResponse. # noqa: E501
:rtype: str
"""
return self._next
@next.setter
def next(self, next):
"""Sets the next of this V1ListAgentsResponse.
:param next: The next of this V1ListAgentsResponse. # noqa: E501
:type next: str
"""
self._next = next
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ListAgentsResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ListAgentsResponse):
return True
return self.to_dict() != other.to_dict()
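# Usage sketch (illustrative values only, not from the generated model):
#     page = V1ListAgentsResponse(count=1, results=[], previous=None, next=None)
#     page.to_dict()  # -> {'count': 1, 'results': [], 'previous': None, 'next': None}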
| 27.863436
| 120
| 0.594941
|
1cd53a7160479fd6d4a68eca68eca4dd0b9f0b52
| 715
|
py
|
Python
|
glue/core/qt/tests/test_style_dialog.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 550
|
2015-01-08T13:51:06.000Z
|
2022-03-31T11:54:47.000Z
|
glue/core/qt/tests/test_style_dialog.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 1,362
|
2015-01-03T19:15:52.000Z
|
2022-03-30T13:23:11.000Z
|
glue/core/qt/tests/test_style_dialog.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 142
|
2015-01-08T13:08:00.000Z
|
2022-03-18T13:25:57.000Z
|
import time
from qtpy import QtCore
from glue.core import Data
from glue.core.tests.util import simple_session
from ..style_dialog import StyleDialog
class NonBlockingStyleDialog(StyleDialog):
def exec_(self, *args):
self.show()
time.sleep(0.1)
self.accept()
def test_style_dialog():
# This is in part a regression test for a bug in Python 3. It is not a
# full test of StyleDialog.
session = simple_session()
hub = session.hub
collect = session.data_collection
image = Data(label='im',
x=[[1, 2], [3, 4]],
y=[[2, 3], [4, 5]])
pos = QtCore.QPoint(10, 10)
st = NonBlockingStyleDialog.dropdown_editor(image, pos)
| 22.34375
| 74
| 0.637762
|
159dcc6c35b4c0ec9b1e9d31057da94fde1ce933
| 6,017
|
py
|
Python
|
src/insulaudit/devices/onetouch/proto.py
|
bewest/insulaudit
|
2c0aa04a596775517a1e651723796dc19ea99ea7
|
[
"MIT"
] | 22
|
2015-03-10T20:50:23.000Z
|
2020-11-28T13:23:54.000Z
|
src/insulaudit/devices/onetouch/proto.py
|
bewest/insulaudit
|
2c0aa04a596775517a1e651723796dc19ea99ea7
|
[
"MIT"
] | 2
|
2016-03-13T12:56:34.000Z
|
2018-11-17T18:11:43.000Z
|
src/insulaudit/devices/onetouch/proto.py
|
bewest/insulaudit
|
2c0aa04a596775517a1e651723796dc19ea99ea7
|
[
"MIT"
] | 10
|
2015-06-14T21:30:59.000Z
|
2018-09-13T19:01:43.000Z
|
from insulaudit.log import io, logger as log
from insulaudit import lib, core
from insulaudit.data import glucose
import time
HEADER = [ 0x11, 0x0D ]
STX = 0x02
ETX = 0x03
TIMEOUT = 0.5
RETRIES = 3
def ls_long( B ):
B.reverse( )
return lib.BangLong( B )
def ls_int( B ):
B.reverse( )
return lib.BangInt( B )
def format_glucose( data ):
"""
>>> date, value = format_glucose( '''P "WED","11/10/10","01:46:00 '''
... + '''"," 076 ","N","00", 00 099C''' )
>>> date.isoformat( )
'2010-11-10T01:46:00'
>>> value
76
"""
try:
date = lib.parse.date( 'T'.join(
data.replace( '"', '' ).split( ',' )[ 1:3 ]) )
value = int( data.split( '"' )[ 7 ].strip( ) )
except (IndexError, ValueError), e:
log.info( data )
raise InvalidGlucose( data )
return date, value
class OneTouch2Exception(core.CarelinkException): pass
class InvalidResponse(OneTouch2Exception): pass
class InvalidGlucose(InvalidResponse): pass
class OneTouchCommand( core.Command ):
code = HEADER
response = ''
def __call__( self, port ):
self.response = port.readline( ).strip( )
return self.response
def decode( self, msg ):
return str( msg )
def isEmpty( self, response ):
return response == ''
class ReadSerial( OneTouchCommand ):
code = list( bytearray( b'DM@' ) )
class ReadFirmware( OneTouchCommand ):
code = list( bytearray( b'DM?' ) )
class ReadRFID( OneTouchCommand ):
code = list( bytearray( b'DMID' ) )
class UltraSmartWakeUp1( OneTouchCommand ):
code = bytearray( [ 0xB0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x07 ] )
class UltraSmartWakeUp2( OneTouchCommand ):
code = bytearray( [ 0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x07,
0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x08 ] )
def __call__(self, port ):
return True
class UltraSmartWakeUpStage1( OneTouchCommand ):
#code = bytearray( [ 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x08 ] )
code = bytearray( [ 0x11, 0x0D, ] )
def __call__(self, port ):
stuff = port.write("")
#time.sleep(5)
stuff = port.readlines( )
io.info( "RECIEVED HANDSHAKE REPLY: %s bytes" % len(stuff) )
io.info(lib.hexdump(bytearray( stuff )))
if len(stuff) > 0:
return True
return False
class UltraSmartWakeUpStage2( OneTouchCommand ):
code = bytearray( [ 0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x08,
0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x08,
0x11, 0x11, 0x0D, 0x0D, 0x44, 0x44, 0x4D,
0x4D, 0x53, 0x53, 0x0D, 0x0D, 0x0D, 0x0D,
0x11, 0x11, 0x0D, 0x0D, 0x44, 0x44, 0x4D,
0x4D, 0x53, 0x53, 0x0D, 0x0D, 0x0D, 0x0D,
0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x08,
0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x08,
0x11, 0x11, 0x0D, 0x0D, 0x44, 0x44, 0x4D,
0x4D, 0x53, 0x53, 0x0D, 0x0D, 0x0D, 0x0D,
0x11, 0x11, 0x0D, 0x0D, 0x44, 0x44, 0x4D,
0x4D, 0x40, 0x40, 0x0D, 0x0D ] )
class ReadGlucose( OneTouchCommand ):
code = list( bytearray( b'DMP' ) )
def __call__( self, port ):
head = port.readline( ).strip( )
body = [ ]
for line in port.readlines( ):
try:
body.append( format_glucose( line ) )
except InvalidGlucose, e: pass
io.debug ( 'read glucose:head:%s:body.len:%s' % ( head, len(body) ) )
self.response = ( head, glucose.l2np( body ) )
return self.response
def decode( self, msg ):
return msg
def isEmpty( self, *args ):
return self.response[0] == ''
class OneTouchUltra2( core.CommBuffer ):
__timeout__ = 20
__pause__ = 02
def stage1_wakeup(self):
io.info("wakeup: stage 1")
command = UltraSmartWakeUpStage1( )
msg = bytearray( command.code )
for x in xrange( RETRIES ):
self.write( str( msg ) )
#self.write( str( msg ) )
time.sleep( self.__pause__ )
response = command( self )
if response:
break
io.info( 'get response:%s' % ( response ) )
if not response:
raise OneTouch2Exception("NOT A GOOD START")
def stage2_wakeup(self):
stage2a = [ 0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x07 ]
stage2b = [ 0x80, 0x25, 0x00, 0x00, 0x00, 0x00, 0x08 ]
stage2c = [ 0x11, 0x0D, 0x44, 0x4D, 0x53, 0x0D, 0x0D ]
stage2d = [ 0x11, 0x0D, 0x44, 0x4D, 0x53, 0x0D, 0x0D ]
stage2e = [ 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x08 ]
stage2f = [ 0x11, 0x0D, 0x44, 0x4D, 0x53, 0x0D ]
stages = [ stage2a, stage2b, stage2c, stage2d, stage2e, stage2f, ]
awake = False
for stage in stages:
msg = bytearray(stage)
self.write( str( msg ) )
response = self.readlines( )
if len(response) > 0:
io.info("got a response!!!")
io.info(lib.hexdump(bytearray(response)))
awake = True
return awake
def wakeup_smart( self ):
io.info("begin wakeup")
self.stage1_wakeup( )
self.stage2_wakeup( )
#stage2 = UltraSmartWakeUpStage2( )
#self.write( str( stage2.code ) )
#response_2 = stage2( self )
#self.write( str( wake1.code ) )
time.sleep( self.__pause__ )
def read_glucose( self ):
header, body = self.execute( ReadGlucose( ) )
return header, body
def wrap( self, data ):
frame = HEADER + data + [ 0x0D ]
return bytearray( frame )
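    # Illustrative note (a sketch, not from the original driver): every DM command is framed
    # as HEADER + payload + CR, so wrap(list(bytearray(b'DMP'))) produces the byte sequence
    # 0x11 0x0D 0x44 0x4D 0x50 0x0D ("DMP" wrapped in the OneTouch frame).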
def execute( self, command ):
"""
"""
msg = self.wrap( command.code )
# empty meter's buffer before writing anything
self.readlines( )
for x in xrange( RETRIES ):
self.write( str( msg ) )
self.write( str( msg ) )
time.sleep( self.__pause__ )
io.info( 'dm read:%s' % x );
response = command( self )
if not command.isEmpty( response ):
break
io.info( 'get response:%r' % ( repr( response ) ) )
return command.decode( response )
class Link(OneTouchUltra2):
pass
if __name__ == '__main__':
import doctest
doctest.testmod( )
#####
# EOF
| 29.067633
| 76
| 0.590161
|
3c19fe5723da55805c9a14d9ca66ab3a99aa5421
| 1,728
|
py
|
Python
|
setup.py
|
kottilukkalarun/CosineSimilary
|
450c3bae10495109c3b3e494a0e91f46adfb623f
|
[
"MIT"
] | null | null | null |
setup.py
|
kottilukkalarun/CosineSimilary
|
450c3bae10495109c3b3e494a0e91f46adfb623f
|
[
"MIT"
] | null | null | null |
setup.py
|
kottilukkalarun/CosineSimilary
|
450c3bae10495109c3b3e494a0e91f46adfb623f
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name = 'CosineSimilarityFinder', # How you named your package folder (MyLib)
  packages = ['SimilarityFinder'],   # Choose the same as "name"
version = '0.4', # Start with a small number and increase it with every change you make
  license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'FINDS COSINE SIMILARITY BETWEEN TWO TEXTS', # Give a short description about your library
author = 'ARUN KESAVAN KOTTILUKKAL', # Type in your name
author_email = 'arunkottilukkal@outlook.in', # Type in your E-Mail
url = 'https://github.com/kottilukkalarun/CosineSimilary', # Provide either the link to your github or to your website
download_url = 'https://github.com/kottilukkalarun/CosineSimilary/archive/v_04.tar.gz', # I explain this later on
keywords = ['COSINE SIMILARITY', 'TEXT SIMILARITY', 'STRING SIMILARITY'], # Keywords that define your package best
install_requires=[
'nltk',
'pandas',
'requests',
'scikit-learn',
],
classifiers=[
    'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
    'Programming Language :: Python :: 3',      # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 59.586207
| 146
| 0.674769
|
ac259ef12c963775ca6584eab63bba8a58af5196
| 2,454
|
py
|
Python
|
test/test_content_view.py
|
gustavs408650/looker_sdk_30
|
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
|
[
"MIT"
] | null | null | null |
test/test_content_view.py
|
gustavs408650/looker_sdk_30
|
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
|
[
"MIT"
] | null | null | null |
test/test_content_view.py
|
gustavs408650/looker_sdk_30
|
8b52449f216b2cb3b84f09e2856bcea1ed4a2b0c
|
[
"MIT"
] | 1
|
2019-11-12T10:05:51.000Z
|
2019-11-12T10:05:51.000Z
|
# coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning) # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import looker_client_30
from looker_client_30.looker_sdk.content_view import ContentView # noqa: E501
from looker_client_30.rest import ApiException
class TestContentView(unittest.TestCase):
"""ContentView unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testContentView(self):
"""Test ContentView"""
# FIXME: construct object with mandatory attributes with example values
# model = looker_client_30.models.content_view.ContentView() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 59.853659
| 1,639
| 0.757131
|
394f64845380a60bd70d44950f60d76bba837896
| 66
|
py
|
Python
|
test/test_preprocess.py
|
foldfelis/sqstate
|
a6a14a48d6ac7e41c55e2d62de3072074ba86ece
|
[
"MIT"
] | 1
|
2021-07-14T08:23:54.000Z
|
2021-07-14T08:23:54.000Z
|
test/test_preprocess.py
|
akjmo97/sqstate
|
a6a14a48d6ac7e41c55e2d62de3072074ba86ece
|
[
"MIT"
] | 10
|
2021-01-16T17:09:57.000Z
|
2021-01-29T16:03:45.000Z
|
test/test_preprocess.py
|
akjmo97/sqstate
|
a6a14a48d6ac7e41c55e2d62de3072074ba86ece
|
[
"MIT"
] | 1
|
2021-07-02T07:15:21.000Z
|
2021-07-02T07:15:21.000Z
|
import sqstate.preprocess as pre
class TestPreprocess:
pass
| 11
| 32
| 0.772727
|
0e465ee0ed62186a0ca2ed3ae3f5b3fb8f47d102
| 2,298
|
py
|
Python
|
mud/piece_filter.py
|
akortman/mud
|
3b0fcff7b57a7fdc2f1c06d076cb0d6ed8eb5881
|
[
"MIT"
] | null | null | null |
mud/piece_filter.py
|
akortman/mud
|
3b0fcff7b57a7fdc2f1c06d076cb0d6ed8eb5881
|
[
"MIT"
] | null | null | null |
mud/piece_filter.py
|
akortman/mud
|
3b0fcff7b57a7fdc2f1c06d076cb0d6ed8eb5881
|
[
"MIT"
] | null | null | null |
'''
Usually, you want to restrict a dataset to a particular kind of Piece,
e.g. 4/4 time.
This module provides implementations of some predicate functions to use
as filters.
'''
def failure_reason(filter):
if isinstance(filter, PieceFilter):
return filter.why()
    return f"Failed on testing {type(filter).__name__} filter"
class PieceFilter(object):
'''
Generic filter object.
'''
def __init__(self):
raise NotImplementedError
def test(self, piece):
raise NotImplementedError
def __call__(self, piece):
return self.test(piece)
def why(self):
return f"Unspecified rejection criteria ({type(self)})"
class AtomicSlicable(PieceFilter):
'''
Tests whether all events in a piece are contained in atomic slices
when sliced to a given resolution.
'''
def __init__(self, slice_resolution):
self._slice_resolution = slice_resolution
def test(self, piece):
for bar in piece.bars():
for ts in bar.generate_slices(self._slice_resolution):
if not ts.is_atomic_slice():
return False
return True
def why(self):
return f"Not atomic slicable with resolution {self._slice_resolution}"
class NotesWithinRange(PieceFilter):
'''
Tests whether all notes are within a given note range.
'''
def __init__(self, octave_range):
raise NotImplementedError
def test(self, piece):
raise NotImplementedError
def why(self):
return f"Notes outside of range"
class BarLengthIs(PieceFilter):
'''
Tests whether the pieces are in the given time signatures.
'''
def __init__(self, bar_length):
self._bar_length = bar_length
def test(self, piece):
for bar in piece.bars():
if bar.length().in_beats() != self._bar_length:
return False
return True
def why(self):
return f"Bar length not {self._bar_length}"
class IsMonophonic(PieceFilter):
'''
Tests whether the piece is monophonic (no simultaneous pitches).
'''
def __init__(self):
pass
def test(self, piece):
return piece.is_monophonic()
def why(self):
return f"Piece is not monophonic"
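# Usage sketch (illustrative only; `piece` is assumed to be a mud Piece instance):
#     filters = [BarLengthIs(4), IsMonophonic(), AtomicSlicable(0.25)]
#     for f in filters:
#         if not f(piece):
#             print(failure_reason(f))
#             break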
| 26.413793
| 78
| 0.63577
|
2b443cc5355d5d17448d3afc8bce7c9469a1db5c
| 1,609
|
py
|
Python
|
cloudkittyclient/v2/client.py
|
NeCTAR-RC/python-cloudkittyclient
|
20003a58cec701dd39945246955d5434681e5184
|
[
"Apache-2.0"
] | 19
|
2015-10-18T02:56:16.000Z
|
2019-07-19T16:33:08.000Z
|
cloudkittyclient/v2/client.py
|
NeCTAR-RC/python-cloudkittyclient
|
20003a58cec701dd39945246955d5434681e5184
|
[
"Apache-2.0"
] | 1
|
2018-12-17T13:11:24.000Z
|
2018-12-17T13:11:24.000Z
|
cloudkittyclient/v2/client.py
|
NeCTAR-RC/python-cloudkittyclient
|
20003a58cec701dd39945246955d5434681e5184
|
[
"Apache-2.0"
] | 11
|
2016-02-02T02:32:58.000Z
|
2022-02-11T02:22:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cloudkittyclient.v1 import client
from cloudkittyclient.v2 import dataframes
from cloudkittyclient.v2.rating import modules
from cloudkittyclient.v2 import scope
from cloudkittyclient.v2 import summary
# NOTE(peschk_l) v2 client needs to implement v1 until the v1 API has been
# completely ported to v2
class Client(client.Client):
def __init__(self,
session=None,
adapter_options={},
cacert=None,
insecure=False,
**kwargs):
super(Client, self).__init__(
session=session,
adapter_options=adapter_options,
cacert=cacert,
insecure=insecure,
**kwargs
)
self.dataframes = dataframes.DataframesManager(self.api_client)
self.scope = scope.ScopeManager(self.api_client)
self.summary = summary.SummaryManager(self.api_client)
self.rating = modules.RatingManager(self.api_client)
| 35.755556
| 78
| 0.677439
|
993af1509f9127592b735a7412939002c28e2d30
| 1,880
|
py
|
Python
|
tests/contract_tests/KT1TnwBxgK4ayHuxrti6KKkJpWBHXBYRCX6H/test_tnwbxg_updateBaker.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 98
|
2019-02-07T16:33:38.000Z
|
2022-03-31T15:53:41.000Z
|
tests/contract_tests/KT1TnwBxgK4ayHuxrti6KKkJpWBHXBYRCX6H/test_tnwbxg_updateBaker.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 152
|
2019-05-20T16:38:56.000Z
|
2022-03-30T14:24:38.000Z
|
tests/contract_tests/KT1TnwBxgK4ayHuxrti6KKkJpWBHXBYRCX6H/test_tnwbxg_updateBaker.py
|
konchunas/pytezos
|
65576d18bdf1956fae8ea21241b6c43a38921b83
|
[
"MIT"
] | 34
|
2019-07-25T12:03:51.000Z
|
2021-11-11T22:23:38.000Z
|
from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseTNWBXG(TestCase):
@classmethod
def setUpClass(cls):
with open(join(dirname(__file__), f'', '__script__.json')) as f:
script = json.loads(f.read())
cls.program = MichelsonProgram.match(script['code'])
with open(join(dirname(__file__), f'', f'updateBaker.json')) as f:
operation = json.loads(f.read())
cls.entrypoint = f'updateBaker'
cls.operation = operation
# cls.maxDiff = None
def test_parameters_tnwbxg(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
# pprint(py_obj)
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_tnwbxg(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
extended_storage = storage.merge_lazy_diff(lazy_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
# pprint(py_obj)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
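# Standalone sketch of the forge/unforge round-trip the test above relies on
# (comment only; it uses the pytezos helpers already imported in this file and
# the same default literal the test falls back to when no parameters are set):
#
#     expr = {'prim': 'Unit'}
#     assert unforge_micheline(forge_micheline(expr)) == expr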
| 39.166667
| 112
| 0.722872
|
7d51210d235aede3eeab7d1c30bfeef18e2115e4
| 1,126
|
py
|
Python
|
rasa_exp/nlu/tokenizers/simple_tokenizer.py
|
shfshf/rasa_exp
|
dd6db46c14c36f0ffe9602551836af43cebcfead
|
[
"Apache-2.0"
] | null | null | null |
rasa_exp/nlu/tokenizers/simple_tokenizer.py
|
shfshf/rasa_exp
|
dd6db46c14c36f0ffe9602551836af43cebcfead
|
[
"Apache-2.0"
] | null | null | null |
rasa_exp/nlu/tokenizers/simple_tokenizer.py
|
shfshf/rasa_exp
|
dd6db46c14c36f0ffe9602551836af43cebcfead
|
[
"Apache-2.0"
] | null | null | null |
import logging
import typing
from typing import Any, Dict, Optional, Text, List
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.training_data import Message, TrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
class OneCharTokenizer(Tokenizer):
"""自定义中文分词组件"""
# provides = ["tokens"]
language_list = ["zh"]
defaults = {
"custom_dict": None,
# Flag to check whether to split intents
"intent_tokenization_flag": False,
# Symbol on which intent should be split
"intent_split_symbol": "_",
} # default don't load custom dictionary
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
super().__init__(component_config)
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
text = message.get(attribute)
tokenized = [i for i in text]
tokens = []
offset = 0
for word in tokenized:
tokens.append(Token(word, offset))
offset += len(word)
return tokens
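# Behaviour sketch (illustrative, not part of the original file): for the text
# "你好吗" the loop in tokenize() above yields three Tokens -- one per character,
# with offsets 0, 1 and 2 -- since every single-character "word" advances the
# offset by exactly one.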
| 26.809524
| 73
| 0.652753
|
3060b102169b208a856b773ebd3cb7841408f650
| 4,461
|
py
|
Python
|
graphql/validation/tests/test_no_unused_variables.py
|
phil303/graphql-core
|
f983e19cc8cd818275621e2909fb485cf227fe0c
|
[
"MIT"
] | 1
|
2021-01-13T15:06:53.000Z
|
2021-01-13T15:06:53.000Z
|
graphql/validation/tests/test_no_unused_variables.py
|
phil303/graphql-core
|
f983e19cc8cd818275621e2909fb485cf227fe0c
|
[
"MIT"
] | null | null | null |
graphql/validation/tests/test_no_unused_variables.py
|
phil303/graphql-core
|
f983e19cc8cd818275621e2909fb485cf227fe0c
|
[
"MIT"
] | 1
|
2020-07-23T17:53:27.000Z
|
2020-07-23T17:53:27.000Z
|
from graphql.language.location import SourceLocation
from graphql.validation.rules import NoUnusedVariables
from .utils import expect_fails_rule, expect_passes_rule
def unused_variable(variable_name, op_name, line, column):
return {
'message': NoUnusedVariables.unused_variable_message(variable_name, op_name),
'locations': [SourceLocation(line, column)]
}
def test_uses_all_variables():
expect_passes_rule(NoUnusedVariables, '''
query ($a: String, $b: String, $c: String) {
field(a: $a, b: $b, c: $c)
}
''')
def test_uses_all_variables_deeply():
expect_passes_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
field(a: $a) {
field(b: $b) {
field(c: $c)
}
}
}
''')
def test_uses_all_variables_deeply_in_inline_fragments():
expect_passes_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
... on Type {
field(a: $a) {
field(b: $b) {
... on Type {
field(c: $c)
}
}
}
}
}
''')
def test_uses_all_variables_in_fragment():
expect_passes_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
...FragA
}
fragment FragA on Type {
field(a: $a) {
...FragB
}
}
fragment FragB on Type {
field(b: $b) {
...FragC
}
}
fragment FragC on Type {
field(c: $c)
}
''')
def test_variable_used_by_fragment_in_multiple_operations():
expect_passes_rule(NoUnusedVariables, '''
query Foo($a: String) {
...FragA
}
query Bar($b: String) {
...FragB
}
fragment FragA on Type {
field(a: $a)
}
fragment FragB on Type {
field(b: $b)
}
''')
def test_variable_used_by_recursive_fragment():
expect_passes_rule(NoUnusedVariables, '''
query Foo($a: String) {
...FragA
}
fragment FragA on Type {
field(a: $a) {
...FragA
}
}
''')
def test_variable_not_used():
expect_fails_rule(NoUnusedVariables, '''
query ($a: String, $b: String, $c: String) {
field(a: $a, b: $b)
}
''', [
unused_variable('c', None, 2, 38)
])
def test_multiple_variables_not_used():
expect_fails_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
field(b: $b)
}
''', [
unused_variable('a', 'Foo', 2, 17),
unused_variable('c', 'Foo', 2, 41)
])
def test_variable_not_used_in_fragments():
expect_fails_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
...FragA
}
fragment FragA on Type {
field(a: $a) {
...FragB
}
}
fragment FragB on Type {
field(b: $b) {
...FragC
}
}
fragment FragC on Type {
field
}
''', [
unused_variable('c', 'Foo', 2, 41)
])
def test_multiple_variables_not_used_in_fragments():
expect_fails_rule(NoUnusedVariables, '''
query Foo($a: String, $b: String, $c: String) {
...FragA
}
fragment FragA on Type {
field {
...FragB
}
}
fragment FragB on Type {
field(b: $b) {
...FragC
}
}
fragment FragC on Type {
field
}
''', [
unused_variable('a', 'Foo', 2, 17),
unused_variable('c', 'Foo', 2, 41)
])
def test_variable_not_used_by_unreferenced_fragment():
expect_fails_rule(NoUnusedVariables, '''
query Foo($b: String) {
...FragA
}
fragment FragA on Type {
field(a: $a)
}
fragment FragB on Type {
field(b: $b)
}
''', [
unused_variable('b', 'Foo', 2, 17),
])
def test_variable_not_used_by_fragment_used_by_other_operation():
expect_fails_rule(NoUnusedVariables, '''
query Foo($b: String) {
...FragA
}
query Bar($a: String) {
...FragB
}
fragment FragA on Type {
field(a: $a)
}
fragment FragB on Type {
field(b: $b)
}
''', [
unused_variable('b', 'Foo', 2, 17),
unused_variable('a', 'Bar', 5, 17),
])
| 21.975369
| 85
| 0.516924
|
61247d8da8430ca6893ca1789461cc9c70467db6
| 3,898
|
py
|
Python
|
pytet_v0.3/matrix.py
|
tlsrbwls999/pytet
|
865cd9a4702b278cab0882d9e1ec15abb3a7396f
|
[
"Apache-2.0"
] | null | null | null |
pytet_v0.3/matrix.py
|
tlsrbwls999/pytet
|
865cd9a4702b278cab0882d9e1ec15abb3a7396f
|
[
"Apache-2.0"
] | null | null | null |
pytet_v0.3/matrix.py
|
tlsrbwls999/pytet
|
865cd9a4702b278cab0882d9e1ec15abb3a7396f
|
[
"Apache-2.0"
] | null | null | null |
class MatrixError(Exception):
pass
class Matrix:
nAlloc = 0
nFree = 0
def get_nAlloc(self):
return Matrix.nAlloc
def get_nFree(self):
return Matrix.nFree
def get_dy(self):
return self._dy
def get_dx(self):
return self._dx
def get_array(self):
return self._array
def __del__(self):
Matrix.nFree += 1
def __alloc(self, cy, cx):
if cy < 0 or cx < 0:
raise MatrixError("wrong matrix size")
self._dy = cy
self._dx = cx
self._array = [[0]*self._dx for i in range(self._dy)]
        # print(self._array)
Matrix.nAlloc += 1
def __init__(self, arg):
if isinstance(arg, list):
array = arg
cy = len(array)
cx = len(array[0])
self.__alloc(cy, cx)
for y in range(cy):
for x in range(cx):
self._array[y][x] = array[y][x]
return
elif isinstance(arg, Matrix):
other = arg
cy = other._dy
cx = other._dx
self.__alloc(cy, cx)
for y in range(cy):
for x in range(cx):
self._array[y][x] = other._array[y][x]
return
else:
self.__alloc(0, 0)
return
def __str__(self):
return 'Matrix(%d, %d)' % (self._dy, self._dx)
def print(self):
print('[', end=' ')
for y in range(self._dy-1):
print('[', end=' ')
for x in range(self._dx-1):
print(self._array[y][x], end=', ')
print(self._array[y][self._dx-1], end=' ')
print('],', end=' ')
print('[', end=' ')
for x in range(self._dx-1):
print(self._array[self._dy-1][x], end=', ')
print(self._array[self._dy-1][self._dx-1], end=' ')
print(']', end=' ')
print(']')
def clip(self, top, left, bottom, right):
cy = bottom - top
cx = right - left
temp = [[0]*cx for i in range(cy)]
for y in range(cy):
for x in range(cx):
if (top+y >= 0) and (left+x >= 0) \
and (top+y < self._dy) and (left+x < self._dx):
temp[y][x] = self._array[top+y][left+x]
else:
raise MatrixError("invalid matrix range")
return Matrix(temp)
def paste(self, other, top, left):
for y in range(other._dy):
for x in range(other._dx):
if (top+y >= 0) and (left+x >= 0) \
and (top+y < self._dy) and (left+x < self._dx):
self._array[top+y][left+x] = other._array[y][x]
else:
raise MatrixError("invalid matrix range")
def __add__(self, other):
if (self._dx != other._dx) or (self._dy != other._dy):
raise MatrixError("matrix sizes mismatch")
temp = [[0]*self._dx for i in range(self._dy)]
for y in range(self._dy):
for x in range(self._dx):
temp[y][x] = self._array[y][x] + other._array[y][x]
return Matrix(temp)
def sum(self):
total = 0
for y in range(self._dy):
for x in range(self._dx):
total += self._array[y][x]
return total
def mulc(self, coef):
for y in range(self._dy):
for x in range(self._dx):
self._array[y][x] *= coef
def anyGreaterThan(self, val):
for y in range(self._dy):
temp = [v for v in self._array[y] if v > val]
if len(temp) > 0:
return True
return False
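# Usage sketch (not in the original file; kept as a comment so importing the
# module stays side-effect free):
#
#     a = Matrix([[1, 2], [3, 4]])
#     b = Matrix(a)                 # copy constructor
#     c = a + b                     # element-wise add -> [[2, 4], [6, 8]]
#     c.mulc(10)                    # in-place scalar multiply
#     c.sum()                       # -> 200
#     a.clip(0, 0, 1, 1).print()    # -> [ [ 1 ] ]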
| 29.530303
| 84
| 0.452027
|
faa234c60116541c5f983eb0a3cc562bf0f3f29d
| 1,455
|
py
|
Python
|
flatten_dict.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
flatten_dict.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
flatten_dict.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
def flatten_dictionary(dictionary):
# init keys array
new_dict = dict()
keys = list()
def make_key(keys, key):
"""Form the key"""
new_key = ".".join(keys)
if key != "":
new_key = ".".join((new_key, key))
return new_key
def flatten(flatten_dict, keys):
# iterate over kv pair
for key, value in flatten_dict.items():
# Base Case: if key normal
if isinstance(value, dict) is False:
# use what's in the keys array to form the new key
if len(keys) > 0:
key = make_key(keys, key)
# add to a new dict,
new_dict[key] = value
# Recursive Case if leads to a nested dict:
elif isinstance(value, dict) is True:
# add to the keys array
if key != "":
keys.append(key)
flatten(value, keys)
# pop from the keys array
if len(keys) > 0:
keys.pop()
# print(new_dict)
# call the helper func
flatten(dictionary, keys)
# return the dictionary
return new_dict
"""
diction = {
"Key1" : "1",
"Key2" : {
"a" : "2",
"b" : "3",
"c" : {
"d" : "3",
"e" : {
"" : "1"
}
}
}
}
"""
diction = {"": {"a": 1}, "b": 2}
print(flatten_dictionary(diction))
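# Expected output of the call above: the empty top-level key is dropped, so the
# nested "a" is lifted to the top level and this prints {'a': 1, 'b': 2}.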
| 25.526316
| 66
| 0.452921
|
8bf40651b3c721850d8504a21a53a6e461375d32
| 338
|
py
|
Python
|
Desafios/des098.py
|
vitormrts/ExerciciosPython
|
176b1c21e147670f7495678bdd4fc97241440d28
|
[
"MIT"
] | 1
|
2021-02-07T18:58:57.000Z
|
2021-02-07T18:58:57.000Z
|
Desafios/des098.py
|
vitormrts/ExerciciosPython
|
176b1c21e147670f7495678bdd4fc97241440d28
|
[
"MIT"
] | null | null | null |
Desafios/des098.py
|
vitormrts/ExerciciosPython
|
176b1c21e147670f7495678bdd4fc97241440d28
|
[
"MIT"
] | null | null | null |
pessoa = dict()
dados = []
pessoa['Nome'] = str(input('Nome: '))
pessoa['Nota'] = float(input(f'Média de {pessoa["Nome"]}: '))
if pessoa['Nota'] >= 5:
pessoa['Situação'] = 'Aprovado'
else:
pessoa['Situação'] = 'Reprovado'
dados.append(pessoa.copy())
for p in dados:
for k, v in p.items():
print(f'{k} é igual a {v}')
| 24.142857
| 61
| 0.58284
|
258c4f86b6802d87073bc671867892fe134a32b8
| 840
|
py
|
Python
|
tests/opcodes/cases/test_and_36.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-08-11T02:31:24.000Z
|
2020-08-11T02:31:24.000Z
|
tests/opcodes/cases/test_and_36.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
tests/opcodes/cases/test_and_36.py
|
tqtezos/pytezos
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
[
"MIT"
] | 1
|
2022-03-20T19:01:00.000Z
|
2022-03-20T19:01:00.000Z
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestand_36(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=True)
def test_opcode_and_36(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/and.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN (Pair False False) None')
self.assertTrue(res['success'])
exp_val_expr = michelson_to_micheline('(Some False)')
exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
self.assertEqual(exp_val, res['result']['storage']._val)
| 32.307692
| 84
| 0.686905
|
f451753d808d1e0491a8bba28157444b4b08d345
| 2,245
|
py
|
Python
|
plot_code/tau_delta_success.py
|
pondbooks/STL_SAC_NCSs_Ver2
|
bee0db08a33182419e2b3a0b49e5798bccf17bcd
|
[
"MIT"
] | 1
|
2022-03-08T03:33:16.000Z
|
2022-03-08T03:33:16.000Z
|
plot_code/tau_delta_success.py
|
pondbooks/STL_SAC_NCSs_Ver2
|
bee0db08a33182419e2b3a0b49e5798bccf17bcd
|
[
"MIT"
] | null | null | null |
plot_code/tau_delta_success.py
|
pondbooks/STL_SAC_NCSs_Ver2
|
bee0db08a33182419e2b3a0b49e5798bccf17bcd
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "DejaVu Serif" # Font
plt.rcParams["font.size"] = 10 # Size of word
for i in range(14):
data_nopretrain = np.loadtxt('tau_preprocess/success'+str(i)+'.csv',skiprows=1,delimiter=",")
data_nopretrain_temp = data_nopretrain[:,1]
data_nopretrain_temp = np.array([data_nopretrain_temp])
data_proposed_method = np.loadtxt('tau_delta_preprocess/success'+str(i)+'.csv',skiprows=1,delimiter=",")
data_proposed_method_temp = data_proposed_method[:,1]
data_proposed_method_temp = np.array([data_proposed_method_temp])
    # Matrix shape is [number of learning curves, steps]
if i > 0:
data_nopretrain_matrix = np.concatenate([data_nopretrain_matrix, data_nopretrain_temp])
data_proposed_method_matrix = np.concatenate([data_proposed_method_matrix, data_proposed_method_temp])
else: # i=0
data_nopretrain_matrix = data_nopretrain_temp
data_proposed_method_matrix = data_proposed_method_temp
data_len = len(data_proposed_method_temp[0])
print(data_len)
steps = []
for i in range(data_len):
steps.append((i+1)*10000)
train_nopretrain_scores_mean = np.mean(data_nopretrain_matrix, axis=0)
train_nopretrain_scores_std = np.std(data_nopretrain_matrix, axis=0)
train_proposed_method_scores_mean = np.mean(data_proposed_method_matrix, axis=0)
train_proposed_method_scores_std = np.std(data_proposed_method_matrix, axis=0)
plt.figure()
plt.xlabel("Learning Steps")
plt.ylabel("Success Rate")
# Plot the mean success rate for each method
plt.plot(steps, train_nopretrain_scores_mean,color="r", label="$\u03c4$-MDP")
plt.plot(steps, train_proposed_method_scores_mean,color="b", label="$\u03c4 d$-MDP")
# Shade one standard deviation around each mean
plt.fill_between(steps, train_nopretrain_scores_mean - train_nopretrain_scores_std, train_nopretrain_scores_mean + train_nopretrain_scores_std, color="r", alpha=0.15)
plt.fill_between(steps, train_proposed_method_scores_mean - train_proposed_method_scores_std, train_proposed_method_scores_mean + train_proposed_method_scores_std, color="b", alpha=0.15)
plt.xlim(0, 600000)
plt.ylim(0., 1.01)
plt.legend(loc="best")
plt.grid()
#plt.show()
plt.savefig("success_rates_tau_d.png")
| 43.173077
| 186
| 0.773274
|
9617bcf3d2e5ffbbfda2b79409a5c6201807a592
| 13,568
|
py
|
Python
|
python/cuml/utils/input_utils.py
|
efajardo-nv/cuml
|
bc86714836284ed4752c267513e5d447e884e1c5
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/utils/input_utils.py
|
efajardo-nv/cuml
|
bc86714836284ed4752c267513e5d447e884e1c5
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/utils/input_utils.py
|
efajardo-nv/cuml
|
bc86714836284ed4752c267513e5d447e884e1c5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.utils.numba_utils
import cudf
import cupy as cp
import numpy as np
import rmm
import warnings
from collections import namedtuple
from collections.abc import Collection
from cuml.utils import rmm_cupy_ary
from numba import cuda
inp_array = namedtuple('inp_array', 'array pointer n_rows n_cols dtype')
def get_dev_array_ptr(ary):
"""
Returns ctype pointer of a numba style device array
"""
return ary.device_ctypes_pointer.value
def get_cudf_column_ptr(col):
"""
Returns pointer of a cudf Series
"""
return col.__cuda_array_interface__['data'][0]
def get_dtype(X):
"""
Returns dtype of obj as a Numpy style dtype (like np.float32)
"""
if isinstance(X, cudf.DataFrame):
dtype = np.dtype(X[X.columns[0]]._column.dtype)
elif (isinstance(X, cudf.Series)):
dtype = np.dtype(X._column.dtype)
elif isinstance(X, np.ndarray):
dtype = X.dtype
elif cuda.is_cuda_array(X):
dtype = X.dtype
elif cuda.devicearray.is_cuda_ndarray(X):
dtype = X.dtype
else:
raise TypeError("Input object not understood for dtype detection.")
return dtype
def input_to_dev_array(X, order='F', deepcopy=False,
check_dtype=False, convert_to_dtype=False,
check_cols=False, check_rows=False,
fail_on_order=False):
"""
Convert input X to device array suitable for C++ methods.
Acceptable input formats:
* cuDF Dataframe - returns a deep copy always.
* cuDF Series - returns by reference or a deep copy depending on
`deepcopy`.
* Numpy array - returns a copy in device always
* cuda array interface compliant array (like Cupy) - returns a
reference unless `deepcopy`=True.
* numba device array - returns a reference unless deepcopy=True
Parameters
----------
X : cuDF.DataFrame, cuDF.Series, numba array, NumPy array or any
cuda_array_interface compliant array like CuPy or pytorch.
order: string (default: 'F')
Whether to return a F-major or C-major array. Used to check the order
of the input. If fail_on_order=True method will raise ValueError,
otherwise it will convert X to be of order `order`.
deepcopy: boolean (default: False)
Set to True to always return a deep copy of X.
check_dtype: np.dtype (default: False)
Set to a np.dtype to throw an error if X is not of dtype `check_dtype`.
convert_to_dtype: np.dtype (default: False)
Set to a dtype if you want X to be converted to that dtype if it is
not that dtype already.
check_cols: int (default: False)
Set to an int `i` to check that input X has `i` columns. Set to False
(default) to not check at all.
    check_rows: int (default: False)
        Set to an int `i` to check that input X has `i` rows. Set to False
(default) to not check at all.
fail_on_order: boolean (default: False)
Set to True if you want the method to raise a ValueError if X is not
of order `order`.
Returns
-------
`inp_array`: namedtuple('inp_array', 'array pointer n_rows n_cols dtype')
A new device array if the input was not a numba device
array. It is a reference to the input X if it was a numba device array
or cuda array interface compliant (like cupy)
"""
if convert_to_dtype:
X = convert_dtype(X, to_dtype=convert_to_dtype)
check_dtype = False
if isinstance(X, cudf.DataFrame):
dtype = np.dtype(X[X.columns[0]]._column.dtype)
if order == 'F':
X_m = X.as_gpu_matrix(order='F')
elif order == 'C':
X_m = cuml.utils.numba_utils.row_matrix(X)
elif (isinstance(X, cudf.Series)):
if deepcopy:
X_m = X.to_gpu_array()
else:
if X.null_count == 0:
# using __cuda_array_interface__ support of cudf.Series for
# this temporarily while switching from rmm device_array to
# rmm deviceBuffer https://github.com/rapidsai/cuml/issues/1379
X_m = cuda.as_cuda_array(X._column)
else:
raise ValueError("Error: cuDF Series has missing/null values")
elif isinstance(X, np.ndarray):
dtype = X.dtype
X_m = rmm.to_device(np.array(X, order=order, copy=False))
elif cuda.is_cuda_array(X):
# Use cuda array interface to create a device array by reference
X_m = cuda.as_cuda_array(X)
if deepcopy:
out_dev_array = rmm.device_array_like(X_m)
out_dev_array.copy_to_device(X_m)
X_m = out_dev_array
elif cuda.devicearray.is_cuda_ndarray(X):
if deepcopy:
out_dev_array = rmm.device_array_like(X)
out_dev_array.copy_to_device(X)
X_m = out_dev_array
else:
X_m = X
else:
msg = "X matrix format " + str(X.__class__) + " not supported"
raise TypeError(msg)
dtype = X_m.dtype
if check_dtype:
if isinstance(check_dtype, type) or isinstance(check_dtype, np.dtype):
if dtype != check_dtype:
del X_m
raise TypeError("Expected " + str(check_dtype) + "input but" +
" got " + str(dtype) + " instead.")
elif isinstance(check_dtype, Collection) and \
not isinstance(check_dtype, str):
# The 'not isinstance(check_dtype, string)' condition is needed,
# because the 'float32' string is a Collection, but in this
# branch we only want to process collections like
# [np.float32, np.float64].
if dtype not in check_dtype:
del X_m
raise TypeError("Expected input to be of type in " +
str(check_dtype) + " but got " + str(dtype))
else:
raise ValueError("Expected a type as check_dtype arg, but got " +
str(check_dtype))
n_rows = X_m.shape[0]
if len(X_m.shape) > 1:
n_cols = X_m.shape[1]
else:
n_cols = 1
if check_cols:
if n_cols != check_cols:
raise ValueError("Expected " + str(check_cols) +
" columns but got " + str(n_cols) +
" columns.")
if check_rows:
if n_rows != check_rows:
raise ValueError("Expected " + str(check_rows) +
" rows but got " + str(n_rows) +
" rows.")
if not check_numba_order(X_m, order):
if fail_on_order:
raise ValueError("Expected " + order_to_str(order) +
" major order, but got the opposite.")
else:
warnings.warn("Expected " + order_to_str(order) + " major order, "
"but got the opposite. Converting data, this will "
"result in additional memory utilization.")
X_m = rmm_cupy_ary(cp.array, X_m, copy=False, order=order)
X_m = cuda.as_cuda_array(X_m)
X_ptr = get_dev_array_ptr(X_m)
return inp_array(array=X_m, pointer=X_ptr, n_rows=n_rows, n_cols=n_cols,
dtype=dtype)
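# Illustrative call (comment only -- exercising it needs a CUDA-capable
# environment with cudf/cupy/rmm installed; `X` is a hypothetical float32
# NumPy array):
#
#     X = np.zeros((10, 3), dtype=np.float32)
#     X_m, X_ptr, n_rows, n_cols, dtype = input_to_dev_array(X, order='F')
#     # X_m is a device copy of X, X_ptr its raw pointer, (n_rows, n_cols) == (10, 3)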
def convert_dtype(X, to_dtype=np.float32):
"""
Convert X to be of dtype `dtype`
Supported float dtypes for overflow checking.
Todo: support other dtypes if needed.
"""
if isinstance(X, np.ndarray):
dtype = X.dtype
if dtype != to_dtype:
X_m = X.astype(to_dtype)
if len(X[X == np.inf]) > 0:
raise TypeError("Data type conversion resulted"
"in data loss.")
return X_m
elif isinstance(X, cudf.Series) or isinstance(X, cudf.DataFrame):
return X.astype(to_dtype)
elif cuda.is_cuda_array(X):
X_m = cp.asarray(X)
X_m = X_m.astype(to_dtype)
return cuda.as_cuda_array(X_m)
else:
raise TypeError("Received unsupported input type " % type(X))
return X
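# Sketch of the NumPy branch above (comment only): a float64 array is downcast
# to float32 and a new array is returned rather than a view of the input.
#
#     y = convert_dtype(np.array([1.0, 2.0]), to_dtype=np.float32)
#     y.dtype   # -> dtype('float32')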
def check_numba_order(dev_ary, order):
if order == 'F':
return dev_ary.is_f_contiguous()
elif order == 'C':
return dev_ary.is_c_contiguous()
def order_to_str(order):
if order == 'F':
return 'column (\'F\')'
elif order == 'C':
return 'row (\'C\')'
def input_to_host_array(X, order='F', deepcopy=False,
check_dtype=False, convert_to_dtype=False,
check_cols=False, check_rows=False,
fail_on_order=False):
"""
Convert input X to host array (NumPy) suitable for C++ methods that accept
host arrays.
Acceptable input formats:
* Numpy array - returns a pointer to the original input
* cuDF Dataframe - returns a deep copy always
* cuDF Series - returns by reference or a deep copy depending on `deepcopy`
* cuda array interface compliant array (like Cupy) - returns a \
reference unless deepcopy=True
* numba device array - returns a reference unless deepcopy=True
Parameters
----------
X:
cuDF.DataFrame, cuDF.Series, numba array, NumPy array or any
cuda_array_interface compliant array like CuPy or pytorch.
order: string (default: 'F')
Whether to return a F-major or C-major array. Used to check the order
of the input. If fail_on_order=True method will raise ValueError,
otherwise it will convert X to be of order `order`.
deepcopy: boolean (default: False)
Set to True to always return a deep copy of X.
check_dtype: np.dtype (default: False)
Set to a np.dtype to throw an error if X is not of dtype `check_dtype`.
convert_to_dtype: np.dtype (default: False)
Set to a dtype if you want X to be converted to that dtype if it is
not that dtype already.
check_cols: int (default: False)
Set to an int `i` to check that input X has `i` columns. Set to False
(default) to not check at all.
    check_rows: int (default: False)
        Set to an int `i` to check that input X has `i` rows. Set to False
(default) to not check at all.
fail_on_order: boolean (default: False)
Set to True if you want the method to raise a ValueError if X is not
of order `order`.
Returns
-------
`inp_array`: namedtuple('inp_array', 'array pointer n_rows n_cols dtype')
        `inp_array` is a new host (NumPy) array if the input was not already a
        NumPy array. It is a reference to the input X if it was a NumPy host array
"""
if convert_to_dtype:
X = convert_dtype(X, to_dtype=convert_to_dtype)
check_dtype = False
if isinstance(X, cudf.DataFrame):
dtype = np.dtype(X[X.columns[0]]._column.dtype)
if order == 'F':
X_m = X.as_gpu_matrix(order='F')
elif order == 'C':
X_m = cuml.utils.numba_utils.row_matrix(X)
X_m = X_m.copy_to_host()
elif (isinstance(X, cudf.Series)):
if X.null_count == 0:
X_m = X.to_array()
else:
raise ValueError('cuDF Series has missing (null) values.')
elif isinstance(X, np.ndarray):
X_m = np.array(X, order=order, copy=deepcopy)
elif cuda.is_cuda_array(X):
# Use cuda array interface to create a device array by reference
X_m = cuda.as_cuda_array(X)
X_m = np.array(X_m.copy_to_host(), order=order)
else:
msg = "X matrix format " + str(X.__class__) + " not supported"
raise TypeError(msg)
dtype = X_m.dtype
if check_dtype:
if isinstance(check_dtype, type):
if dtype != check_dtype:
del X_m
raise TypeError("Expected " + str(check_dtype) + "input but" +
" got " + str(dtype) + " instead.")
elif isinstance(check_dtype, Collection):
if dtype not in check_dtype:
del X_m
raise TypeError("Expected input to be of type in " +
str(check_dtype) + " but got " + str(dtype))
n_rows = X_m.shape[0]
if len(X_m.shape) > 1:
n_cols = X_m.shape[1]
else:
n_cols = 1
if check_cols:
if n_cols != check_cols:
raise ValueError("Expected " + str(check_cols) +
" columns but got " + str(n_cols) +
" columns.")
if check_rows:
if n_rows != check_rows:
raise ValueError("Expected " + str(check_rows) +
" rows but got " + str(n_rows) +
" rows.")
X_ptr = X_m.ctypes.data
return inp_array(array=X_m, pointer=X_ptr, n_rows=n_rows, n_cols=n_cols,
dtype=dtype)
| 33.173594
| 79
| 0.601415
|
d9b53737dcd4f1df9fd790329113b1488cdef923
| 199
|
py
|
Python
|
Python/Chapter03/progex03_5.py
|
msiplab/EicProgLab
|
4ca523d09c3b414fceda10f3cff055e94fbcc2a9
|
[
"MIT"
] | 1
|
2021-04-16T04:26:05.000Z
|
2021-04-16T04:26:05.000Z
|
Python/Chapter03/progex03_5.py
|
msiplab/EicProgLab
|
4ca523d09c3b414fceda10f3cff055e94fbcc2a9
|
[
"MIT"
] | null | null | null |
Python/Chapter03/progex03_5.py
|
msiplab/EicProgLab
|
4ca523d09c3b414fceda10f3cff055e94fbcc2a9
|
[
"MIT"
] | 2
|
2020-06-30T02:11:18.000Z
|
2021-08-09T04:44:16.000Z
|
from person import Person
def main():
taro = Person('太郎','アニメ')
hanako = Person('花子','アイドル')
print('太郎の気分:' + taro.kibun)
print('花子の気分:' + hanako.kibun)
if __name__ == '__main__':
main()
| 14.214286
| 31
| 0.623116
|
fc568bbad86d5e77fcb85a897ac12efae61034a7
| 6,612
|
py
|
Python
|
sdk/python/pulumi_azure_native/sql/v20200801preview/get_managed_instance_vulnerability_assessment.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/sql/v20200801preview/get_managed_instance_vulnerability_assessment.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/sql/v20200801preview/get_managed_instance_vulnerability_assessment.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagedInstanceVulnerabilityAssessmentResult',
'AwaitableGetManagedInstanceVulnerabilityAssessmentResult',
'get_managed_instance_vulnerability_assessment',
]
@pulumi.output_type
class GetManagedInstanceVulnerabilityAssessmentResult:
"""
A managed instance vulnerability assessment.
"""
def __init__(__self__, id=None, name=None, recurring_scans=None, storage_account_access_key=None, storage_container_path=None, storage_container_sas_key=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if recurring_scans and not isinstance(recurring_scans, dict):
raise TypeError("Expected argument 'recurring_scans' to be a dict")
pulumi.set(__self__, "recurring_scans", recurring_scans)
if storage_account_access_key and not isinstance(storage_account_access_key, str):
raise TypeError("Expected argument 'storage_account_access_key' to be a str")
pulumi.set(__self__, "storage_account_access_key", storage_account_access_key)
if storage_container_path and not isinstance(storage_container_path, str):
raise TypeError("Expected argument 'storage_container_path' to be a str")
pulumi.set(__self__, "storage_container_path", storage_container_path)
if storage_container_sas_key and not isinstance(storage_container_sas_key, str):
raise TypeError("Expected argument 'storage_container_sas_key' to be a str")
pulumi.set(__self__, "storage_container_sas_key", storage_container_sas_key)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="recurringScans")
def recurring_scans(self) -> Optional['outputs.VulnerabilityAssessmentRecurringScansPropertiesResponse']:
"""
The recurring scans settings
"""
return pulumi.get(self, "recurring_scans")
@property
@pulumi.getter(name="storageAccountAccessKey")
def storage_account_access_key(self) -> Optional[str]:
"""
Specifies the identifier key of the storage account for vulnerability assessment scan results. If 'StorageContainerSasKey' isn't specified, storageAccountAccessKey is required.
"""
return pulumi.get(self, "storage_account_access_key")
@property
@pulumi.getter(name="storageContainerPath")
def storage_container_path(self) -> str:
"""
A blob storage container path to hold the scan results (e.g. https://myStorage.blob.core.windows.net/VaScans/).
"""
return pulumi.get(self, "storage_container_path")
@property
@pulumi.getter(name="storageContainerSasKey")
def storage_container_sas_key(self) -> Optional[str]:
"""
A shared access signature (SAS Key) that has write access to the blob container specified in 'storageContainerPath' parameter. If 'storageAccountAccessKey' isn't specified, StorageContainerSasKey is required.
"""
return pulumi.get(self, "storage_container_sas_key")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetManagedInstanceVulnerabilityAssessmentResult(GetManagedInstanceVulnerabilityAssessmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagedInstanceVulnerabilityAssessmentResult(
id=self.id,
name=self.name,
recurring_scans=self.recurring_scans,
storage_account_access_key=self.storage_account_access_key,
storage_container_path=self.storage_container_path,
storage_container_sas_key=self.storage_container_sas_key,
type=self.type)
def get_managed_instance_vulnerability_assessment(managed_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vulnerability_assessment_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedInstanceVulnerabilityAssessmentResult:
"""
A managed instance vulnerability assessment.
:param str managed_instance_name: The name of the managed instance for which the vulnerability assessment is defined.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str vulnerability_assessment_name: The name of the vulnerability assessment.
"""
__args__ = dict()
__args__['managedInstanceName'] = managed_instance_name
__args__['resourceGroupName'] = resource_group_name
__args__['vulnerabilityAssessmentName'] = vulnerability_assessment_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20200801preview:getManagedInstanceVulnerabilityAssessment', __args__, opts=opts, typ=GetManagedInstanceVulnerabilityAssessmentResult).value
return AwaitableGetManagedInstanceVulnerabilityAssessmentResult(
id=__ret__.id,
name=__ret__.name,
recurring_scans=__ret__.recurring_scans,
storage_account_access_key=__ret__.storage_account_access_key,
storage_container_path=__ret__.storage_container_path,
storage_container_sas_key=__ret__.storage_container_sas_key,
type=__ret__.type)
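# Example invocation sketch (comment only; the resource names below are
# placeholders, not real Azure resources):
#
#     result = get_managed_instance_vulnerability_assessment(
#         managed_instance_name="example-managed-instance",
#         resource_group_name="example-rg",
#         vulnerability_assessment_name="default")
#     result.storage_container_path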
| 44.675676
| 216
| 0.702662
|
9623aa1e71d06b5def859bf55974fd720b08b3d0
| 1,572
|
py
|
Python
|
functions/subsFunc.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
functions/subsFunc.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
functions/subsFunc.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
from app import app
from flask_mail import Message
from classes.shared import email
from classes import settings
from classes import subscriptions
from classes import Sec
from functions import system
@system.asynch
def runSubscription(subject, destination, message):
with app.app_context():
sysSettings = settings.settings.query.first()
finalMessage = message + "<p>If you would like to unsubscribe, click the link below: <br><a href='" + sysSettings.siteProtocol + sysSettings.siteAddress + "/unsubscribe?email=" + destination + "'>Unsubscribe</a></p></body></html>"
msg = Message(subject=subject, recipients=[destination])
msg.sender = sysSettings.siteName + "<" + sysSettings.smtpSendAs + ">"
msg.body = finalMessage
msg.html = finalMessage
email.send(msg)
return True
def processSubscriptions(channelID, subject, message):
subscriptionQuery = subscriptions.channelSubs.query.filter_by(channelID=channelID).all()
if subscriptionQuery:
system.newLog(2, "Sending Subscription Emails for Channel ID: " + str(channelID))
subCount = 0
for sub in subscriptionQuery:
userQuery = Sec.User.query.filter_by(id=int(sub.userID)).first()
if userQuery is not None:
result = runSubscription(subject, userQuery.email, message)
subCount = subCount + 1
system.newLog(2, "Processed " + str(subCount) + " out of " + str(len(subscriptionQuery)) + " Email Subscriptions for Channel ID: " + str(channelID) )
return True
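# Usage sketch (comment only; the channel id, subject and HTML fragment are
# hypothetical -- the unsubscribe footer appended by runSubscription closes the
# body and html tags, so the message should leave them open):
#
#     processSubscriptions(42, "New stream started",
#                          "<html><body><p>Channel 42 is now live.</p>")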
| 44.914286
| 238
| 0.689567
|
a6350f3cc364bb749fe1c4116550af41a0219f77
| 31,348
|
py
|
Python
|
tests/query_test.py
|
Jaymon/prom
|
b9eab53f9cc9870a3212e96129671c36a749aa94
|
[
"MIT"
] | 8
|
2018-04-10T17:42:34.000Z
|
2022-01-14T09:20:23.000Z
|
tests/query_test.py
|
Jaymon/prom
|
b9eab53f9cc9870a3212e96129671c36a749aa94
|
[
"MIT"
] | 85
|
2018-03-29T00:48:28.000Z
|
2021-10-16T07:31:02.000Z
|
tests/query_test.py
|
firstopinion/prom
|
b9eab53f9cc9870a3212e96129671c36a749aa94
|
[
"MIT"
] | 3
|
2019-02-19T23:50:37.000Z
|
2021-05-12T02:07:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import datetime
import time
from threading import Thread
import sys
import testdata
#from testdata.threading import Thread
from . import BaseTestCase, EnvironTestCase, TestCase, SkipTest
from prom.query import (
Query,
Bounds,
Field,
Fields,
Iterator,
#AllIterator,
)
from prom.compat import *
import prom
class FieldTest(BaseTestCase):
def test___new__(self):
q = self.get_query()
f = Field(q, "MAX(foo)")
#f = Field("MAX(foo)", schema=testdata.mock(field_name="foo"))
self.assertEqual("foo", f.name)
self.assertEqual("MAX", f.function_name)
class FieldsTest(BaseTestCase):
def test_fields(self):
q = self.get_query()
fs = Fields()
fs.append(Field(q, "foo", 1))
fs.append(Field(q, "foo", 2))
fs.append(Field(q, "bar", 3))
#fs.append(Field(q, "che", 4))
fields = fs.fields
self.assertEqual(2, fields["foo"])
self.assertEqual(3, fields["bar"])
#self.assertEqual(4, fields["che"])
def test___bool__(self):
fs = Fields()
self.assertFalse(fs)
q = self.get_query()
fs.append(Field(q, "foo", 1))
self.assertTrue(fs)
def test_names(self):
q = self.get_query()
fs = Fields()
fs.append(Field(q, "foo", None))
fs.append(Field(q, "bar", None))
fs.append(Field(q, "foo", None))
self.assertEqual(["foo", "bar"], list(fs.names()))
class BoundsTest(TestCase):
def test___nonzero__(self):
b = Bounds()
self.assertFalse(b)
def test_offset_from_page(self):
lc = Bounds()
lc.page = 2
self.assertEqual(1, lc.offset)
lc = Bounds()
lc.limit = 5
lc.page = 2
self.assertEqual(5, lc.offset)
self.assertEqual(5, lc.limit)
def test_non_paginate_limit(self):
lc = Bounds()
self.assertEqual((0, 0), lc.get())
lc.limit = 10
self.assertEqual((10, 0), lc.get())
lc.page = 1
self.assertEqual((10, 0), lc.get())
lc.offset = 15
self.assertEqual((10, 15), lc.get())
lc.page = 2
self.assertEqual((10, 10), lc.get())
lc.page = 3
self.assertEqual((10, 20), lc.get())
lc.page = 0
self.assertEqual((10, 0), lc.get())
with self.assertRaises(ValueError):
lc.page = -10
lc.offset = 0
self.assertEqual((10, 0), lc.get())
with self.assertRaises(ValueError):
lc.offset = -10
lc.limit = 0
self.assertEqual((0, 0), lc.get())
with self.assertRaises(ValueError):
lc.limit = -10
def test_paginate_limit(self):
lc = Bounds()
lc.limit = 10
lc.paginate = True
self.assertEqual(11, lc.limit_paginate)
self.assertEqual((11, 0), lc.get())
lc.page = 3
self.assertEqual((11, 20), lc.get())
lc.offset = 15
self.assertEqual((11, 15), lc.get())
lc.paginate = False
self.assertEqual((10, 15), lc.get())
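# The assertions above pin down the Bounds contract: get() returns
# (limit, offset), page N maps to offset (N - 1) * limit, offset and page write
# the same underlying value (the last setter wins), and paginate=True fetches
# limit + 1 rows so callers can detect whether another page exists.
# Tiny illustrative trace (comment only):
#
#     b = Bounds(); b.limit = 10; b.page = 3
#     b.get()   # -> (10, 20)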
class QueryTest(EnvironTestCase):
def test_query_syntactic_sugar(self):
Foo = self.get_orm_class()
self.insert(Foo, 5)
pk = Foo.query.select_pk().value_pk(3)
self.assertEqual(3, pk)
pkl = list(Foo.query.select_pk().values_pk([2]))
self.assertEqual(2, pkl[0])
o = Foo.query.one_pk(1)
self.assertEqual(1, o.pk)
ol = list(Foo.query.get_pk([1]))
self.assertEqual(1, ol[0].pk)
def test_select_all(self):
Foo = self.get_orm_class()
q = Foo.query.select("*")
self.assertRegex(q.render(), r"(?m)SELECT\s+\*\s+FROM")
def test_schemas(self):
Foo = self.get_orm_class()
Bar = self.get_orm_class()
bar_q = Bar.query.select_foo()
foo_q = Foo.query.select_pk().in_foo(bar_q)
schemas = foo_q.schemas
self.assertEqual(2, len(schemas))
self.assertEqual(Foo.schema, schemas[0])
self.assertEqual(String(Bar.schema), String(schemas[1]))
def test_render(self):
q = self.get_query()
q.is_foo(1)
q.is_bar("two")
r = q.render()
self.assertRegex(r, r"foo[^=]+=\s*1")
self.assertRegex(r, r"bar[^=]+=\s*'two'")
def test_find_methods_1(self):
q = self.get_query()
opm, qm, fn = q.find_methods("eq_foo_bar")
opm2, qm2, fn2 = q.find_methods("foo_bar_eq")
self.assertEqual("eq_field", opm.__name__)
self.assertEqual(opm2.__name__, opm.__name__)
self.assertEqual("foo_bar", fn)
self.assertEqual(fn2, fn)
with self.assertRaises(AttributeError):
q.find_methods("baklsdkf_foo_bar")
with self.assertRaises(AttributeError):
q.find_methods("baklsdkf_field")
with self.assertRaises(AttributeError):
q.find_methods("_field")
with self.assertRaises(AttributeError):
q.find_methods("baklsdkf")
def test_find_methods_2(self):
q = self.get_query()
method_name = "is_{}".format(testdata.random.choice(list(q.schema.fields.keys())))
r = q.find_methods(method_name)
self.assertEqual("is_field", r[0].__name__)
self.assertTrue(r[2] in set(q.schema.fields.keys()))
with self.assertRaises(AttributeError):
q.find_methods("testing")
q = self.get_query()
q.orm_class = None
tests = [
("gt_foo_bar", ("gt_field", "foo_bar")),
]
for t in tests:
r = q.find_methods(t[0])
self.assertEqual(t[1][0], r[0].__name__)
self.assertEqual(t[1][1], r[2])
def test_find_methods_3(self):
q = Query()
om, qm, fn = q.find_methods("one_pk")
self.assertEqual(q.eq_field, om)
self.assertEqual(q.one, qm)
self.assertEqual("pk", fn)
with self.assertRaises(AttributeError):
q.find_methods("foo_pk")
def test_like(self):
_q = self.get_query()
self.insert(_q, 5)
for bar in ["bar che", "foo bar", "foo bar che"]:
self.insert_fields(_q, bar=bar)
count = _q.copy().like_bar("bar%").count()
self.assertEqual(1, count)
count = _q.copy().like_bar("%bar").count()
self.assertEqual(1, count)
count = _q.copy().like_bar("%bar%").count()
self.assertEqual(3, count)
count = _q.copy().nlike_bar("bar%").count()
self.assertEqual(7, count)
count = _q.copy().nlike_bar("%bar").count()
self.assertEqual(7, count)
count = _q.copy().nlike_bar("%bar%").count()
self.assertEqual(5, count)
count = _q.copy().like_bar("bar____").count()
self.assertEqual(1, count)
count = _q.copy().like_bar("____bar").count()
self.assertEqual(1, count)
count = _q.copy().like_bar("____bar____").count()
self.assertEqual(1, count)
def test_between(self):
_q = self.get_query()
self.insert(_q, 5)
q = _q.copy()
vals = list(q.select_pk().between_pk(2, 4))
self.assertEqual(3, len(vals))
for v in vals:
self.assertTrue(v >= 2 and v <= 4)
def test_ref_threading(self):
basedir = testdata.create_modules({
"rtfoo.rtbar.tqr1": [
"import prom",
"",
"class Foo(prom.Orm):",
" table_name = 'thrd_qr2_foo'",
" one=prom.Field(int, True)",
"",
],
"rtfoo.rtbar.tqr2": [
"import prom",
"from tqr1 import Foo",
"",
"class Bar(prom.Orm):",
" table_name = 'thrd_qr2_bar'",
" one=prom.Field(int, True)",
" foo_id=prom.Field(Foo, True)",
""
]
})
tqr1 = basedir.module("rtfoo.rtbar.tqr1")
sys.modules.pop("rtfoo.rtbar.tqr2.Bar", None)
#tqr2 = basedir.module("tqr2")
def target():
q = tqr1.Foo.query.ref("rtfoo.rtbar.tqr2.Bar")
f = tqr1.Foo()
q = f.query.ref("rtfoo.rtbar.tqr2.Bar")
t1 = Thread(target=target)
# if we don't get stuck in a deadlock this test passes
t1.start()
t1.join()
def test_query_ref_1(self):
testdata.create_modules({
"qr2": "\n".join([
"import prom",
"",
"class Foo(prom.Orm):",
" table_name = 'qr2_foo'",
" foo=prom.Field(int, True)",
" bar=prom.Field(str, True)",
""
"class Bar(prom.Orm):",
" table_name = 'qr2_bar'",
" foo=prom.Field(int, True)",
" bar=prom.Field(str, True)",
" che=prom.Field(Foo, True)",
""
])
})
from qr2 import Foo as t1, Bar as t2
ti1 = t1.create(foo=11, bar='11')
ti12 = t1.create(foo=12, bar='12')
ti2 = t2.create(foo=21, bar='21', che=ti1.pk)
ti22 = t2.create(foo=22, bar='22', che=ti12.pk)
orm_classpath = "{}.{}".format(t2.__module__, t2.__name__)
l = list(ti1.query.ref(orm_classpath).select_foo().is_pk(ti12.pk).get())
self.assertEqual(22, l[0])
self.assertEqual(1, len(l))
l = list(ti1.query.ref(orm_classpath).select_foo().is_pk(ti1.pk).get())
self.assertEqual(21, l[0])
self.assertEqual(1, len(l))
l = list(ti1.query.ref(orm_classpath).select_foo().is_pk(ti1.pk).get())
self.assertEqual(21, l[0])
self.assertEqual(1, len(l))
l = list(ti1.query.ref(orm_classpath).select_foo().is_pk(ti1.pk).get())
self.assertEqual(21, l[0])
self.assertEqual(1, len(l))
l = list(ti1.query.ref(orm_classpath).select_foo().get())
self.assertEqual(2, len(l))
def test_query_ref_2(self):
testdata.create_modules({
"qre": "\n".join([
"import prom",
"",
"class T1(prom.Orm):",
" table_name = 'qre_t1'",
""
"class T2(prom.Orm):",
" table_name = 'qre_t2'",
" t1_id=prom.Field(T1, True)",
""
"class T3(prom.Orm):",
" table_name = 'qre_t3'",
""
])
})
from qre import T1, T2, T3
t1a = T1.create()
t1b = T1.create()
t2 = T2.create(t1_id=t1a.pk)
classpath = "{}.{}".format(T2.__module__, T2.__name__)
r = T1.query.ref(classpath).is_pk(t1a.pk).count()
self.assertEqual(1, r)
r = T1.query.ref(classpath).is_pk(t1b.pk).count()
self.assertEqual(0, r)
def test_null_iterator(self):
"""you can now pass empty lists to in and nin and not have them throw an
error, instead they return an empty iterator"""
_q = self.get_query()
self.insert(_q, 1)
q = _q.copy()
r = q.in_foo([]).get()
self.assertFalse(r)
count = 0
for x in r:
            count += 1
self.assertEqual(0, count)
self.assertEqual(0, len(r))
def test_field_datetime(self):
_q = self.get_query()
q = _q.copy()
q.is__created(day=int(datetime.datetime.utcnow().strftime('%d')))
r = q.get()
self.assertFalse(r)
pk = self.insert(q, 1)[0]
# get the object out so we can use it to query
o = _q.copy().one_pk(pk)
dt = o._created
day = int(dt.strftime('%d'))
q = _q.copy()
q.in__created(day=day)
r = q.get()
self.assertEqual(1, len(r))
q = _q.copy()
q.is__created(day=day)
r = q.get()
self.assertEqual(1, len(r))
q = _q.copy()
q.in__created(day=[day, day + 1])
r = q.get()
self.assertEqual(1, len(r))
def test_pk_fields(self):
tclass = self.get_orm_class()
q = tclass.query
q.gte_pk(5).lte_pk(1).lt_pk(1).gte_pk(5)
q.desc_pk()
q.asc_pk()
q.set_pk(None)
for where_field in q.fields_where:
self.assertEqual(where_field.name, "_id")
for sort_field in q.fields_sort:
self.assertEqual(sort_field.name, "_id")
for set_field in q.fields_set:
self.assertEqual(set_field.name, "_id")
def test_get_pk(self):
tclass = self.get_orm_class()
t = tclass()
t.foo = 1
t.bar = "bar1"
t.save()
t2 = tclass()
t2.foo = 2
t2.bar = "bar2"
t2.save()
pks = [t.pk, t2.pk]
res = tclass.query.get_pk(pks)
self.assertEqual(2, len(res))
self.assertEqual(list(res.pk), pks)
def test_value_query(self):
_q = self.get_query()
v = _q.copy().select_foo().value()
self.assertEqual(None, v)
count = 2
pks = self.insert(_q, count)
o = _q.copy().one_pk(pks[0])
v = _q.copy().select_foo().is_pk(o.pk).value()
self.assertEqual(o.foo, v)
v = _q.copy().select_foo().select_bar().is_pk(o.pk).value()
self.assertEqual(o.foo, v[0])
self.assertEqual(o.bar, v[1])
def test_pk(self):
orm_class = self.get_orm_class()
v = orm_class.query.select_pk().one()
self.assertEqual(None, v)
count = 2
self.insert(orm_class, count)
v = orm_class.query.select_pk().asc_pk().one()
self.assertEqual(1, v)
def test_pks(self):
orm_class = self.get_orm_class()
q = self.get_query()
v = list(orm_class.query.select_pk().get())
self.assertEqual(0, len(v))
count = 2
self.insert(orm_class, count)
v = list(orm_class.query.select_pk().get())
self.assertEqual(2, len(v))
def test___iter__(self):
count = 5
q = self.get_query()
self.insert(q, count)
rcount = 0
for t in q:
rcount += 1
self.assertEqual(count, rcount)
def test_has(self):
q = self.get_query()
self.assertFalse(q.has())
count = 1
self.insert(q, count)
self.assertTrue(q.has())
def test_all(self):
count = 10
q = self.get_query()
self.insert(q, count)
# if no limit is set then it should go through all results
rcount = 0
for r in q.copy().all():
rcount += 1
self.assertEqual(count, rcount)
# if there is a limit then all should only go until that limit
rcount = 0
for r in q.copy().limit(1).all():
rcount += 1
self.assertEqual(1, rcount)
# only go until the end of the results
rcount = 0
for r in q.copy().limit(6).offset(6).all():
rcount += 1
self.assertEqual(4, rcount)
def test_in_field(self):
q = self.get_query()
q.in_foo([])
self.assertEqual([], list(q.get()))
q = self.get_query()
q.in_foo([1, 2])
self.assertEqual(q.fields_where[0].value, [1, 2,])
q = self.get_query()
q.in_foo([1])
self.assertEqual(q.fields_where[0].value, [1])
q = self.get_query()
q.in_foo([1, 2])
self.assertEqual(q.fields_where[0].value, [1, 2])
q = self.get_query()
q.in_foo(range(1, 3))
self.assertEqual(q.fields_where[0].value, [1, 2,])
q = self.get_query()
q.in_foo((x for x in [1, 2]))
self.assertEqual(q.fields_where[0].value, [1, 2,])
def test_set(self):
q = self.get_query()
field_names = list(q.schema.fields.keys())
fields = dict(zip(field_names, [None] * len(field_names)))
q.set(**fields)
self.assertEqual(fields, {f.name: f.value for f in q.fields_set})
q = self.get_query()
q.set(fields)
self.assertEqual(fields, {f.name: f.value for f in q.fields_set})
def test_select(self):
q = self.get_query()
fields_select = list(q.schema.fields.keys())
q.select(*fields_select[0:-1])
self.assertEqual(fields_select[0:-1], list(q.fields_select.names()))
q = self.get_query()
q.select(fields_select)
self.assertEqual(fields_select, list(q.fields_select.names()))
q = self.get_query()
q.select(fields_select[0:-1], fields_select[-1])
self.assertEqual(fields_select, list(q.fields_select.names()))
# make sure chaining works
q = self.get_query()
q.select(fields_select[0]).select(*fields_select[1:])
self.assertEqual(fields_select, list(q.fields_select.names()))
def test_child_magic(self):
orm_class = self.get_orm_class()
class ChildQuery(Query):
pass
orm_class.query_class = ChildQuery
q = orm_class.query
q.is_foo(1) # if there is no error, it passed
with self.assertRaises(AttributeError):
q.aksdlfjldks_foo(2)
def test_properties(self):
q = self.get_query()
r = q.schema
self.assertTrue(r)
r = q.interface
self.assertEqual(r, q.orm_class.interface)
self.assertTrue(r)
q.orm_class = None
self.assertFalse(q.schema)
self.assertFalse(q.interface)
def test___getattr__(self):
q = self.get_query()
q.is_foo(1)
self.assertEqual(1, len(q.fields_where))
self.assertEqual("eq", q.fields_where[0].operator)
with self.assertRaises(AttributeError):
q.testsfsdfsdft_fieldname(1, 2, 3)
def test_append_operation(self):
tests = [
("is_field", ["foo", 1], ["eq", "foo", 1]),
("not_field", ["foo", 1], ["ne", "foo", 1]),
("lte_field", ["foo", 1], ["lte", "foo", 1]),
("lt_field", ["foo", 1], ["lt", "foo", 1]),
("gte_field", ["foo", 1], ["gte", "foo", 1]),
("gt_field", ["foo", 1], ["gt", "foo", 1]),
("in_field", ["foo", (1, 2, 3)], ["in", "foo", [1, 2, 3]]),
("nin_field", ["foo", (1, 2, 3)], ["nin", "foo", [1, 2, 3]]),
]
for i, t in enumerate(tests):
q = self.get_query()
cb = getattr(q, t[0])
r = cb(*t[1])
self.assertEqual(q, r)
self.assertEqual(t[2][0], q.fields_where[0].operator)
self.assertEqual(t[2][1], q.fields_where[0].name)
self.assertEqual(t[2][2], q.fields_where[0].value)
q = self.get_query()
q.between_field("foo", 1, 2)
self.assertEqual("gte", q.fields_where[0].operator)
self.assertEqual("lte", q.fields_where[1].operator)
def test_append_sort(self):
tests = [
("append_sort", [1, "foo"], [1, "foo"]),
("append_sort", [-1, "foo"], [-1, "foo"]),
("append_sort", [5, "foo"], [1, "foo"]),
("append_sort", [-5, "foo"], [-1, "foo"]),
("asc_field", ["foo"], [1, "foo"]),
("desc_field", ["foo"], [-1, "foo"]),
]
q = self.get_query()
for i, t in enumerate(tests):
cb = getattr(q, t[0])
r = cb(*t[1])
self.assertEqual(q, r)
self.assertEqual(t[2][0], q.fields_sort[i].direction)
self.assertEqual(t[2][1], q.fields_sort[i].name)
with self.assertRaises(ValueError):
q.append_sort(0, "foo")
def test_bounds_methods(self):
q = self.get_query()
q.limit(10)
self.assertEqual((10, 0), q.bounds.get())
q.page(1)
self.assertEqual((10, 0), q.bounds.get())
q.offset(15)
self.assertEqual((10, 15), q.bounds.get())
q.page(2)
self.assertEqual((10, 10), q.bounds.get())
q.page(3)
self.assertEqual((10, 20), q.bounds.get())
q.page(0)
self.assertEqual((10, 0), q.bounds.get())
q.offset(0)
self.assertEqual((10, 0), q.bounds.get())
q.limit(0)
self.assertEqual((0, 0), q.bounds.get())
def test_insert_and_update(self):
orm_class = self.get_orm_class()
q = orm_class.query
o = orm_class(foo=1, bar="value 1")
fields = o.to_interface()
pk = q.copy().set(fields).insert()
o = q.copy().one_pk(pk)
self.assertLess(0, pk)
self.assertTrue(o._created)
self.assertTrue(o._updated)
fields["foo"] = 2
fields["bar"] = "value 2"
row_count = q.copy().set(fields).is_pk(pk).update()
self.assertEqual(1, row_count)
o2 = q.copy().one_pk(pk)
self.assertEqual(2, o2.foo)
self.assertEqual("value 2", o2.bar)
self.assertEqual(o._created, o2._created)
self.assertEqual(o._updated, o2._updated)
def test_update_bubble_up(self):
"""
https://github.com/jaymon/prom/issues/11
"""
orm = self.get_orm()
orm.schema.set_field("che", prom.Field(str, False))
orm.foo = 1
orm.bar = "bar 1"
orm.che = None
orm.save()
ret = orm.query.set_foo(2).set_bar("bar 2").not_che(None).update()
self.assertEqual(0, ret)
ret = orm.query.set_foo(2).set_bar("bar 2").is_che(None).update()
self.assertEqual(1, ret)
def test_delete(self):
tclass = self.get_orm_class()
first_pk = self.insert(tclass, 1)[0]
with self.assertRaises(ValueError):
r = tclass.query.delete()
r = tclass.query.is_pk(first_pk).delete()
self.assertEqual(1, r)
r = tclass.query.is_pk(first_pk).delete()
self.assertEqual(0, r)
def test_get(self):
TestGetTorm = self.get_orm_class()
_ids = self.insert(TestGetTorm, 2)
q = TestGetTorm.query
for o in q.get():
self.assertEqual(type(o), TestGetTorm)
self.assertTrue(o._id in _ids)
self.assertFalse(o.is_modified())
def test_one(self):
TestGetOneTorm = self.get_orm_class()
_ids = self.insert(TestGetOneTorm, 2)
q = TestGetOneTorm.query
o = q.one()
self.assertEqual(type(o), TestGetOneTorm)
self.assertTrue(o._id in _ids)
self.assertFalse(o.is_modified())
def test_copy(self):
q1 = self.get_query()
q2 = q1.copy()
q1.is_foo(1)
self.assertEqual(1, len(q1.fields_where))
self.assertEqual(0, len(q2.fields_where))
self.assertNotEqual(id(q1), id(q2))
self.assertNotEqual(id(q1.fields_where), id(q2.fields_where))
self.assertNotEqual(id(q1.bounds), id(q2.bounds))
def test_values_query(self):
_q = self.get_query()
count = 2
pks = self.insert(_q, count)
vals = _q.copy().select_foo().values()
self.assertEqual(count, len(vals))
for v in vals:
self.assertTrue(isinstance(v, int))
vals = _q.copy().select_foo().select_bar().values()
self.assertEqual(count, len(vals))
for v in vals:
self.assertTrue(isinstance(v, list))
vals = _q.copy().select_foo().limit(1).values()
self.assertEqual(1, len(vals))
def test_count(self):
orm_class = self.get_orm_class()
self.insert(orm_class, 10)
self.assertEqual(5, orm_class.query.offset(5).count())
self.assertEqual(5, orm_class.query.limit(5).count())
self.assertEqual(10, orm_class.query.count())
class IteratorTest(BaseTestCase):
def get_iterator(self, count=5, limit=5, page=0):
q = self.get_query()
self.insert(q, count)
i = q.limit(limit).page(page).get()
return i
def test___repr__(self):
"""https://github.com/Jaymon/prom/issues/137"""
orm_class = self.create_orms()
it = orm_class.query.get()
s = it.__repr__()
self.assertNotEqual("[]", s)
def test___init__(self):
count = 10
orm_class = self.get_orm_class()
self.insert(orm_class, count)
q = orm_class.query.gt_pk(5)
it = Iterator(q)
self.assertLess(0, len(it))
for o in it:
self.assertLess(5, o.pk)
def test___getitem___slicing(self):
count = 10
orm_class = self.get_orm_class()
pks = self.insert(orm_class, count)
it = orm_class.query.select_pk().asc_pk().get()
list(it[-5:6])
return
self.assertEqual(pks[-5:6], list(it[-5:6]))
self.assertEqual(pks[2:5], list(it[2:5]))
self.assertEqual(pks[2:], list(it[2:]))
self.assertEqual(pks[:2], list(it[:2]))
with self.assertRaises(ValueError):
it[1:2:2]
def test___getitem___positive_index(self):
count = 10
orm_class = self.get_orm_class()
pks = self.insert(orm_class, count)
q = orm_class.query.asc_pk()
it = Iterator(q)
self.assertEqual(pks[0], it[0].pk)
self.assertEqual(pks[-1], it[len(pks) - 1].pk)
with self.assertRaises(IndexError):
it[len(pks)]
q = orm_class.query.offset(4).limit(2).asc_pk()
it = Iterator(q)
self.assertEqual(pks[4], it[0].pk)
self.assertEqual(pks[5], it[1].pk)
with self.assertRaises(IndexError):
it[3]
    def test___getitem___negative_index(self):
        count = 10
        orm_class = self.get_orm_class()
        pks = self.insert(orm_class, count)
        q = orm_class.query.asc_pk()
        it = Iterator(q)
        self.assertEqual(it[-1].pk, pks[-1])
        self.assertEqual(it[-2].pk, pks[-2])
        with self.assertRaises(IndexError):
            it[-(len(pks) + 5)]
    def test_copy(self):
        count = 10
        orm_class = self.get_orm_class()
        self.insert(orm_class, count)
        q = orm_class.query.asc_pk()
        it1 = Iterator(q)
        it2 = it1.copy()
        it2.reverse()
        self.assertNotEqual(list(v for v in it1), list(v for v in it2))
    def test_custom(self):
        """make sure setting a custom Iterator class works normally and wrapped
        by an AllIterator()"""
        count = 3
        orm_class = self.get_orm_class()
        self.insert(orm_class, count)
        self.assertEqual(count, len(list(orm_class.query.get())))
        class CustomIterator(Iterator):
            def ifilter(self, o):
                return not o.pk == 1
        orm_class.iterator_class = CustomIterator
        self.assertEqual(count - 1, len(list(orm_class.query.get())))
        self.assertEqual(count - 1, len(list(orm_class.query.all())))
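    # ifilter acts as a keep-predicate: after assigning it, iterating should
    # yield only rows the predicate accepts, matching a plain filter() over an
    # unfiltered copy of the same query.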
    def test_ifilter(self):
        count = 3
        _q = self.get_query()
        self.insert(_q, count)
        l = _q.copy().get()
        self.assertEqual(3, len(list(l)))
        l = _q.copy().get()
        def ifilter(o): return o.pk == 1
        l.ifilter = ifilter
        l2 = _q.copy().get()
        self.assertEqual(len(list(filter(ifilter, l2))), len(list(l)))
    def test_reverse(self):
        """Iterator.reverse() reverses the iterator in place"""
        count = 10
        orm_class = self.get_orm_class()
        pks = self.insert(orm_class, count)
        pks.reverse()
        q = orm_class.query.asc_pk()
        it = Iterator(q)
        it.reverse()
        for i, o in enumerate(it):
            self.assertEqual(pks[i], o.pk)
        q = orm_class.query.asc_pk()
        it = Iterator(q)
        for i, o in enumerate(reversed(it)):
            self.assertEqual(pks[i], o.pk)
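    # all() should page through every matching row transparently: indexing,
    # repeated iteration, and len() behave as if the full result set were a list.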
    def test_all_1(self):
        count = 15
        q = self.get_query()
        pks = self.insert(q, count)
        self.assertLess(0, len(pks))
        g = q.all()
        self.assertEqual(1, g[0].pk)
        self.assertEqual(2, g[1].pk)
        self.assertEqual(3, g[2].pk)
        self.assertEqual(6, g[5].pk)
        self.assertEqual(13, g[12].pk)
        with self.assertRaises(IndexError):
            g[count + 5]
        for i, x in enumerate(g):
            if i > 7: break
        self.assertEqual(9, g[8].pk)
        gcount = 0
        for x in g: gcount += 1
        self.assertEqual(count, gcount)
        gcount = 0
        for x in g: gcount += 1
        self.assertEqual(count, gcount)
        self.assertEqual(count, len(g))
        g = q.all()
        self.assertEqual(count, len(g))
    def test_all_limit(self):
        count = 15
        q = self.get_query()
        self.insert(q, count)
        q.limit(5)
        g = q.all()
        self.assertEqual(3, g[2].pk)
        with self.assertRaises(IndexError):
            g[6]
    def test_values(self):
        count = 5
        _q = self.get_query()
        self.insert(_q, count)
        g = _q.copy().select_bar().get()
        icount = 0
        for v in g:
            self.assertTrue(isinstance(v, basestring))
            icount += 1
        self.assertEqual(count, icount)
        g = _q.copy().select_bar().select_foo().get()
        icount = 0
        for v in g:
            icount += 1
            self.assertTrue(isinstance(v[0], basestring))
            self.assertTrue(isinstance(v[1], int))
        self.assertEqual(count, icount)
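    # The iterator should be re-iterable: looping over it twice yields the full
    # row count both times, and each item is an orm instance.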
    def test___iter__(self):
        count = 5
        i = self.get_iterator(count)
        rcount = 0
        for t in i:
            rcount += 1
        self.assertEqual(count, rcount)
        rcount = 0
        for t in i:
            self.assertTrue(isinstance(t, prom.Orm))
            rcount += 1
        self.assertEqual(count, rcount)
    def test___len__(self):
        count = 5
        i = self.get_iterator(count)
        self.assertEqual(len(i), count)
        orm_class = i.orm_class
        i = orm_class.query.limit(3).get()
        self.assertEqual(3, len(i))
    def test___getattr__(self):
        count = 5
        i = self.get_iterator(count)
        rs = list(i.foo)
        self.assertEqual(count, len(rs))
        with self.assertRaises(AttributeError):
            i.kadfjkadfjkhjkgfkjfkjk_bogus_field
    def test_pk(self):
        count = 5
        i = self.get_iterator(count)
        rs = list(i.pk)
        self.assertEqual(count, len(rs))
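    # has_more() should report whether another page of results exists beyond the
    # current limit/page window.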
    def test_has_more(self):
        limit = 3
        count = 5
        q = self.get_query()
        pks = self.insert(q.orm_class, count)
        self.assertEqual(count, len(pks))
        i = q.limit(limit).page(0).get()
        self.assertTrue(i.has_more())
        i = q.limit(limit).page(3).get()
        self.assertFalse(i.has_more())
        i = q.limit(limit).page(1).get()
        self.assertTrue(i.has_more())
        i = q.limit(0).page(0).get()
        self.assertFalse(i.has_more())
    def test_has_more_limit(self):
        limit = 4
        count = 10
        q = self.get_query()
        pks = self.insert(q, count)
        it = q.select_pk().limit(limit).asc_pk().get()
        self.assertEqual(pks[:limit], list(it))
avg_line_length: 28.216022
max_line_length: 90
alphanum_fraction: 0.540034