content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from enum import Enum
class SupportedProtocols(Enum):
"""Contains a list of all supported protocols (currently only http and https
Value:
http : "http"
https : "https"
"""
http = "http"
https = "https"
class Methods(Enum):
"""Contains a list of all supported HTTP Verbs
"""
get = "GET"
post = "POST"
put = "PUT"
patch = "PATCH"
delete = "DELETE" | [
6738,
33829,
1330,
2039,
388,
628,
198,
4871,
36848,
19703,
4668,
82,
7,
4834,
388,
2599,
198,
220,
220,
220,
37227,
4264,
1299,
257,
1351,
286,
477,
4855,
19565,
357,
41745,
691,
2638,
290,
3740,
198,
220,
220,
220,
11052,
25,
198,
... | 2.487952 | 166 |
from conet.datasets.bengali_dataset import DATA_DIR
from sklearn.model_selection import StratifiedKFold
| [
6738,
369,
316,
13,
19608,
292,
1039,
13,
65,
1516,
7344,
62,
19608,
292,
316,
1330,
42865,
62,
34720,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
29186,
1431,
42,
37,
727,
198
] | 3.058824 | 34 |
from django.http import JsonResponse
import stripe
# Create your views here. | [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
198,
198,
11748,
39858,
198,
198,
2,
13610,
534,
5009,
994,
13
] | 3.714286 | 21 |
# Copyright (c) 2018-2019 Robin Jarry
# SPDX-License-Identifier: MIT
import os
import shlex
from typing import List
import cffi
HERE = os.path.dirname(__file__)
BUILDER = cffi.FFI()
with open(os.path.join(HERE, "cdefs.h")) as f:
BUILDER.cdef(f.read())
HEADERS = search_paths("LIBYANG_HEADERS")
LIBRARIES = search_paths("LIBYANG_LIBRARIES")
EXTRA_CFLAGS = ["-Werror", "-std=c99"]
EXTRA_CFLAGS += shlex.split(os.environ.get("LIBYANG_EXTRA_CFLAGS", ""))
EXTRA_LDFLAGS = shlex.split(os.environ.get("LIBYANG_EXTRA_LDFLAGS", ""))
with open(os.path.join(HERE, "source.c")) as f:
BUILDER.set_source(
"_libyang",
f.read(),
libraries=["yang"],
extra_compile_args=EXTRA_CFLAGS,
extra_link_args=EXTRA_LDFLAGS,
include_dirs=HEADERS,
library_dirs=LIBRARIES,
py_limited_api=False,
)
if __name__ == "__main__":
BUILDER.compile()
| [
2,
15069,
357,
66,
8,
2864,
12,
23344,
12325,
449,
6532,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
11748,
28686,
198,
11748,
427,
2588,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
269,
487,
72,
628,
198,... | 2.129717 | 424 |
import os
import json
import urllib
import requests
import datetime
def calc_transportation(body):
"""
移動の予定のみreturn.
"""
margin = 5*60 #margin before next event start
ret = []
for i in range(len(body)):
indict = {}
if i==0:
dur = get_time(os.environ["HOME"],body[i]["location"],body[i]["mode"])
else:
dur = get_time(body[i-1]["location"],body[i]["location"],body[i]["mode"])
t_delta = datetime.timedelta(seconds=(dur+margin))
t_delta_margin = datetime.timedelta(seconds=margin)
dt = datetime.datetime.fromisoformat(body[i]["start"])
indict["title"] = "移動"
indict["start"] = (dt - t_delta).isoformat()
indict["end"] = (dt - t_delta_margin).isoformat()
indict["backgroundColor"] = "#FFCC99"
ret.append(indict)
return ret
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
11748,
7007,
198,
198,
11748,
4818,
8079,
628,
198,
198,
4299,
42302,
62,
7645,
10189,
7,
2618,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13328,
100,
119,
47... | 2.006522 | 460 |
# we need to import this in order to be able to import the simulator
# it does not have to do anything other than be imported.
from util import path
from examples.minimalExample.SimulatorCrownstone import SimulatorCrownstone
from simulator import SimulationGui, JsonFileStore, Simulator
config = JsonFileStore('./minimalExample/config.json').getData()
userModule = JsonFileStore('./minimalExample/userData.json').getData()
root = SimulatorCrownstone(1, 0, 0)
root.root = True
simulatorCrownstones = [
root,
SimulatorCrownstone(2, 5, 3),
SimulatorCrownstone(3, 10, 6),
SimulatorCrownstone(4, 15, 9),
SimulatorCrownstone(5, 15, 13),
]
a = SimulationGui()
a.loadSimulatorCrownstones(simulatorCrownstones)
a.loadConfig(config)
b = Simulator()
b.loadCrownstones(simulatorCrownstones)
b.loadConfig(config)
a.loadSimulator(b) # this will load the user module into the simulator as a broadcaster.
a.startSimulation(2)
#a.run() | [
2,
356,
761,
284,
1330,
428,
287,
1502,
284,
307,
1498,
284,
1330,
262,
35375,
198,
2,
340,
857,
407,
423,
284,
466,
1997,
584,
621,
307,
17392,
13,
198,
6738,
7736,
1330,
3108,
198,
198,
6738,
6096,
13,
1084,
4402,
16281,
13,
889... | 3.064935 | 308 |
def locate_associated_files(in_file: str):
"""
Locates associated json (and possibly bvec & bval) files.
Parameters
----------
in_file : str
Input file.
Returns
-------
Tuple[str, str, str]
Tuple of associated json (and possibly bvec & bval) files.
"""
from dmriprep.config import config
from nipype.interfaces.base import traits
associated_extenstions = ["json", "bvec", "bval"]
layout = config.execution.layout
output = {}
for key in associated_extenstions:
output[key] = (
layout.get_nearest(in_file, extension=key) or traits.Undefined
)
return [output.get(key) for key in associated_extenstions]
| [
4299,
17276,
62,
32852,
62,
16624,
7,
259,
62,
7753,
25,
965,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
15181,
689,
3917,
33918,
357,
392,
5457,
275,
35138,
1222,
275,
2100,
8,
3696,
13,
628,
220,
220,
220,
40117,
198,
... | 2.530249 | 281 |
from multipledispatch import dispatch
| [
6738,
5021,
10137,
8802,
963,
1330,
27965,
198
] | 4.75 | 8 |
n1 = int(input("Informe um número: "))
n2 = int(input("Informe um número maior que o anterior: "))
for x in range(n1, n2 + 1):
cont=0
for y in range(n1, x + 1):
if x%y==0:
cont+=1
if cont<=2:
print("{} ".format(y), end="") | [
77,
16,
796,
493,
7,
15414,
7203,
818,
687,
68,
23781,
299,
21356,
647,
78,
25,
366,
4008,
198,
77,
17,
796,
493,
7,
15414,
7203,
818,
687,
68,
23781,
299,
21356,
647,
78,
17266,
1504,
8358,
267,
32700,
25,
366,
4008,
198,
198,
... | 1.783439 | 157 |
#Libraries
import pickle
# To ignore warnings
import warnings
import numpy as np
import pandas as pd
import re as re
from sklearn.tree import DecisionTreeClassifier
warnings.filterwarnings("ignore")
################################################################################
### Data Ingestion & Splitting
################################################################################
#Load data
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
all_data = [train_data, test_data]
data = pd.concat([train_data, test_data], sort=False)
data.to_csv('titanic.csv', index=False)
################################################################################
### Data & Feature Processing
################################################################################
#Feature 1-2 ready to use, manipulating others
#Feature 3
for data in all_data:
data['family_size'] = data['SibSp'] + data['Parch'] + 1
#Feature 3.1
for data in all_data:
data['is_alone'] = 0
data.loc[data['family_size'] == 1, 'is_alone'] = 1
#Feature 4
for data in all_data:
data['Embarked'] = data['Embarked'].fillna('S')
#Feature 5
for data in all_data:
data['Fare'] = data['Fare'].fillna(data['Fare'].median())
train_data['category_fare'] = pd.qcut(train_data['Fare'], 4)
#Feature 6
for data in all_data:
age_avg = data['Age'].mean()
age_std = data['Age'].std()
age_null = data['Age'].isnull().sum()
random_list = np.random.randint(age_avg - age_std, age_avg + age_std , size = age_null)
data['Age'][np.isnan(data['Age'])] = random_list
data['Age'] = data['Age'].astype(int)
train_data['category_age'] = pd.cut(train_data['Age'], 5)
#Feature 7
for data in all_data:
data['title'] = data['Name'].apply(get_title)
for data in all_data:
data['title'] = data['title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'],'Rare')
data['title'] = data['title'].replace('Mlle','Miss')
data['title'] = data['title'].replace('Ms','Miss')
data['title'] = data['title'].replace('Mme','Mrs')
#Map Data
for data in all_data:
#Mapping Sex
sex_map = { 'female':0 , 'male':1 }
data['Sex'] = data['Sex'].map(sex_map).astype(int)
#Mapping Title
title_map = {'Mr':1, 'Miss':2, 'Mrs':3, 'Master':4, 'Rare':5}
data['title'] = data['title'].map(title_map)
data['title'] = data['title'].fillna(0)
#Mapping Embarked
embark_map = {'S':0, 'C':1, 'Q':2}
data['Embarked'] = data['Embarked'].map(embark_map).astype(int)
#Mapping Fare
data.loc[ data['Fare'] <= 7.91, 'Fare'] = 0
data.loc[(data['Fare'] > 7.91) & (data['Fare'] <= 14.454), 'Fare'] = 1
data.loc[(data['Fare'] > 14.454) & (data['Fare'] <= 31), 'Fare'] = 2
data.loc[ data['Fare'] > 31, 'Fare'] = 3
data['Fare'] = data['Fare'].astype(int)
#Mapping Age
data.loc[ data['Age'] <= 16, 'Age'] = 0
data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age'] = 1
data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age'] = 2
data.loc[(data['Age'] > 48) & (data['Age'] <= 64), 'Age'] = 3
data.loc[ data['Age'] > 64, 'Age'] = 4
#5 Feature Selection
#5.1 Create list of columns to drop
drop_elements = ["Name", "Ticket", "Cabin", "SibSp", "Parch", "family_size"]
#5.3 Drop columns from both data sets
train_data = train_data.drop(drop_elements, axis = 1)
train_data = train_data.drop(['PassengerId','category_fare', 'category_age'], axis = 1)
test_data = test_data.drop(drop_elements, axis = 1)
#5.4 Double check
print("Training data")
print(train_data.head)
print("Test data")
print(test_data.head)
################################################################################
### Model Training & Persist as pickle
################################################################################
#6 Do training with decision tree
X_train = train_data.drop("Survived", axis=1)
Y_train = train_data["Survived"]
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
pkl = open('model.pkl', 'wb')
pickle.dump(decision_tree, pkl)
decision_tree = None
X_train.to_csv("train_data.csv", index=False)
################################################################################
### Model Loading & Inference
################################################################################
#7.1 Prepare prediction data & Model
model_pkl = open('model.pkl', 'rb')
model = pickle.load(model_pkl)
X_test = test_data.drop("PassengerId", axis=1).copy()
#7.2 Do predict
accuracy = round(model.score(X_train, Y_train) * 100, 2)
print('=========================')
print("Model Accuracy: ",accuracy)
print('=========================')
#7.3 Run prediction on entire test data
Y_pred = model.predict(X_test)
result = pd.DataFrame({
"PassengerId":test_data["PassengerId"],
"Survived": Y_pred
})
result.to_csv('result.csv', index = False) | [
2,
43,
11127,
198,
11748,
2298,
293,
198,
2,
1675,
8856,
14601,
198,
11748,
14601,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
302,
355,
302,
198,
6738,
1341,
35720,
13,
21048,
1330,
2642... | 2.748213 | 1,819 |
from SloppyCell.ReactionNetworks import *
# This file was distributed with Ingenue
f = file('spg1_4cell.net')
lines = f.readlines()
for ii, line in enumerate(lines):
if line.startswith('&width'):
width = int(line.split()[1])
elif line.startswith('&height'):
height = int(line.split()[1])
elif line.startswith('&Network'):
net_id = line.split()[1]
elif line.startswith('&Genes'):
begin_genes = ii
elif line.startswith('&Interactions'):
begin_ints = ii
elif line.startswith('&ParameterValues'):
begin_params = ii
elif line.startswith('&InitLevels'):
begin_inits = ii
net = Network(net_id)
if height > 1:
for ii in range(height):
for jj in range(width):
net.add_compartment('cell_%i_%i' % (ii, jj))
else:
for jj in range(width):
net.add_compartment('cell_%i' % (jj))
ii = begin_genes + 1
while True:
line = lines[ii].strip()
if line.startswith('&endGenes'):
break
if line.startswith('&'):
# Process this species
species_id = line[1:]
ii += 1
# Skip to the end of this species entry
while not lines[ii].strip().startswith('&end'):
line = lines[ii]
first, second = line.split()
if first == '&Location':
on_membrane = (second == 'membrane')
ii += 1
for comp_ii, comp_id in enumerate(net.compartments.keys()):
if not on_membrane:
net.add_species('%s_%s' % (species_id, comp_id), comp_id,
name = r'%s_{%i}' % (species_id, comp_ii))
else:
for jj in range(6):
id = '%s_%s_side_%i' % (species_id, comp_id, jj)
name = r'%s_{%i, %i}' % (species_id, comp_ii, jj)
net.add_species(id, comp_id, name = name)
ii += 1
for comp_id in net.compartments.keys():
net.set_var_constant('B_%s' % comp_id, True)
ii = begin_params + 1
while True:
line = lines[ii].strip()
if line.startswith('&endParameterValues'):
break
if line.startswith('&'):
# Process this parameter
temp = line.split()
param_id, param_val = temp[0][1:], float(temp[1])
net.add_parameter(param_id, param_val)
ii += 1
# Create all the appropriate parameter names
for param_id in net.parameters.keys():
if param_id == 'K_PTC_HH':
name = r'k_{PTCHH}'
elif param_id.startswith('K_'):
term = param_id.split('_')[1]
name = r'\kappa_{%s}' % term
elif param_id.startswith('nu_'):
term = param_id.split('_')[1]
name = r'\nu_{%s}' % term
elif param_id.startswith('H_'):
term = param_id.split('_')[1]
name = r'H_{%s}' % term
elif param_id.startswith('alpha_'):
term = param_id.split('_')[1]
name = r'\alpha_{%s}' % term
else:
name = param_id
net.parameters.get(param_id).name = name
net.parameters.get('Endo_WG').name = r'r_{EndoWG}'
net.parameters.get('Exo_WG').name = r'r_{ExoWG}'
net.parameters.get('Mxfer_WG').name = r'r_{MxferWG}'
net.parameters.get('LMxfer_WG').name = r'r_{LMxferWG}'
net.parameters.get('LMxfer_PTC').name = r'r_{LMxferPTC}'
net.parameters.get('LMxfer_HH').name = r'r_{LMxferHH}'
net.parameters.get('maxHH').name = r'\left[HH\right]_0'
net.parameters.get('maxPTC').name = r'\left[PTC\right]_0'
net.parameters.get('C_CID').name = r'C_{CID}'
net.add_parameter('T_0', 1.0, is_optimizable=False)
ii = begin_inits + 1
while True:
line = lines[ii].strip()
if line.startswith('&endInitLevels'):
break
elif line.startswith('&BackgroundLevel'):
spec_id = line.split()[1]
value = float(line.split()[2])
for var_id in net.species.keys():
if var_id.startswith(spec_id):
net.set_var_ic(var_id, value)
elif line.startswith('&ColumnIC'):
spec_id = lines[ii + 1].split()[1]
value = float(lines[ii + 2].split()[1])
column = int(lines[ii + 3].split()[1])
cell_id = net.compartments.keys()[column]
for var_id in net.species.keys():
if var_id.startswith(spec_id) and var_id.count(cell_id):
net.set_var_ic(var_id, value)
ii += 3
ii += 1
net.add_func_def('phi', ['X', 'k_X', 'v_X'], '(X**2)**(v_X/2) / ((k_X**2)**(v_X/2) + (X**2)**(v_X/2))',
name = r'\phi')
net.add_func_def('psi', ['X', 'k_X', 'v_X'], '1 - phi(X, k_X, v_X)',
name = r'\psi')
for comp_ii, comp_id in enumerate(net.compartments.keys()):
# rhs for en_i
# First define EWG^{tot}_{n(i,j)}
net.add_parameter('EWG_tot_pres_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{n(%i, j)}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_pres_%s' % comp_id,
presented_by_neighbors(net, comp_id, 'EWG'))
rule_str = 'T_0/H_en * (phi(EWG_tot_pres_%(comp)s * psi(CN_%(comp)s, K_CNen, nu_CNen), K_WGen, nu_WGen) - en_%(comp)s)'
net.add_rate_rule('en_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for EN_i
rule_str = 'T_0/H_EN * (en_%(comp)s - EN_%(comp)s)'
net.add_rate_rule('EN_%s' % comp_id, rule_str % {'comp': comp_id})
# rhs for wg_i
num = '(beta_wg * phi(CID_%(comp)s * psi (CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
denom = '(1 + beta_wg * phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNwg, nu_CNwg), K_CIDwg, nu_CIDwg) + alpha_wg * phi(IWG_%(comp)s, K_WGwg, nu_WGwg))' % {'comp': comp_id}
rule_dict = {'num' : num, 'denom' : denom, 'comp' : comp_id}
rule_str = 'T_0/H_wg * %(num)s/%(denom)s - T_0/H_wg * wg_%(comp)s'
net.add_rate_rule('wg_%s' % comp_id, rule_str % rule_dict)
# rhs for IWG_i
net.add_parameter('EWG_tot_%s' % comp_id, is_optimizable=False,
name=r'{EWG_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('EWG_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'EWG'))
rule_str = 'T_0/H_IWG * (wg_%(comp)s - IWG_%(comp)s) + T_0*(Endo_WG * EWG_tot_%(comp)s - Exo_WG * IWG_%(comp)s)'
net.add_rate_rule('IWG_%s' % comp_id, rule_str % {'comp': comp_id})
#rhs for EWG_i_j
for side_jj in range(6):
terms = ['T_0 * Exo_WG * IWG_%(comp)s/6',
'-(T_0 * Endo_WG * EWG_%(comp)s_side_%(side)i)',
'T_0 * Mxfer_WG * (%(sub)s - EWG_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_WG * (EWG_%(comp)s_side_%(prev)i + EWG_%(comp)s_side_%(next)i - 2*EWG_%(comp)s_side_%(side)i)',
'-T_0/H_EWG * EWG_%(comp)s_side_%(side)i']
rule_str = ' + '.join(terms)
rule_dict = {'sub': opposite_side(net, comp_id, side_jj, 'EWG'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('EWG_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for ptc_i
rule_str = 'T_0/H_ptc * (phi(CID_%(comp)s * psi(CN_%(comp)s, K_CNptc, nu_CNptc), K_CIDptc, nu_CIDptc) - ptc_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('ptc_%s' % comp_id,
rule_str % rule_dict)
# rhs for PTC_i_j
for side_jj in range(6):
terms = ['T_0/H_PTC * (ptc_%(comp)s/6 - PTC_%(comp)s_side_%(side)i)',
'-(T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i)',
'T_0 * LMxfer_PTC * (PTC_%(comp)s_side_%(prev)i + PTC_%(comp)s_side_%(next)i - 2*PTC_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('PTC_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for cid_i
rule_str = 'T_0/H_cid * (phi(B_%(comp)s * psi(EN_%(comp)s, K_ENcid, nu_ENcid), K_Bcid, nu_Bcid) - cid_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('cid_%s' % comp_id,
rule_str % rule_dict)
# rhs for CID_i
net.add_parameter('PTC_tot_%s' % comp_id, is_optimizable=False,
name=r'{PTC_{%i}}^{tot}' % comp_ii)
net.add_assignment_rule('PTC_tot_%s' % comp_id,
total_in_cell(net, comp_id, 'PTC'))
rule_str = 'T_0/H_CID * (cid_%(comp)s - CID_%(comp)s) - T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CID_%s' % comp_id,
rule_str % rule_dict)
# rhs for CN_i
rule_str = 'T_0 * C_CID * CID_%(comp)s * phi(PTC_tot_%(comp)s, K_PTCCID, nu_PTCCID) - T_0 * CN_%(comp)s/H_CN'
rule_dict = {'comp': comp_id}
net.add_rate_rule('CN_%s' % comp_id,
rule_str % rule_dict)
# rhs for hh_i
rule_str = 'T_0/H_hh * (phi(EN_%(comp)s * psi(CN_%(comp)s, K_CNhh, nu_CNhh), K_ENhh, nu_ENhh) - hh_%(comp)s)'
rule_dict = {'comp': comp_id}
net.add_rate_rule('hh_%s' % comp_id,
rule_str % rule_dict)
# rhs for HH_i_j
for side_jj in range(6):
terms = ['T_0/H_HH * (hh_%(comp)s/6 - HH_%(comp)s_side_%(side)s)'
'-T_0 * K_PTC_HH * maxPTC * %(sub1)s * HH_%(comp)s_side_%(side)s',
'T_0 * LMxfer_HH * (HH_%(comp)s_side_%(prev)i + HH_%(comp)s_side_%(next)i - 2*HH_%(comp)s_side_%(side)i)']
rule_str = ' + '.join(terms)
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'PTC'),
'comp': comp_id,
'side': side_jj,
'prev': (side_jj - 1) % 6,
'next': (side_jj + 1) % 6}
net.add_rate_rule('HH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
# rhs for PH_i_j
for side_jj in range(6):
rule_str= 'T_0 * K_PTC_HH * maxHH * %(sub1)s * PTC_%(comp)s_side_%(side)i - T_0*PH_%(comp)s_side_%(side)i / H_PH'
rule_dict = {'sub1': opposite_side(net, comp_id, side_jj, 'HH'),
'comp': comp_id,
'side': side_jj}
net.add_rate_rule('PH_%s_side_%i' % (comp_id, side_jj),
rule_str % rule_dict)
| [
6738,
3454,
26696,
28780,
13,
3041,
2673,
7934,
5225,
1330,
1635,
198,
198,
2,
770,
2393,
373,
9387,
351,
554,
5235,
518,
198,
69,
796,
2393,
10786,
2777,
70,
16,
62,
19,
3846,
13,
3262,
11537,
198,
6615,
796,
277,
13,
961,
6615,
... | 1.769681 | 5,983 |
'''
read or write text files in python
'''
# %%
# read a text file
# open the file for reading
with open("dumps/test.txt", mode='r') as f:
# read all the file content
fStr = f.read()
# please note that once again calling f.read() will return empty string
print(fStr)
# this will print the whole file contents
# %%
# load all lines into a list
with open("dumps/test.txt", mode='r') as f:
# load all the lines of the file into an array
textLines = f.readlines()
print(textLines)
# %%
# writing text to a file
# with mode = 'w', old text will be deleted
# with mode = 'a', the new text will be appended to the old text
with open("dumps/test.txt", mode='w') as f:
f.write("The first line\n")
f.write("This is the second line\nThis the third line")
# %%
| [
7061,
6,
198,
961,
393,
3551,
2420,
3696,
287,
21015,
198,
7061,
6,
198,
198,
2,
43313,
198,
2,
1100,
257,
2420,
2393,
198,
2,
1280,
262,
2393,
329,
3555,
198,
4480,
1280,
7203,
67,
8142,
14,
9288,
13,
14116,
1600,
4235,
11639,
81... | 2.836299 | 281 |
from django.contrib.auth.backends import BaseBackend, UserModel, ModelBackend
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.db.models import Exists, OuterRef, Q
import rest_framework_simplejwt.serializers
from pprint import pprint
class MyModelBackend(ModelBackend):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
1891,
2412,
1330,
7308,
7282,
437,
11,
11787,
17633,
11,
9104,
7282,
437,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13... | 3.174603 | 126 |
# pylint: disable=missing-docstring
import datetime
from core.models import instruments
from datasource.models import candles
| [
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
198,
11748,
4818,
8079,
198,
198,
6738,
4755,
13,
27530,
1330,
12834,
198,
6738,
19395,
1668,
13,
27530,
1330,
32268,
628
] | 4 | 32 |
from __future__ import annotations
from ..ocr import ocrhandle, ocr_rectify
from ..utils import segment
from ..utils.device import Device
from ..utils.log import logger
from ..utils.recognize import Recognizer, Scene, RecognizeError
from ..utils.solver import BaseSolver
from ..data import recruit_tag, recruit_agent
class RecruitPoss(object):
""" 记录公招标签组合的可能性数据 """
class RecruitSolver(BaseSolver):
"""
自动进行公招
"""
def run(self, priority: list[str] = None) -> None:
"""
:param priority: list[str], 优先考虑的公招干员,默认为高稀有度优先
"""
self.priority = priority
self.recruiting = 0
logger.info('Start: 公招')
logger.info(f'目标干员:{priority if priority else "无,高稀有度优先"}')
super().run()
def recruit_tags(self) -> bool:
""" 识别公招标签的逻辑 """
needs = self.find('career_needs', judge=False)
avail_level = self.find('available_level', judge=False)
budget = self.find('recruit_budget', judge=False)
up = needs[0][1] - 80
down = needs[1][1] + 60
left = needs[1][0]
right = avail_level[0][0]
while True:
# ocr the recruitment tags and rectify
img = self.recog.img[up:down, left:right]
ocr = ocrhandle.predict(img)
for x in ocr:
if x[1] not in recruit_tag:
x[1] = ocr_rectify(img, x, recruit_tag, '公招标签')
# recruitment tags
tags = [x[1] for x in ocr]
logger.info(f'公招标签:{tags}')
# choose tags
choose, best = self.tags_choose(tags, self.priority)
if best.choose < (1 << 5) and best.min <= 3:
# refresh
if self.tap_element('recruit_refresh', detected=True):
self.tap_element('double_confirm', 0.8,
interval=3, judge=False)
continue
break
logger.info(f'选择:{choose}')
# tap selected tags
for x in ocr:
color = self.recog.img[up+x[2][0][1]-5, left+x[2][0][0]-5]
if (color[2] < 100) != (x[1] not in choose):
self.device.tap((left+x[2][0][0]-5, up+x[2][0][1]-5))
if best.choose < (1 << 5):
# 09:00
self.tap_element('one_hour', 0.2, 0.8, 0)
else:
# 03:50
[self.tap_element('one_hour', 0.2, 0.2, 0) for _ in range(2)]
[self.tap_element('one_hour', 0.5, 0.2, 0) for _ in range(5)]
# start recruit
self.tap((avail_level[1][0], budget[0][1]), interval=5)
def recruit_result(self) -> bool:
""" 识别公招招募到的干员 """
agent = None
ocr = ocrhandle.predict(self.recog.img)
for x in ocr:
if x[1][-3:] == '的信物':
agent = x[1][:-3]
agent_ocr = x
break
if agent is None:
logger.warning('未能识别到干员名称')
else:
if agent not in recruit_agent.keys():
agent_with_suf = [x+'的信物' for x in recruit_agent.keys()]
agent = ocr_rectify(
self.recog.img, agent_ocr, agent_with_suf, '干员名称')[:-3]
if agent in recruit_agent.keys():
if 2 <= recruit_agent[agent]['stars'] <= 4:
logger.info(f'获得干员:{agent}')
else:
logger.critical(f'获得干员:{agent}')
self.tap((self.recog.w // 2, self.recog.h // 2))
def tags_choose(self, tags: list[str], priority: list[str]) -> tuple[list[str], RecruitPoss]:
""" 公招标签选择核心逻辑 """
if priority is None:
priority = []
if len(priority) and isinstance(priority[0], str):
priority = [[x] for x in priority]
possibility: dict[int, RecruitPoss] = {}
agent_level_dict = {}
# 挨个干员判断可能性
for x in recruit_agent.values():
agent_name = x['name']
agent_level = x['stars']
agent_tags = x['tags']
agent_level_dict[agent_name] = agent_level
# 高级资深干员需要有特定的 tag
if agent_level == 6 and '高级资深干员' not in tags:
continue
# 统计 9 小时公招的可能性
valid_9 = None
if 3 <= agent_level <= 6:
valid_9 = 0
if agent_level == 6 and '高级资深干员' in tags:
valid_9 |= (1 << tags.index('高级资深干员'))
if agent_level == 5 and '资深干员' in tags:
valid_9 |= (1 << tags.index('资深干员'))
for tag in agent_tags:
if tag in tags:
valid_9 |= (1 << tags.index(tag))
# 统计 3 小时公招的可能性
valid_3 = None
if 1 <= agent_level <= 4:
valid_3 = 0
for tag in agent_tags:
if tag in tags:
valid_3 |= (1 << tags.index(tag))
# 枚举所有可能的标签组合子集
for o in range(1 << 5):
if valid_9 is not None and o & valid_9 == o:
if o not in possibility.keys():
possibility[o] = RecruitPoss(o)
possibility[o].ls.append(agent_name)
possibility[o].max = max(possibility[o].max, agent_level)
possibility[o].min = min(possibility[o].min, agent_level)
possibility[o].lv2a3 |= 2 <= agent_level <= 3
_o = o + (1 << 5)
if valid_3 is not None and o & valid_3 == o:
if _o not in possibility.keys():
possibility[_o] = RecruitPoss(_o)
possibility[_o].ls.append(agent_name)
possibility[_o].max = max(possibility[_o].max, agent_level)
possibility[_o].min = min(possibility[_o].min, agent_level)
possibility[_o].lv2a3 |= 2 <= agent_level <= 3
# 检查是否存在无法从公开招募中获得的干员
for considering in priority:
for x in considering:
if agent_level_dict.get(x) is None:
logger.error(f'该干员并不能在公开招募中获得:{x}')
raise RuntimeError
best = RecruitPoss(0)
# 按照优先级判断,必定选中同一星级干员
# 附加限制:min_level == agent_level
if best.poss == 0:
logger.debug('choose: priority, min_level == agent_level')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
agent_level = agent_level_dict[x]
if agent_level != 1 and agent_level == possibility[o].min:
possibility[o].poss += 1 / len(possibility[o].ls)
elif agent_level == 1 and agent_level == possibility[o].min == possibility[o].max:
# 必定选中一星干员的特殊逻辑
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照优先级判断,若目标干员 1 星且该组合不存在 2/3 星的可能,则选择
# 附加限制:min_level == agent_level == 1 and not lv2a3
if best.poss == 0:
logger.debug('choose: priority, min_level == agent_level == 1 and not lv2a3')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
agent_level = agent_level_dict[x]
if agent_level == possibility[o].min == 1 and not possibility[o].lv2a3:
# 特殊判断:选中一星和四星干员的 Tag 组合
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照优先级判断,必定选中星级 >= 4 的干员
# 附加限制:min_level >= 4
if best.poss == 0:
logger.debug('choose: priority, min_level >= 4')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
if possibility[o].min >= 4:
for x in considering:
if x in possibility[o].ls:
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照等级下限判断,必定选中星级 >= 4 的干员
# 附加限制:min_level >= 4
if best.poss == 0:
logger.debug('choose: min_level >= 4')
for o in possibility.keys():
possibility[o].poss = 0
if possibility[o].min >= 4:
possibility[o].poss = possibility[o].min
if best < possibility[o]:
best = possibility[o]
# 按照优先级判断,检查其概率
if best.poss == 0:
logger.debug('choose: priority')
for considering in priority:
for o in possibility.keys():
possibility[o].poss = 0
for x in considering:
if x in possibility[o].ls:
possibility[o].poss += 1 / len(possibility[o].ls)
if best < possibility[o]:
best = possibility[o]
if best.poss > 0:
break
# 按照等级下限判断,默认高稀有度优先
if best.poss == 0:
logger.debug('choose: min_level')
for o in possibility.keys():
possibility[o].poss = possibility[o].min
if best < possibility[o]:
best = possibility[o]
logger.debug(f'poss: {possibility}')
logger.debug(f'best: {best}')
# 返回选择的标签列表
choose = []
for i in range(len(tags)):
if best.choose & (1 << i):
choose.append(tags[i])
return choose, best
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
11485,
1696,
1330,
267,
6098,
28144,
11,
267,
6098,
62,
2554,
1958,
198,
6738,
11485,
26791,
1330,
10618,
198,
6738,
11485,
26791,
13,
25202,
1330,
16232,
198,
6738,
11485,
26791,
13,
... | 1.609197 | 6,415 |
# coding=utf-8
# Author: Rion B Correia
# Date: April 17, 2020
#
# Description: Demographics
#
#
import configparser
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import event
from utils import add_own_encoders, ensurePathExists, map_age_to_age_group
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import to_hex
from matplotlib.colors import LinearSegmentedColormap
if __name__ == '__main__':
# DB
cfg = configparser.ConfigParser()
cfg.read('../config.ini')
url = 'mysql+pymysql://%(user)s:%(pass)s@%(host)s/%(db)s?charset=utf8' % cfg['IU-RDC-MySQL']
engine = sqlalchemy.create_engine(url, encoding='utf-8')
event.listen(engine, "before_cursor_execute", add_own_encoders)
#
# Retrieve Data
#
#
# Gender (only from those with a medication)
#
sqlg = """
SELECT
p.gender,
COUNT(*) AS 'count'
FROM patient p
WHERE
p.gender IS NOT NULL
AND
p.id_patient IN (SELECT m.id_patient FROM medication m)
GROUP BY p.gender
"""
dfg = pd.read_sql(sqlg, con=engine, index_col='gender')
# Percent
dfg['%'] = dfg['count'] / dfg['count'].sum()
# Color
dfg.loc['Male', 'color'] = '#1f77b4'
dfg.loc['Female', 'color'] = '#d62728'
# Age (today; only from those with a medication)
sqla = """
SELECT
p.age_today AS 'age',
COUNT(*) AS 'count'
FROM patient p
WHERE
p.id_patient IN (SELECT m.id_patient FROM medication m)
GROUP BY p.age_today
"""
dfa = pd.read_sql(sqla, con=engine, index_col='age')
# Map age to age_group
dfa['age_group'] = map_age_to_age_group(dfa.index)
# Group by age_group
dfa = dfa.groupby('age_group').agg({'count': 'sum'})
# Percent
dfa['%'] = dfa['count'] / dfa['count'].sum()
# Color
#cmap = LinearSegmentedColormap.from_list(name='custom', colors=['#ff7f0e', '#d62728', '#9467bd'], N=256, gamma=1.0)
cmap = cm.get_cmap('jet_r')
dfa['color'] = [to_hex(cmap(c)) for c in np.linspace(0, 1, len(dfa))]
#
# Ethnicity
#
sqle = """
SELECT
p.ethnicity,
COUNT(*) AS 'count'
FROM patient p
WHERE
/* p.ethnicity IS NOT NULL AND */
p.id_patient IN (SELECT m.id_patient FROM medication m)
GROUP BY p.ethnicity
"""
dfe = pd.read_sql(sqle, con=engine)
# Rename
dfe['ethnicity'] = dfe.replace({'Hispanic/Latino': 'Hispanic/Latino', 'Not Hispanic, Latino/a, or Spanish origin': 'Not Hisp/Latin/Span', 'Not Hispanic/Latino': 'Not Hisp/Latin'})
dfe['ethnicity'] = dfe['ethnicity'].fillna('n/a')
# To Categorical
dfe['ethnicity'] = pd.Categorical(dfe['ethnicity'], categories=['Not Hisp/Latin', 'Not Hisp/Latin/Span', 'Hisp/Latin', 'n/a'], ordered=True)
# Sort
dfe = dfe.sort_values('ethnicity', ascending=True)
# Set Index
dfe.set_index('ethnicity', inplace=True)
# %
dfe['%'] = dfe['count'] / dfe['count'].sum()
# Color
dfe['color'] = ['#ffbb78', '#c49c94', '#98df8a', '#c7c7c7']
# Race
sqlr = """
SELECT
p.race,
count(*) AS 'count'
FROM patient p
WHERE
p.id_patient IN (SELECT m.id_patient FROM medication m)
GROUP BY p.race
"""
dfr = pd.read_sql(sqlr, con=engine)
# Rename / Group
race_minorities = 'Bi-racial, Hispanic, Islander, or Indian'
dfr['race'] = dfr['race'].replace({'Islander': 'Minorities', 'Bi-racial': 'Minorities', 'Hispanic': 'Minorities', 'Indian': 'Minorities'})
dfr['race'] = dfr['race'].fillna('n/a')
dfr = dfr.groupby('race').agg('sum').reset_index()
# To Categorical
dfr['race'] = pd.Categorical(dfr['race'], categories=['White', 'Black', 'Asian', 'Minorities', 'Indian', 'Islander', 'Bi-racial', 'Hispanic', 'n/a'], ordered=True)
# Sort
dfr = dfr.sort_values('race', ascending=True)
# Set Index[]
dfr.set_index('race', inplace=True)
# %
dfr['%'] = dfr['count'] / dfr['count'].sum()
# Color
dfr['color'] = ['#2ca02c', '#8c564b', '#e377c2', '#17becf', '#c7c7c7']
#
# Plot
#
fig, ax = plt.subplots(figsize=(7, 2.5), nrows=1, ncols=1)
ax.set_title('Patient Demographics')
width = 0.80
edgecolor = '#7f7f7f'
# Gender
cum_percent = 0
for gender, row in dfg.iterrows():
percent = row['%']
facecolor = row['color']
b = ax.barh(2, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
#
patch = b.get_children()[0]
bx, by = patch.get_xy()
tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
#
ax.text(tx, ty, gender, ha='center', va='center', rotation=0)
#
cum_percent += percent
# Age
cum_percent = 0
for age_group, row in dfa.iterrows():
percent = row['%']
facecolor = row['color']
b = ax.barh(1, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
#
patch = b.get_children()[0]
bx, by = patch.get_xy()
tx, ty = 0.59 * patch.get_width() + bx, 0.5 * patch.get_height() + by
#
if age_group not in ['80-84', '85-89', '90-94', '95-99', '>99']:
ax.text(tx, ty, age_group, ha='center', va='center', rotation=90)
#
cum_percent += percent
# Race
cum_percent = 0
for race, row in dfr.iterrows():
percent = row['%']
facecolor = row['color']
b = ax.barh(0, percent, width, facecolor=facecolor, left=cum_percent, edgecolor=edgecolor, alpha=0.5)
#
patch = b.get_children()[0]
bx, by = patch.get_xy()
tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
#
if race in ['White', 'Black']:
ax.text(tx, ty, race, ha='center', va='center', rotation=0)
elif race == 'Minorities':
mx, my = 0.58, -1.1
ax.annotate(race_minorities, xy=(tx, 0.25 * patch.get_height() + by), xycoords='data', xytext=(mx, my),
arrowprops=dict(facecolor='black', arrowstyle="<|-,head_length=0.3,head_width=0.15",
connectionstyle="angle3,angleA=0,angleB=90"),
horizontalalignment='left', verticalalignment='center'
)
else:
ax.text(tx, ty, race, ha='center', va='center', rotation=90)
#
cum_percent += percent
# Ethnicity
"""
cum_percent = 0
for ethnicity, row in dfe.iterrows():
percent = row['%']
color = row['color']
b = ax.barh(0, percent, width, color=color, left=cum_percent, edgecolor=edgecolor)
#
patch = b.get_children()[0]
bx, by = patch.get_xy()
tx, ty = 0.5 * patch.get_width() + bx, 0.45 * patch.get_height() + by
#
if ethnicity in ['Hisp/Latin']:
ax.text(tx, ty, ethnicity, ha='center', va='center', rotation=90)
else:
pass
#
cum_percent += percent
"""
#
xticks = np.linspace(0, 1, 11, endpoint=True)
xticklabels = ['%.1f' % x for x in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
yticks = np.array([0, 1, 2]) # + (width / 2)
ax.set_yticks(yticks)
ax.set_yticklabels(['Race', 'Age', 'Gender'])
ax.set_xlim(0, 1)
ax.set_ylim(-0.5, 2.5)
# Save
plt.subplots_adjust(left=0.10, right=0.97, bottom=0.20, top=0.88, wspace=0, hspace=0)
wIMGfile = 'images/img-demographics.pdf'
ensurePathExists(wIMGfile)
fig.savefig(wIMGfile)
plt.close() | [
2,
19617,
28,
40477,
12,
23,
198,
2,
6434,
25,
371,
295,
347,
2744,
260,
544,
198,
2,
7536,
25,
3035,
1596,
11,
12131,
198,
2,
198,
2,
12489,
25,
1897,
24188,
198,
2,
198,
2,
198,
11748,
4566,
48610,
198,
11748,
299,
32152,
355,... | 2.100641 | 3,746 |
import logging
import os
import aiobotocore
from ..helpers.env import getenv
BOT_IP = getenv('BOT_IP')
SSH_KEY_NAME = getenv('SSH_KEY_NAME')
_template = get_template()
_session = aiobotocore.get_session()
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
257,
72,
672,
313,
420,
382,
198,
6738,
11485,
16794,
364,
13,
24330,
1330,
651,
24330,
628,
198,
33,
2394,
62,
4061,
796,
651,
24330,
10786,
33,
2394,
62,
4061,
11537,
198,
5432,
39,
62,
... | 2.559524 | 84 |
# Copyright (C) 2018 Seoul National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
NUM_SHARDS = "num_shards"
SHARD_ID = "shard_id"
SHARD_FILTER_PRED = "shard_filter_predicate"
FILTER_DATASET_NUM_SHARDS_POS = 1
FILTER_DATASET_SHARD_ID_POS = 2
def create_num_shards_and_shard_id():
    """Create and return the num-shards and shard-id tensors.

    Both tensors are registered in the default graph's collections
    (NUM_SHARDS / SHARD_ID) so they can be found and rewritten later;
    per the comment below, parallax replaces the constant values when it
    transforms the graph for distributed execution.

    Returns:
      A tuple ``(num_shards_tensor, shard_id_tensor)`` of int64 constants,
      initialized to 1 and 0 respectively.

    Raises:
      ValueError: if the num shards tensor or the shard id tensor is already
        registered in the graph's collections.
    """
    # TODO: allow num_shards and shard_id inside a library function
    graph = tf.get_default_graph()
    # Guard against double creation: each collection may hold at most one tensor.
    num_shards_tensors = graph.get_collection(NUM_SHARDS)
    if len(num_shards_tensors) > 0:
        raise ValueError('"num_shards" already exists.')
    shard_id_tensors = graph.get_collection(SHARD_ID)
    if len(shard_id_tensors) > 0:
        raise ValueError('"shard_id" already exists.')
    # Create in proper graph and base name_scope (name_scope(None) resets to
    # the root scope so the tensors get the exact names "num_shards"/"shard_id").
    with graph.as_default() as g, g.name_scope(None):
        # Initialize num_shards_tensor=1, and shard_id_tensor=0.
        # parallax updates the value when the graph is transformed
        # for distributed version.
        num_shards_tensor = tf.constant(1, dtype=tf.int64, name="num_shards")
        shard_id_tensor = tf.constant(0, dtype=tf.int64, name="shard_id")
    tf.add_to_collection(NUM_SHARDS, num_shards_tensor)
    tf.add_to_collection(SHARD_ID, shard_id_tensor)
    return num_shards_tensor, shard_id_tensor
def shard(ds):
    """Convert a dataset to include shard, it has same effect
    with ds.shard(num_shards, index).

    The filter predicate's name is recorded in the SHARD_FILTER_PRED
    collection so the graph transformer can locate and rewrite it later.

    NOTE(review): `_get_or_create_num_shards_and_shard_id` and `filter_fn`
    are not defined in this module as shown — presumably the getter wraps
    `create_num_shards_and_shard_id` above and `filter_fn` selects elements
    with ``index % num_shards == shard_id``; confirm against the full source.
    """
    # TODO: allow dataset shard inside a function or dataset api
    # (e.g., map, parallel_interleave)
    num_shards, shard_id = _get_or_create_num_shards_and_shard_id()
    f = ds._enumerate().filter(filter_fn)
    # Sanity check: the predicate must capture exactly the shared tensors,
    # in this order, for the distributed rewrite to patch them.
    assert f._predicate.captured_inputs[0] == num_shards
    assert f._predicate.captured_inputs[1] == shard_id
    tf.add_to_collection(SHARD_FILTER_PRED,
                         f._predicate.name)
    # Drop the enumeration index added by _enumerate(), keep the element.
    return f.map(lambda _, elem: elem)
| [
2,
15069,
357,
34,
8,
2864,
22372,
2351,
2059,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,... | 2.659763 | 1,014 |
"""
@author: tenghehan
将 video 形式的视频数据转化成 image sequence 形式的数据.
"""
import cv2
import os
import argparse
from tqdm import tqdm
from utils.log import get_logger
if __name__ == "__main__":
args = parse_args()
with VideoFramer(args, video_path=args.VIDEO_PATH) as vdo_frm:
vdo_frm.run() | [
37811,
198,
31,
9800,
25,
256,
1516,
258,
7637,
198,
198,
49546,
2008,
10263,
121,
95,
28156,
237,
21410,
164,
100,
228,
165,
95,
239,
46763,
108,
162,
235,
106,
164,
121,
105,
44293,
244,
22755,
238,
2939,
8379,
10263,
121,
95,
281... | 1.993421 | 152 |
"""
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
Copyright (c) 2015 Juniper Networks, Inc.
All rights reserved.
Use is subject to license terms.
Licensed under the Apache License, Version 2.0 (the ?License?); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging.config
import configparser
from jnpr.space import rest, factory
def main(my_space):
    """
    Gets all connected Junos devices from Space. Then gets the snmp/location
    configured on each. If this configuration is present, it creates a public
    tag with this value and assign the tag to the device.

    Args:
        my_space: a rest.Space end point already authenticated against
            the Junos Space server.

    NOTE(review): `tag_device` is not defined in this excerpt — presumably
    imported or defined elsewhere in the original script; confirm.
    """
    # Only 'up' junos devices are considered.
    devices_list = my_space.device_management.devices.\
        get(filter_={'deviceFamily': 'junos',
                     'connectionStatus': 'up'})
    for d in devices_list:
        print(d.name, d.ipAddr, d.platform)
        # Fetch only the snmp/location subtree of the device configuration.
        c = d.configurations.expanded.post(xpaths=['configuration/snmp/location'])
        try:
            tag_device(my_space, d, c.configuration.location)
        except AttributeError:
            # Raised when the expanded configuration has no location element.
            print("Device %s does not have location configured" % d.name)
if __name__ == "__main__":
    # Initialize logging
    logging.config.fileConfig('../test/logging.conf')
    # Extract Space URL, userid, password from config file
    config = configparser.RawConfigParser()
    config.read("../test/test.conf")
    url = config.get('space', 'url')
    user = config.get('space', 'user')
    passwd = config.get('space', 'passwd')
    # Create a Space REST end point and run the tagging job against it.
    space = rest.Space(url, user, passwd)
    main(space)
| [
37811,
198,
18227,
5626,
8355,
5781,
6375,
22657,
46,
6089,
27975,
38162,
9947,
5626,
34444,
6375,
12680,
45811,
39837,
1137,
198,
198,
15269,
357,
66,
8,
1853,
7653,
9346,
27862,
11,
3457,
13,
198,
3237,
2489,
10395,
13,
198,
198,
1104... | 2.948571 | 700 |
"""Write, Read and Plot 1D input files for swak4foam
==========================================================================
This module allows to read OpenFoam output of one dimensional computation
and then write, plot and read input files for Boundary and Initial
Conditions imposition in 3D computation (via swak4foam):
.. autofunction:: create1dprofil
.. autofunction:: read1dprofil
.. autofunction:: plot1dprofil
"""
#
# ---------------- Module General Import and Declarations ---------------
#
import numpy as np
from fluidfoam.readof import typefield, readmesh, readfield
def create1dprofil_spe(pathw, waxis, var, varname, typevar):
    """
    This function provides way to write specific 1D profil (var array)
    in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).

    Args:
        pathw: str, destination path (must already contain a 1d_profil dir)\n
        waxis: numpy array, 1D mesh coordinates\n
        var: numpy array; a shape (1,) scalar or shape (3, 1) vector is
            broadcast to every cell of waxis\n
        varname: str, base name of the written file(s)\n
        typevar: str, "scalar" or "vector" (a vector writes three files
            suffixed 0/1/2, one per component)\n

    Returns:
        status: 'create 1D profiles: done' if ok

    A way you might use me is:\n
    status = fluidfoam.create1dprofil_spe("pathw", z, epsilon,
                                          "epsilon", "scalar")

    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    size1d = waxis.shape[0]

    def _write_profile(profile_path, values):
        # Write one OpenFoam-style list "( (z v)\n ... )"; `values` is either
        # per-cell or length-1 (uniform value broadcast to every cell).
        # Context manager guarantees the file is closed even on write errors
        # (the previous version leaked the handle on exceptions).
        with open(profile_path, "w") as f:
            f.write("(\n")
            if len(values) == 1:
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(values[0]) + ")\n")
            else:
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(values[cell]) + ")\n")
            f.write(")\n")

    if typevar == "scalar":
        _write_profile(pathw + "/1d_profil/" + varname + ".xy", var)
    elif typevar == "vector":
        # One file per vector component, suffixed with the component index.
        for i in range(3):
            _write_profile(pathw + "/1d_profil/" + varname + str(i) + ".xy", var[i])
    else:
        # Historical behaviour preserved: an unknown type only prints a hint
        # and the function still returns the success status below.
        print("PROBLEM with input: Good input is for example :")
        print(
            'fluidfoam.create1dprofil_spe("/data/1dcompute/", Y, epsilon, "epsilon", "scalar")\n'
        )
    status = "create 1D profiles: done"
    return status
def create1dprofil(pathr, pathw, timename, axis, varlist):
    """
    This function provides way to read 1D profiles at time timename of pathr
    and write them in OpenFoam Format in the 1d_profil folder of pathw
    (for BC imposition in 2D or 3D case for example).

    Args:
        pathr: str, path of the OpenFoam case the profiles are read from\n
        pathw: str, path that contains the (pre-existing) 1d_profil folder\n
        timename: str, time directory to read (e.g. "750")\n
        axis: str, coordinate used as the 1D abscissa: "X", "Y" or "Z"\n
        varlist: list of str, names of the fields to export\n

    Returns:
        status: 'create 1D profiles: done' if ok

    A way you might use me is:\n
    status = fluidfoam.create1dprofil("path_of_case", "pathw", time, 'Y',
    ['Ua', 'Ub'])

    Please note that the 1d_profil directory must be existing in the pathw
    directory
    """
    # Read the 1D mesh once; the axis selection below picks the coordinate.
    X, Y, Z = readmesh(pathr)
    size1d = Y.shape[0]
    filename = ""
    for var in varlist:
        field = readfield(pathr, timename, var)
        typevar = typefield(pathr, timename, var)
        if axis == "X":
            waxis = X
        elif axis == "Y":
            waxis = Y
        elif axis == "Z":
            waxis = Z
        else:
            # NOTE(review): a bad axis only warns here; waxis then keeps its
            # previous value (or raises NameError on first iteration) — confirm.
            print("axis does not exist, please check input parameters\n")
        filename = "" + var
        if typevar == "scalar":
            filename1 = pathw + "/1d_profil/" + filename + ".xy"
            f = open(filename1, "w")
            f.write("(\n")
            if field.shape == (1,):
                # Uniform field: broadcast the single value to every cell.
                for cell in range(size1d):
                    f.write("(" + str(waxis[cell]) + " " + str(field[0]) + ")\n")
            else:
                for cell in range(size1d):
                    f.write(
                        "(" + str(waxis[cell]) + " " + str(field[cell]) + ")\n"
                    )
            # np.savetxt(f, np.c_[Y, field], fmt="(%s %s)")
            f.write(")\n")
            f.close()
        elif typevar == "vector":
            # One file per vector component, suffixed 0/1/2.
            for i in range(3):
                filename1 = pathw + "/1d_profil/" + filename + str(i) + ".xy"
                f = open(filename1, "w")
                f.write("(\n")
                if field.shape == (3, 1):
                    # Uniform vector: broadcast component i to every cell.
                    for cell in range(size1d):
                        f.write(
                            "("
                            + str(waxis[cell])
                            + " "
                            + str(field[i, 0])
                            + ")\n"
                        )
                else:
                    for cell in range(size1d):
                        f.write(
                            "("
                            + str(waxis[cell])
                            + " "
                            + str(field[i, cell])
                            + ")\n"
                        )
                f.write(")\n")
                f.close()
            print("Warning for pyof users : Ua=Ua0, Va=Ua2, Wa=Ua1\n")
        else:
            print("PROBLEM with varlist input: Good input is for example :")
            print(
                'fluidfoam.create1dprofil("/data/1dcompute/", "/data/1dcompute/", "750", "Y",[\'omega\',\'p\'])\n'
            )
    status = "create 1D profiles: done"
    return status
def read1dprofil(file_name):
    """This function provides way to read and return 1D profil created by the
    create1dprofil function. file_name can be a complete path.

    Args:
        filename: str, path of the ``.xy`` profile file

    Returns:
        z: 1d mesh corresponding to 1d profil\n
        field: scalar value of the field specified via filename\n
        size1d: size of the 1d profil

    A way you might use me is:\n
    z, a, size1d = fluidfoam.read1dprofil("path_of_case/1d_profil/a.xy")
    """
    # Read the file once (the previous version read it twice: readlines()
    # to count, then seek(0) and a second pass with a bitwise & condition).
    with open(file_name) as handle:
        lines = handle.readlines()
    # First and last lines are the OpenFoam list delimiters "(" and ")".
    size1d = len(lines) - 2
    z = np.empty(size1d)
    field = np.empty(size1d)
    for i, line in enumerate(lines[1:size1d + 1]):
        cols = line.replace("(", "").replace(")", "").split()
        z[i] = cols[0]
        field[i] = cols[1]
    return z, field, size1d
def plot1dprofil(pathr, varlist):
    """This function provides way to plot 1D profiles created by the
    create1dprofil function.

    Args:
        pathr: str (must be the full path of the 1d_profil directory)\n
        varlist: list of str, profile names plotted side by side

    A way you might use me is:\n
    fluidfoam.plot1dprofil("path_of_case/1d_profil", ['Ua', 'Ub', 'alpha'])
    """
    # Local import keeps matplotlib optional for users of the read/write API.
    import matplotlib.pyplot as plt

    # The first profile fixes the shared mesh (z) and the profile length.
    z, field, size1d = read1dprofil(pathr + "/" + varlist[0] + ".xy")
    fields = np.empty([len(varlist), size1d])
    fields[0] = field
    for i in range(len(varlist) - 1):
        z, field, size1d = read1dprofil(pathr + "/" + varlist[i + 1] + ".xy")
        fields[i + 1] = field
    # One subplot per variable, sharing the vertical (mesh) axis.
    # NOTE(review): with len(varlist) == 1, plt.subplots returns a bare Axes
    # object and axarr[i] below would fail — confirm callers pass >= 2 names.
    dummy, axarr = plt.subplots(1, len(varlist), sharey=True)
    for i, dummy in enumerate(varlist):
        axarr[i].plot(fields[i], z)
        axarr[i].set_title(varlist[i])
    plt.show()
    return
| [
37811,
16594,
11,
4149,
290,
28114,
352,
35,
5128,
3696,
329,
1509,
461,
19,
6513,
321,
198,
23926,
2559,
855,
198,
1212,
8265,
3578,
284,
1100,
4946,
37,
78,
321,
5072,
286,
530,
38517,
29964,
198,
392,
788,
3551,
11,
7110,
290,
11... | 1.943524 | 3,984 |
import argparse
import logging
import os
import queue
import sys
import confuse
from energy_usage.influx_client import InfluxClient
from energy_usage.mqtt_client import MqttClient
from energy_usage.sep import parse_sep, usage_to_datapoints
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
16834,
198,
11748,
25064,
198,
198,
11748,
27531,
198,
198,
6738,
2568,
62,
26060,
13,
10745,
22564,
62,
16366,
1330,
4806,
22564,
11792,
198,
6738,
2568,
62,
26060,
13... | 3.053191 | 94 |
import builtins
import sys
class Didatic_test:
"""
A class to configure and run simple didatic tests
Didatic_test(Callable=None, args={}, test_name=None, keyboard_inputs=(), \
expected_output=None, expected_prints="", verbose=None, \
run_output_test=None, run_prints_test=None,
)
Parameters
----------
fn: Callable
The function that will be tested
args: dict
The arguments that fn will be tested with. Use parse() to generate args,\
ex.: args = parse('a',5,7, x=1, s='aaa')
test_name: str
An optional identifier that will be printed with the test result
keyboard_inputs: Tuple[str, ...]
A tuple containig all the simulated keyboards inputs that will be used in \
every fn's input()
expected_output: Any
What the fn's return value should be
expected_prints: str
What the fn's internal print()'s concatenation should be \
(including new line character)
verbose: bool
Controls if all the fn's internal print()'s and input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
    # Input replacement that also copies each typed line into 'inputs'
    # (does not forward to the original input).
    @staticmethod
    # Print replacement that also copies printed text into 'prints'
    # (does not forward to the original print).
    @staticmethod
    @staticmethod
    # Redefines fn intercepting everything: args, inputs, prints, output.
    # NOTE(review): the helper defs these comments/decorators belonged to
    # (e.g. intercepted_fn, __stringify_args) appear stripped from this
    # extract, leaving the decorators stacked on generate_test — confirm
    # against the full source.
    @staticmethod
    @staticmethod
    @staticmethod
    def generate_test(
        fn=None,
        # NOTE(review): mutable default dict is shared across calls; it is
        # only read via .get() below, but confirm no caller mutates it.
        args={},
        test_name="Test",
        verbose=None,
        run_output_test=None,
        run_prints_test=None,
        generator_verbose=None,
    ):
        """
        generate_test(fn, args, test_name="Test", verbose=False,
                      run_output_test=True, run_prints_test=False,
                      generator_verbose=False)

        Run `fn` once with the given 'args' while intercepting all its
        inputs, prints and output, then return the source string of a
        Didatic_test constructor whose expected values are the intercepted
        ones.

        ex.: generate_test(fn, Didatic_test.parse_args(1,2,3), "Test-5")

        Parameters
        ----------
        fn: the function that will be tested (falls back to default_fn)
        args: dict in the format {"pos_inputs": args, "key_inputs": kwargs}
        test_name: test name to identify the results
        verbose: whether the generated test will echo fn's inputs/prints
        run_output_test: whether the generated test checks the return value
        run_prints_test: whether the generated test checks the prints
        generator_verbose: whether this generator run echoes inputs/prints

        Returns
        -------
        constructor_str: a string with a Didatic_test(...) constructor call
            carrying the configuration and the intercepted inputs, prints
            and output as expected values
        """
        if fn is None:
            fn = Didatic_test.default_fn
        if verbose is None:
            verbose = Didatic_test.default_verbose or False
        if run_output_test is None:
            run_output_test = Didatic_test.default_run_output_test or True
        if run_prints_test is None:
            run_prints_test = Didatic_test.default_run_prints_test or False
        if generator_verbose is None:
            # NOTE(review): this stores None-or-False into the class default
            # instead of reading it, unlike the branches above — looks like
            # it should be `generator_verbose = Didatic_test.default_generator_verbose
            # or False`; confirm intent before changing.
            Didatic_test.default_generator_verbose = generator_verbose or False
        interceptions = {}
        intercepted_fn = Didatic_test.intercepted_fn(
            fn, interceptions, generator_verbose, "[I]: ", "[O]: "
        )
        pos_args = args.get("pos_inputs", ())
        key_args = args.get("key_inputs", {})
        output = intercepted_fn(*pos_args, **key_args)
        fn_name = fn.__name__
        args_str = Didatic_test.__stringify_args(args)
        output_str = "'" + output + "'" if type(output) == str else str(output)
        prints_str = "".join(interceptions["prints"])
        constructor_str = f"Didatic_test({fn_name}, Didatic_test.parse_args\
{args_str}, '{test_name}', {interceptions['inputs']}, {output_str}, \
'{prints_str}', {verbose}, {run_output_test}, {run_prints_test})"
        return constructor_str
    @staticmethod
    def auto_redefine(fn, args={}, verbose=False):
        """
        auto_redefine(fn, args={}, verbose=False)

        Run fn normally once (with the given parsed 'args') recording every
        keyboard input typed, then return a redefined fn that replays those
        same inputs (simulated) on every later call.
        The args and kwargs continue to work normally.

        ex.: open_menu = auto_redefine(open_menu)

        Parameters
        ----------
        fn: The function that will be called with intercepted inputs
        args: dict built by parse_args() with the call arguments
            (NOTE(review): mutable default dict — only read via .get()
            here, but confirm no caller mutates it)
        verbose: flag that controls if the input prompts are printed

        Returns
        -------
        auto_redefined: a new function that always replays the keyboard
            inputs captured on this first run
        """
        interceptions = {}
        # Recording run: intercepted_fn copies every input() line into
        # interceptions["inputs"] while the user types normally.
        intercepted_fn = Didatic_test.intercepted_fn(
            fn, interceptions, verbose, "[I]: ", "[O]: "
        )
        pos_args = args.get("pos_inputs", ())
        key_args = args.get("key_inputs", {})
        intercepted_fn(*pos_args, **key_args)
        inputs_list = interceptions["inputs"]
        # Replay wrapper: feeds the recorded inputs back on every call.
        auto_redefined = Didatic_test.redefine(fn, inputs_list, verbose)
        return auto_redefined
    @staticmethod
    def redefine(fn, keyboard_inputs, verbose=False):
        """
        redefine(fn, keyboard_inputs, verbose=False)

        Return a new function that will use the 'keyboard_inputs' sequence
        as simulated inputs, but will work as fn otherwise.

        ex.: call_menu = redefine(call_menu,('lorem ipsum','25','y','n'))

        Parameters
        ----------
        fn: The function that will be copied but will use
            the simulated inputs
        keyboard_inputs: The inputs that will be simulated
        verbose: flag that controls if the simulated prompts are printed

        Returns
        -------
        refedined_fn: a fn copy that always uses 'keyboard_inputs'
            as input simulation
        """
        # NOTE(review): the inner definition of `refedined_fn` is missing
        # from this extract — as shown, this line raises NameError; the
        # wrapper body was presumably stripped during extraction. Confirm
        # against the full source before modifying.
        return refedined_fn
@staticmethod
def parse_args(*args, **kwargs):
"""
parse_args(args, kwargs)
Auxiliar function to pass fn's args and kwargs like in a normal fn call
Just passs the positional args first and then key arguments
ex.: parse_args(1,2,3,x=15,y=[0,0,1],z='aa')
Parameters
----------
args: The positional arguments of fn
kwargs: The key arguments of fn
Returns
-------
values: dict with 2 keys: 'pos_inputs' and 'key_inputs'
"""
return {"pos_inputs": args, "key_inputs": kwargs}
    @staticmethod
    def run_tests(tests):
        """
        run_tests(tests)

        Run all the tests in the 'tests' list, print a summary and return
        the list of per-test result dicts (as produced by run()).

        Parameters
        ----------
        tests: list[Didatic_test]
            A list of tests that you want to execute
        """
        results = []
        number_of_tests = len(tests)
        completed_tests = 0
        aborted_tests = 0
        correct_outputs_tests = 0
        correct_prints_tests = 0
        for index, test in enumerate(tests):
            # Prefix each test name with its position for readable output.
            if test.test_name is None:
                test.test_name = index
            else:
                test.test_name = f"{index} - {test.test_name}"
            result = test.run()
            # Booleans are summed as 0/1 counters.
            correct_outputs_tests += result["output_is_correct"]
            correct_prints_tests += result["print_is_correct"]
            aborted_tests += result["test_failed"]
            completed_tests += result["test_done"]
            results.append(result)
        print(
            f"""
            correct_outputs_tests: {correct_outputs_tests}/{number_of_tests}
            correct_prints_tests: {correct_prints_tests}/{number_of_tests}
            aborted_tests: {aborted_tests}/{number_of_tests}
            completed_tests: {completed_tests}/{number_of_tests}
            """
        )
        return results
fn = None
verbose = False
run_output_test = True
run_prints_test = False
@staticmethod
def set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test=None):
"""
set_defaults(fn=None, verbose=None, run_output_test=None, run_prints_test None)
Set common default values fot the tests configs to avoid repetition when \
setting them up later
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed
run_output_test: bool
Controls if the fn's return value is checked
run_prints_test: bool
Controls if the fn's internal print()'s are checked
"""
if not (fn is None):
Didatic_test.fn = new_fn
if not (verbose is None):
Didatic_test.verbose = verbose
if not (run_output_test is None):
Didatic_test.run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.run_prints_test = run_prints_test
@staticmethod
def set_generator_defaults(
fn=None,
verbose=None,
run_output_test=None,
run_prints_test=None,
generator_verbose=None,
):
"""
set_generator_defaults(fn=None, verbose=None, run_output_test=None, \
run_prints_test=None, generator_verbose=None)
Set common default values fot the test generator to avoid unnecessary repetition
Parameters
----------
fn: Callable
The function that will be tested
verbose: bool
Controls if all the fn's internal print()'s and \
input()'s prompts are printed when a test runs
run_output_test: bool
Controls if the fn's return value is tested
run_prints_test: bool
Controls if the fn's internal print()'s are tested
generator_verbose: bool
Controls if all the fn's internal print()'s and\
input()'s prompts are printed on the test\
generator run
"""
if not (fn is None):
Didatic_test.default_fn = fn
if not (verbose is None):
Didatic_test.default_verbose = verbose
if not (run_output_test is None):
Didatic_test.default_run_output_test = run_output_test
if not (run_prints_test is None):
Didatic_test.default_run_prints_test = run_prints_test
if not (generator_verbose is None):
Didatic_test.default_generator_verbose = generator_verbose
    def run(self):
        """
        run()

        Run the configured Didatic_test with simulated keyboard inputs,
        print the per-test report and return a dictionary with the outcome:

        {
            "output_is_correct": bool,
            "print_is_correct": bool,
            "test_failed": bool,
            "test_done": bool,
        }

        NOTE(review): relies on Didatic_test.intercepted_fn and the private
        __print_exception/__print_result helpers, and on instance
        attributes (args, kwargs, expected_output, expected_prints,
        output_is_correct, ...) presumably initialized in __init__ — none
        of which are visible in this extract; confirm against full source.
        """
        # Fresh copy of the simulated inputs so each run consumes its own list.
        self.keyboard_inputs_list = list(self.keyboard_inputs)
        self.interceptions = {}
        # Wrap fn so its inputs/prints/output are captured in interceptions.
        fn_temp = Didatic_test.intercepted_fn(
            self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
        )
        # Feed the simulated keyboard inputs to the wrapped function.
        new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
        try:
            new_fn(*self.args, **self.kwargs)
            fn_output = self.interceptions["output"]
            self.output_is_correct = fn_output == self.expected_output
            fn_prints = "".join(self.interceptions["prints"])
            self.print_is_correct = fn_prints == self.expected_prints
            self.test_done = True
        except Exception as excpt:
            # Any exception from fn aborts the test but is reported, not raised.
            self.test_failed = True
            self.test_exception = excpt
        finally:
            print(f"Case: {self.test_name}")
            if self.test_failed:
                self.__print_exception()
            else:
                self.__print_result()
            print("---------------------------------------------------")
        return {
            "output_is_correct": self.output_is_correct,
            "print_is_correct": self.print_is_correct,
            "test_failed": self.test_failed,
            "test_done": self.test_done,
        }
    def just_run(self):
        """
        just_run()

        Run the configured Didatic_test with simulated keyboard inputs
        without checking the output or prints, printing only the case name
        (and the exception report if fn raises). Returns None.

        NOTE(review): the original docstring claimed a result dict was
        returned, but the body has no return statement — documented here
        as observed.
        """
        # Fresh copy of the simulated inputs so each run consumes its own list.
        self.keyboard_inputs_list = list(self.keyboard_inputs)
        self.interceptions = {}
        # Wrap fn so its inputs/prints/output are captured in interceptions.
        fn_temp = Didatic_test.intercepted_fn(
            self.fn, self.interceptions, self.verbose, "[I]: ", "[P]: "
        )
        new_fn = Didatic_test.redefine(fn_temp, self.keyboard_inputs_list, False)
        print(f"Case: {self.test_name}")
        try:
            new_fn(*self.args, **self.kwargs)
        except Exception as excpt:
            # Exceptions are reported, not propagated.
            self.test_exception = excpt
            self.__print_exception()
| [
11748,
3170,
1040,
198,
11748,
25064,
628,
198,
4871,
7731,
1512,
62,
9288,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
317,
1398,
284,
17425,
290,
1057,
2829,
750,
1512,
5254,
628,
220,
220,
220,
7731,
1512,
62,
9288,
7,
141... | 2.252578 | 6,109 |
import dgl
import torch.nn.functional
import typing as _typing
from dgl.nn.pytorch.conv import GINConv
from .. import base_encoder, encoder_registry
from ... import _utils
class ApplyNodeFunc(torch.nn.Module):
    """Update the node feature hv with MLP, BN and ReLU."""
    # NOTE(review): only the class header and docstring are visible here;
    # the __init__/forward bodies appear stripped in this extract —
    # confirm against the full source before use.
class MLP(torch.nn.Module):
    """Multi-layer perceptron with a linear output layer."""

    def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
        """Build the MLP layers.

        Parameters
        ----------
        num_layers: int
            The number of linear layers
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        """
        super(MLP, self).__init__()
        self.linear_or_not = True  # a single layer means a plain linear model
        self.num_layers = num_layers
        self.output_dim = output_dim
        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        if num_layers == 1:
            # Degenerate case: the MLP collapses to one linear map.
            self.linear = torch.nn.Linear(input_dim, output_dim)
            return
        # Proper multi-layer model: num_layers linear maps, with one batch
        # normalization per hidden activation.
        self.linear_or_not = False
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.linears = torch.nn.ModuleList(
            torch.nn.Linear(d_in, d_out) for d_in, d_out in zip(dims, dims[1:])
        )
        self.batch_norms = torch.nn.ModuleList(
            torch.nn.BatchNorm1d(hidden_dim) for _ in range(num_layers - 1)
        )
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin')
@encoder_registry.EncoderUniversalRegistry.register_encoder('gin_encoder')
| [
11748,
288,
4743,
198,
11748,
28034,
13,
20471,
13,
45124,
198,
11748,
19720,
355,
4808,
774,
13886,
198,
6738,
288,
4743,
13,
20471,
13,
9078,
13165,
354,
13,
42946,
1330,
402,
1268,
3103,
85,
198,
6738,
11485,
1330,
2779,
62,
12685,
... | 2.276647 | 835 |
#
# Copyright 2004,2005 Dave Cridland <dave@cridland.net>
#
# This file forms part of the Infotrope Python Library.
#
# The Infotrope Python Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# The Infotrope Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infotrope Python Library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from infotrope.weak import weakref
import infotrope.acap
# Registry of dataset classes keyed by type identifier.
# NOTE(review): populated elsewhere — the registration code is not visible
# in this extract.
dataset_types = {}
# Cache of live dataset objects; weak values let unreferenced datasets be
# garbage-collected instead of being kept alive by the cache.
datasets = weakref.WeakValueDictionary()
| [
2,
198,
2,
15069,
5472,
11,
14315,
9935,
3864,
312,
1044,
1279,
67,
1015,
31,
66,
6058,
1044,
13,
3262,
29,
198,
2,
198,
2,
770,
2393,
5107,
636,
286,
262,
4806,
313,
305,
431,
11361,
10074,
13,
198,
2,
198,
2,
383,
4806,
313,
... | 3.460208 | 289 |
#%%
import os, sys, glob
from collections import defaultdict
from pygromos.gromos.gromosPP import GromosPP
from pygromos.gromos.gromosXX import GromosXX
from pygromos.files import imd
import restraintmaker
#CHANGE HERE
gromos_bin_path = "/home/bschroed/Documents/code/gromosPP/installed/bin"
restraintmaker_path = os.path.abspath(os.path.dirname(restraintmaker.__file__)+"/..")
control_dict = {
"gen_resn_lib": False,
"gen_single_tops": False, #Buggy!
}
#%%
gromPP = GromosPP(gromos_bin_path)
atb_dirs = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/ATB_molecules"
sets_dir = restraintmaker_path+"/devtools/otherScripts/b_ATB_solvationFreeEnergies/sets"
mstate_dir = sets_dir+"/multistate"
pairwise_dir = sets_dir+"/pairwise"
if(not os.path.exists(mstate_dir)):
os.mkdir(mstate_dir)
if (not os.path.exists(pairwise_dir)):
os.mkdir(pairwise_dir)
sys.path.append(atb_dirs+"/..")
import utils_test_set_ATB as util
#RESNlib
resn_lib_path = sets_dir+"/resn_lib.lib"
if(control_dict['gen_resn_lib']):
#translation lib
from pygromos.files.otherfiles import residue_library
long_short = {}
mol_names = util.translate
for ID in mol_names:
long_short.update({mol_names[ID]["short"]:[mol_names[ID]["orig_name"]]})
resn_lib = residue_library.residue_library()
resn_lib.RESIDUENAMELIB.pdb_top.update(long_short)
resn_lib.write(resn_lib_path)
#Generate TOPOS - THIS STEP IS BUGGY don't use, rather manual?
if (control_dict['gen_single_tops']):
for mol_dir in os.listdir(atb_dirs):
print(mol_dir)
mtb_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.mtb")[0]
ifp_path = glob.glob(atb_dirs+"/"+mol_dir+"/*.ifp")[0]
mol_name = "_"+os.path.basename(mtb_path).split("_")[1] if(not mtb_path.startswith("_")) else os.path.basename(mtb_path).split("_")[0]
top_path = atb_dirs+"/"+mol_dir+"/"+mol_name+".top"
gromPP.make_top(out_top_path=top_path, in_sequence=mol_name, in_solvent="H2O",
in_building_block_lib_path=mtb_path, in_parameter_lib_path=ifp_path, use_argfile=False)
#Systems
##get all_single file_tops:
all_tops = glob.glob(atb_dirs+"/*/*top")
state_all_tops={os.path.basename(value).split(".")[0]: value for value in all_tops}
all_mstate_sys = glob.glob(mstate_dir+"/*")
| [
2,
16626,
198,
11748,
28686,
11,
25064,
11,
15095,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
12972,
70,
398,
418,
13,
70,
398,
418,
13,
70,
398,
418,
10246,
1330,
402,
398,
418,
10246,
198,
6738,
12972,
70,
398,
418,
13,
70,
... | 2.22063 | 1,047 |
#!/usr/bin/env python
"""
Load football network in GML format and compute some network statistcs.
Shows how to download GML graph in a zipped file, unpack it, and load
into a NetworkX graph.
Requires Internet connection to download the URL
http://www-personal.umich.edu/~mejn/netdata/football.zip
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2007-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
url="http://www-personal.umich.edu/~mejn/netdata/football.zip"
try: # Python 3.x
import urllib.request as urllib
except ImportError: # Python 2.x
import urllib
import io
import zipfile
sock = urllib.urlopen(url) # open URL
s=io.BytesIO(sock.read()) # read into BytesIO "file"
sock.close()
zf = zipfile.ZipFile(s) # zipfile object
txt=zf.read('football.txt').decode() # read info file
gml=zf.read('football.gml').decode() # read gml data
# throw away bogus first line with # from mejn files
gml=gml.split('\n')[1:]
G=parse_gml(gml) # parse gml data
print(txt)
# print degree for each team - number of games
for n,d in G.degree():
print('%s %d' % (n, d))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
8912,
4346,
3127,
287,
402,
5805,
5794,
290,
24061,
617,
3127,
10118,
6359,
13,
198,
198,
2484,
1666,
703,
284,
4321,
402,
5805,
4823,
287,
257,
1976,
3949,
2393,
11,
555,
... | 2.612766 | 470 |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
CATEGORY_MAPPING = {
'1': 'Donation Site',
'2': 'Outlet',
'3': 'Retail Store',
'4': 'Job & Career Support',
'5': 'Headquarters'
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
198,
11748,
33918,
198,
6738,
7064,
13,
23814,
1330,
2269,
13210,
1559,
12727,
7449,
198,
198,
34,
6158,
38,
15513,
62,
44,
24805,
2751,
796,
1391,
198... | 2.352381 | 105 |
# coding=utf-8
from __future__ import unicode_literals
import sys
from IPython import start_ipython
import argparse
from apps.core.models.client import Client
import apps.conf # flake8: noqa
from tornado.options import options
import subprocess
managers = [ShellManage, DBShellManage, # flake8: noqa
RedisManage,
]
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
11748,
25064,
198,
6738,
6101,
7535,
1330,
923,
62,
541,
7535,
198,
11748,
1822,
29572,
198,
6738,
6725,
13,
7295,
13,
27530,
13,
16366... | 2.798387 | 124 |
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm.StatePush import FunctionCall
from direct.showbase.PythonUtil import formatTimeExact, normalDistrib
from direct.task import Task
| [
6738,
1277,
13,
12942,
1662,
1958,
13,
13470,
3673,
1958,
22289,
1330,
1277,
3673,
1958,
198,
6738,
1277,
13,
69,
5796,
13,
9012,
49222,
1330,
15553,
14134,
198,
6738,
1277,
13,
12860,
8692,
13,
37906,
18274,
346,
1330,
5794,
7575,
3109... | 3.87037 | 54 |
import logging
from core.model.graph import Graph
from core.model.lines import LinePool, Line
from core.model.ptn import Link, Stop
from common.model.net import Net
from common.util.constants import LINE_SECTION_HEADER, LINE_ROUTE_ITEMS_SECTION_HEADER, \
LINE_ROUTE_ITEMS_DIRECTION_HEADER, LINE_ROUTE_ITEMS_LINE_NAME_HEADER, LINE_ROUTE_ITEMS_NODE_HEADER
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
4755,
13,
19849,
13,
34960,
1330,
29681,
198,
6738,
4755,
13,
19849,
13,
6615,
1330,
6910,
27201,
11,
6910,
198,
6738,
4755,
13,
19849,
13,
457,
77,
1330,
7502,
11,
13707,
198,
6738,
2219,
13,
19849,
13,
... | 2.756944 | 144 |
"""Implementation of the teleport command."""
from mcipc.rcon.client import Client
from mcipc.rcon.types import Anchor, Rotation, Vec3
__all__ = ['teleport']
def teleport(self: Client, *, destination: str = None, location: Vec3 = None,
targets: str = None, rotation: Rotation = None,
facing_location: Vec3 = None, facing_entity: str = None,
facing_anchor: Anchor = None) -> str:
"""Teleports the player."""
command = ['teleport']
if targets is not None:
command.append(targets)
if location is not None:
command.append(location)
if rotation is not None:
command.append(rotation)
elif facing_location is not None:
command += ['facing', facing_location]
elif facing_entity is not None:
command += ['facing', 'entity', facing_entity, facing_anchor]
elif destination is not None:
command.append(destination)
else:
raise TypeError('Must specify either destination or location.')
elif destination is not None:
command.append(destination)
elif location is not None:
command.append(location)
else:
raise TypeError('Must specify destination, location or targets.')
return self.run(*command)
| [
37811,
3546,
32851,
286,
262,
32713,
3141,
526,
15931,
198,
198,
6738,
36650,
541,
66,
13,
81,
1102,
13,
16366,
1330,
20985,
198,
6738,
36650,
541,
66,
13,
81,
1102,
13,
19199,
1330,
29253,
273,
11,
371,
14221,
11,
38692,
18,
628,
1... | 2.493458 | 535 |
import re
from typing import Dict, Tuple, NewType
FUNCTION_REGEX = r'^def [\w_]*'
BEGIN_INSERT_REGEX = r'# begin insert '
END_INSERT_REGEX = r'# end insert'
SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]])
def get_spec(file_name: str) -> SpecObject:
"""
Takes in the file name of a spec.md file, opens it and returns the following objects:
functions = {function_name: function_code}
constants= {constant_name: constant_code}
ssz_objects= {object_name: object}
inserts= {insert_tag: code to be inserted}
Note: This function makes heavy use of the inherent ordering of dicts,
if this is not supported by your python version, it will not work.
"""
pulling_from = None # line number of start of latest object
current_name = None # most recent section title
insert_name = None # stores the label of the current insert object
functions = {}
constants = {}
ssz_objects = {}
inserts = {}
function_matcher = re.compile(FUNCTION_REGEX)
inserts_matcher = re.compile(BEGIN_INSERT_REGEX)
is_ssz = False
custom_types = {}
for linenum, line in enumerate(open(file_name).readlines()):
line = line.rstrip()
if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
current_name = line[line[:-1].rfind('`') + 1: -1]
if line[:9] == '```python':
assert pulling_from is None
pulling_from = linenum + 1
elif line[:3] == '```':
pulling_from = None
elif inserts_matcher.match(line) is not None:
# Find @insert names
insert_name = re.search(r'@[\w]*', line).group(0)
elif insert_name is not None:
# In insert mode, either the next line is more code, or the end of the insert
if re.match(END_INSERT_REGEX, line) is not None:
insert_name = None
else:
inserts[insert_name] = inserts.get(insert_name, '') + line + '\n'
else:
# Handle function definitions & ssz_objects
if pulling_from is not None:
# SSZ Object
if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
name = line[6:-12]
# Check consistency with markdown header
assert name == current_name
is_ssz = True
# function definition
elif function_matcher.match(line) is not None:
current_name = function_matcher.match(line).group(0)
is_ssz = False
if is_ssz:
ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
else:
functions[current_name] = functions.get(current_name, '') + line + '\n'
# Handle constant and custom types table entries
elif pulling_from is None and len(line) > 0 and line[0] == '|':
row = line[1:].split('|')
if len(row) >= 2:
for i in range(2):
row[i] = row[i].strip().strip('`')
if '`' in row[i]:
row[i] = row[i][:row[i].find('`')]
if row[1].startswith('uint') or row[1].startswith('Bytes'):
custom_types[row[0]] = row[1]
else:
eligible = True
if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
eligible = False
for c in row[0]:
if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
eligible = False
if eligible:
constants[row[0]] = row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')
return functions, custom_types, constants, ssz_objects, inserts
| [
11748,
302,
198,
6738,
19720,
1330,
360,
713,
11,
309,
29291,
11,
968,
6030,
628,
198,
42296,
4177,
2849,
62,
31553,
6369,
796,
374,
6,
61,
4299,
685,
59,
86,
62,
60,
9,
6,
198,
33,
43312,
62,
20913,
17395,
62,
31553,
6369,
796,
... | 2.010934 | 2,012 |
# proxy module
from __future__ import absolute_import
from envisage.ui.action.action_set import *
| [
2,
15741,
8265,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
45633,
496,
13,
9019,
13,
2673,
13,
2673,
62,
2617,
1330,
1635,
198
] | 3.62963 | 27 |
import warnings
from typing import Optional, Union, Callable, List
from typeguard import typechecked
import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike
from tensorflow_addons.optimizers.utils import is_variable_matched_by_regexes
@tf.keras.utils.register_keras_serializable(package="Addons")
class LAMB(tf.keras.optimizers.Optimizer):
"""Optimizer that implements the Layer-wise Adaptive Moments (LAMB).
See paper [Large Batch Optimization for Deep Learning: Training BERT
in 76 minutes](https://arxiv.org/abs/1904.00962).
"""
@typechecked
def _do_use_weight_decay(self, variable):
"""Whether to use L2 weight decay for `param_name`."""
return not is_variable_matched_by_regexes(
variable, self.exclude_from_weight_decay
)
def _do_layer_adaptation(self, variable):
"""Whether to do layer-wise learning rate adaptation for
`param_name`."""
return not is_variable_matched_by_regexes(
variable, self.exclude_from_layer_adaptation
) | [
11748,
14601,
198,
198,
6738,
19720,
1330,
32233,
11,
4479,
11,
4889,
540,
11,
7343,
198,
6738,
2099,
14864,
1330,
2099,
26752,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
62,
39996,
13,
26791,
13,
1919... | 2.718987 | 395 |
for i in range(1,26):
print('{0:5d}{1:5d}{2:6d}{3:6d}{4:6d}{5:6d}{6:6d}{7:6d}'.format(i,i*i,i+25,(i+25)*(i+25),i+50,(i+50)*(i+50),i+75,(i+75)*(i+75)))
# the characters in the curly brackets {} are placeholders for the data.
# the character before the colon indicates which of the data pieces should go where.
# the character after contains formatting information.
# in this case the number is the width of the filed and the d means decimal integer
# there are several other ways to achieve the same or similar result | [
1640,
1312,
287,
2837,
7,
16,
11,
2075,
2599,
198,
220,
220,
220,
3601,
10786,
90,
15,
25,
20,
67,
18477,
16,
25,
20,
67,
18477,
17,
25,
21,
67,
18477,
18,
25,
21,
67,
18477,
19,
25,
21,
67,
18477,
20,
25,
21,
67,
18477,
21,... | 2.846995 | 183 |
import numpy as np
from multiprocessing import Pool
import glob
import os
import pandas as pd
import tensorflow as tf
# MAP
# restrict on @K is in ap_at_n
# data load generator
| [
11748,
299,
32152,
355,
45941,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
2,
34645,
198,
2,
4239,
319,
... | 3.192982 | 57 |
"""Python Script Template."""
from .model_based import MBMPOAgent, MPCAgent
| [
37811,
37906,
12327,
37350,
526,
15931,
198,
6738,
764,
19849,
62,
3106,
1330,
10771,
7378,
23621,
6783,
11,
4904,
8141,
6783,
198
] | 3.454545 | 22 |
import argparse
import os
import re
from pathlib import Path
import numpy as np
import pandas as pd
from cascade.dismod.constants import DensityEnum, IntegrandEnum
from cascade.dismod.db.wrapper import get_engine
from cascade.input_data.configuration.id_map import make_integrand_map
MEASURES_ACCEPTABLE_TO_ELMO = {
"prevalence",
"duration",
"yld",
"continuous",
"cfr",
"proportion",
"mtstandard",
"relrisk",
"incidence",
"tincidence",
"sincidence",
"remission",
"mtexcess",
"pmtexcess",
"mtwith",
"mtall",
"mtspecific",
"mtother",
}
MEASURE_ID_TO_CANONICAL_NAME = {
24: "acute_inc",
23: "acute_prev",
17: "cfr",
22: "chronic_prev",
19: "continuous",
2: "daly",
1: "death",
21: "diswght",
8: "duration",
45: "fertility",
28: "hale",
43: "haq_index",
6: "incidence",
26: "le",
37: "le_decomp",
30: "le_nsnh",
31: "le_nswh",
36: "lt_prevalence",
25: "mmr",
34: "mort_risk",
14: "mtall",
9: "mtexcess",
16: "mtother",
15: "mtspecific",
12: "mtstandard",
13: "mtwith",
38: "pini",
10: "pmtexcess",
27: "pod",
32: "pod_nsnh",
33: "pod_nswh",
44: "population",
5: "prevalence",
18: "proportion",
11: "relrisk",
7: "remission",
29: "sev",
41: "sincidence",
35: "st_prevalence",
20: "survival_rate",
39: "susceptible",
42: "tincidence",
40: "withc",
3: "yld",
4: "yll",
}
REQUIRED_COLUMNS = [
"bundle_id",
"seq",
"nid",
"underlying_nid",
"input_type",
"source_type",
"location_id",
"sex",
"year_start",
"year_end",
"age_start",
"age_end",
"measure",
"mean",
"lower",
"upper",
"standard_error",
"effective_sample_size",
"cases",
"sample_size",
"unit_type",
"unit_value_as_published",
"uncertainty_type",
"uncertainty_type_value",
"representative_name",
"urbanicity_type",
"recall_type",
"recall_type_value",
"sampling_type",
"group",
"specificity",
"group_review",
"is_outlier",
"design_effect",
]
DUMMY_VALUES = {
"nid": 119_796,
"source_type": "Unidentifiable",
"unit_type": "Person",
"unit_value_as_published": 1,
"representative_name": "Nationally representative only",
"urbanicity_type": "Mixed/both",
"recall_type": "Period: years",
"recall_type_value": 1,
"is_outlier": 0,
"response_rate": "",
}
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
44847,
13,
67,
1042,
375,
13,
9979,
1187,
1330,
... | 2.047847 | 1,254 |
# authors:
# connor archard
# geoff keating
#
# updated:
# 12-15-2016
#
# brief:
# main file for llana operation
if __name__ == "__main__":
print "Hello world" | [
2,
7035,
25,
198,
2,
220,
369,
13099,
3934,
446,
198,
2,
220,
4903,
2364,
885,
803,
198,
2,
198,
2,
6153,
25,
198,
2,
220,
1105,
12,
1314,
12,
5304,
198,
2,
198,
2,
4506,
25,
198,
2,
220,
1388,
2393,
329,
32660,
2271,
4905,
... | 2.522388 | 67 |
# coding: utf-8
from fal.task import Task
import numpy as np
from PIL import Image
from matplotlib.pyplot import imshow
from matplotlib.pyplot import hist
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Let'c create compression task
task = Task()
# We define in 'data flow' that we wish to transform first
task.with_action('compress')
task.with_output('data/transformed.cim') # this is a raw file after transformation
# hence atypical filetype, as most of commercial tools won't be able to read this file
# Let's see the input file ourselves
input_img = Image.open('data/image.bmp')
hist(input_img.histogram(), bins=40)
# This is the input image
plt.rcParams["figure.figsize"] = (20, 9)
imshow(np.asarray(input_img))
# We're telling our framework where to find the image
task.with_input('data/image.bmp')
# Let's process it!
task.run()
# Now we're recreating image from the result
task = Task()
task.with_action('extract')
task.with_input('data/transformed.cim')
task.with_output('data/recreated.bmp')
# Run the process
# Check inverseTransform(self, src) in fal.transforms for more details
task.run()
# And let's see the results
output_image = Image.open('data/recreated.bmp')
plt.rcParams["figure.figsize"] = (20, 9)
imshow(np.asarray(output_image))
hist(output_image.histogram(), bins=40)
# As you can see, image after transformation has visible loss of quality
# and it's color profile (histogram for pixel counts) has changed as well
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
24215,
13,
35943,
1330,
15941,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
545,
12860,
198,
6738,
2603,
294... | 2.884477 | 554 |
# coding: utf8
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import BasicBlock
from torch import nn
import math
import timm
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
import torch.nn.functional as F
__all__ = ['resnet18_2d', 'vit']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
}
def resnet18_2d(**kwargs):
"""
Construct a the ResNet-18 model with added dropout, FC and softmax layers.
:param kwargs:
:return:
"""
model = ResNetDesigner(BasicBlock, [2, 2, 2, 2], **kwargs)
try:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
except Exception as err:
print("Error is:", err)
# raise ConnectionError('The URL %s may not be functional anymore. Check if it still exists or '
# 'if it has been moved.' % model_urls['resnet18'])
for p in model.parameters():
p.requires_grad = False
# fine-tune the 4-th residual block
for p in model.layer4.parameters():
p.requires_grad = True
# fine-tune the last FC layer
for p in model.fc.parameters():
p.requires_grad = True
# add a fc layer on top of the transfer_learning model and a softmax classifier
model.add_module('drop_out', nn.Dropout(p=kwargs["dropout"]))
model.add_module('fc_out', nn.Linear(1000, 2))
return model
| [
2,
19617,
25,
3384,
69,
23,
198,
198,
11748,
28034,
13,
26791,
13,
19849,
62,
89,
2238,
355,
2746,
62,
89,
2238,
198,
6738,
28034,
10178,
13,
27530,
13,
411,
3262,
1330,
14392,
12235,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
106... | 2.539363 | 597 |
import csv
import glob
import json
import os
import re
import copy
from dateutil.parser import parse
from datetime import datetime, timezone
# `or '.'` because when you're in the same directory as this code
# `ValueError: no path specified` gets thrown by `relpath` with empty input
src_dir = os.path.relpath(os.path.dirname(__file__) or ".")
possible_tags_path = os.path.join(src_dir, "..", "docs/possible_tags.md")
md_dir = os.path.join(src_dir, "..", "reports")
out_dir = os.path.join(src_dir, "data_build")
combined_fpath = os.path.join(out_dir, "all-locations.md")
csv_fpath_v1 = os.path.join(out_dir, "all-locations.csv")
json_fpath_v1 = os.path.join(out_dir, "all-locations.json")
json_fpath_v2 = os.path.join(out_dir, "all-locations-v2.json")
readme_fpath = os.path.join(out_dir, "README.md")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
date_regex = re.compile(
r"(Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|"
r"Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|"
r"Dec(ember)?)\s+\d{1,2}"
)
url_regex = re.compile(
r"(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))" r"([\w\-\.,@?^=%&:/~\+#\!]*[\w\-\@?^=%&/~\+#\!])?"
)
# Regex is used to ensure that lat/long is both in a valid format has has 6-7 decimal places (or is an exact 90/180) to improve data quality on the backend
LAT_REGEX = re.compile(r"^\(?([-+]?(?:[1-8]?\d(?:\.\d{6,7})|90(?:\.0+)?)),")
LONG_REGEX = re.compile(r".*,\s*([-+]?(?:180(?:\.0+)?|(?:(?:1[0-7]\d)|(?:[1-9]?\d))(?:\.\d{6,7})))\)?$")
def find_md_link_or_url(text):
"""
find_md_link_or_url('ab[cd](ef)xy') returns:
('abcdxy', 'ef')
All the text goes into the text, and the URL as well.
"""
start = (0,)
open_sq = (1,)
closed_sq = (2,)
open_curve = (3,)
closed_curve = (4,)
state = start
text_content = ""
link_url = ""
for ch in text:
if state == start:
if ch == "[":
state = open_sq
else:
text_content += ch
elif state == open_sq:
if ch == "]":
state = closed_sq
else:
text_content += ch
elif state == closed_sq:
if ch == "(":
state = open_curve
else:
text_content += ch
elif state == open_curve:
if ch == ")":
state = closed_curve
else:
link_url += ch
elif state == closed_curve:
text_content += ch
if len(link_url) == 0:
# no markdown link found, consider it all one url
link_url = text_content
text_content = ""
return text_content.strip(), link_url.strip()
updated_at = datetime.now(timezone.utc).isoformat()
md_header = f"""
GENERATED FILE, PLEASE MAKE EDITS ON MASTER AT https://github.com/2020PB/police-brutality/
UPDATED AT: {updated_at}
"""
md_out_format = """
# {location}
{text}
"""
readme_text = """
# /r/2020PoliceBrutality/ dataset
This repository exists to accumulate and contextualize evidence of police brutality during the 2020 George Floyd protests.
Our goal in doing this is to assist journalists, politicians, prosecutors, activists and concerned individuals who can use the evidence accumulated here for political campaigns, news reporting, public education and prosecution of criminal police officers.
* This branch is just the files generated by parsing the markdown for ease of building other sites.
* For example your webapp can query and display data from https://raw.githubusercontent.com/2020PB/police-brutality/data_build/all-locations.json
* For more info see https://github.com/2020PB/police-brutality
* These data files are generated by https://github.com/2020PB/police-brutality/tree/main/tools
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
# THESE FILES ARE GENERATED - DO NOT EDIT (including this readme)
* Please edit the `.md` files on the `main` branch at https://github.com/2020PB/police-brutality
* Also notice each data row has a `edit_at` link so you can find the source data for every entry.
"""
if __name__ == "__main__":
md_texts = read_all_md_files(md_dir)
data = process_md_texts(md_texts)
to_merged_md_file(md_texts, combined_fpath)
to_json_file_v2(data, json_fpath_v2)
v1_data = [v1_only(item) for item in data]
to_csv_file_v1(v1_data, csv_fpath_v1)
to_json_file_v1(v1_data, json_fpath_v1)
to_readme(readme_fpath)
print("Done!")
| [
11748,
269,
21370,
198,
11748,
15095,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
4866,
198,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
198,
2,
46... | 2.338834 | 1,939 |
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.flatpages import views as flatpage_views
from django.http import HttpResponse
from django.urls import include, path, re_path
from edd.branding.views import favicon as favicon_view
admin.autodiscover()
rest_urlpatterns = [
path("", include("edd.rest.urls", namespace="rest")),
path("auth/", include("rest_framework.urls", namespace="rest_framework")),
]
urlpatterns = [
# make sure to match the path to favicon *exactly*
re_path(r"favicon\.ico$", favicon_view, name="favicon"),
# simplest possible view for healthcheck
path("health/", lambda request: HttpResponse(), name="healthcheck"),
path("admin/", admin.site.urls),
path("", include("main.urls", namespace="main")),
path("export/", include("edd.export.urls", namespace="export")),
# allauth does not support namespacing
path("accounts/", include("allauth.urls")),
path("utilities/", include("tools.urls", namespace="tools")),
path("profile/", include("edd.profile.urls", namespace="profile")),
path("", include("edd.campaign.urls", namespace="campaign")),
path("rest/", include(rest_urlpatterns)),
# flatpages.urls does not include app_name; cannot include it with namespace
# path('pages/', include('django.contrib.flatpages.urls', namespace='flatpage'))
path("pages/<path:url>", flatpage_views.flatpage, name="flatpage"),
]
if getattr(settings, "EDD_ENABLE_GRAPHQL", False):
from graphene_django.views import GraphQLView
urlpatterns += [
path(
"explore/",
login_required(GraphQLView.as_view(graphiql=True)),
name="graphiql",
),
path("graphql/", login_required(GraphQLView.as_view()), name="graphql",),
]
if getattr(settings, "DEBUG", False):
import debug_toolbar
urlpatterns += [path("__debug__/", include(debug_toolbar.urls, namespace="djdt"))]
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
3642,
822,
13,... | 2.791377 | 719 |
import os
from enum import Enum
from typing import Optional
__all__ = [
"ColorDepth",
]
class ColorDepth(str, Enum):
"""
Possible color depth values for the output.
"""
value: str
#: One color only.
DEPTH_1_BIT = "DEPTH_1_BIT"
#: ANSI Colors.
DEPTH_4_BIT = "DEPTH_4_BIT"
#: The default.
DEPTH_8_BIT = "DEPTH_8_BIT"
#: 24 bit True color.
DEPTH_24_BIT = "DEPTH_24_BIT"
# Aliases.
MONOCHROME = DEPTH_1_BIT
ANSI_COLORS_ONLY = DEPTH_4_BIT
DEFAULT = DEPTH_8_BIT
TRUE_COLOR = DEPTH_24_BIT
@classmethod
def from_env(cls) -> Optional["ColorDepth"]:
"""
Return the color depth if the $PROMPT_TOOLKIT_COLOR_DEPTH environment
variable has been set.
This is a way to enforce a certain color depth in all prompt_toolkit
applications.
"""
# Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable.
all_values = [i.value for i in ColorDepth]
if os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH") in all_values:
return cls(os.environ["PROMPT_TOOLKIT_COLOR_DEPTH"])
return None
@classmethod
def default(cls) -> "ColorDepth":
"""
Return the default color depth for the default output.
"""
from .defaults import create_output
return create_output().get_default_color_depth()
| [
11748,
28686,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
32233,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
10258,
48791,
1600,
198,
60,
628,
198,
4871,
5315,
48791,
7,
2536,
11,
2039,
388,
2599,
198,
... | 2.270049 | 611 |
"""Utilities
============
.. autoclass:: SetOfVariables
:members:
:private-members:
.. autoclass:: SetOfSpectra
:members:
:private-members:
.. autoclass:: StackOfSetOfVariables
:members:
:private-members:
"""
import numpy as np
from spectratmo.phys_const import g, Gamma_dah
class SetOfVariables(object):
"""Set of variables on one pressure level."""
__radd__ = __add__
__rmul__ = __mul__
class SetOfSpectra(SetOfVariables):
"""Set of energy spectra."""
class StackOfSetOfVariables(object):
"""..."""
| [
37811,
18274,
2410,
198,
25609,
198,
198,
492,
1960,
420,
31172,
3712,
5345,
5189,
23907,
2977,
198,
220,
220,
1058,
30814,
25,
198,
220,
220,
1058,
19734,
12,
30814,
25,
198,
198,
492,
1960,
420,
31172,
3712,
5345,
5189,
49738,
430,
... | 2.707317 | 205 |
# -*- coding: utf-8 -*-
import os
import telebot
import config
import random
import sys
import time
import psycopg2
from telebot import types
# Config vars
token = os.environ['TELEGRAM_TOKEN']
DATABASE_URL=os.environ['DATABASE_URL']
connect = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = connect.cursor()
#some_api_token = os.environ['SOME_API_TOKEN']
# some_api = some_api_lib.connect(some_api_token)
# ...
bot = telebot.TeleBot(token)
mu = types.ReplyKeyboardMarkup(resize_keyboard=True)
mu.row('Еще!')
@bot.message_handler(commands=['start'])
@bot.message_handler(content_types=["text"])
@bot.callback_query_handler(func=lambda call: True)
if __name__ == '__main__':
bot.polling(none_stop=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
5735,
13645,
198,
11748,
4566,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
17331,
22163,
70,
17,
198,
6738,
5735,
13645,
13... | 2.450331 | 302 |
from .speech import Speech | [
6738,
764,
45862,
1330,
24709
] | 5.2 | 5 |
import os
import logging
from .insilico_filter import InSilicoFilter
from .csq_filter import CsqFilter
lof_csq = {'frameshift_variant', 'stop_gained', 'splice_acceptor_variant',
'splice_donor_variant'}
class VepFilter(CsqFilter):
'''An object that filters VCF records based on annotated VEP data.'''
def __init__(self, vcf, csq=[], impact=[], canonical=False, biotypes=[],
in_silico=[], filter_unpredicted=False,
keep_any_damaging=False, splice_in_silico=[],
loftee=False, splice_filter_unpredicted=False,
splice_keep_any_damaging=False, retain_labels=[],
filter_flagged_features=False, freq=None, min_freq=None,
afs=[], gene_filter=None, blacklist=None,filter_known=False,
filter_novel=False, pathogenic=False, no_conflicted=False,
g2p=None, check_g2p_consequence=False,
logging_level=logging.WARNING):
'''
Args:
vcf: input VcfReader object
csq: list of consequence types to keep. If 'default'
appears anywhere in this list then the default
consequence set (as indicated in
data/vep_classes.tsv) will be used. Similarly if
'all' appears anywhere in this list no filtering
on consequence type will occur.
impact: list of variant impacts to retain.
canonical:
Filter consequences on non-canonical transcirpts.
biotypes:
Filter consequences for features not of the
given biotypes. If not provided the default set
of biotypes (as indicated in data/biotypes.tsv)
will be used for biotype filtering.
in_silico:
List of programs and optionally score criteria
for filtering of missense variants using the
InSilicoFilter class in vase.insilico_filter.
filter_unpredicted:
If using 'in_silico' option, filter missense
variants that have missing values for any of the
specified filtering programs.
keep_any_damaging:
If using 'in_silico' option, retain variants if
any of the criteria are met for any of the
specified filtering programs.
loftee: Only retain LoF (i.e. high impact variants)
variants if the LoF annotation from loftee is
'HC' (high confidence).
splice_in_silico:
Similar to 'in_silico' but the prediction
programs are checked for splice_donor_variants,
splice_acceptor_variants and
splice_region_variants rather than missense.
Currently only dbscSNV (rf_score and ada_score),
MaxEntScan and SpliceDistance
(https://github.com/david-a-parry/SpliceDistance)
annotations are supported. This option can be
used to, for example, retain
splice region variants that are have
an 'ada_score' > 0.6 by specifying
'ada_score=0.6' with this option.
splice_filter_unpredicted:
If using 'splice_in_silico' option, filter
splice region variants that have missing values
for any of the specified filtering programs.
splice_keep_any_damaging:
If using 'splice_in_silico' option, retain
variants if any of the criteria are met for any
of the specified filtering programs.
retain_labels:
Do not filter on consequence type if the
following values are present for a label. Labels
and values must be separated by '=' sign. For
example, to retain any consequence which has
a VEP annotation named 'FOO' with value 'BAR'
use 'FOO=BAR'.
filter_flagged_features:
Filter consequences on features which are
flagged by VEP.
freq: Filter consequences if the annotated allele
frequency is equal to or greater than this value.
By default all allele frequency annotations as
listed in "data/vep_maf.tsv" are used, but this
can be altered using the 'afs' option.
min_freq:
As for 'freq' argument but filters consequences
if the allele frequency annotation is less than
this value.
filter_known:
Filter consequences if allele frequency is given
for any of the available VEP frequency
annotations.
filter_novel:
Filter consequences if no allele frequency is
given for any of the available VEP frequency
annotations.
afs: Only use the listed allele frequency annotations
for freq/min_freq/novelty filtering.
gene_filter:
VarByRegion object from vase.var_by_region. If
provided, consequences will be filtered if they
do not alter the features specified in the
VarByRegion object for the current region.
blacklist:
File containing a list of Feature IDs to ignore.
pathogenic:
If True, retain consequences regardless of type
if annotated as 'pathogenic' or 'likely
pathogenic' in 'CLIN_SIG' or 'clinvar_clnsig'
VEP fields. Frequency, biotype and canonical
filtering will still be applied.
no_conflicted:
If 'pathogenic' option is True, only retain
'likely pathogenic' and 'pathogenic'
consequences if there are no conflicting
'benign' or 'likely benign' assertions.
g2p:
G2P object from vase.g2p for filtering on
presence and/or requirements from a G2P file.
check_g2p_consequence:
If a G2P object is provided above, require that
that the observed consequence matches the
'mutation consequence' in the G2P file.
logging_level:
Logging level to use. Default=logging.WARNING.
'''
self.logger = self._get_logger(logging_level)
self.canonical = canonical
self.loftee = loftee
self.filter_flagged = filter_flagged_features
self.freq = freq
self.min_freq = min_freq
self.afs = afs
self.filter_known = filter_known
self.filter_novel = filter_novel
self._check_freq_fields(vcf)
self.in_silico = False
self.splice_in_silico = False
if in_silico:
in_silico = set(in_silico)
self.in_silico = InSilicoFilter(in_silico, filter_unpredicted,
keep_any_damaging)
if splice_in_silico:
splice_in_silico = set(splice_in_silico)
self.splice_in_silico = InSilicoFilter(
programs=splice_in_silico,
filter_unpredicted=splice_filter_unpredicted,
keep_if_any_damaging=splice_keep_any_damaging,
pred_file=os.path.join(os.path.dirname(__file__),
"data",
"vep_splice_insilico_pred.tsv"))
self.pathogenic = pathogenic
self.no_conflicted = no_conflicted
if pathogenic:
self.path_fields = self._get_path_fields(vcf)
super().__init__(vcf=vcf, csq_attribute='CSQ', csq=csq, impact=impact,
biotypes=biotypes, retain_labels=retain_labels,
filter_flagged_features=filter_flagged_features,
gene_filter=gene_filter, blacklist=blacklist, g2p=g2p,
check_g2p_consequence=check_g2p_consequence)
def filter_csq(self, csq):
'''
Returns two boolean values. The first indicates whether the consequence
annotation should be filtered. The second indicates whether the ALT
allele should be filtered irrespective of the given or any other
consequence annotation.
'''
if self.canonical:
try:
if csq['CANONICAL'] != 'YES':
return True, False
except KeyError:
pass
if self.filter_flagged:
try:
if csq['FLAGS']:
return True, False
except KeyError:
pass
if (self.biotypes is not None and csq['BIOTYPE'].lower() not in
self.biotypes):
return True, False
if self.gene_filter:
if not self.gene_filter.target_in_csq(csq):
return True, False
if self.g2p:
if csq['SYMBOL'] not in self.g2p.g2p:
return True, False
if self.blacklist and csq['Feature'] in self.blacklist:
return True, False
if (self.freq or self.min_freq or self.filter_known or
self.filter_novel):
known = False
for af in self.freq_fields:
if csq[af] == '' or csq[af] == '.':
continue
try:
c_af = float(csq[af])
except ValueError:
try:
c_af = max(float(x) for x in csq[af].split('&') if x
!= '.')
except ValueError:
continue
known = True
if self.filter_known:
return True, True
if self.freq:
if c_af >= self.freq:
return True, True
if self.min_freq:
if c_af < self.min_freq:
return True, True
if self.filter_novel and not known:
return True, True
if (self.csq is None and self.impact is None and
not self.check_g2p_consequence):
# if only using biotypes/MAF for filtering
return False, False
if self.pathogenic and self._has_pathogenic_annotation(csq):
return False, False
if self._retain_label_matched(csq):
return False, False
if self.check_g2p_consequence and self.g2p:
filt_csq = self.g2p.consequences_from_gene(csq['SYMBOL'])
else:
filt_csq = self.csq
for s_csq in [x.lower() for x in csq['Consequence'].split('&')]:
matches_csq = False
matches_impact = False
if filt_csq is not None and s_csq in filt_csq:
matches_csq = True
if self.impact is not None and csq['IMPACT'] in self.impact:
matches_impact = True
if matches_csq or matches_impact:
if self.in_silico and s_csq == 'missense_variant':
do_filter = self.in_silico.filter(csq)
if not do_filter:
return False, False
elif self.splice_in_silico and s_csq.startswith("splice"):
do_filter = self.splice_in_silico.filter(csq)
if not do_filter:
return False, False
elif self.loftee and (s_csq in lof_csq or matches_impact
and csq['IMPACT'] == 'HIGH'):
if csq['LoF'] == 'HC':
return False, False
else:
return False, False
return True, False
def get_required_header_fields(self):
'''
Check which CSQ/ANN annotation fields are required given arguments
passed to __init__
'''
required = ['Consequence', 'BIOTYPE']
if self.impact:
required.append('IMPACT')
if self.canonical:
required.append('CANONICAL')
if self.loftee:
required.append('LoF')
if self.filter_flagged:
required.append('FLAGS')
return required
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
764,
1040,
346,
3713,
62,
24455,
1330,
554,
15086,
3713,
22417,
198,
6738,
764,
6359,
80,
62,
24455,
1330,
327,
31166,
22417,
198,
198,
75,
1659,
62,
6359,
80,
796,
1391,
6,
19298,
5069,
213... | 1.853509 | 7,195 |
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
| [
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2025,
82,
856,
49493,
7,
198,
220,
220,
220,
45302,
76,
2305,
23... | 2.689655 | 58 |
"""
Isotropic Gaussian blobs
"""
from collections import OrderedDict
import logging
import numpy as np
from abc import ABCMeta, abstractmethod
from ..utils.errors import ProgressiveError, ProgressiveStopIteration
from progressivis import ProgressiveError, SlotDescriptor
from ..table.module import TableModule
from ..table.table import Table
from ..table.constant import Constant
from ..utils.psdict import PsDict
from ..core.utils import integer_types
from sklearn.datasets import make_blobs
from sklearn.utils import shuffle as multi_shuffle
logger = logging.getLogger(__name__)
RESERVOIR_SIZE = 10000
class BlobsTableABC(TableModule):
"""Isotropic Gaussian blobs => table
The purpose of the "reservoir" approach is to ensure the reproducibility of the results
"""
outputs = [SlotDescriptor('labels', type=Table, required=False)]
kw_fun = None
@abstractmethod
| [
37811,
198,
3792,
46084,
12822,
31562,
698,
8158,
198,
37811,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
198,
6... | 3.397727 | 264 |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#
import cv2
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
42... | 3.637931 | 58 |
#!/usr/bin/env python
from .statement import Statement
from ..expr.nameexpr import NameExpr
from ..expr.literalexpr import LiteralExpr
from ..type.type import Type
from . import _import
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
764,
26090,
1330,
21983,
198,
6738,
11485,
31937,
13,
3672,
31937,
1330,
6530,
3109,
1050,
198,
6738,
11485,
31937,
13,
17201,
1000,
87,
1050,
1330,
25659,
1691,
3109,
1050,
1... | 3.2 | 60 |
#!/usr/bin/env python
import argparse
import os
import cv2
import math
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import KFold
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
from sklearn.metrics import roc_curve, auc
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# the ijbc dataset is from insightface
# using the cos similarity
# no flip test
# basic args
parser = argparse.ArgumentParser(description='Evaluation')
parser.add_argument('--feat_list', type=str,
help='The cache folder for validation report')
parser.add_argument('--base_dir', default='data/IJBC/')
parser.add_argument('--type', default='c')
parser.add_argument('--embedding_size', default=512, type=int)
parser.add_argument('--magface_qlt', default=0, type=int)
if __name__ == '__main__':
main()
"""
score_save_path = './IJBC/result'
files = glob.glob(score_save_path + '/MS1MV2*.npy')
methods = []
scores = []
for file in files:
methods.append(Path(file).stem)
scores.append(np.load(file))
methods = np.array(methods)
scores = dict(zip(methods,scores))
colours = dict(zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
#x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)]
x_labels = [10**-6, 10**-5, 10**-4,10**-3, 10**-2, 10**-1]
tpr_fpr_table = PrettyTable(['Methods'] + map(str, x_labels))
fig = plt.figure()
for method in methods:
fpr, tpr, _ = roc_curve(label, scores[method])
roc_auc = auc(fpr, tpr)
fpr = np.flipud(fpr)
tpr = np.flipud(tpr) # select largest tpr at same fpr
plt.plot(fpr, tpr, color=colours[method], lw=1, label=('[%s (AUC = %0.4f %%)]' % (method.split('-')[-1], roc_auc*100)))
tpr_fpr_row = []
tpr_fpr_row.append(method)
for fpr_iter in np.arange(len(x_labels)):
_, min_index = min(list(zip(abs(fpr-x_labels[fpr_iter]), range(len(fpr)))))
tpr_fpr_row.append('%.4f' % tpr[min_index])
tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10**-6, 0.1])
plt.ylim([0.3, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on IJB-C')
plt.legend(loc="lower right")
plt.show()
#fig.savefig('IJB-B.pdf')
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
... | 2.343269 | 1,040 |
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Execute Fashion-MNIST baseline locally in Docker."""
import argparse
import concurrent.futures
import configparser
from logging import INFO
from os import path
from time import strftime
from typing import List, Optional
import flwr_experimental.baseline.tf_cifar.settings as tf_cifar_settings
import flwr_experimental.baseline.tf_fashion_mnist.settings as tf_fashion_mnist_settings
import flwr_experimental.baseline.tf_hotkey.settings as tf_hotkey_settings
from flwr.logger import configure, log
from flwr_experimental.baseline import command
from flwr_experimental.ops.cluster import Cluster, Instance
from flwr_experimental.ops.compute.adapter import Adapter
from flwr_experimental.ops.compute.docker_adapter import DockerAdapter
from flwr_experimental.ops.compute.ec2_adapter import EC2Adapter
OPS_INI_PATH = path.normpath(
f"{path.dirname(path.realpath(__file__))}/../../../.flower_ops"
)
# Read config file and extract all values which are needed further down.
CONFIG = configparser.ConfigParser()
CONFIG.read(OPS_INI_PATH)
WHEEL_FILENAME = CONFIG.get("paths", "wheel_filename")
WHEEL_LOCAL_PATH = path.expanduser(CONFIG.get("paths", "wheel_dir")) + WHEEL_FILENAME
DOCKER_PRIVATE_KEY = path.realpath(path.dirname(__file__) + "/../../../docker/ssh_key")
def now() -> str:
"""Return current date and time as string."""
return strftime("%Y%m%dT%H%M%S")
def configure_cluster(
adapter: str, instances: List[Instance], baseline: str, setting: str
) -> Cluster:
"""Return configured compute cluster."""
adapter_instance: Optional[Adapter] = None
private_key: Optional[str] = None
if adapter == "docker":
adapter_instance = DockerAdapter()
user = "root"
private_key = DOCKER_PRIVATE_KEY
elif adapter == "ec2":
adapter_instance = EC2Adapter(
image_id=CONFIG.get("aws", "image_id"),
key_name=path.expanduser(CONFIG.get("aws", "key_name")),
subnet_id=CONFIG.get("aws", "subnet_id"),
security_group_ids=CONFIG.get("aws", "security_group_ids").split(","),
tags=[
("Purpose", "flwr_experimental.baseline"),
("Baseline Name", baseline),
("Baseline Setting", setting),
],
)
user = "ubuntu"
private_key = path.expanduser(CONFIG.get("ssh", "private_key"))
else:
raise Exception(f"Adapter of type {adapter} does not exist.")
cluster = Cluster(
adapter=adapter_instance,
ssh_credentials=(user, private_key),
instances=instances,
timeout=60,
)
return cluster
# pylint: disable=too-many-arguments, too-many-locals
def run(baseline: str, setting: str, adapter: str) -> None:
"""Run baseline."""
print(f"Starting baseline with {setting} settings.")
wheel_remote_path = (
f"/root/{WHEEL_FILENAME}"
if adapter == "docker"
else f"/home/ubuntu/{WHEEL_FILENAME}"
)
if baseline == "tf_cifar":
settings = tf_cifar_settings.get_setting(setting)
elif baseline == "tf_fashion_mnist":
settings = tf_fashion_mnist_settings.get_setting(setting)
elif baseline == "tf_hotkey":
settings = tf_hotkey_settings.get_setting(setting)
else:
raise Exception("Setting not found.")
# Get instances and add a logserver to the list
instances = settings.instances
instances.append(
Instance(name="logserver", group="logserver", num_cpu=2, num_ram=2)
)
# Configure cluster
log(INFO, "(1/9) Configure cluster.")
cluster = configure_cluster(adapter, instances, baseline, setting)
# Start the cluster; this takes some time
log(INFO, "(2/9) Start cluster.")
cluster.start()
# Upload wheel to all instances
log(INFO, "(3/9) Upload wheel to all instances.")
cluster.upload_all(WHEEL_LOCAL_PATH, wheel_remote_path)
# Install the wheel on all instances
log(INFO, "(4/9) Install wheel on all instances.")
cluster.exec_all(command.install_wheel(wheel_remote_path))
# Download datasets in server and clients
log(INFO, "(5/9) Download dataset on server and clients.")
cluster.exec_all(
command.download_dataset(baseline=baseline), groups=["server", "clients"]
)
# Start logserver
log(INFO, "(6/9) Start logserver.")
logserver = cluster.get_instance("logserver")
cluster.exec(
logserver.name,
command.start_logserver(
logserver_s3_bucket=CONFIG.get("aws", "logserver_s3_bucket"),
logserver_s3_key=f"{baseline}_{setting}_{now()}.log",
),
)
# Start Flower server on Flower server instances
log(INFO, "(7/9) Start server.")
cluster.exec(
"server",
command.start_server(
log_host=f"{logserver.private_ip}:8081", baseline=baseline, setting=setting,
),
)
# Start Flower clients
log(INFO, "(8/9) Start clients.")
server = cluster.get_instance("server")
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# Start the load operations and mark each future with its URL
concurrent.futures.wait(
[
executor.submit(
cluster.exec,
client_setting.instance_name,
command.start_client(
log_host=f"{logserver.private_ip}:8081",
server_address=f"{server.private_ip}:8080",
baseline=baseline,
setting=setting,
cid=client_setting.cid,
),
)
for client_setting in settings.clients
]
)
# Shutdown server and client instance after 10min if not at least one Flower
# process is running it
log(INFO, "(9/9) Start shutdown watcher script.")
cluster.exec_all(command.watch_and_shutdown("flower", adapter))
# Give user info how to tail logfile
private_key = (
DOCKER_PRIVATE_KEY
if adapter == "docker"
else path.expanduser(CONFIG.get("ssh", "private_key"))
)
log(
INFO,
"If you would like to tail the central logfile run:\n\n\t%s\n",
command.tail_logfile(adapter, private_key, logserver),
)
def main() -> None:
"""Start Flower baseline."""
parser = argparse.ArgumentParser(description="Flower")
parser.add_argument(
"--baseline",
type=str,
required=True,
choices=["tf_cifar", "tf_fashion_mnist", "tf_hotkey"],
help="Name of baseline name to run.",
)
parser.add_argument(
"--setting",
type=str,
required=True,
choices=list(
set(
list(tf_cifar_settings.SETTINGS.keys())
+ list(tf_fashion_mnist_settings.SETTINGS.keys())
+ list(tf_hotkey_settings.SETTINGS.keys())
)
),
help="Name of setting to run.",
)
parser.add_argument(
"--adapter",
type=str,
required=True,
choices=["docker", "ec2"],
help="Set adapter to be used.",
)
args = parser.parse_args()
# Configure logger
configure(f"flower_{args.baseline}_{args.setting}")
run(baseline=args.baseline, setting=args.setting, adapter=args.adapter)
if __name__ == "__main__":
main()
| [
2,
15069,
12131,
1215,
499,
402,
2022,
39,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.462264 | 3,286 |
#
# Copyright 2009-2013 by The Regents of the University of California
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# you may obtain a copy of the License from
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Calculate the persons UCR Net ID.
# Get first and last name and number from user.
first_name = raw_input("Enter your first name: ")
last_name = raw_input("Enter your last name: ")
number = raw_input("Enter a number: ")
# Normalize the input.
first_name = first_name.lower()
last_name = last_name.lower()
int_number = int(number)
# Create UCR Net ID
net_id = "{0}{1}{2:03d}".format(first_name[0], last_name[0:4], int_number)
# Print the UCR Net ID
print "Your UCR Net ID is {0}.".format(net_id) | [
2,
198,
2,
15069,
3717,
12,
6390,
416,
383,
3310,
658,
286,
262,
2059,
286,
3442,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 3.28739 | 341 |
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
dire = '../../data/'
start = datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train3.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/action_train.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test3.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/action_test.csv', encoding='utf-8')
# """
############# 1.user feature #############
"""
# 1. 用户地点划分1 2 3 线城市
"""
# # orderFuture_train = province_123(userProfile_train, orderFuture_train)
# # orderFuture_test = province_123(userProfile_test, orderFuture_test)
############# 2.history order feature #############
"""
# 1.
"""
# 历史纪录中城市的精品占比
# orderFuture = pd.concat([orderFuture_train,orderFuture_test])
# orderHistory = pd.concat([orderHistory_train,orderHistory_test])
# dataset = history_type1_rate(orderFuture, orderHistory)
# orderFuture_train = dataset[dataset.orderType.notnull()]
# orderFuture_test = dataset[dataset.orderType.isnull()]
############# 3.action feature #############
"""
# 1. action中大于6出现的次数
# 2. 对应点击2-4的和值 与 5-9 的比值
# 3. 全部点击2-4的和值 与 5-9 的比值
# 4. 对应浏览记录 1-9 操作所用平均时间
# 5. 全部浏览记录 1-9 操作所用平均时间
# """
# action中大于6出现的次数
orderFuture_train = greater_6_c(orderFuture_train)
orderFuture_test = greater_6_c(orderFuture_test)
# 对应点击2-4的和值 与 5-9 的比值
orderFuture_train = rate_24_59_c(orderFuture_train)
orderFuture_test = rate_24_59_c(orderFuture_test)
# 全部点击2-4的和值 与 5-9 的比值
orderFuture_train = rate_24_59(orderFuture_train)
orderFuture_test = rate_24_59(orderFuture_test)
# 全部action 最后一次 的类型
orderFuture_train = latest_actionType(orderFuture_train, action_train)
orderFuture_test = latest_actionType(orderFuture_test, action_test)
# 全部 action 倒数第2-6次操作的类型
orderFuture_train = latest2_actionType(orderFuture_train, action_train)
orderFuture_test = latest2_actionType(orderFuture_test, action_test)
# 时间间隔
# 最后1 2 3 4 次操作的时间间隔
# 时间间隔的均值 最小值 最大值 方差
orderFuture_train = time_interval(orderFuture_train, action_train)
orderFuture_test = time_interval(orderFuture_test, action_test)
# action 最后4 5 6 次操作时间的方差 和 均值
orderFuture_train = var_actionTime(orderFuture_train, action_train)
orderFuture_test = var_actionTime(orderFuture_test, action_test)
# 对应浏览记录浏览平均时间(可以改成最近几天的)
orderFuture_train = sum_actionType_time(orderFuture_train, action_train)
orderFuture_test = sum_actionType_time(orderFuture_test, action_test)
# 对应浏览记录 1-9 操作所用平均时间
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action_c(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action_c(orderFuture_train, action_train, 9)
# 全部浏览记录 1-9 操作所用平均时间
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 1)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 2)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 3)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 4)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 5)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 6)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 7)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 8)
# orderFuture_test = avg_time_action(orderFuture_test, action_test, 9)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 1)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 2)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 3)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 4)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 5)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 6)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 7)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 8)
# orderFuture_train = avg_time_action(orderFuture_train, action_train, 9)
############# 4.time feature #############
"""
# 1. 季节特征
"""
orderFuture_train = season(orderFuture_train)
orderFuture_test = season(orderFuture_test)
# print(orderFuture_train)
# print(orderFuture_test)
print("开始提取:", start)
print("提取完成:", datetime.now())
orderFuture_train.to_csv(dire + 'train3.csv', index=False, encoding='utf-8')
orderFuture_test.to_csv(dire + 'test3.csv', index=False, encoding='utf-8') | [
2,
532,
9,
12,
21004,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628,
198,
67,
557,
796,
705,
40720,
40720,
7890,
14,
6,
198,
... | 2.354183 | 2,606 |
from abc import ABC, abstractmethod
from tqdm import tqdm, tqdm_notebook
from ..processors import Processor
class BaseAgent(ABC):
"""Abstract Agentclass
Parameters
----------
action_spec: dict
Have to define 'type' and 'shape'
state_spec: dict, optional
Have to define 'type' and 'shape'
is_debug: bool
If True, print out certain properties for debugging
Note
----
You need to define the followings:
_observe: observe and store
"""
@abstractmethod
@abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
11,
256,
80,
36020,
62,
11295,
2070,
198,
198,
6738,
11485,
14681,
669,
1330,
32893,
628,
198,
4871,
7308,
36772,
7,
24694,
2599,
198,
... | 2.720588 | 204 |
from datetime import datetime,timedelta
current:datetime = None | [
6738,
4818,
8079,
1330,
4818,
8079,
11,
16514,
276,
12514,
198,
198,
14421,
25,
19608,
8079,
796,
6045
] | 3.555556 | 18 |
# -*- coding: utf-8 -*-
"""
fysql.databases
~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Gasquez Florian
:license: MIT, see LICENSE for more details.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from warnings import filterwarnings
import mysql.connector as mysql_connector
from .exceptions import FysqlException
from .tables import Tables
from .static import Tables
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
197,
69,
893,
13976,
13,
19608,
18826,
198,
197,
15116,
8728,
4907,
93,
198,
197,
25,
22163,
4766,
25,
357,
66,
8,
1584,
416,
14345,
22281,
4432,
666,
198,... | 3.295082 | 122 |
"""Fixtures to testing Opencart login page"""
import os
import sys
import pytest
from selenium import webdriver as WD
from Selenium.Opencart_windows_operations.models.page_objects.page_objects import LoginPage, \
ProductPage, ProductsPage, ProductManager, DownloadPage, DownloadManager, CustomMenuDesignPage, CustomMenuDesigner
image = os.path.abspath('C:/Users/60064265/PycharmProjects/Homework/Selenium/Opencart_windows_operations/1.JPG')
def pytest_addoption(parser):
"""Setting base URL Openacart and parametrize command line options for select
browsers and set username or password """
parser.addoption("--address", action="store", default="http://192.168.56.103/opencart/",
help="Opencart web address")
parser.addoption("--address2", action="store", default="http://demo23.opencart.pro/",
help="Opencart web address")
parser.addoption("--browser", action="store", default="chrome", help="Browser name")
parser.addoption("--username", action="store", default="admin", help="User Name")
parser.addoption("--password", action="store", default="admin", help="User Password")
parser.addoption("--username2", action="store", default="demo", help="User Name")
parser.addoption("--password2", action="store", default="demo", help="User Password")
parser.addoption("--iwait", action="store", default="30000", help="Implicitly wait parameter")
parser.addoption("--pltimeout", action="store", default="1000", help="Page load timeout")
parser.addoption("--productname", action="store", default="New Product", help="Product Name")
parser.addoption("--keywords", action="store",
default="New Meta Tag Keyword",
help="Meta Tag Keyword")
parser.addoption("--modelname", action="store", default="New model", help="Model Name")
parser.addoption("--meta", action="store", default="New meta", help="Meta Tag Title")
parser.addoption("--dname", action="store", default="New File for Download", help="Download name")
parser.addoption("--filename", action="store", default="New File Name", help="File name")
parser.addoption("--maskname", action="store", default="New Mask", help="Mask Name")
@pytest.fixture(scope="session", autouse=True)
def driver(request):
"""Launching webdriver"""
browser_name = request.config.getoption("--browser")
print(browser_name)
if browser_name == 'firefox':
capabilities = WD.DesiredCapabilities.FIREFOX.copy()
capabilities['timeouts'] = {'implicit': 300000, 'pageLoad': 300000, 'script': 30000}
capabilities['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL',
'performance': 'ALL', 'server': 'ALL'}
capabilities['unexpectedAlertBehaviour'] = 'accept'
profile = WD.FirefoxProfile()
profile.set_preference('app.update.auto', False)
profile.set_preference('app.update.enabled', False)
profile.accept_untrusted_certs = True
wd = WD.Firefox(firefox_profile=profile, capabilities=capabilities)
wd.maximize_window()
elif browser_name == 'chrome':
capabilities = WD.DesiredCapabilities.CHROME.copy()
capabilities['acceptSslCerts'] = True
capabilities['acceptInsecureCerts'] = True
capabilities['unexpectedAlertBehaviour'] = 'dismiss'
wd = WD.Chrome(desired_capabilities=capabilities)
wd.fullscreen_window()
else:
print('Unsupported browser!')
sys.exit(1)
wd.implicitly_wait((request.config.getoption("--iwait")))
wd.set_page_load_timeout((request.config.getoption("--pltimeout")))
implicitly_wait = request.config.getoption("--iwait")
page_load_timeout = request.config.getoption("--pltimeout")
print(implicitly_wait)
print(page_load_timeout)
yield wd
wd.quit()
@pytest.fixture(scope="function")
def open_store_page(driver, request):
"""Get base URL and attend admin link"""
return driver.get("".join([request.config.getoption("--address")]))
@pytest.fixture(scope="function")
def open_opencart_admin_url(driver, request):
"""Get base URL and attend admin link"""
url = 'admin/'
return driver.get("".join([request.config.getoption("--address2"), url]))
@pytest.fixture(scope="function")
def login_form_operator(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return LoginPage(driver)
@pytest.fixture(scope="function")
def set_login_data(login_form_operator, request, driver):
"""Open admin login page and login in"""
login_form_operator.login(request.config.getoption("--username2"), request.config.getoption("--password2"))
@pytest.fixture(scope="function")
def products_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return ProductsPage(driver)
@pytest.fixture(scope="function")
def downloads_page_opening(driver, open_opencart_admin_url):
"""Use class from page objects module for managing elements on the page"""
return DownloadPage(driver)
@pytest.fixture()
def custom_menu_page_opening(driver, open_opencart_admin_url):
    """Page-object wrapper for the custom menu design page."""
    page = CustomMenuDesignPage(driver)
    return page
@pytest.fixture(scope="function")
def products_page_operator(driver, open_opencart_admin_url):
    """Page-object wrapper for a single product's admin page."""
    page = ProductPage(driver)
    return page
@pytest.fixture(scope="function")
def product_manager(driver, open_opencart_admin_url):
    """Helper object that creates/edits products in the admin panel."""
    manager = ProductManager(driver)
    return manager
@pytest.fixture(scope="function")
def store_manager(driver, open_store_page):
    """Helper object for the storefront.

    NOTE(review): despite the name this reuses ``ProductManager`` —
    presumably it also covers storefront lookups; confirm against its class.
    """
    manager = ProductManager(driver)
    return manager
@pytest.fixture(scope="function")
def downloads_manager(driver, open_opencart_admin_url):
    """Helper object that manages files on the admin Downloads page."""
    manager = DownloadManager(driver)
    return manager
@pytest.fixture()
def custom_menu_designer(driver, open_opencart_admin_url):
    """Helper object that manipulates the custom menu designer."""
    designer = CustomMenuDesigner(driver)
    return designer
# @pytest.fixture(scope="function")
# def add_new_product(driver, set_login_data, products_page_opening, product_manager, request):
# product_manager.add_new_product(request.config.getoption("--productname"),
# request.config.getoption("--modelname"))
@pytest.fixture(scope='function')
def add_product_with_image(driver, set_login_data, products_page_opening, product_manager, request):
    """Create a product (name/meta/model from CLI options) with an attached image."""
    opt = request.config.getoption
    name = opt("--productname")
    meta = opt("--meta")
    model = opt("--modelname")
    # `image` is a module-level path defined elsewhere in this conftest.
    product_manager.add_new_product_with_image(name, meta, model, image)
@pytest.fixture(scope='function')
def find_product_image(driver, open_store_page, store_manager):
    """Locate the "MacBook Pro" product image in the store and return its src URL.

    Fix: removed leftover debug ``print`` statements that polluted test output.
    """
    store_manager.find_product_image("MacBook Pro")
    return store_manager.get_image_link()
@pytest.fixture(scope='function')
def upload_file(driver, set_login_data, downloads_page_opening, downloads_manager, request):
    """Upload a file (name/filename/mask from CLI options) on the Downloads page."""
    opt = request.config.getoption
    # `image` is a module-level path defined elsewhere in this conftest.
    downloads_manager.add_file(opt("--dname"), opt("--filename"), opt("--maskname"), image)
@pytest.fixture(scope='function')
def check_uploaded_file(driver, set_login_data, downloads_page_opening, downloads_manager):
    """Return the name of the file currently listed on the Downloads page."""
    file_name = downloads_manager.get_file_name()
    return file_name
@pytest.fixture(scope='function')
def drag_and_drop_custom(driver, set_login_data, custom_menu_page_opening, custom_menu_designer):
    """Perform the drag-and-drop in the custom menu and return the resulting product names."""
    result = custom_menu_designer.drag_and_drop_menu()
    return result
| [
37811,
37,
25506,
284,
4856,
4946,
26674,
17594,
2443,
37811,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
12972,
9288,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
355,
48963,
198,
198,
6738,
15300,
47477,
13,
11505,
26674,
62,
2... | 2.799931 | 2,914 |
# Boolean-style constants mirroring the I2C wire-level semantics.
TRUE  = 1 # ACK, YES
FALSE = 0 # NAK, NO
# Marker/base class: concrete masters (e.g. aardvark_i2c) derive from this
# so callers can treat any I2C master polymorphically.
class i2c (object): # for polymorphism
    '''
    i2c class hierarchy
    -------------------
    i2c
       \
        aardvark_i2c
       /    (aardv.py)
    aardvark
    '''
def choose_master (rpt=FALSE):
    '''
    Pick and return an I2C master adapter (currently always Aardvark #0).

    rpt: when TRUE, enum() reports the enumeration results.

    TO CONSIDER FOLLOWING SCENARIOS
    -------------------------------
    1. use AARDVARK in a non-Windows system
    '''
    from aardv import aardvark_i2c as aa_i2c
    # enum() scans for attached adapters; presumably called for its side
    # effect (device discovery/report) — the count itself is unused. TODO confirm.
    num = aa_i2c().enum (rpt)
    return aa_i2c(0) # i2cmst
# NOTE(review): Python 2 syntax (print statements) — this module predates py3.
# CLI: probe | baud <dec> | write <hex...> | read <hex> <hex> <hex>
if __name__ == '__main__':
    i2cmst = choose_master (rpt=TRUE)
    from basic import *
    if not no_argument ():
    # if i2cmst!=0:
        if sys.argv[1]=='probe' : print i2cmst.probe ()
        elif sys.argv[1]=='baud' : print i2cmst.baud (argv_dec[2])
        elif sys.argv[1]=='write' : print i2cmst.i2cw (argv_hex[2:])[1]
        elif sys.argv[1]=='read' : print ['0x%02X' % xx for xx in i2cmst.read (argv_hex[2], argv_hex[3], argv_hex[4])]
        else: print "command not recognized"
    # else: print "I2C master not found"
| [
198,
5446,
8924,
220,
796,
352,
1303,
7125,
42,
11,
21560,
198,
37,
23719,
796,
657,
1303,
399,
10206,
11,
8005,
198,
198,
4871,
1312,
17,
66,
357,
15252,
2599,
1303,
329,
34196,
1042,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
... | 1.893543 | 573 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 4 16:35:53 2019
@author: technosap
"""
import ctypes
import time
import math
from modular_client import ModularClient # for nozzle control
# Entry point; main() is defined elsewhere in this module (not shown here).
if __name__ == "__main__":
    main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
7653,
220,
604,
1467,
25,
2327,
25,
4310,
13130,
198,
198,
31,
9800,
25,
1579,
418,
499,
198,
37811,
198,
198,
11748,
269,
19199,
198,
1... | 2.804878 | 82 |
"""Create CSVs from all tables on a Wikipedia article."""
import csv
import os
from bs4 import BeautifulSoup
import requests
from pprint import pprint
def scrape(url, output_name):
    """Create CSVs from all tables in a Wikipedia article.

    ARGS:
        url (str): The full URL of the Wikipedia article to scrape tables from.
        output_name (str): The base file name (without filepath) to write to.
    """
    # Fetch the article and pick out the data-bearing tables.
    response = requests.get(url)
    parsed = BeautifulSoup(response.content, "lxml")
    wanted_classes = {"class": ["sortable", "plainrowheaders"]}
    tables = parsed.findAll("table", wanted_classes)

    # All CSVs go into a folder named after the output base name.
    os.makedirs(output_name, exist_ok=True)

    for table_number, table in enumerate(tables):
        # First table keeps the base name; later ones get a numeric suffix.
        suffix = "" if table_number == 0 else "_" + str(table_number)
        filepath = os.path.join(output_name, output_name + suffix) + ".csv"
        with open(filepath, mode="w", newline="", encoding="utf-8") as output:
            csv_writer = csv.writer(output, quoting=csv.QUOTE_ALL, lineterminator="\n")
            write_html_table_to_csv(table, csv_writer)
def first_row_check(header):
    """Expand ``colspan`` cells in a header row.

    A header cell that spans several sub-columns is repeated once per
    sub-column, so the header width matches the data rows beneath it.

    ARGS:
        header (bs4.Tag): The header row being analyzed.

    RETURNS:
        cells (list): Header cells, with colspan cells duplicated in place.
    """
    cells = []
    for element in header.findAll(["th", "td"]):
        repeat = int(element["colspan"]) if element.has_attr("colspan") else 1
        cells.extend([element] * repeat)
    return cells
def write_html_table_to_csv(table, writer):
    """Write HTML table from Wikipedia to a CSV file.

    Handles ``rowspan`` cells by remembering them and re-inserting their
    value into the following rows until the span is exhausted.

    ARGS:
        table (bs4.Tag): The bs4 Tag object being analyzed.
        writer (csv.writer): The csv Writer object creating the output.
    """
    # Hold elements that span multiple rows in a list of
    # dictionaries that track 'rows_left' and 'value'
    first_row = True
    saved_rowspans = []
    for row in table.findAll("tr"):
        if first_row:
            # Header row: expand colspans so the width is correct.
            cells = first_row_check(row)
            first_row = False
        else:
            cells = row.findAll(["th", "td"])

        # If the first row, use it to define width of table
        if len(saved_rowspans) == 0:
            saved_rowspans = [None for _ in cells]
        # Insert values from cells that span into this row
        elif len(cells) != len(saved_rowspans):
            for index, rowspan_data in enumerate(saved_rowspans):
                if rowspan_data is not None:
                    # Insert the data from previous row; decrement rows left
                    value = rowspan_data["value"]
                    cells.insert(index, value)

                    if saved_rowspans[index]["rows_left"] == 1:
                        saved_rowspans[index] = None
                    else:
                        saved_rowspans[index]["rows_left"] -= 1

        # If an element with rowspan, save it for future cells
        for index, cell in enumerate(cells):
            if cell.has_attr("rowspan"):
                rowspan_data = {"rows_left": int(cell["rowspan"]), "value": cell}
                saved_rowspans[index] = rowspan_data

        if cells:
            # Clean the data of references and unusual whitespace
            cleaned = clean_data(cells)

            # Fill the row with empty columns if some are missing
            # (Some HTML tables leave final empty cells without a <td> tag)
            columns_missing = len(saved_rowspans) - len(cleaned)
            if columns_missing:
                cleaned += [None] * columns_missing

            writer.writerow(cleaned)
def clean_data(row):
    """Clean table row list from Wikipedia into a string for CSV.

    ARGS:
        row (bs4.ResultSet): The bs4 result set being cleaned for output.

    RETURNS:
        cleaned_cells (list[str]): List of cleaned text items in this row.
    """
    cleaned_cells = []
    for cell in row:
        # Drop citation superscripts and hidden sortkeys before reading text.
        for junk in cell.findAll("sup", {"class": "reference"}):
            junk.extract()
        for junk in cell.findAll("span", {"class": "sortkey"}):
            junk.extract()

        # Keep every text fragment that is not a footnote marker like "[a]",
        # then normalize whitespace into a single clean string.
        fragments = [text for text in cell.findAll(text=True) if text[0] != "["]
        joined = "".join(fragments)
        normalized = joined.replace("\xa0", " ").replace("\n", " ").strip()
        cleaned_cells.append(normalized)
    return cleaned_cells
| [
37811,
16447,
9429,
23266,
422,
477,
8893,
319,
257,
15312,
2708,
526,
15931,
198,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
6738,
279,
4798,
1330,
279,
4798,
... | 2.285354 | 2,376 |
import time
import socket
import curses
import netaddr
import threading
import collections
from terminaltables import SingleTable
import evillimiter.networking.utils as netutils
from .menu import CommandMenu
from evillimiter.networking.utils import BitRate
from evillimiter.console.io import IO
from evillimiter.console.chart import BarChart
from evillimiter.console.banner import get_main_banner
from evillimiter.networking.host import Host
from evillimiter.networking.limit import Limiter, Direction
from evillimiter.networking.spoof import ARPSpoofer
from evillimiter.networking.scan import HostScanner
from evillimiter.networking.monitor import BandwidthMonitor
from evillimiter.networking.watch import HostWatcher
| [
11748,
640,
201,
198,
11748,
17802,
201,
198,
11748,
43878,
201,
198,
11748,
2010,
29851,
201,
198,
11748,
4704,
278,
201,
198,
11748,
17268,
201,
198,
6738,
5651,
2501,
2977,
1330,
14206,
10962,
201,
198,
201,
198,
11748,
819,
359,
320... | 3.372727 | 220 |
""" A helper module to assist with gen:call-style message parsing and replying.
A generic incoming message looks like ``{$gen_call, {From, Ref}, Message}``.
"""
from Pyrlang import Term
class GenBase:
    """ Base class for Gen messages, do not use directly. See
        ``GenIncomingMessage`` and ``GenIncomingCall``.

        Subclasses are expected to set ``self.sender_`` (pid of the caller)
        and ``self.ref_`` (the call reference) before these helpers are used.
    """
    def reply(self, local_pid, result):
        """ Reply with a gen:call result

            :param local_pid: pid used as the sender of the reply
            :param result: term sent back to the caller as ``{Ref, Result}``
        """
        # Imported here, presumably to avoid a circular import with node.py
        # — TODO confirm.
        from Pyrlang.node import Node
        Node.singleton.send(sender=local_pid,
                            receiver=self.sender_,
                            message=(self.ref_, result))

    def reply_exit(self, local_pid, reason):
        """ Reply to remote gen:call with EXIT message which causes reason to be
            re-raised as exit() on the caller side
            NOTE: The gen:call caller attempts to monitor the target first. If
            the monitor attempt fails, the exit here won't work
        """
        from Pyrlang.node import Node
        # Sent as a distribution command addressed to the caller's node.
        reply = ('monitor_p_exit', local_pid, self.sender_, self.ref_, reason)
        Node.singleton.dist_command(receiver_node=self.sender_.node_.text_,
                                    message=reply)
# NOTE(review): the constructor is not visible in this chunk; instances are
# built via keyword args (sender=, ref=, message=) in parse_gen_message below.
class GenIncomingMessage(GenBase):
    """ A helper class which contains elements from a generic incoming
        ``gen_server`` message.
        For those situations when gen message is not a call, or is an incoming
        ``gen_server`` call.
    """
class GenIncomingCall(GenBase):
    """ A helper class which contains elements from the incoming
        ``gen:call`` RPC call message.
    """
    def get_args(self):
        """ Returns parsed args for the RPC call. """
        args = self.args_
        return args if isinstance(args, list) else args.elements_

    def get_mod_str(self):
        """ Returns module name as a string. """
        return self.mod_.text_

    def get_fun_str(self):
        """ Returns function name as a string. """
        return self.fun_.text_
def parse_gen_call(msg):
    """ Determine if msg is a gen:call message

        :param msg: An Erlang tuple hopefully starting with a '$gen_call'
        :return: str with error if msg wasn't a call message, otherwise
            constructs and returns a ``GenIncomingCall`` object.
    """
    # Incoming {$gen_call, {From, Ref}, {call, Mod, Fun, Args}}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"

    # ignore tuples with non-atom 1st, ignore non-gen_call mesages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"

    (_, _sender_mref, _call_mfa_gl) = msg
    (msender, mref) = _sender_mref

    if len(_call_mfa_gl) != 5:
        return "Expecting a 5-tuple (with a 'call' atom)"

    # TODO: Maybe also check first element to be an atom 'call'
    # A gen_call call tuple has 5 elements, otherwise see below
    # NOTE(review): `call` is unpacked but never validated or used.
    (call, m, f, args, group_leader) = _call_mfa_gl

    if not isinstance(m, Term.Atom):
        return "Module must be an atom: %s" % str(m)
    if not isinstance(f, Term.Atom):
        return "Function must be an atom: %s" % str(f)

    return GenIncomingCall(mod=m,
                           fun=f,
                           args=args,
                           group_leader=group_leader,
                           sender=msender,  # pid of the sender
                           ref=mref  # reference used in response
                           )
def parse_gen_message(msg):
    """ Might be an 'is_auth' request which is not a call

        :param msg: An Erlang tuple hopefully starting with a '$gen_call'
        :return: string on error, otherwise a ``GenIncomingMessage`` object
    """
    # Incoming {$gen_call, {From, Ref}, Message}
    if type(msg) != tuple:  # ignore all non-tuple messages
        return "Only {tuple} messages allowed"

    # ignore tuples with non-atom 1st, ignore non-gen_call mesages
    if not isinstance(msg[0], Term.Atom) or msg[0].text_ != '$gen_call':
        return "Only {$gen_call, _, _} messages allowed"

    # Unlike parse_gen_call, the payload here is kept opaque.
    (_, _sender_mref, gcmsg) = msg
    (msender, mref) = _sender_mref

    return GenIncomingMessage(sender=msender,
                              ref=mref,
                              message=gcmsg)
# Public API of this helper module.
__all__ = ['GenIncomingCall', 'GenIncomingMessage',
           'parse_gen_call', 'parse_gen_message']
| [
37811,
317,
31904,
8265,
284,
3342,
351,
2429,
25,
13345,
12,
7635,
3275,
32096,
290,
1128,
3157,
13,
198,
220,
220,
220,
317,
14276,
15619,
3275,
3073,
588,
7559,
90,
3,
5235,
62,
13345,
11,
1391,
4863,
11,
6524,
5512,
16000,
92,
1... | 2.353896 | 1,848 |
r"""
Relative Interiors of Polyhedra and Cones
"""
# ****************************************************************************
# Copyright (C) 2021 Matthias Koeppe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.geometry.convex_set import ConvexSet_relatively_open
class RelativeInterior(ConvexSet_relatively_open):
    r"""
    The relative interior of a polyhedron or cone

    This class should not be used directly. Use methods
    :meth:`~sage.geometry.polyhedron.Polyhedron_base.relative_interior`,
    :meth:`~sage.geometry.polyhedron.Polyhedron_base.interior`,
    :meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.relative_interior`,
    :meth:`~sage.geometry.cone.ConvexRationalPolyhedralCone.interior` instead.

    EXAMPLES::

        sage: segment = Polyhedron([[1, 2], [3, 4]])
        sage: segment.relative_interior()
        Relative interior of
        a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
        sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
        sage: octant.relative_interior()
        Relative interior of 3-d cone in 3-d lattice N
    """

    def __init__(self, polyhedron):
        r"""
        Initialize ``self``.

        INPUT:

        - ``polyhedron`` - an instance of :class:`Polyhedron_base` or
          :class:`ConvexRationalPolyhedralCone`.

        TESTS::

            sage: P = Polyhedron([[1, 2], [3, 4]])
            sage: from sage.geometry.relative_interior import RelativeInterior
            sage: TestSuite(RelativeInterior(P)).run()
        """
        self._polyhedron = polyhedron
        if hasattr(polyhedron, "is_mutable") and polyhedron.is_mutable():
            if hasattr(polyhedron, "_add_dependent_object"):
                # Register with a mutable closure so ``self`` is notified of
                # changes to it.
                polyhedron._add_dependent_object(self)

    def __hash__(self):
        r"""
        TESTS::

            sage: P = Polyhedron([[1, 2], [3, 4]])
            sage: Q = Polyhedron([[3, 4], [1, 2]])
            sage: hash(P.relative_interior()) == hash(Q.relative_interior())
            True
        """
        # XOR with a constant so the hash differs from that of the closure.
        return hash(self._polyhedron) ^ 1789

    def __contains__(self, point):
        r"""
        Return whether ``self`` contains ``point``.

        EXAMPLES::

            sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
            sage: ri_octant = octant.relative_interior(); ri_octant
            Relative interior of 3-d cone in 3-d lattice N
            sage: (1, 1, 1) in ri_octant
            True
            sage: (1, 0, 0) in ri_octant
            False
        """
        return self._polyhedron.relative_interior_contains(point)

    def ambient(self):
        r"""
        Return the ambient convex set or space.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.ambient()
            Vector space of dimension 2 over Rational Field
        """
        return self._polyhedron.ambient()

    def ambient_vector_space(self, base_field=None):
        r"""
        Return the ambient vector space.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.ambient_vector_space()
            Vector space of dimension 2 over Rational Field
        """
        return self._polyhedron.ambient_vector_space(base_field=base_field)

    def ambient_dim(self):
        r"""
        Return the dimension of the ambient space.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: segment.ambient_dim()
            2
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.ambient_dim()
            2
        """
        return self._polyhedron.ambient_dim()

    def an_affine_basis(self):
        r"""
        Return points that form an affine basis for the affine hull.

        The points are guaranteed to lie in the topological closure of ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 0], [0, 1]])
            sage: segment.relative_interior().an_affine_basis()
            [A vertex at (1, 0), A vertex at (0, 1)]
        """
        return self._polyhedron.an_affine_basis()

    def dim(self):
        r"""
        Return the dimension of ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: segment.dim()
            1
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.dim()
            1
        """
        return self._polyhedron.dim()

    def interior(self):
        r"""
        Return the interior of ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.interior()
            The empty polyhedron in ZZ^2

            sage: octant = Cone([(1,0,0), (0,1,0), (0,0,1)])
            sage: ri_octant = octant.relative_interior(); ri_octant
            Relative interior of 3-d cone in 3-d lattice N
            sage: ri_octant.interior() is ri_octant
            True
        """
        return self._polyhedron.interior()

    def relative_interior(self):
        r"""
        Return the relative interior of ``self``.

        As ``self`` is already relatively open, this method just returns ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.relative_interior() is ri_segment
            True
        """
        return self

    def closure(self):
        r"""
        Return the topological closure of ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.closure() is segment
            True
        """
        return self._polyhedron

    def is_universe(self):
        r"""
        Return whether ``self`` is the whole ambient space

        OUTPUT:

        Boolean.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.is_universe()
            False
        """
        # Relies on ``self`` not set up for polyhedra that are already
        # relatively open themselves.
        assert not self._polyhedron.is_universe()
        return False

    def is_closed(self):
        r"""
        Return whether ``self`` is closed.

        OUTPUT:

        Boolean.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.is_closed()
            False
        """
        # Relies on ``self`` not set up for polyhedra that are already
        # relatively open themselves.
        assert not self._polyhedron.is_relatively_open()
        return False

    def _some_elements_(self):
        r"""
        Generate some points of ``self``.

        If ``self`` is empty, no points are generated; no exception will be raised.

        EXAMPLES::

            sage: P = polytopes.simplex()
            sage: ri_P = P.relative_interior()
            sage: ri_P.an_element()              # indirect doctest
            (1/4, 1/4, 1/4, 1/4)
            sage: ri_P.some_elements()           # indirect doctest
            [(1/4, 1/4, 1/4, 1/4), (1/2, 1/4, 1/8, 1/8)]
        """
        # Filter the closure's sample points down to those strictly inside.
        for p in self._polyhedron.some_elements():
            if p in self:
                yield p

    def _repr_(self):
        r"""
        Return a description of ``self``.

        EXAMPLES::

            sage: P = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1]])
            sage: P.relative_interior()._repr_()
            'Relative interior of a 2-dimensional polyhedron in ZZ^4 defined as the convex hull of 3 vertices'
            sage: P.rename('A')
            sage: P.relative_interior()._repr_()
            'Relative interior of A'
        """
        repr_P = repr(self._polyhedron)
        # Lower-case a leading article so the phrase reads naturally.
        if repr_P.startswith('A '):
            repr_P = 'a ' + repr_P[2:]
        return 'Relative interior of ' + repr_P

    def __eq__(self, other):
        r"""
        Compare ``self`` and ``other``.

        INPUT:

        - ``other`` -- any object

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
            sage: ri_segment2 = segment2.relative_interior(); ri_segment2
            Relative interior of
            a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
            sage: ri_segment == ri_segment2
            True

        TESTS::

            sage: empty = Polyhedron(ambient_dim=2)
            sage: ri_segment == empty
            False
        """
        if type(self) != type(other):
            return False
        return self._polyhedron == other._polyhedron

    def __ne__(self, other):
        r"""
        Compare ``self`` and ``other``.

        INPUT:

        - ``other`` -- any object

        TESTS::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of
            a 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: segment2 = Polyhedron([[1, 2], [3, 4]], base_ring=AA)
            sage: ri_segment2 = segment2.relative_interior(); ri_segment2
            Relative interior of
            a 1-dimensional polyhedron in AA^2 defined as the convex hull of 2 vertices
            sage: ri_segment != ri_segment2
            False
        """
        return not (self == other)

    def dilation(self, scalar):
        """
        Return the dilated (uniformly stretched) set.

        INPUT:

        - ``scalar`` -- A scalar

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of a
            1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: A = ri_segment.dilation(2); A
            Relative interior of a
            1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: A.closure().vertices()
            (A vertex at (2, 4), A vertex at (6, 8))
            sage: B = ri_segment.dilation(-1/3); B
            Relative interior of a
            1-dimensional polyhedron in QQ^2 defined as the convex hull of 2 vertices
            sage: B.closure().vertices()
            (A vertex at (-1, -4/3), A vertex at (-1/3, -2/3))
            sage: C = ri_segment.dilation(0); C
            A 0-dimensional polyhedron in ZZ^2 defined as the convex hull of 1 vertex
            sage: C.vertices()
            (A vertex at (0, 0),)
        """
        # Dilate the closure, then take its relative interior again.
        return self.closure().dilation(scalar).relative_interior()

    def linear_transformation(self, linear_transf, **kwds):
        """
        Return the linear transformation of ``self``.

        By [Roc1970]_, Theorem 6.6, the linear transformation of a relative interior
        is the relative interior of the linear transformation.

        INPUT:

        - ``linear_transf`` -- a matrix
        - ``**kwds`` -- passed to the :meth:`linear_transformation` method of
          the closure of ``self``.

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of a
            1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: T = matrix([[1, 1]])
            sage: A = ri_segment.linear_transformation(T); A
            Relative interior of a
            1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
            sage: A.closure().vertices()
            (A vertex at (3), A vertex at (7))
        """
        return self.closure().linear_transformation(linear_transf, **kwds).relative_interior()

    def translation(self, displacement):
        """
        Return the translation of ``self`` by a ``displacement`` vector.

        INPUT:

        - ``displacement`` -- a displacement vector or a list/tuple of
          coordinates that determines a displacement vector

        EXAMPLES::

            sage: segment = Polyhedron([[1, 2], [3, 4]])
            sage: ri_segment = segment.relative_interior(); ri_segment
            Relative interior of a
            1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: t = vector([100, 100])
            sage: ri_segment.translation(t)
            Relative interior of a
            1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
            sage: ri_segment.closure().vertices()
            (A vertex at (1, 2), A vertex at (3, 4))
        """
        return self.closure().translation(displacement).relative_interior()
| [
81,
37811,
198,
6892,
876,
4225,
12706,
286,
12280,
704,
430,
290,
1482,
274,
198,
37811,
198,
198,
2,
41906,
17174,
46068,
198,
2,
220,
220,
220,
220,
220,
220,
15069,
357,
34,
8,
33448,
45524,
4448,
509,
2577,
27768,
198,
2,
198,
... | 2.146271 | 6,946 |
import ipaddress
import re
from lennoxs30api.s30exception import EC_AUTHENTICATE, EC_LOGIN, S30Exception
import voluptuous as vol
from . import Manager
from .const import (
CONF_ALLERGEN_DEFENDER_SWITCH,
CONF_APP_ID,
CONF_CLOUD_CONNECTION,
CONF_CREATE_INVERTER_POWER,
CONF_CREATE_SENSORS,
CONF_FAST_POLL_INTERVAL,
CONF_INIT_WAIT_TIME,
CONF_LOG_MESSAGES_TO_FILE,
CONF_MESSAGE_DEBUG_FILE,
CONF_MESSAGE_DEBUG_LOGGING,
CONF_PII_IN_MESSAGE_LOGS,
LENNOX_DEFAULT_CLOUD_APP_ID,
LENNOX_DEFAULT_LOCAL_APP_ID,
CONF_LOCAL_CONNECTION,
)
from .util import dict_redact_fields, redact_email
from homeassistant.data_entry_flow import FlowResult
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback
from homeassistant.const import (
CONF_HOST,
CONF_EMAIL,
CONF_PASSWORD,
CONF_PROTOCOL,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
import logging
# Polling cadence and retry policy defaults for the integration.
DEFAULT_POLL_INTERVAL: int = 10
DEFAULT_FAST_POLL_INTERVAL: float = 0.75
MAX_ERRORS = 5
RETRY_INTERVAL_SECONDS = 60
DOMAIN = "lennoxs30"
_LOGGER = logging.getLogger(__name__)
# Step 1: pick exactly one connection type (the flow rejects both/neither).
STEP_ONE = vol.Schema(
    {
        vol.Required(CONF_LOCAL_CONNECTION, default=True): cv.boolean,
        vol.Required(CONF_CLOUD_CONNECTION, default=False): cv.boolean,
    }
)
# Cloud connection: Lennox account credentials plus optional extras.
STEP_CLOUD = vol.Schema(
    {
        vol.Required(CONF_EMAIL): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_CLOUD_APP_ID): cv.string,
        vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
        vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
    }
)
# Local connection: direct host access (host may carry a ":port" suffix).
STEP_LOCAL = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_APP_ID, default=LENNOX_DEFAULT_LOCAL_APP_ID): cv.string,
        vol.Optional(CONF_CREATE_SENSORS, default=True): cv.boolean,
        vol.Optional(CONF_ALLERGEN_DEFENDER_SWITCH, default=False): cv.boolean,
        vol.Optional(CONF_CREATE_INVERTER_POWER, default=False): cv.boolean,
        vol.Optional(CONF_PROTOCOL, default="https"): cv.string,
    }
)
def host_valid(hostport: str) -> bool:
    """Return True if ``hostport`` holds a valid hostname or IP address.

    An optional ``host:port`` suffix is allowed; everything after the first
    ``:`` is ignored.  (Bare IPv6 literals contain ``:`` themselves and are
    therefore not supported by this check.)
    """
    # We allow an host:port syntax.
    splits = hostport.split(":")
    host = splits[0]
    try:
        # BUG FIX: the original compared ``version == (4 or 6)``, which
        # evaluates to ``version == 4`` and silently rejected IPv6; it also
        # fell off the end (returning None) instead of an explicit bool.
        return ipaddress.ip_address(host).version in (4, 6)
    except ValueError:
        # Not an IP literal: validate as a hostname, one dot-label at a time.
        disallowed = re.compile(r"[^a-zA-Z\d\-]")
        return all(x and not disallowed.search(x) for x in host.split("."))
@callback
def lennox30_entries(hass: HomeAssistant):
    """Return the set of hosts already present in this domain's config entries."""
    entries = hass.config_entries.async_entries(DOMAIN)
    return {entry.data[CONF_HOST] for entry in entries}
class lennoxs30ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Lennox S30 configflow."""
    # Config-entry data schema version (used by Home Assistant for migrations).
    VERSION = 1
    # This integration polls the device locally rather than receiving pushes.
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def _host_in_configuration_exists(self, host) -> bool:
"""Return True if host exists in configuration."""
if host in lennox30_entries(self.hass):
return True
return False
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
self.config_input = {}
_LOGGER.debug(f"async_step_user user_input [{dict_redact_fields(user_input)}]")
if user_input is not None:
cloud_local = user_input[CONF_CLOUD_CONNECTION]
local_connection = user_input[CONF_LOCAL_CONNECTION]
if cloud_local == local_connection:
errors[CONF_LOCAL_CONNECTION] = "select_cloud_or_local"
else:
dict = {CONF_CLOUD_CONNECTION: cloud_local}
self.config_input.update(dict)
if cloud_local:
return await self.async_step_cloud()
else:
return await self.async_step_local()
return self.async_show_form(step_id="user", data_schema=STEP_ONE, errors=errors)
    async def async_step_cloud(self, user_input=None):
        """Handle the cloud-connection step: collect credentials and verify them."""
        errors = {}
        _LOGGER.debug(f"async_step_cloud user_input [{dict_redact_fields(user_input)}]")
        if user_input is not None:
            # One entry per cloud account, keyed by email.
            await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_EMAIL])
            self._abort_if_unique_id_configured()
            try:
                await self.try_to_connect(user_input)
                self.config_input.update(user_input)
                return await self.async_step_advanced()
            except S30Exception as e:
                _LOGGER.error(e.as_string())
                # Distinguish bad credentials from other connectivity failures.
                if e.error_code == EC_LOGIN:
                    errors["base"] = "unable_to_connect_login"
                else:
                    errors["base"] = "unable_to_connect_cloud"
        return self.async_show_form(
            step_id="cloud", data_schema=STEP_CLOUD, errors=errors
        )
    async def async_step_local(self, user_input=None):
        """Handle the local-connection step: validate the host and connect."""
        errors = {}
        _LOGGER.debug(f"async_step_local user_input [{dict_redact_fields(user_input)}]")
        if user_input is not None:
            host = user_input[CONF_HOST]
            if self._host_in_configuration_exists(host):
                errors[CONF_HOST] = "already_configured"
            elif not host_valid(user_input[CONF_HOST]):
                errors[CONF_HOST] = "invalid_hostname"
            else:
                # One entry per device, keyed by host.
                await self.async_set_unique_id(DOMAIN + "_" + user_input[CONF_HOST])
                self._abort_if_unique_id_configured()
                try:
                    await self.try_to_connect(user_input)
                    self.config_input.update(user_input)
                    return await self.async_step_advanced()
                except S30Exception as e:
                    _LOGGER.error(e.as_string())
                    errors[CONF_HOST] = "unable_to_connect_local"
        return self.async_show_form(
            step_id="local", data_schema=STEP_LOCAL, errors=errors
        )
    async def async_step_import(self, user_input) -> FlowResult:
        """Handle the import step (migration from YAML configuration).

        The supplied configuration is accepted as-is; no connection test here.
        """
        self.config_input = {}
        _LOGGER.debug(
            f"async_step_import user_input [{dict_redact_fields(user_input)}]"
        )
        self.config_input.update(user_input)
        return await self.create_entry()
@staticmethod
@callback
| [
11748,
20966,
21975,
198,
11748,
302,
198,
6738,
300,
1697,
1140,
82,
1270,
15042,
13,
82,
1270,
1069,
4516,
1330,
13182,
62,
32,
24318,
3525,
2149,
6158,
11,
13182,
62,
25294,
1268,
11,
311,
1270,
16922,
198,
198,
11748,
2322,
37623,
... | 2.093389 | 3,116 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 14:51:38 2020
@author: haukeh
"""
#%%Import of required packages
import numpy as np
import pandas as pd
import os
import sys
import plotly.graph_objs as go
from plotly.offline import plot
#%% Function to read results csv files
#%% Function to create dictionaries containing dictionaries for each scenario that contain the results as dataframes
#%% Function to creat a df with the production by technology annual
#%% Function to create dictionary with information
#%% Dictionary of dictionaries with colour schemes
# Colour lookup used for plotting: scheme name -> {technology -> RGB string}.
colour_schemes = {
    'dES_colours': {
        'Coal': 'rgb(0, 0, 0)',
        'Oil': 'rgb(121, 43, 41)',
        'Gas': 'rgb(86, 108, 140)',
        'Nuclear': 'rgb(186, 28, 175)',
        'Waste': 'rgb(138, 171, 71)',
        'Biomass': 'rgb(172, 199, 119)',
        'Biofuel': 'rgb(79, 98, 40)',
        'Hydro': 'rgb(0, 139, 188)',
        'Wind': 'rgb(143, 119, 173)',
        'Solar': 'rgb(230, 175, 0)',
        'Geo': 'rgb(192, 80, 77)',
        'Ocean': 'rgb(22, 54, 92)',
        'Imports': 'rgb(232, 133, 2)',
    },
    'TIMES_PanEU_colours': {
        'Coal': 'rgb(0, 0, 0)',
        'Oil': 'rgb(202, 171, 169)',
        'Gas': 'rgb(102, 77, 142)',
        'Nuclear': 'rgb(109, 109, 109)',
        'Waste': 'rgb(223, 134, 192)',
        'Biomass': 'rgb(80, 112, 45)',
        'Biofuel': 'rgb(178, 191, 225)',
        'Hydro': 'rgb(181, 192, 224)',
        'Wind': 'rgb(103, 154, 181)',
        'Solar': 'rgb(210, 136, 63)',
        'Geo': 'rgb(178, 191, 225)',
        'Ocean': 'rgb(178, 191, 225)',
        'Imports': 'rgb(232, 133, 2)',
    },
}
#%% functions for returning positives and negatives
#%% Function to create dfs with import and export of electricity for selected country
#%% Function to create figure
#%% main function to execute the script
#%% If executed as script
if __name__ == '__main__':
    # CLI entry point: argv[1] selects the region, all remaining arguments
    # are scenario names forwarded to main() (defined elsewhere in this file).
    selec_region = sys.argv[1]
    scens = sys.argv[2:]
    main(selec_region,scens)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
8621,
1679,
1478,
25,
4349,
25,
2548,
12131,
198,
198,
31,
9800,
25,
387,
4649,
71,
198,
37811,
198,
198,
2,
16626,
20939,
286,
2672,
10... | 2.289072 | 851 |
from typing import Callable, Dict, Hashable, Optional, Tuple, Union
from .._internal.utils import SlotsReprMixin
from ..core import DependencyInstance, DependencyProvider
class LazyCall(SlotsReprMixin):
    """
    Dependency representing the lazily-computed result of calling a given
    function with given arguments.

    .. doctest::

        >>> from antidote import LazyCall, world
        >>> def f(x, y):
        ...     print("Computing {} + {}".format(x, y))
        ...     return x + y
        >>> A = LazyCall(f)(2, y=3)
        >>> world.get(A)
        Computing 2 + 3
        5

    """
    __slots__ = ('_func', '_args', '_kwargs', '_singleton')

    def __init__(self, func: Callable, singleton: bool = True):
        """
        Args:
            func: Function to be lazily called. Arguments supplied by calling
                the :py:class:`~.LazyCall` instance are forwarded to it.
            singleton: Whether the computed value should be a singleton.
        """
        self._func = func
        self._singleton = singleton
        self._args: Tuple = ()
        self._kwargs: Dict = {}

    def __call__(self, *args, **kwargs):
        """
        Record the positional and keyword arguments for the eventual call and
        return self, so the instance can be used fluently.
        """
        self._args, self._kwargs = args, kwargs
        return self
class LazyMethodCall(SlotsReprMixin):
"""
Similar to :py:class:`~.LazyCall` but adapted to methods within a class
definition. The class has to be a registered service, as the class
instantiation itself is also lazy.
.. doctest::
>>> from antidote import LazyMethodCall, register, world
>>> @register
... class Constants:
... def get(self, x: str):
... return len(x)
... A = LazyMethodCall(get)('test')
>>> Constants.A
LazyMethodCallDependency(...)
>>> world.get(Constants.A)
4
>>> Constants().A
4
:py:class:`~.LazyMethodCall` has two different behaviors:
- if retrieved as a class attribute it returns a dependency which identifies
the result for Antidote.
- if retrieved as a instance attribute it returns the result for this
instance. This makes testing a lot easier as it does not require Antidote.
Check out :py:class:`~.helpers.conf.LazyConstantsMeta` for simple way
to declare multiple constants.
"""
__slots__ = ('_method_name', '_args', '_kwargs', '_singleton', '_key')
    def __init__(self, method: Union[Callable, str], singleton: bool = True):
        """
        Args:
            method: Method to be lazily called, or its name. Only the name is
                kept (see comment below).
            singleton: Whether the computed value should be treated as a
                singleton.
        """
        self._singleton = singleton
        # Retrieve the name of the method, as injection can be done after the class
        # creation which is typically the case with @register.
        self._method_name = method if isinstance(method, str) else method.__name__
        self._args = ()  # type: Tuple
        self._kwargs = {}  # type: Dict
        # Attribute key for caching the per-instance result; set lazily later
        # (presumably by __get__, which is not visible in this excerpt).
        self._key = None
    def __call__(self, *args, **kwargs):
        """
        Record the arguments to forward to the lazily called method and return
        self, so the call can be used inline in a class body.
        """
        self._args = args
        self._kwargs = kwargs
        return self
# The attribute is expected to be found in owner, as one should not call
# directly __get__.
| [
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
21059,
540,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
6738,
11485,
62,
32538,
13,
26791,
1330,
3454,
1747,
6207,
81,
35608,
259,
198,
6738,
11485,
7295,
1330,
37947,
1387,
33384,
11... | 2.463821 | 1,382 |
import torch
#from pytorch_lightning import LightingModule
from transformers import AutoModelForSeq2SeqLM, MBartForConditionalGeneration
# Path to the local checkpoint directory to inspect.
PATH = "../longformer/models/LongTinyMBART"

# Load the seq2seq checkpoint from disk.
model = AutoModelForSeq2SeqLM.from_pretrained(PATH)

# model_torch = torch.load(PATH)
# model = model_torch

# model_lightning = MyLightingModule.load_from_checkpoint(PATH)
# model = model_lightning

# Print the total number of model parameters.
print(sum([param.nelement() for param in model.parameters()]))

if __name__ == '__main__':
    # Dump the model architecture (its repr) to a text file for inspection.
    with open("model_summary/LongTinyMBART.txt", 'w') as f:
        f.write(repr(model))
        f.close()  # NOTE: redundant -- the `with` block already closes the file
    print(repr(model))
11748,
28034,
198,
2,
6738,
12972,
13165,
354,
62,
2971,
768,
1330,
43150,
26796,
198,
6738,
6121,
364,
1330,
11160,
17633,
1890,
4653,
80,
17,
4653,
80,
31288,
11,
10771,
433,
1890,
25559,
1859,
8645,
341,
198,
198,
34219,
796,
366,
... | 2.705357 | 224 |
import pytest
import csv
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits as pyfits
from astropy.time import Time as astropyTime
from pyke import kepconvert
from pyke import kepio
from ..kepio import delete
# Packaged light-curve FITS file used as a fixture by the tests below.
fake_lc = get_pkg_data_filename("data/golden-lc.fits")

# Time formats accepted for conversion (astropy.time.Time format names).
SUPPORTED_CONVERSION = ['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps',
                        'plot_date', 'datetime', 'iso', 'isot', 'yday', 'fits',
                        'byear', 'jyear', 'byear_str', 'jyear_str']
| [
11748,
12972,
9288,
198,
11748,
269,
21370,
198,
6738,
6468,
28338,
13,
26791,
13,
7890,
1330,
651,
62,
35339,
62,
7890,
62,
34345,
198,
6738,
6468,
28338,
13,
952,
1330,
11414,
355,
12972,
21013,
198,
6738,
6468,
28338,
13,
2435,
1330,... | 2.346847 | 222 |
import logging
import sys
| [
11748,
18931,
198,
11748,
25064,
198
] | 4.333333 | 6 |
from __future__ import print_function, division
import os
import argparse
import torch
import torch.nn as nn
from os.path import exists
from torch.utils.data import Dataset, DataLoader
from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric
from data.pf_dataset import PFDataset, PFPascalDataset
from data.download_datasets import download_PF_willow
from image.normalization import NormalizeImageDict, normalize_image
from util.torch_util import BatchTensorToVars, str_to_bool
from geotnf.transformation import GeometricTnf
from geotnf.point_tnf import *
import matplotlib.pyplot as plt
from skimage import io
from collections import OrderedDict
import torch.nn.functional as F
# for compatibility with Python 2
try:
input = raw_input
except NameError:
pass
"""

Script to demonstrate evaluation on a trained model

"""

print('WeakAlign demo script')

# Argument parsing: paths to trained checkpoints (either one two-stage model,
# or separate affine and TPS models), the dataset location and backbone.
parser = argparse.ArgumentParser(description='WeakAlign PyTorch implementation')
# Paths
parser.add_argument('--model', type=str, default='trained_models/weakalign_resnet101_affine_tps.pth.tar', help='Trained two-stage model filename')
parser.add_argument('--model-aff', type=str, default='', help='Trained affine model filename')
parser.add_argument('--model-tps', type=str, default='', help='Trained TPS model filename')
parser.add_argument('--pf-path', type=str, default='datasets/proposal-flow-pascal', help='Path to PF dataset')
parser.add_argument('--feature-extraction-cnn', type=str, default='resnet101', help='feature extraction CNN model architecture: vgg/resnet101')
parser.add_argument('--tps-reg-factor', type=float, default=0.0, help='regularisation factor for tps tnf')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()

# Flags indicating whether separate single-stage checkpoints were supplied.
do_aff = args.model_aff != ''
do_tps = args.model_tps != ''

# Fall back to the default dataset location when none was given.
if args.pf_path == '':
    # BUG FIX: was `args.args.pf_path = ...`, which raises AttributeError
    # because argparse.Namespace has no `args` attribute.
    args.pf_path = 'datasets/proposal-flow-pascal/'

# Download dataset if needed
if not exists(args.pf_path):
    # NOTE(review): only `download_PF_willow` is imported at the top of this
    # script; `download_PF_pascal` is presumably also provided by
    # data.download_datasets -- confirm the import list.
    download_PF_pascal(args.pf_path)
# Create the two-stage (affine + TPS) geometric matching model.
print('Creating CNN model...')

model = TwoStageCNNGeometric(use_cuda=use_cuda,
                             return_correlation=False,
                             feature_extraction_cnn=args.feature_extraction_cnn)

# Load trained weights. Checkpoints were saved with 'vgg'-prefixed keys, so
# every key is renamed to 'model' before the per-parameter copy below.
print('Loading trained model weights...')

if args.model!='':
    # Single two-stage checkpoint: copy feature extractor and both regressors.
    checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage)
    checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
    for name, param in model.FeatureExtraction.state_dict().items():
        model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name])
    for name, param in model.FeatureRegression.state_dict().items():
        model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name])
    for name, param in model.FeatureRegression2.state_dict().items():
        model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name])
else:
    # Two separate checkpoints: affine weights feed FeatureExtraction and
    # FeatureRegression; TPS weights feed FeatureRegression2.
    checkpoint_aff = torch.load(args.model_aff, map_location=lambda storage, loc: storage)
    checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()])
    for name, param in model.FeatureExtraction.state_dict().items():
        model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name])
    for name, param in model.FeatureRegression.state_dict().items():
        model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name])

    checkpoint_tps = torch.load(args.model_tps, map_location=lambda storage, loc: storage)
    checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()])
    for name, param in model.FeatureRegression2.state_dict().items():
        # NOTE(review): the TPS checkpoint stores its regressor under
        # 'FeatureRegression.' (single-stage naming) -- presumably intended.
        model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' + name])

# Dataset and dataloader (PF-PASCAL test pairs, images normalized for the CNN).
dataset = PFPascalDataset(csv_file=os.path.join(args.pf_path, 'test_pairs_pf_pascal.csv'),
                      dataset_path=args.pf_path,
                      transform=NormalizeImageDict(['source_image','target_image']))

dataloader = DataLoader(dataset, batch_size=1,
                        shuffle=True, num_workers=4)

batchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)

# Instantiate image transformers
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
# Interactive demo loop: align one random pair per iteration, show or save
# the source, target and warped images, and ask whether to continue.
for i, batch in enumerate(dataloader):
    # get random batch of size 1
    batch = batchTensorToVars(batch)

    source_im_size = batch['source_im_size']
    target_im_size = batch['target_im_size']

    source_points = batch['source_points']
    target_points = batch['target_points']

    # warp points with estimated transformations
    target_points_norm = PointsToUnitCoords(target_points,target_im_size)

    model.eval()

    # Evaluate model: affine parameters plus TPS refinement.
    theta_aff,theta_aff_tps=model(batch)

    # Warp the source image with the affine alone, then affine followed by TPS.
    # NOTE(review): `affTpsTnf` is not defined in this excerpt -- presumably
    # imported via a star import (geotnf.point_tnf); confirm.
    warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3))
    warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff, theta_aff_tps)

    # Un-normalize images and convert to numpy (CHW tensors -> HWC arrays).
    source_image = normalize_image(batch['source_image'],forward=False)
    source_image = source_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
    target_image = normalize_image(batch['target_image'],forward=False)
    target_image = target_image.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()

    warped_image_aff = normalize_image(warped_image_aff,forward=False)
    warped_image_aff = warped_image_aff.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()

    warped_image_aff_tps = normalize_image(warped_image_aff_tps,forward=False)
    warped_image_aff_tps = warped_image_aff_tps.data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()

    # check if display is available (probe matplotlib in a subshell)
    exit_val = os.system('python -c "import matplotlib.pyplot as plt;plt.figure()" > /dev/null 2>&1')
    display_avail = exit_val==0

    if display_avail:
        N_subplots = 4
        # NOTE: the subplot index below reuses `i`, shadowing the outer
        # enumerate counter (harmless here since `i` is not used afterwards).
        fig, axs = plt.subplots(1,N_subplots)
        axs[0].imshow(source_image)
        axs[0].set_title('src')
        axs[1].imshow(target_image)
        axs[1].set_title('tgt')
        axs[2].imshow(warped_image_aff)
        axs[2].set_title('aff')
        axs[3].imshow(warped_image_aff_tps)
        axs[3].set_title('aff+tps')
        for i in range(N_subplots):
            axs[i].axis('off')
        print('Showing results. Close figure window to continue')
        plt.show()
    else:
        # Headless fallback: write the images to the working directory.
        print('No display found. Writing results to:')
        fn_src = 'source.png'
        print(fn_src)
        io.imsave(fn_src, source_image)
        fn_tgt = 'target.png'
        print(fn_tgt)
        io.imsave(fn_tgt, target_image)
        fn_aff = 'result_aff.png'
        print(fn_aff)
        io.imsave(fn_aff, warped_image_aff)
        fn_aff_tps = 'result_aff_tps.png'
        print(fn_aff_tps)
        io.imsave(fn_aff_tps,warped_image_aff_tps)

    res = input('Run for another example ([y]/n): ')
    if res=='n':
        break
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28686,
13,
6978,
1330,
7160,
198,
6738,
28034,
13,
26791,
13,... | 2.494697 | 2,923 |
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from planetmint.backend.tarantool.connection import TarantoolDB
| [
2,
15069,
10673,
12131,
4225,
11578,
8527,
24047,
5396,
304,
13,
53,
1539,
198,
2,
11397,
34289,
290,
314,
5760,
33,
3788,
20420,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
357,
25189,
4891,
12,
17,
13,
15,
5357,
12624,
... | 3.033333 | 90 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
-------
main.py
-------
Main methods (views + routes) implemented in the API.
.. moduleauthor:: Fabio Madeira
:module_version: 1.0
:created_on: 28-02-2015
"""
import webapp2
import logging
import os
import jinja2
import urllib
from tools import *
from google.appengine.ext.webapp import template
# Template environment: loads templates from ./templates with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader('templates'),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)

# Default PubMed search term, number of feed entries, and RSS feed GUID used
# when the request does not supply its own values.
default_search = '"PLoS One"[jour]'
default_feeds = 10
default_rssguid= "1h9kEWSfxImUd3q0TuDX7eLhEJoM4-k3pB8scCPrUmcSn3lkLl"
class RssPubmed(webapp2.RequestHandler):
    """Generate a rss feed from Pubmed - based on the main page search."""
    # NOTE(review): the handler methods (get/post) are not present in this
    # excerpt.
class RssBot(webapp2.RequestHandler):
    """
    Consumes a feed and checks if there are new entries in db.
    If so, gets a shortened url and tweets the new status.
    """
    # NOTE(review): the handler methods (get/post) are not present in this
    # excerpt.
# Enable debug mode when running on the App Engine dev server ("Development/...").
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')

# Route table. Several routes are duplicated with and without explicit
# parameter prefixes (e.g. "string=<...>" and plain "<...>") so both URL
# styles resolve to the same handler.
app = webapp2.WSGIApplication(routes=[
    webapp2.Route(r'/', handler='main.MainPage', name='home'),
    webapp2.Route(r'/search_output=<search_output:[^/]+>', handler='main.MainPage', name='search_output'),
    webapp2.Route(r'/rssguid_output=<rssguid_output:[^/]+>', handler='main.MainPage', name='rssguid_output'),
    webapp2.Route(r'/search', handler='main.Search'),
    webapp2.Route(r'/rss', handler='main.Rss'),
    webapp2.Route(r'/twitter', handler='main.Twitter'),
    webapp2.Route(r'/search/pubmed/string=<string:[^/]+>', handler='main.SearchPubmed', name='string'),
    webapp2.Route(r'/search/pubmed/<string:[^/]+>', handler='main.SearchPubmed', name='string'),
    webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>&feeds=<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
    webapp2.Route(r'/rss/pubmed/<string:[^/]+>&<feeds:[^/]+>', handler='main.RssPubmed', name='string'),
    webapp2.Route(r'/rss/pubmed/string=<string:[^/]+>', handler='main.RssPubmed', name='string'),
    webapp2.Route(r'/rss/pubmed/<string:[^/]+>', handler='main.RssPubmed', name='string'),
    webapp2.Route(r'/twitter_bot&rss_guid=<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
    webapp2.Route(r'/twitter_bot&<rss_guid:[^/]+>', handler='main.RssBot', name='rss_guid'),
    webapp2.Route(r'/twitter_bot', handler='main.RssBot', name='rss_guid'),
], debug=debug)

# Custom error pages (handlers defined in tools, star-imported above).
app.error_handlers[404] = handle_404
app.error_handlers[500] = handle_500
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
26866,
198,
12417,
13,
9078,
198,
26866,
198,
13383,
5050,
357,
33571,
1343,
11926,
8,
9... | 2.475524 | 1,001 |
#!/scratch/Anaconda2.4.0/bin/python
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import dates as matdates
from matplotlib import lines as matlines
import os
import time
from datetime import datetime
"""
This Module is structured to work with a raw data at the following JSON format:
{'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
}
The Query structure is set (currently) to this:
(test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:
["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
it can be changed to support other formats of queries, simply change the query class to support your desired structure
the query class specify the indexes of the data within the query tuple
"""
| [
2,
48443,
1416,
36722,
14,
2025,
330,
13533,
17,
13,
19,
13,
15,
14,
8800,
14,
29412,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
198,
6759,
29487,
8019,
13,
1904,
10... | 2.637931 | 406 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module declares the different meanings that the Orbit 6 components can take
and their conversions
"""
from numpy import cos, arccos, sin, arcsin, arctan2, sqrt, arctanh, sinh, cosh
import numpy as np
from ..errors import UnknownFormError
from ..utils.node import Node
class Form(Node):
    """Base class for orbital form definition

    A Form names the six components of an orbit representation and knows how
    to convert a coordinate array between neighbouring forms via the
    `_<src>_to_<dst>` classmethods below.
    """

    # Alternative spellings accepted for component names, mapping ASCII
    # aliases to the canonical (often Greek-letter) component names.
    alt = {
        "theta": "θ",
        "phi": "φ",
        "raan": "Ω",
        "Omega": "Ω",
        "omega": "ω",
        "nu": "ν",
        "theta_dot": "θ_dot",
        "phi_dot": "φ_dot",
        "aol": "u",
        "H": "E",  # The hyperbolic anomaly is available under the eccentric anomaly
    }

    def __call__(self, orbit, new_form):
        """Gives the result of the transformation without in-place modifications

        Args:
            orbit (Orbit): orbit whose coordinates should be converted
            new_form (str or Form): target form
        Returns:
            Coord
        """
        if isinstance(new_form, Form):
            new_form = new_form.name

        coord = orbit.copy()
        if new_form != orbit.form.name:
            # Walk the conversion graph one hop at a time, dispatching to the
            # matching `_<src>_to_<dst>` classmethod for each edge.
            # NOTE(review): `steps` is presumably inherited from Node -- it is
            # not defined in this excerpt.
            for a, b in self.steps(new_form):
                coord = getattr(
                    self, "_{}_to_{}".format(a.name.lower(), b.name.lower())
                )(coord, orbit.frame.center)

        return coord

    @classmethod
    def _cartesian_to_keplerian(cls, coord, center):
        """Conversion from cartesian (position and velocity) to keplerian

        The keplerian form is

            * a : semi-major axis
            * e : eccentricity
            * i : inclination
            * Ω : right-ascension of ascending node
            * ω : Argument of perigee
            * ν : True anomaly
        """
        r, v = coord[:3], coord[3:]
        h = np.cross(r, v)  # angular momentum vector
        h_norm = np.linalg.norm(h)
        r_norm = np.linalg.norm(r)
        v_norm = np.linalg.norm(v)

        K = v_norm ** 2 / 2 - center.µ / r_norm  # specific energy
        a = -center.µ / (2 * K)  # semi-major axis
        e = sqrt(1 - h_norm ** 2 / (a * center.µ))  # eccentricity
        p = a * (1 - e ** 2)  # semi parameter
        i = arccos(h[2] / h_norm)  # inclination
        Ω = arctan2(h[0], -h[1]) % (2 * np.pi)  # right ascension of the ascending node

        ω_ν = arctan2(r[2] / sin(i), r[0] * cos(Ω) + r[1] * sin(Ω))
        ν = arctan2(sqrt(p / center.µ) * np.dot(v, r), p - r_norm) % (2 * np.pi)
        ω = (ω_ν - ν) % (2 * np.pi)  # argument of the perigee

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def _keplerian_to_cartesian(cls, coord, center):
        """Conversion from Keplerian to Cartesian coordinates
        """
        a, e, i, Ω, ω, ν = coord

        p = a * (1 - e ** 2)
        r = p / (1 + e * cos(ν))
        h = sqrt(center.µ * p)
        x = r * (cos(Ω) * cos(ω + ν) - sin(Ω) * sin(ω + ν) * cos(i))
        y = r * (sin(Ω) * cos(ω + ν) + cos(Ω) * sin(ω + ν) * cos(i))
        z = r * sin(i) * sin(ω + ν)
        vx = x * h * e / (r * p) * sin(ν) - h / r * (
            cos(Ω) * sin(ω + ν) + sin(Ω) * cos(ω + ν) * cos(i)
        )
        vy = y * h * e / (r * p) * sin(ν) - h / r * (
            sin(Ω) * sin(ω + ν) - cos(Ω) * cos(ω + ν) * cos(i)
        )
        vz = z * h * e / (r * p) * sin(ν) + h / r * sin(i) * cos(ω + ν)

        return np.array([x, y, z, vx, vy, vz], dtype=float)

    @classmethod
    def _keplerian_to_keplerian_eccentric(cls, coord, center):
        """Conversion from Keplerian to Keplerian Eccentric
        """
        a, e, i, Ω, ω, ν = coord
        if e < 1:
            # Elliptic case
            cos_E = (e + cos(ν)) / (1 + e * cos(ν))
            sin_E = (sin(ν) * sqrt(1 - e ** 2)) / (1 + e * cos(ν))
            E = arctan2(sin_E, cos_E) % (2 * np.pi)
        else:
            # Hyperbolic case, E usually marked as H
            cosh_E = (e + cos(ν)) / (1 + e * cos(ν))
            sinh_E = (sin(ν) * sqrt(e ** 2 - 1)) / (1 + e * cos(ν))
            E = arctanh(sinh_E / cosh_E)

        return np.array([a, e, i, Ω, ω, E], dtype=float)

    @classmethod
    def _keplerian_eccentric_to_keplerian_mean(cls, coord, center):
        """Conversion from Keplerian Eccentric to Keplerian Mean
        """
        a, e, i, Ω, ω, E = coord
        if e < 1:
            # Kepler's equation (elliptic)
            M = E - e * sin(E)
        else:
            # Hyperbolic case, E usually marked as H
            M = e * sinh(E) - E

        return np.array([a, e, i, Ω, ω, M], dtype=float)

    @classmethod
    def _keplerian_mean_to_keplerian_eccentric(cls, coord, center):
        """Conversion from Mean Keplerian to Keplerian Eccentric
        """
        a, e, i, Ω, ω, M = coord
        E = cls.M2E(e, M)

        return np.array([a, e, i, Ω, ω, E], dtype=float)

    @classmethod
    def _keplerian_eccentric_to_keplerian(cls, coord, center):
        """Conversion from Mean Keplerian to True Keplerian
        """
        a, e, i, Ω, ω, E = coord
        if e < 1:
            cos_ν = (cos(E) - e) / (1 - e * cos(E))
            sin_ν = (sin(E) * sqrt(1 - e ** 2)) / (1 - e * cos(E))
        else:
            # Hyperbolic case, E usually marked as H
            cos_ν = (cosh(E) - e) / (1 - e * cosh(E))
            sin_ν = -(sinh(E) * sqrt(e ** 2 - 1)) / (1 - e * cosh(E))

        ν = arctan2(sin_ν, cos_ν) % (np.pi * 2)

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def M2E(cls, e, M):
        """Conversion from Mean Anomaly to Eccentric anomaly,
        or Hyperbolic anomaly.

        from Vallado

        Newton iteration until successive estimates differ by less than `tol`.
        NOTE(review): `next_E` and `next_H` (the single Newton steps) are not
        defined in this excerpt -- presumably helpers defined elsewhere in
        the module; confirm they are in scope.
        """
        tol = 1e-8
        if e < 1:
            # Ellipse
            if -np.pi < M < 0 or M > np.pi:
                E = M - e
            else:
                E = M + e

            E1 = next_E(E, e, M)
            while abs(E1 - E) >= tol:
                E = E1
                E1 = next_E(E, e, M)

            return E1
        else:
            # Hyperbolic
            if e < 1.6:
                if -np.pi < M < 0 or M > np.pi:
                    H = M - e
                else:
                    H = M + e
            else:
                if e < 3.6 and abs(M) > np.pi:
                    H = M - np.sign(M) * e
                else:
                    H = M / (e - 1)

            H1 = next_H(H, e, M)
            while abs(H1 - H) >= tol:
                H = H1
                H1 = next_H(H, e, M)

            return H1

    @classmethod
    # NOTE(review): duplicated `@classmethod` below -- it looks like a method
    # originally defined between these two decorators is missing.
    @classmethod
    def _keplerian_circular_to_keplerian(cls, coord, center):
        """Conversion from Keplerian near-circular elements to Mean Keplerian
        """
        a, ex, ey, i, Ω, u = coord
        e = sqrt(ex ** 2 + ey ** 2)
        ω = arctan2(ey / e, ex / e)
        ν = u - ω

        return np.array([a, e, i, Ω, ω, ν], dtype=float)

    @classmethod
    def _keplerian_to_keplerian_circular(cls, coord, center):
        """Conversion from Mean Keplerian to Keplerian near-circular elements
        """
        a, e, i, Ω, ω, ν = coord
        ex = e * cos(ω)
        ey = e * sin(ω)
        u = (ω + ν) % (np.pi * 2)

        return np.array([a, ex, ey, i, Ω, u], dtype=float)

    @classmethod
    def _tle_to_keplerian_mean(cls, coord, center):
        """Conversion from the TLE standard format to the Mean Keplerian

        see :py:class:`Tle` for more information.
        """
        i, Ω, e, ω, M, n = coord
        # Semi-major axis from the mean motion via Kepler's third law.
        a = (center.µ / n ** 2) ** (1 / 3)

        return np.array([a, e, i, Ω, ω, M], dtype=float)

    @classmethod
    def _keplerian_mean_to_tle(cls, coord, center):
        """Mean Keplerian to TLE format conversion
        """
        a, e, i, Ω, ω, M = coord
        n = sqrt(center.µ / a ** 3)

        return np.array([i, Ω, e, ω, M, n], dtype=float)

    @classmethod
    def _cartesian_to_spherical(cls, coord, center):
        """Cartesian to Spherical conversion

        .. warning:: The spherical form is equatorial, not zenithal
        """
        x, y, z, vx, vy, vz = coord
        r = np.linalg.norm(coord[:3])
        phi = arcsin(z / r)
        theta = arctan2(y, x)

        # Velocity converted to radial/angular rates.
        r_dot = (x * vx + y * vy + z * vz) / r
        phi_dot = (vz * (x ** 2 + y ** 2) - z * (x * vx + y * vy)) / (
            r ** 2 * sqrt(x ** 2 + y ** 2)
        )
        theta_dot = (x * vy - y * vx) / (x ** 2 + y ** 2)

        return np.array([r, theta, phi, r_dot, theta_dot, phi_dot], dtype=float)

    @classmethod
    def _spherical_to_cartesian(cls, coord, center):
        """Spherical to cartesian conversion
        """
        r, theta, phi, r_dot, theta_dot, phi_dot = coord
        x = r * cos(phi) * cos(theta)
        y = r * cos(phi) * sin(theta)
        z = r * sin(phi)

        vx = r_dot * x / r - y * theta_dot - z * phi_dot * cos(theta)
        vy = r_dot * y / r + x * theta_dot - z * phi_dot * sin(theta)
        vz = r_dot * z / r + r * phi_dot * cos(phi)

        return np.array([x, y, z, vx, vy, vz], dtype=float)
TLE = Form("tle", ["i", "Ω", "e", "ω", "M", "n"])
"""TLE special form
* i : inclination
* Ω : right-ascension of ascending node
* e : eccentricity
* ω : argument of perigee
* M : mean anomaly
* n : mean motion
see :py:class:`~beyond.orbits.tle.Tle` for details
"""
KEPL_C = Form("keplerian_circular", ["a", "ex", "ey", "i", "Ω", "u"])
"""Special case for near-circular orbits
* a : semi-major axis
* ex : e * cos(ω)
* ey : e * sin(ω)
* i : inclination
* Ω : right-ascension of ascending node
* u : argument of latitude (ω + ν)
"""
KEPL_E = Form("keplerian_eccentric", ["a", "e", "i", "Ω", "ω", "E"])
"""Same as Keplerian, but replaces True anomaly with
`Eccentric anomaly <https://en.wikipedia.org/wiki/Eccentric_anomaly>`__
"""
KEPL_M = Form("keplerian_mean", ["a", "e", "i", "Ω", "ω", "M"])
"""Same as Keplerian, but replaces True anomaly with
`Mean anomaly <https://en.wikipedia.org/wiki/Mean_anomaly>`__
"""
KEPL = Form("keplerian", ["a", "e", "i", "Ω", "ω", "ν"])
"""The keplerian form is
* a : semi-major axis
* e : eccentricity
* i : inclination
* Ω : right-ascension of ascending node
* ω : Argument of perigee
* ν : True anomaly
see `wikipedia <https://en.wikipedia.org/wiki/Orbital_elements>`__ for details
"""
SPHE = Form("spherical", ["r", "θ", "φ", "r_dot", "θ_dot", "φ_dot"])
"""Spherical form
* r : radial distance / altitude
* θ : azimuth / longitude
* φ : elevation / latitude
* r_dot : first derivative of radial distance / altitude
* θ_dot : first derivative of azimuth / longitude
* φ_dot : first derivative of elevation / latitude
"""
CART = Form("cartesian", ["x", "y", "z", "vx", "vy", "vz"])
"""Cartesian form"""
SPHE + CART + KEPL + KEPL_E + KEPL_M + TLE
KEPL + KEPL_C
_cache = {
"tle": TLE,
"keplerian_circular": KEPL_C,
"keplerian_mean": KEPL_M,
"keplerian_eccentric": KEPL_E,
"keplerian": KEPL,
"spherical": SPHE,
"cartesian": CART,
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
1212,
8265,
24183,
262,
1180,
26368,
326,
262,
38161,
718,
6805,
460,
1011,
198,
392,
511,
32626,
198,
3781... | 1.91614 | 5,700 |
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import threading
from kombu.connection import BrokerConnection
from kombu.messaging import Exchange
from kombu.entity import TRANSIENT_DELIVERY_MODE
import os
import sys
import traceback
from chroma_core.services.log import log_register, trace
import settings
class ChromaService(object):
"""Define a subclass of this for each service. Must implement `start` and `stop`
methods: typically starting a server/thread in `start` and tearing it down in `stop`.
Use the `log` instance attribute for all logging, this is set up with a logger that
tags messages with the service name.
"""
@property
class ServiceThread(threading.Thread):
"""Sometimes a single service may have multiple threads of execution. Use this
class rather than the bare threading.Thread to help Chroma keep track of your threads.
This wraps a Thread-like object which has a `run` and `stop` method, passed in at
construction time`
"""
| [
2,
15069,
357,
66,
8,
12131,
20084,
45,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
17168,
12,
7635,
198,
2,
5964,
326,
460,
307,
1043,
287,
262,
38559,
24290,
2393,
13,
628,
198,
11748,
47... | 3.572347 | 311 |
# Generated by Django 3.1.12 on 2021-06-23 17:25
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1065,
319,
33448,
12,
3312,
12,
1954,
1596,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
python -m tf2onnx.convert : tool to convert a frozen tensorflow to onnx
"""
from __future__ import division
from __future__ import print_function
import argparse
import sys
import onnx
from onnx import helper
import tensorflow as tf
import tf2onnx.utils
from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
from tf2onnx.tfonnx import process_tf_graph, tf_optimize, DEFAULT_TARGET, POSSIBLE_TARGETS
_TENSORFLOW_DOMAIN = "ai.onnx.converters.tensorflow"
# pylint: disable=unused-argument
def get_args():
    """Parse the command line.

    Returns:
        argparse.Namespace with the raw options plus post-processed fields:
        `inputs`/`outputs`/`inputs_as_nchw`/`target` split into lists, and
        `shape_override` extracted from any shapes embedded in --inputs.
        Exits with status 1 on an unknown --target value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True, help="input model file")
    parser.add_argument("--output", help="output model file")
    parser.add_argument("--inputs", required=True, help="model input_names")
    parser.add_argument("--outputs", required=True, help="model output_names")
    parser.add_argument("--opset", type=int, default=None, help="highest opset to use")
    parser.add_argument("--custom-ops", help="list of custom ops")
    parser.add_argument("--target", default=",".join(DEFAULT_TARGET), help="target platform")
    parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
    parser.add_argument("--verbose", help="verbose output", action="store_true")
    parser.add_argument("--fold_const", help="enable tf constant_folding transformation before conversion",
                        action="store_true")
    # experimental
    parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
    # deprecated, going to be removed some time in the future
    parser.add_argument("--unknown-dim", type=int, default=-1, help="default for unknown dimensions")
    args = parser.parse_args()

    args.shape_override = None
    if args.inputs:
        # --inputs may embed shapes (e.g. "name:0[1,28,28]"); split them out
        # into the shape_override mapping.
        args.inputs, args.shape_override = tf2onnx.utils.split_nodename_and_shape(args.inputs)
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.inputs_as_nchw:
        args.inputs_as_nchw = args.inputs_as_nchw.split(",")
    if args.target:
        args.target = args.target.split(",")
        for target in args.target:
            if target not in POSSIBLE_TARGETS:
                print("unknown target ", target)
                sys.exit(1)
    return args
main()  # script entry; NOTE(review): `main` is not defined in this excerpt -- presumably defined elsewhere in the file
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
37811,
198,
29412,
532,
76,
48700,
17,
261,
77,
87,
13,
1102,
1851,
1058,
2891,
284,
10385,
257,
12912,
11192,
273,
... | 2.704241 | 896 |
from .compare import (
CompareABC,
CompareDicts,
CompareEndswith,
CompareIgnore,
CompareIgnoreOrder
)
from .message import Message
from .request import Request
from .response import (
BaseBodyParser,
JSONBodyParser,
Response,
XMLBodyParser,
get_schema,
validate_response
)
| [
6738,
764,
5589,
533,
1330,
357,
198,
220,
220,
220,
27814,
24694,
11,
198,
220,
220,
220,
27814,
35,
14137,
11,
198,
220,
220,
220,
27814,
12915,
2032,
342,
11,
198,
220,
220,
220,
27814,
32916,
382,
11,
198,
220,
220,
220,
27814,
... | 2.756522 | 115 |
#!/usr/bin/env python3
"""
Convert a base64 string back to a normal string (decode).
"""
import readline # to overcome the 4k input limit
from lib.jhash import base64_to_str as back
##############################################################################
# Script entry point. NOTE(review): `main` is not defined in this excerpt --
# presumably defined elsewhere in the file.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
3103,
1851,
257,
2779,
2414,
4731,
736,
284,
257,
3487,
4731,
357,
12501,
1098,
737,
198,
37811,
198,
198,
11748,
1100,
1370,
220,
220,
220,
1303,
284,
10980,
262,... | 3.5 | 88 |
import numpy as np
from ai_economist.foundation.base.base_component import BaseComponent, component_registry
@component_registry.add
class GetEducated(BaseComponent):
"""
Environments expand the agents' state/action spaces by querying:
get_n_actions
get_additional_state_fields
Environments expand their dynamics by querying:
component_step
generate_observations
generate_masks
Environments expand logging behavior by querying:
get_metrics
get_dense_log
Because they are built as Python objects, component instances can also be
stateful. Stateful attributes are reset via calls to:
additional_reset_steps
"""
name = "GetEducated"
required_entities = ["Coin", "Labor", "build_skill"]
agent_subclasses = ["BasicMobileAgent"]
def agent_can_get_educated(self, agent):
"""Return True if agent can actually get educated."""
# See if the agent has the resources necessary to complete the action
if agent.state["inventory"]["Coin"] < self.tuition:
return False
# # Do nothing if skill is already max
# if True: # TODO see how to get skill
# return False
# If we made it here, the agent can go to college.
return True
def get_additional_state_fields(self, agent_cls_name):
"""
Return a dictionary of {state_field: reset_val} managed by this Component
class for agents of type agent_cls_name. This also partially controls reset
behavior.
Args:
agent_cls_name (str): name of the Agent class for which additional states
are being queried. For example, "BasicMobileAgent".
Returns:
extra_state_dict (dict): A dictionary of {"state_field": reset_val} for
each extra state field that this component adds/manages to agents of
type agent_cls_name. This extra_state_dict is incorporated into
agent.state for each agent of this type. Note that the keyed fields
will be reset to reset_val when the environment is reset.
"""
if agent_cls_name not in self.agent_subclasses:
return {}
if agent_cls_name == "BasicMobileAgent":
return {"tuition_payment": float(self.tuition)} # check
raise NotImplementedError
def get_n_actions(self, agent_cls_name):
"""
Args:
agent_cls_name (str): name of the Agent class for which number of actions
is being queried. For example, "BasicMobileAgent".
Returns:
action_space (None, int, or list): If the component does not add any
actions for agents of type agent_cls_name, return None. If it adds a
single action space, return an integer specifying the number of
actions in the action space. If it adds multiple action spaces,
return a list of tuples ("action_set_name", num_actions_in_set).
See below for further detail.
"""
if agent_cls_name == "BasicMobileAgent":
return 1
return None
def component_step(self):
"""
See base_component.py for detailed description.
Convert coin to skill for agents that choose to go to school and can.
"""
world = self.world
build = []
# Apply any go_to_school actions taken by the mobile agents
for agent in world.get_random_order_agents():
action = agent.get_component_action(self.name)
# This component doesn't apply to this agent!
if action is None:
continue
# NO-OP!
if action == 0:
pass
# Learn! (If you can.)
elif action == 1:
if self.agent_can_get_educated(agent):
# Remove the resources
agent.state["inventory"]["Coin"] -= self.tuition
# Receive skills for going to school
agent.state["build_skill"] += self.skill_gain
# self.payment_max_skill_multiplier += self.skill_gain
# Incur the labor cost for going to school
agent.state["endogenous"]["Labor"] += self.education_labor
# self.number_times_educated += 1
else:
raise ValueError
# self.builds.append(build) | [
11748,
299,
32152,
355,
45941,
198,
6738,
257,
72,
62,
13926,
396,
13,
42526,
13,
8692,
13,
8692,
62,
42895,
1330,
7308,
21950,
11,
7515,
62,
2301,
4592,
198,
198,
31,
42895,
62,
2301,
4592,
13,
2860,
198,
4871,
3497,
33380,
515,
7,... | 2.3804 | 1,898 |
# This file is part of QuTiP.
#
# QuTiP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QuTiP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuTiP. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson
#
###########################################################################
import platform
import json
import numpy as np
from scipy import *
from qutip import *
from tests import *
#
# command-line parsing
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--run-profiler",
help="run profiler on qutip benchmarks",
action='store_true')
parser.add_argument("-o", "--output-file",
help="file name for benchmark output",
default="qutip-benchmarks.json", type=str)
parser.add_argument("-N", "--runs",
help="number of times to perform each benchmark",
default=1, type=int)
args = parser.parse_args()
qutip_info = [{'label': 'QuTiP', 'value': qutip.__version__},
{'label': 'Python', 'value': platform.python_version()},
{'label': 'NumPy', 'value': numpy.__version__},
{'label': 'SciPy', 'value': scipy.__version__}]
#---------------------
# Run Python Benchmarks
#---------------------
if args.run_profiler:
import cProfile
cProfile.run('run_tests(1)', 'qutip_benchmarks_profiler')
import pstats
p = pstats.Stats('qutip_benchmarks_profiler')
p.sort_stats('cumulative').print_stats(50)
else:
times, names = run_tests(args.runs)
data = [{'name': names[n], 'time': times[n]} for n in range(len(names))]
qutip_info.append({'label': 'Acc. time', 'value': "%.2f s" % sum(times)})
qutip_bm = {"info": qutip_info, "data": data}
with open(args.output_file, "w") as outfile:
json.dump(qutip_bm, outfile, sort_keys=True, indent=4)
| [
2,
770,
2393,
318,
636,
286,
2264,
40533,
47,
13,
198,
2,
198,
2,
220,
220,
220,
2264,
40533,
47,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
220,
220,
220,
340,
739,
262,
2846,
286,
262,
22961,... | 2.623094 | 918 |
from math import radians, sin, cos, tan
angulo = float(input('Digita o angulo que voce deseja: '))
seno = sin(radians(angulo))
print('O angulo de {} tem o seno de {:.2f}'.format(angulo, seno))
coseno = cos(radians(angulo))
print('O angulo de {} tem o cosseno de {:.2f}'.format(angulo, coseno))
tangente = tan(radians(angulo))
print('O angulo de {} tem a tangente de {:.2f}'. format(angulo, tangente))
""" DESCOBRIR O SENO COSSENO E A TANGENTE """ | [
6738,
10688,
1330,
2511,
1547,
11,
7813,
11,
8615,
11,
25706,
198,
648,
43348,
796,
12178,
7,
15414,
10786,
19511,
5350,
267,
3550,
43348,
8358,
7608,
344,
748,
68,
6592,
25,
705,
4008,
198,
6248,
78,
796,
7813,
7,
6335,
1547,
7,
64... | 2.52514 | 179 |
# -*- coding: utf-8 -*-
#
# Copyright 2016 Continuum Analytics, Inc.
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
#
"""
This folder contains jinja2 templates used by Anaconda Navigator.
This folder is defined as a python module so that some convenience global
variables can be defined.
"""
# Standard library imports
from __future__ import absolute_import, division
import os.path as osp
DATA_PATH = osp.dirname(osp.realpath(__file__))
EMPTY_TEMPLATE_PATH = osp.join(DATA_PATH, 'empty.html')
VIDEO_TEMPLATE_PATH = osp.join(DATA_PATH, 'video.html')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1584,
6389,
13814,
30437,
11,
3457,
13,
198,
2,
1737,
307,
18984,
290,
9387,
12748,
691,
355,
636,
286,
281,
1052,
330,
13533,
393,
198,
2,
1855,
2... | 3.157068 | 191 |
from flask import Flask, render_template, abort, redirect, url_for
from flask_script import Manager
from jinja2 import TemplateNotFound
app = Flask(__name__)
app.config['TESTING'] = True
manager = Manager(app)
@app.route('/', endpoint='frontend-index')
@app.route('/<page>', endpoint='frontend-pages')
def show(page='index'):
"""
Try to Deliver a page.
:param page: name of the page
:return: template.
"""
try:
return render_template('pages/index.html')
except (TemplateNotFound,):
abort(404)
if __name__ == '__main__':
manager.run()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
15614,
11,
18941,
11,
19016,
62,
1640,
198,
6738,
42903,
62,
12048,
1330,
9142,
198,
6738,
474,
259,
6592,
17,
1330,
37350,
3673,
21077,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,... | 2.734884 | 215 |
import requests
from decouple import config
from django.shortcuts import render, redirect
from django.core.exceptions import ObjectDoesNotExist
from users.models import UserPreferences
from diary.models import DiaryEntry
from diary.forms import DiaryEntryForm
| [
11748,
7007,
198,
6738,
875,
43846,
1330,
4566,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
6738,
2985,
13,
27530,
1330,
... | 4.278689 | 61 |
"""
关于logo创意:
1. 基础元素为M和K的负空间设计
2. 白色部分创意来自弦图
3. 整体图案本身可生成一个不错的分形
4. 配色致敬3B1B(具体的蓝色和棕色还得再微调一下)
logo主要创意由@GrakePCH提供,@GZTime、@cigar666、@鹤翔万里都提供了不少宝贵意见。目前设计工作还在继续完善,希望大家多提意见和建议
"""
from manimlib.imports import *
# final
| [
198,
37811,
198,
17739,
111,
12859,
236,
6404,
78,
26344,
249,
35707,
237,
171,
120,
248,
628,
197,
16,
13,
220,
10263,
253,
118,
163,
94,
222,
17739,
225,
163,
112,
254,
10310,
118,
44,
161,
240,
234,
42,
21410,
164,
112,
253,
16... | 0.733129 | 326 |
import uuid
from .client import QuickBooks
from .exceptions import QuickbooksException
from .objects.batchrequest import IntuitBatchRequest, BatchItemRequest, BatchOperation, BatchResponse, BatchItemResponse
| [
11748,
334,
27112,
198,
198,
6738,
764,
16366,
1330,
12029,
30650,
198,
6738,
764,
1069,
11755,
1330,
12029,
12106,
16922,
198,
6738,
764,
48205,
13,
43501,
25927,
1330,
2558,
5013,
33,
963,
18453,
11,
347,
963,
7449,
18453,
11,
347,
96... | 4 | 53 |
from arizona.asr.learner import Wav2AsrLearner
learner = Wav2AsrLearner(
pretrain_model='path/to/pretrain.pt',
finetune_model='path/to/finetune.pt',
dictionary='path/to/dict.ltr.txt',
lm_type='kenlm',
lm_lexicon='path/to/lm/lexicon.txt',
lm_model='path/to/lm/lm.bin',
lm_weight=1.5,
word_score=-1,
beam_size=50
)
hypos = learner.transcribe([
'./data/test_1.wav',
'./data/test_1.wav'
])
print("===")
print(hypos) | [
6738,
257,
380,
7551,
13,
292,
81,
13,
3238,
1008,
1330,
370,
615,
17,
1722,
81,
14961,
1008,
198,
198,
3238,
1008,
796,
370,
615,
17,
1722,
81,
14961,
1008,
7,
198,
220,
220,
220,
2181,
3201,
62,
19849,
11639,
6978,
14,
1462,
14,... | 1.961864 | 236 |
"""Test Satellite model."""
import os
import tempfile
import numpy as np
import pytest
from nowcasting_dataset.data_sources.fake import satellite_fake
from nowcasting_dataset.data_sources.satellite.satellite_model import Satellite
| [
37811,
14402,
33530,
2746,
526,
15931,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
783,
19913,
62,
19608,
292,
316,
13,
7890,
62,
82,
2203,
13,
30706,
133... | 3.42029 | 69 |
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Registry of available host test plugins."""
class HostTestRegistry:
"""Register and store host test plugins for further usage."""
# Here we actually store all the plugins
PLUGINS = {} # 'Plugin Name' : Plugin Object
def print_error(self, text):
"""Print an error message to the console.
Args:
text: Error message reason.
"""
print("Plugin load failed. Reason: %s" % text)
def register_plugin(self, plugin):
"""Store a plugin in the registry.
This method also calls the plugin's setup() method to configure the plugin.
Args:
plugin: Plugin instance.
Returns:
True if plugin setup was successful and plugin can be registered, else
False.
"""
# TODO:
# - check for unique caps for specified type
if plugin.name not in self.PLUGINS:
if plugin.setup(): # Setup plugin can be completed without errors
self.PLUGINS[plugin.name] = plugin
return True
else:
self.print_error("%s setup failed" % plugin.name)
else:
self.print_error("%s already loaded" % plugin.name)
return False
def call_plugin(self, type, capability, *args, **kwargs):
"""Execute the first plugin found with a particular 'type' and 'capability'.
Args:
type: Plugin type.
capability: Plugin capability name.
args: Additional plugin parameters.
kwargs: Additional plugin parameters.
Returns:
True if a plugin was found and execution succeeded, otherwise False.
"""
for plugin_name in self.PLUGINS:
plugin = self.PLUGINS[plugin_name]
if plugin.type == type and capability in plugin.capabilities:
return plugin.execute(capability, *args, **kwargs)
return False
def get_plugin_caps(self, type):
"""List all capabilities for plugins with the specified type.
Args:
type: Plugin type.
Returns:
List of capabilities found. If there are no capabilities an empty
list is returned.
"""
result = []
for plugin_name in self.PLUGINS:
plugin = self.PLUGINS[plugin_name]
if plugin.type == type:
result.extend(plugin.capabilities)
return sorted(result)
def load_plugin(self, name):
"""Import a plugin module.
Args:
name: Name of the module to import.
Returns:
Imported module.
Raises:
ImportError: The module with the given name was not found.
"""
mod = __import__("module_%s" % name)
return mod
def get_string(self):
"""User friendly printing method to show hooked plugins.
Returns:
PrettyTable formatted string describing the contents of the plugin
registry.
"""
from prettytable import PrettyTable, HEADER
column_names = [
"name",
"type",
"capabilities",
"stable",
"os_support",
"required_parameters",
]
pt = PrettyTable(column_names, junction_char="|", hrules=HEADER)
for column in column_names:
pt.align[column] = "l"
for plugin_name in sorted(self.PLUGINS.keys()):
name = self.PLUGINS[plugin_name].name
type = self.PLUGINS[plugin_name].type
stable = self.PLUGINS[plugin_name].stable
capabilities = ", ".join(self.PLUGINS[plugin_name].capabilities)
is_os_supported = self.PLUGINS[plugin_name].is_os_supported()
required_parameters = ", ".join(
self.PLUGINS[plugin_name].required_parameters
)
row = [
name,
type,
capabilities,
stable,
is_os_supported,
required_parameters,
]
pt.add_row(row)
return pt.get_string()
def get_dict(self):
"""Return a dictionary of registered plugins."""
result = {}
for plugin_name in sorted(self.PLUGINS.keys()):
name = self.PLUGINS[plugin_name].name
type = self.PLUGINS[plugin_name].type
stable = self.PLUGINS[plugin_name].stable
capabilities = self.PLUGINS[plugin_name].capabilities
is_os_supported = self.PLUGINS[plugin_name].is_os_supported()
required_parameters = self.PLUGINS[plugin_name].required_parameters
result[plugin_name] = {
"name": name,
"type": type,
"stable": stable,
"capabilities": capabilities,
"os_support": is_os_supported,
"required_parameters": required_parameters,
}
return result
def __str__(self):
"""Return str representation of object."""
return self.get_string()
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
7057,
15302,
290,
25767,
669,
13,
1439,
2489,
10395,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
37811,
8081,
4592,
286,
1695,
2583,
1332,
20652,
... | 2.207683 | 2,369 |
import logging
import os
import sys
from genofunk import apply
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
2429,
1659,
2954,
1330,
4174,
628
] | 3.823529 | 17 |
#!/usr/bin/env python2
# encoding: utf-8
####################################################
#
# Description: Alert for invasion using pushbullet.
# Author: Donie Leigh<donie.leigh at gmail.com>
# License: MIT
#
####################################################
import sys, getopt, time, json
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from systemd import journal
CONFIG_FILE = '/etc/smarthome.conf'
cfg = None
host = None
username = None
password = None
topic_msg = None
topic_image = None
timePoint = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
alertMsg = timePoint + ',发现入侵者!!!'
apiKey = None
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], "f:")
imageFile = ''
for op, value in opts:
if op == "-f":
imageFile = value
if len(imageFile) == 0:
sys.stderr.write("Image file missing.\n")
sys.exit()
try:
cfg = ConfigParser()
cfg.read(CONFIG_FILE)
protocol = cfg.get('global', 'protocol')
if protocol == 'pushbullet':
from yapbl import PushBullet
apiKey = cfg.get('global', 'apiKey')
if apiKey is None or len(apiKey) == 0:
raise NoOptionError('apiKey', 'global')
send_alert_by_pushbullet(imageFile)
else:
import paho.mqtt.client as mqtt
username = cfg.get('mosquitto', 'user')
if username is None or len(username) == 0:
raise NoOptionError('user', 'mosquitto')
password = cfg.get('mosquitto', 'password')
if password is None or len(password) == 0:
raise NoOptionError('password', 'mosquitto')
host = cfg.get('mosquitto', 'host')
if host is None or len(host) == 0:
raise NoOptionError('host', 'mosquitto')
topic_msg = cfg.get('mosquitto', 'topic_msg')
if topic_msg is None or len(topic_msg) == 0:
raise NoOptionError('topic_msg', 'mosquitto')
topic_image = cfg.get('mosquitto', 'topic_image')
if topic_image is None or len(topic_image) == 0:
raise NoOptionError('topic_image', 'mosquitto')
send_alert_by_mosquitto(imageFile)
except (NoSectionError, NoOptionError), e:
err = 'Config file is missing or invalid: ' + str(e)
journal.send(err, PRIORITY=journal.LOG_ERR)
sys.stderr.write(err + "\n")
sys.exit(1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
29113,
14468,
4242,
198,
2,
198,
2,
12489,
25,
220,
23276,
329,
11796,
1262,
4574,
15065,
1616,
13,
198,
2,
6434,
25,
220,
220,
220,
... | 2.219663 | 1,129 |
import objects
import logging
import sqlite3
import threading
import json
import serverprotocol
import sqlite3
import base64
import socket
tileset = []
SCODE_NEEDAUTH = 0
SCODE_BANNED = 1
SCODE_BADAUTH = 2
SCODE_BADREG = 3
| [
11748,
5563,
198,
11748,
18931,
198,
11748,
44161,
578,
18,
198,
11748,
4704,
278,
198,
11748,
33918,
198,
11748,
4382,
11235,
4668,
198,
11748,
44161,
578,
18,
198,
11748,
2779,
2414,
198,
11748,
17802,
628,
198,
83,
2915,
316,
796,
17... | 2.825 | 80 |
# Generated by Django 2.1 on 2018-08-21 18:45
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
319,
2864,
12,
2919,
12,
2481,
1248,
25,
2231,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
from flask import current_app
from flask_restplus import Resource
from ._api import API
@API.route('/version')
| [
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
42903,
62,
2118,
9541,
1330,
20857,
198,
198,
6738,
47540,
15042,
1330,
7824,
198,
198,
31,
17614,
13,
38629,
10786,
14,
9641,
11537,
628
] | 3.5625 | 32 |
#!bin/usr/env python
#Jianheng Liu @ Zhanglab, SYSU
#Feb, 2018
#Email: liujh26@mail2.sysu.edu.cn
#Usage: This program is used to merge multiple BAM filtes to one, then sort and index it
#Input: [.bam]
import sys,os
import argparse
import pysam
import time
from time import gmtime, strftime
if __name__ == "__main__":
description = """
"""
parser = argparse.ArgumentParser(prog="concat_bam",version="1.0",fromfile_prefix_chars='@',description=description,formatter_class=argparse.RawTextHelpFormatter)
# Require
group_required = parser.add_argument_group("Required")
group_required.add_argument("-i","--input",dest="input", nargs='*',required=True,help="Input bam files")
group_required.add_argument("-o","--output",dest="output",required=True,help="Output bam")
group_optional = parser.add_argument_group("Optional")
group_optional.add_argument("--sort",dest="sort",default=False,action="store_true",help="Sort bam (and delete unsort)")
group_optional.add_argument("--no-del-bam",dest="no_del_bam",default=False,action="store_true",help="Do not del bam file after sorting")
group_optional.add_argument("--index",dest="index",default=False,action="store_true",help="Index sorted bam")
group_optional.add_argument("-t","--threads",dest="threads",default=1,type=int,help="Threads for samtools sort, default=1")
group_optional.add_argument("-m","--memory",dest="memory",default="1G",help="Memory for samtools sort, default=4G")
options = parser.parse_args()
hid = 0
hid_dict = {}
lift_over = {}
new_header = {}
new_header['HD'] = {'SO': 'unsorted', 'VN': '1.0'}
new_header['SQ'] = []
for fn in options.input:
hid,new_header,hid_dict,lift_over = read_headers(fn,hid,new_header,hid_dict,lift_over)
with pysam.AlignmentFile(options.output, 'wb', header = new_header) as OUTPUT:
for fn in options.input:
merge_bam(fn,OUTPUT)
if options.sort == True:
sys.stderr.write("[%s]Sorting bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if options.threads > 1:
pysam.sort("-@",str(options.threads),"-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"),options.output)
else:
pysam.sort("-m",options.memory,"-o", options.output.replace(".bam",".sorted.bam"), options.output)
if options.index == True:
sys.stderr.write("[%s]Indexing bam...\n" % strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
pysam.index(options.output.replace(".bam",".sorted.bam"))
if options.no_del_bam == False:
os.remove(options.output) | [
2,
0,
8800,
14,
14629,
14,
24330,
21015,
198,
198,
2,
41,
666,
31753,
18258,
2488,
19439,
23912,
11,
19704,
12564,
198,
2,
15146,
11,
2864,
198,
2,
15333,
25,
7649,
23577,
71,
2075,
31,
4529,
17,
13,
17597,
84,
13,
15532,
13,
3152... | 2.62685 | 946 |