content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_best_model(X, y):
    """Pick the better of a tuned RandomForestClassifier and AdaBoostClassifier.

    Each candidate's hyper-parameters are chosen via SelectParam, the tuned
    model is scored with cross-validation, and the highest mean score wins.
    """
    candidates = [
        (RandomForestClassifier, SelectParam({
            'estimator': RandomForestClassifier(warm_start=True, random_state=7),
            'param_grid': {
                'n_estimators': [10, 15, 20],
                'criterion': ['gini', 'entropy'],
                'max_features': [FEATURE_NUM + offset for offset in [-4, -2, 0]],
                'max_depth': [10, 15],
                'bootstrap': [True],
                'warm_start': [True],
            },
            'n_jobs': 1
        })),
        (AdaBoostClassifier, SelectParam({
            'estimator': AdaBoostClassifier(random_state=7),
            'param_grid': {
                'algorithm': ['SAMME', 'SAMME.R'],
                'n_estimators': [10, 15, 20],
                'learning_rate': [1e-3, 1e-2, 1e-1]
            },
            'n_jobs': 1
        })),
    ]
    best_model, best_score = None, 0
    for model_cls, selector in candidates:
        candidate = model_cls(**selector.get_param(X, y))
        mean_score = cross_val_score(candidate, X, y).mean()
        if mean_score > best_score:
            best_model, best_score = candidate, mean_score
    return best_model
def compareDates(dateA: list, dateB: list) -> int:
    """
    Compare two dates stored as 3-element lists.

    Elements are compared from index 2 down to index 0, so the most
    significant field (presumably the year) is expected last — confirm
    against the callers' date layout.

    returns: 1 if dateA > dateB, -1 if dateA < dateB, 0 if equal.
    raises: invalidDateException if the dates fail checkDateValidity.
    """
    if not checkDateValidity(dateA, dateB):
        raise invalidDateException('Invalid Dates')
    for idx in (2, 1, 0):
        if dateA[idx] != dateB[idx]:
            return 1 if dateA[idx] > dateB[idx] else -1
    return 0
def test_1():
    """Run regression test 1: parse a packet with three nested sub packets."""
    raw = struct.pack('<BBIIIIIIBIIIIBII',
                      3, 3, 0, 100, 1, 101, 2, 102, 2, 3, 103, 4, 104, 1, 5, 105)
    packet = Packet1.from_raw(raw)
    # Parent packet has 3 sub packets
    assert packet['size1'] == 3
    assert len(packet['data1']) == 3
    # Expected (entry1, entry2) pairs for each sub packet, in order:
    # sub 0 has three entries, sub 1 has two, sub 2 has one.
    expected = [
        [(0, 100), (1, 101), (2, 102)],
        [(3, 103), (4, 104)],
        [(5, 105)],
    ]
    for sub, pairs in zip(packet['data1'], expected):
        assert sub['size2'] == len(pairs)
        assert len(sub['data2']) == len(pairs)
        for entry, (e1, e2) in zip(sub['data2'], pairs):
            assert entry['entry1'] == e1
            assert entry['entry2'] == e2
def shell():
    """Jump into a Python Shell"""
    # Expose the salt execution context so the usual dunder objects can be
    # inspected directly from the interactive prompt.
    env = {
        '__grains__': __grains__,
        '__opts__': __opts__,
        '__pillar__': __pillar__,
        '__salt__': __salt__,
        '__out__': salt.loader.outputters(__opts__),
        'pprint': pprint.pprint,
    }
    # Tab-completion is best-effort: skip it when readline is unavailable
    # (e.g. on platforms without the readline module).
    try:
        import readline
    except ImportError:
        pass
    else:
        import rlcompleter
        # Complete against the names injected into the shell's namespace.
        readline.set_completer(rlcompleter.Completer(env).complete)
        if(sys.platform == 'darwin'):
            # macOS ships libedit, which uses a different binding syntax.
            readline.parse_and_bind("bind ^I rl_complete")
        else:
            readline.parse_and_bind("tab:complete")
    code.interact(local=env)
def renderPage(res, topLevelContext=context.WebContext,
               reqFactory=FakeRequest):
    """
    Render the given resource. Return a Deferred which fires with the
    request's accumulated output once rendering has completed.
    """
    request = reqFactory()
    page_ctx = topLevelContext(tag=res)
    page_ctx.remember(request, inevow.IRequest)
    deferred = appserver.NevowRequest(None, True).gotPageContext(page_ctx)
    deferred.addCallback(lambda _: request.accumulator)
    return deferred
def test_generate_vols():
    """Generate all isovolume files."""
    # Start from a clean test database directory.
    db = test_dir + "/test-gen-vols"
    if isdir(db):
        shutil.rmtree(db)
    # Build the database and generate the isovolumes.
    iv = ivdb.IvDb(levels=levels, data=data, db=db)
    iv.generate_vols(test_mesh)
    # Compare the generated files against the expected reference set.
    gen_vols_dir = db + "/vols"
    matched, mismatched, _ = filecmp.cmpfiles(exp_vols_dir, gen_vols_dir,
                                              common_files)
    files_match = matched == common_files
    nothing_differs = mismatched == []
    # Remove generated files before asserting so a failure leaves no litter.
    shutil.rmtree(iv.db)
    assert(all([files_match, nothing_differs]))
def get_header_value(headers, name, default=None):
    """ Return header value, doing case-insensitive match """
    if not headers:
        return default
    # Accept either a mapping or an iterable of (key, value) pairs.
    pairs = headers.items() if isinstance(headers, dict) else headers
    wanted = to_bytes(name.lower())
    for key, value in pairs:
        if to_bytes(key.lower()) == wanted:
            return value
    return default
def fill_tract_income_marginals(marginals, tract_geoid, cols, total_col='b19025_001'):
    """Fill in missing income marginals for a tract, in place.

    Missing entries of ``cols`` in the ``tract_geoid`` column are imputed by
    splitting the tract's unaccounted-for aggregate income proportionally to
    the puma income of the corresponding missing rows.

    Args:
        marginals: DataFrame indexed by variable name, with one column per
            tract geoid plus a 'puma' column.
        tract_geoid: Column label of the tract to fill.
        cols: Index labels of the per-race/eth income marginal rows.
        total_col: Index label of the aggregate-income row.
    """
    tract_agg_income = marginals.loc[cols, tract_geoid]
    agg_income = marginals.loc[total_col, tract_geoid]  # Aggregate income for the tract
    if pd.isna(agg_income):
        return
    # (Fix: removed a duplicate re-read of the same rows and a statement whose
    # result was computed but never used.)
    missing = tract_agg_income.isnull()
    missing_idx = tract_agg_income.index[missing]
    left_over = agg_income - tract_agg_income[~missing].sum()  # Tract agg income not accounted for
    # What portion of the missing income should we allocate to each of the
    # missing race entries? Each entry's share of the missing rows' puma income.
    puma_missing_inc = marginals.loc[missing_idx, 'puma']
    missing_prop = puma_missing_inc / puma_missing_inc.sum()
    try:
        marginals.loc[missing_idx, tract_geoid] = (missing_prop * left_over).round(0).astype('Int64')
    except ValueError:
        # Too many nans, so just punt and fill them in with zeros
        marginals.loc[missing_idx, tract_geoid] = 0
    # Check that the result is close enough. This only works for the race columns, not the
    # eth columns, although the eth columns will be really close. For the eth columns,
    # nhwhites+hispanics will be larger than whites, because hispanics includes some non-whites.
    if total_col == 'b19025_001':
        # print (marginals.loc[cols, tract_geoid].sum(), marginals.loc[total_col, tract_geoid])
        assert (marginals.loc[cols, tract_geoid].sum() - marginals.loc[total_col, tract_geoid]).round(-1) == 0
def login_to_garmin_connect(args):
    """
    Perform all HTTP requests to login to Garmin Connect.

    Flow: fetch the login page to obtain a session cookie, POST the
    credentials, extract the SSO service ticket from the response body,
    then hit the post-auth URL with that ticket to finish the handshake.
    """
    # raw_input only exists on Python 2; `python3` is a module-level flag.
    if python3:
        username = args.username if args.username else input('Garmin Account Email: ')
    else:
        username = args.username if args.username else raw_input('Garmin Account Email: ')
    password = args.password if args.password else getpass()
    logging.debug("Login params: %s", urlencode(DATA))
    # Initially, we need to get a valid session cookie, so we pull the login page.
    print('Connecting to Garmin Connect...', end='')
    logging.info('Connecting to %s', URL_GC_LOGIN)
    connect_response = http_req_as_string(URL_GC_LOGIN)
    # write_to_file('connect_response.html', connect_response, 'w')
    for cookie in COOKIE_JAR:
        logging.debug("Cookie %s : %s", cookie.name, cookie.value)
    print(' Done.')
    # Now we'll actually login.
    # Fields that are passed in a typical Garmin login.
    post_data = {
        'username': username,
        'password': password,
        'embed': 'false',
        'rememberme': 'on'
    }
    headers = {
        'referer': URL_GC_LOGIN
    }
    print('Requesting Login ticket...', end='')
    login_response = http_req_as_string(URL_GC_LOGIN + '#', post_data, headers)
    for cookie in COOKIE_JAR:
        logging.debug("Cookie %s : %s", cookie.name, cookie.value)
    # write_to_file('login-response.html', login_response, 'w')
    # extract the ticket from the login response
    pattern = re.compile(r".*\?ticket=([-\w]+)\";.*", re.MULTILINE | re.DOTALL)
    match = pattern.match(login_response)
    if not match:
        raise Exception('Couldn\'t find ticket in the login response. Cannot log in. '
                        'Did you enter the correct email and password?')
    login_ticket = match.group(1)
    print(' Done. Ticket=', login_ticket, sep='')
    print("Authenticating...", end='')
    logging.info('Authentication URL %s', URL_GC_POST_AUTH + 'ticket=' + login_ticket)
    # NOTE(review): the response of this final auth request is discarded —
    # presumably only the session cookies it sets matter; confirm.
    http_req(URL_GC_POST_AUTH + 'ticket=' + login_ticket)
    print(' Done.')
def parse_gt_from_anno(img_anno, classes):
    """Build a ground-truth dict mapping image name -> list of class/box entries."""
    print('parse ground truth files...')
    ground_truth = {}
    for img_name, annos in img_anno.items():
        boxes = []
        for anno in annos:
            # Skip degenerate all-zero boxes and entries without a class label.
            if anno[1] == 0. and anno[2] == 0. and anno[3] == 0. and anno[4] == 0.:
                continue
            if int(anno[0]) == -1:
                continue
            # anno layout: [class_index, x, y, width, height]; convert to an
            # inclusive [xmin, ymin, xmax, ymax] pixel box.
            left, top = int(anno[1]), int(anno[2])
            right = int(anno[1] + anno[3] - 1)
            bottom = int(anno[2] + anno[4] - 1)
            boxes.append({'class': classes[int(anno[0])],
                          'box': [left, top, right, bottom]})
        ground_truth[img_name] = boxes
    return ground_truth
def pt_to_tup(pt):
    """
    Convenience method to generate a pair of two ints from a tuple or list.

    Parameters
    ----------
    pt : list OR tuple
        Can be a list or a tuple of >=2 elements as floats or ints.

    Returns
    -------
    pt : tuple of int
        A pair of two ints; any elements beyond the first two are ignored.
    """
    first, second = pt[0], pt[1]
    return (int(first), int(second))
def item_vector():
    """
    get item vectors

    Loads the trained model and feature dictionary given on the command line,
    recovers item embeddings from the NCE layer's softmax weights, and prints
    one line per item: "<item_id>\\t<comma-separated vector>".

    NOTE: Python 2 code (print statement, cPickle, PaddlePaddle v1 API).
    """
    args = parse_args()
    # check argument
    assert os.path.exists(
        args.model_path), 'The model_path path does not exist.'
    assert os.path.exists(
        args.feature_dict), 'The feature_dict path does not exist.'
    paddle.init(use_gpu=False, trainer_count=1)
    with open(args.feature_dict) as f:
        feature_dict = cPickle.load(f)
    # load the trained model.
    with gzip.open(args.model_path) as f:
        parameters = paddle.parameters.Parameters.from_tar(f)
    # Map embedding row index back to the original item id.
    nid_dict = feature_dict['history_clicked_items']
    nid_to_word = dict((v, k) for k, v in nid_dict.items())
    # The item vectors live in the NCE layer's weights and biases.
    nce_w = parameters.get("nce_w")
    nce_b = parameters.get("nce_b")
    item_vector = convt_simple_lsh(get_item_vec_from_softmax(nce_w, nce_b))
    for i in range(0, len(item_vector)):
        itemid = nid_to_word[i]
        print itemid + "\t" + ",".join(map(str, item_vector[i]))
def _operation(m1, m2, op, k):
"""Generalized function for basic"""
"""matrix operations"""
n = len(m1)
res = [n*[0] for i in range(n)]
if n == len(m2):
for i in range(n):
for j in range(n):
tab = {
"+" : m1[i][j]+m2[i][j],
"-" : m1[i][j]-m2[i][j],
"*s": m1[i][j]*k,
}
res[i][j] = tab[op]
return res | 28,212 |
def jacobi_d1(x, n, alpha, beta):
    """Evaluate the first derivative of Jacobi polynomial at x using eq. A.1.8

    Args:
        x: the location where the value will be evaluated
        n: the order of Jacobi polynomial
        alpha: the alpha parameter of Jacobi polynomial
        beta: the beta parameter of Jacobi polynomial

    Returns:
        the first derivative of Jacobi polynomial at x

    Raises:
        None
    """
    jacobi_check(n, alpha, beta)
    # The derivative of P_0 is identically zero.
    if n == 0:
        return 0.
    # d/dx P_n^{a,b}(x) = 0.5*(a+b+n+1) * P_{n-1}^{a+1,b+1}(x)
    scale = 0.5 * (alpha + beta + n + 1)
    return scale * jacobi_r(x, n - 1, alpha + 1, beta + 1)
def gate_expand_1toN(U, N, target):
    """
    Create a Qobj representing a one-qubit gate that act on a system with N
    qubits.

    Parameters
    ----------
    U : Qobj
        The one-qubit gate
    N : integer
        The number of qubits in the target space.
    target : integer
        The index of the target qubit.

    Returns
    -------
    gate : qobj
        Quantum object representation of N-qubit gate.
    """
    if N < 1:
        raise ValueError("integer N must be larger or equal to 1")
    if target >= N:
        raise ValueError("target must be integer < integer N")
    if target < 0:
        # Fix: a negative target would silently build a gate on the wrong
        # number of qubits via negative list repetition below.
        raise ValueError("target must be a non-negative integer")
    # Identity on every qubit except `target`, which receives U.
    return tensor([identity(2)] * (target) + [U] +
                  [identity(2)] * (N - target - 1))
def build_component_dependency_graph(
    pipeline_definition: Dict[str, Any], component_definitions: Dict[str, Any]
) -> DiGraph:
    """
    Builds a dependency graph between components. Dependencies are:
    - referenced components during component build time (e.g. init params)
    - predecessor components in the pipeline that produce the needed input

    This enables sorting the components in a working and meaningful order for
    instantiation using topological sorting.

    :param pipeline_definition: the definition of the pipeline (e.g. use get_pipeline_definition() to obtain it)
    :param component_definitions: the definition of the pipeline components (e.g. use get_component_definitions() to obtain it)
    """
    graph = DiGraph()
    # Build-time dependencies: a param value naming another component means
    # that component must be instantiated first.
    for name, definition in component_definitions.items():
        # Currently we don't do any additional type validation here.
        # See https://github.com/deepset-ai/haystack/pull/2253#discussion_r815951591.
        for value in definition.get("params", {}).values():
            if value in component_definitions:
                graph.add_edge(value, name)
    # Runtime dependencies: pipeline predecessors produce the needed input.
    for node in pipeline_definition["nodes"]:
        node_name = node["name"]
        graph.add_node(node_name)
        for input in node["inputs"]:
            if input in component_definitions:
                # Special case for (actually permitted) cyclic dependencies between two components:
                # e.g. DensePassageRetriever depends on ElasticsearchDocumentStore.
                # In indexing pipelines ElasticsearchDocumentStore depends on DensePassageRetriever's output.
                # But this second dependency is looser, so we neglect it.
                if not graph.has_edge(node_name, input):
                    graph.add_edge(input, node_name)
    return graph
def count_distribution_artefacts(distribution_artefacts):
    """
    Count distribution artefacts in nested list.

    :param distribution_artefacts: Nested mapping: tenant -> media package ->
        list of distribution artefacts
    :type distribution_artefacts: dict
    :return: Amount of distribution artefacts
    :rtype: int
    """
    # Iterate values directly instead of re-indexing via .keys(); generators
    # avoid materializing intermediate lists.
    return sum(
        len(artefacts)
        for media_packages in distribution_artefacts.values()
        for artefacts in media_packages.values()
    )
def nelson_siegel_yield(tau, theta):
    """Nelson-Siegel yield curve evaluated at maturities tau.

    Parameters
    ----------
    tau : array, shape (n_,)
        Times to maturity (must be nonzero).
    theta : array, shape (4,)
        Parameters: level, slope, curvature, and decay rate.

    Returns
    -------
    y : array, shape (n_,)
    """
    # Hoist the shared loading term (1 - e^{-theta3*tau}) / (theta3*tau),
    # which the original expression evaluated twice.
    decay = np.exp(-theta[3] * tau)
    loading = (1 - decay) / (theta[3] * tau)
    y = theta[0] - theta[1] * loading + theta[2] * (loading - decay)
    return np.squeeze(y)
def AskNoti():
    """Move a notification file into the storage folder and email it.

    Prompts for the notification's path, mails it either to the full mailing
    list or — for student notifications — to the CRE address parsed from a
    CREs.txt file, then moves the file into the target location.
    """
    C_Noti = input("Enter the path of the notification: ")
    if not os.path.exists(C_Noti):
        print("Path doesnot exists...")
        return
    # Destination: the storage folder plus the original file name.
    T_Path = os.path.join(GetPath(), os.path.split(C_Noti)[1])
    if not "Student" in T_Path:
        sendPDFMail("A new notification has been added.", C_Noti, subject="A new notification", recievers=listMails)
    else:
        # NOTE(review): T_Path was just built as a *file* path, yet CREs.txt is
        # joined onto it as if it were a directory — confirm this is intended.
        with open(os.path.join(T_Path, "CREs.txt"), "r") as f:
            # Third line is expected to look like "<label>: <email>".
            R = DC(f.read()).split("\n")[2].split(": ")[1].strip()
        sendPDFMail("A new notification has been added.", C_Noti, subject="A new notification", recievers=[R])
    os.replace(C_Noti, T_Path)
    # ! sendMailNoti(T_path)
def parsing(lst=None):
    """
    Function for parsing command line

    >>> parsing(["2020", "80", "90", "dataset"])
    (2020, 80.0, 90.0, 'dataset')
    """
    parser = argparse.ArgumentParser(description="""Module, which reads data from a file\
 with a films list, determines films, \
made in the given year, and geolocation of their production places.
Then finds 10 or fewer such nearest to the given point places, makes markers \
for them, and creates a map with a layer of that markers.
Also, there is another layer, which contains markers\
 of film shooting places in Ukraine.
You should enter the year of the films' production, the coordinates of the needed point,\
 in comparison to which\
 nearest films will be displayed (lat, lon), and the path to the dataset with your films.""")
    parser.add_argument("year", metavar="Year", type=int, help="Year of films, which\
 will be displayed.")
    parser.add_argument("latitude", metavar="Latitude", type=float, \
        help="Latitude of your point.")
    parser.add_argument("longitude", metavar="Longitude", type=float,\
        help="Longitude of your point.")
    parser.add_argument("path", metavar="Path", help="Path to your dataset.")
    if lst:
        results = parser.parse_args(lst)
    else:
        results = parser.parse_args()
    universal_message = ", please check your coordinates"
    if not -90 <= results.latitude <= 90:
        message = "%r not in range [-90, 90]" % (results.latitude,)
        raise argparse.ArgumentTypeError(message + universal_message)
    # Fix: longitude spans [-180, 180]; it was previously validated against
    # the latitude range [-90, 90], rejecting valid longitudes.
    if not -180 <= results.longitude <= 180:
        message = "%r not in range [-180, 180]" % (results.longitude,)
        raise argparse.ArgumentTypeError(message + universal_message)
    return results.year, results.latitude, results.longitude, results.path
def main() -> None:
    """
    Process raw data.

    Delete blacklisted corrupted images. Trim a footer from each image
    and resize it to 512 pixels on its shorter dimension. Write results
    to "autofocus/data/processed/images". Reformat labels from CSV and
    write to a new file "autofocus/data/processed/labels.csv".
    """
    logging.info("Deleting known corrupted files")
    for path in CORRUPTED_FILES:
        path.unlink()
    logging.info(f"Processing images and writing results to {PROCESSED_IMAGE_DIR}")
    run_record = _process_images()
    logging.info("Processing labels")
    labels = _process_labels(run_record)
    # NOTE(review): the log message references PROCESSED_LABELS_CSV_OUTPATH but
    # the write below targets PROCESSED_DIR / "labels.csv" — confirm both
    # resolve to the same path (also note the stray space inside the braces).
    logging.info(f"Writing processed labels to {PROCESSED_LABELS_CSV_OUTPATH }")
    labels.to_csv(PROCESSED_DIR / "labels.csv", index=False)
def fix_filename(filename):
    """Replace illegal or problematic characters from a filename.

    The actual character mapping is defined by the module-level
    `_filename_trans` translation table (see str.maketrans).
    """
    return filename.translate(_filename_trans)
def buildgnuf2py(mode, exflags, modulename, sourcefiles, includes=None, opt="O2", wraponly=None,
                 outdir="./", srcdir="./src/", tmpdir="./tmp/", env=None, verbose=True):
    """
    Compiles a single PYD using the gfortran compiler
    Parameters:
    -----------
    mode: str
        either `release` or `debug`
    exflags: str
        permissible gfortran compiler flags, space delimited,
    modulename: str
        the name of the resulting pyd, e.g. `covasubs` -> covasubs.pyd
    sourcefiles: list of str
        the source code files in order of dependency
    includes: list of str
        files that get included in the compile / link step
    opt: str
        The optimization level, possible to omit the `-`
    wraponly: list of str
        Names of the functions found in the final fortran file in ``sourcefiles`` that should
        be wrapped for python
    outdir, srcdir, tmpdir : str
        The path for output, sourcecode, tempfiles.
    env: dict
        A dictionary of environment variables to consider for the compiling process. Generated
        with ``env = assert_environ("gnu")`` or ``env = assert_environ("C:/mingw/bin")``
    verbose: bool
        Write compiler output to the terminal
    .. codeauthor:: Ryan Martin - 20-03-2018
    """
    # Per-module temp dir for object and .mod files.
    tmppath = tmpdir + '{}'.format(modulename) + "/"
    ensure_path([outdir, tmpdir, tmppath])
    # generate the source files
    srcfiles = [os.path.join(srcdir, f) for f in sourcefiles]
    # Base f2py command; object files and include dirs are appended below.
    f2pycall = ['f2py', '-c', "--f90flags='-fPIC'", '-m', modulename]
    if mode == "debug":
        f2pycall.append("--debug-capi")
    if wraponly is not None:
        # Restrict the generated python wrappers to the listed functions.
        f2pycall.extend(["only:"] + wraponly + [":"])
    # Base gfortran call for the dependency files; -J puts .mod files in tmppath.
    gfortbasecall = ["gfortran", "-fPIC", "-c", opt, '-J' + tmppath]
    if exflags is not None:
        # Accept either a space-delimited string or an iterable of flags.
        exflagiter = exflags.split() if isinstance(exflags, str) else exflags
        for arg in exflagiter:
            if arg not in gfortbasecall:
                gfortbasecall.append(arg)
    objs2add = []
    def localbuild(fl):
        # Compile one source file to an object file inside tmppath.
        if '/' in fl:
            flname = fl[fl.rfind('/') + 1:fl.rfind('.')]
        else:
            flname = fl[0:fl.rfind('.')]
        localcall = copy.deepcopy(gfortbasecall)
        objfl = tmppath + flname + ".o"
        objs2add.append(objfl)
        localcall.extend([fl, "-o", objfl])
        _buildcall(localcall, env, verbose=verbose)
    # Every file but the last is compiled to an object; the final file is
    # handed to f2py together with those objects.
    for fl in srcfiles[:-1]:
        localbuild(fl)
    f2pycall.extend(objs2add + ["-I"+tmppath])
    if includes is not None:
        if isinstance(includes, list):
            f2pycall.extend(includes)
        else:
            f2pycall.append(includes) ## this should be a string
    f2pycall.append(srcfiles[-1])
    # NOTE(review): unlike the per-file gfortran calls above, `env` is not
    # forwarded to this final f2py invocation — confirm whether that is intended.
    _buildcall(f2pycall, verbose=verbose)
def test_epsilon_reduction_unit_recursion(interface: AltInterface, grammar: str):
    """
    Testing sequence of grammar algorithms: epsilon rules removal, grammar
    reduction, unit rules removal and left recursion removal. In each step
    checking for valid output type.

    :param interface: `pytest fixture` returning :class:`~backend.python_interface.AltInterface` instance
    :param grammar: path to XML file containing input grammar
    """
    pipeline = (
        AlgorithmTypes.GRAMMAR_EPSILON_REMOVAL,
        AlgorithmTypes.GRAMMAR_REDUCTION,
        AlgorithmTypes.GRAMMAR_UNIT_RULES_REMOVAL,
        AlgorithmTypes.GRAMMAR_LEFT_RECURSION_REMOVAL,
    )
    # Feed each algorithm's output into the next one.
    output = read_input(grammar)
    for algorithm in pipeline:
        output = interface.algorithms(output, algorithm)
    assert output.endswith('</EpsilonFreeCFG>\n')
def QDenseModel(weights_f, load_weights=False):
    """Construct QDenseModel.

    Builds a small quantized (QKeras) MLP: a 4-bit input activation, a
    ternary-weight hidden layer with 2-bit activation, and a 4-bit output
    layer followed by softmax; compiled for categorical cross-entropy.

    Args:
        weights_f: path to a weights file, loaded when `load_weights` is True.
        load_weights: if True (and `weights_f` is given), load the weights
            and print quantization statistics.

    Returns:
        The compiled Keras Model.
    """
    x = x_in = Input((RESHAPED,), name="input")
    x = QActivation("quantized_relu(4)", name="act_i")(x)
    # Hidden layer: ternary kernel {-1, 0, +1}, 4-bit bias.
    x = QDense(N_HIDDEN, kernel_quantizer=ternary(),
               bias_quantizer=quantized_bits(4, 0, 1), name="dense0")(x)
    x = QActivation("quantized_relu(2)", name="act0")(x)
    x = QDense(
        NB_CLASSES,
        kernel_quantizer=quantized_bits(4, 0, 1),
        bias_quantizer=quantized_bits(4, 0, 1),
        name="dense2")(
            x)
    x = Activation("softmax", name="softmax")(x)
    model = Model(inputs=[x_in], outputs=[x])
    model.summary()
    model.compile(loss="categorical_crossentropy",
                  optimizer=OPTIMIZER, metrics=["accuracy"])
    if load_weights and weights_f:
        model.load_weights(weights_f)
    print_qstats(model)
    return model
def _load_config():
    """Load the StreamAlert Athena configuration files

    Returns:
        dict: Configuration settings by file, includes two keys:
            lambda, All lambda function settings
            global, StreamAlert global settings

    Raises:
        ConfigError: For invalid or missing configuration files.
    """
    config = {}
    for config_file in ('lambda', 'global'):
        config_file_path = 'conf/{}.json'.format(config_file)
        if not os.path.exists(config_file_path):
            raise ConfigError('The \'{}\' config file was not found'.format(
                config_file_path))
        with open(config_file_path) as config_fh:
            try:
                config[config_file] = json.load(config_fh)
            except ValueError as err:
                # Chain the original JSON error so the root cause stays
                # visible in the traceback.
                raise ConfigError('The \'{}\' config file is not valid JSON'.format(
                    config_file)) from err
    return config
def fmt_uncertainty(x, dx, sn=None, sn_cutoff=8, unit=None):
    """Format a value with uncertainty for latex, e.g. ``$1.23 \\pm 0.06$``."""
    # Decimals needed so the uncertainty keeps one significant digit; keep an
    # extra digit when its leading magnitude is <= 1.5.
    n_decimals = -int(np.floor(np.log10(np.abs(dx))))
    if np.abs(dx) / 10 ** -n_decimals <= 1.5:
        n_decimals += 1
    # Decide scientific notation automatically unless the caller forced it.
    if sn is None:
        sn = bool(np.abs(x) >= 10 ** sn_cutoff or np.abs(x) <= 10 ** -sn_cutoff)
    exponent = int(np.floor(np.log10(np.abs(x)))) if sn else 0
    x_mag = np.abs(x) / 10 ** exponent
    dx_mag = np.abs(dx) / 10 ** exponent
    x_round = np.round(x_mag, decimals=n_decimals + exponent)
    dx_round = np.round(dx_mag, decimals=n_decimals + exponent)
    # When the uncertainty exceeds 1.5 the value is shown as an integer.
    x_str = str(int(x_round)) if dx_round > 1.5 else str(x_round)
    dx_str = str(dx_round)
    if sn:
        fmt_str = r'(%s \pm %s)\times {10}^{%s}' % (x_str, dx_str, exponent)
        if x < 0:
            fmt_str = f'-{fmt_str}'
    else:
        fmt_str = r'%s \pm %s' % (x_str, dx_str)
        if x < 0:
            fmt_str = f'-({fmt_str})'
    if unit is not None:
        if '(' not in fmt_str:
            fmt_str = f'({fmt_str})'
        fmt_str += r'\ \mathrm{%s}' % unit
    return f'${fmt_str}$'
def seed_test_input(clusters, limit):
    """
    Select the seed inputs for fairness testing.

    :param clusters: the results of K-means clustering
    :param limit: the size of seed inputs wanted
    :return: a sequence of seed inputs
    """
    rows = []
    # Round-robin over the clusters, taking the i-th member of each in turn,
    # until `limit` rows are collected or every cluster is exhausted.
    longest = max(len(cluster[0]) for cluster in clusters)
    for i in range(longest):
        if len(rows) == limit:
            break
        for cluster in clusters:
            if i < len(cluster[0]):
                rows.append(cluster[0][i])
                if len(rows) == limit:
                    break
    return np.array(rows)
def construct_validation_report(group_id, root_dir):
    """
    Load data and perform calculations to check how close the approximated
    model results conform with NEMDE solutions.
    """
    # Load results and convert to JSON
    results = parse_validation_results(group_id=group_id)
    # Ensure the per-group output directory exists.
    output_dir = os.path.join(root_dir, group_id)
    os.makedirs(output_dir, exist_ok=True)
    # Write one CSV per solution category.
    for key, filename in (
        ('TraderSolution', 'traders.csv'),
        ('InterconnectorSolution', 'interconnectors.csv'),
        ('RegionSolution', 'regions.csv'),
        ('ConstraintSolution', 'constraints.csv'),
        ('PeriodSolution', 'periods.csv'),
    ):
        save_basis_results(results=results, key=key,
                           filename=os.path.join(output_dir, filename))
    # Zip validation results and save to disk
    shutil.make_archive(base_name=output_dir, format='zip', root_dir=output_dir)
def calc_senescence_water_shading(
        aglivc, bgwfunc, fsdeth_1, fsdeth_3, fsdeth_4):
    """Calculate shoot death due to water stress and shading.

    In months where senescence is not scheduled to occur, some shoot death
    may still occur due to water stress and shading.

    Parameters:
        aglivc (numpy.ndarray): state variable, carbon in aboveground live
            biomass
        bgwfunc (numpy.ndarray): derived, effect of soil moisture on
            decomposition and shoot senescence
        fsdeth_1 (numpy.ndarray): parameter, maximum shoot death rate at very
            dry soil conditions
        fsdeth_3 (numpy.ndarray): parameter, additional fraction of shoots
            which die when aglivc is greater than fsdeth_4
        fsdeth_4 (numpy.ndarray): parameter, threshold value for aglivc
            above which shading increases senescence

    Returns:
        fdeth, fraction of aboveground live biomass that is converted to
            standing dead
    """
    # Pixels where every input carries real data (no nodata sentinels).
    valid_mask = (
        (~numpy.isclose(aglivc, _SV_NODATA)) &
        (bgwfunc != _TARGET_NODATA) &
        (fsdeth_1 != _IC_NODATA) &
        (fsdeth_3 != _IC_NODATA) &
        (fsdeth_4 != _IC_NODATA))
    fdeth = numpy.full(aglivc.shape, _TARGET_NODATA, dtype=numpy.float32)
    # Base death rate scales with dryness (1 - bgwfunc).
    fdeth[valid_mask] = fsdeth_1[valid_mask] * (1. - bgwfunc[valid_mask])
    # Shading adds extra death where live biomass exceeds the threshold.
    shading_mask = (aglivc > fsdeth_4) & valid_mask
    fdeth[shading_mask] += fsdeth_3[shading_mask]
    # The death fraction cannot exceed 1.
    fdeth[valid_mask] = numpy.minimum(fdeth[valid_mask], 1.)
    return fdeth
def hello(friend_name: "str | None" = None) -> str:
    """Greet *friend_name*.

    Args:
        friend_name: Name to greet; must be a string.

    Returns:
        The greeting ``"Hello, <friend_name>!"``.

    Raises:
        TypeError: If *friend_name* is not a string (including the default
            ``None``), matching the original behavior.
    """
    # Fix: the annotation claimed `float` while the function requires `str`.
    if not isinstance(friend_name, str):
        raise TypeError("this function expects a string as input")
    return f'Hello, {friend_name}!'
def flatten(lst):
    """Shallow flatten *lst* (one level of nesting)."""
    out = []
    for sub in lst:
        out.extend(sub)
    return out
def _transform_org_units(metadata: dict) -> pd.DataFrame:
"""Transform org units metadata into a formatted DataFrame."""
df = pd.DataFrame.from_dict(metadata.get("organisationUnits"))
df = df[["id", "code", "shortName", "name", "path", "geometry"]]
df.columns = ["ou_uid", "ou_code", "ou_shortname", "ou_name", "path", "geometry"]
df["ou_level"] = df.path.apply(lambda x: x.count("/"))
df = df[
["ou_uid", "ou_code", "ou_shortname", "ou_name", "ou_level", "path", "geometry"]
] # Reorder columns
return df | 28,232 |
def post_process(done_exec, temp_file):
    """For renaissance, `temp_file` is a path to a CSV file into which the
    results were written. For other suites, it is `None`."""
    suite = done_exec.suite
    if suite == "renaissance":
        assert temp_file is not None
        return post_process_renaissance(done_exec, temp_file)
    if suite == "dacapo":
        assert temp_file is None
        return post_process_dacapo(done_exec)
    if suite == "specjvm":
        assert temp_file is None
        return post_process_specjvm(done_exec)
    raise ValueError("unknown suite %s" % suite)
def freeze(regex_frozen_weights):
    """Creates an optimizer that set learning rate to 0. for some weights.

    Args:
        regex_frozen_weights: The regex that matches the (flatten) parameters
            that should not be optimized.

    Returns:
        A chainable optimizer.
    """
    # A multiplier of 0 zeroes every update for the matched parameters,
    # effectively freezing those weights during training.
    return scale_selected_parameters(regex_frozen_weights, multiplier=0.)
def white_corners(cube_obj):
    """
    Solve the four down-face (white) corner cubies, after the white cross.

    Works corner by corner (DFR, DLF, DBL, DRB): if a corner is not already
    in place, locate it, compute a move sequence with that corner's solver
    and execute it. Returns early as soon as all white corners are solved.
    """
    # Each entry: (sticker indices to check, expected colors there, solver).
    # Fix/refactor: the original repeated this ~20-line section four times.
    corner_specs = [
        ((29, 26, 15), (Cl.D, Cl.F, Cl.R), move_DFR),
        ((27, 44, 24), (Cl.D, Cl.L, Cl.F), move_DLF),
        ((33, 53, 42), (Cl.D, Cl.B, Cl.L), move_DBL),
        ((35, 17, 51), (Cl.D, Cl.R, Cl.B), move_DRB),
    ]
    for positions, colors, solver in corner_specs:
        # Re-read the state each pass: executing moves mutates the cube.
        cstate = cube_obj.cb
        cubies = cube_obj.cubies
        if is_white_corners(cstate):
            print("white corners completed")
            print(cube_obj)
            return
        if not all(cstate[pos] == col for pos, col in zip(positions, colors)):
            loc = find_corner(cubies, tuple(sorted(colors)))
            if loc is None:
                print("can't find corner (%s, %s, %s)"
                      % tuple(sorted(colors)))
            else:
                move_list = solver(cube_obj, loc)
                execute_moves(cube_obj, move_list)
    try:
        assert(is_white_corners(cube_obj.cb))
        print("white corners completed")
        print(cube_obj)
    except AssertionError:
        print("did not successfully reach white corners state\n")
def test_check_classic_valid_without_preprocessor(points):
    """Test that valid inputs when using no preprocessor raises no warning"""
    # Fix: `pytest.warns(None)` is deprecated and removed in pytest 7.
    # Turning warnings into errors is the recommended replacement: any
    # warning raised by check_input now fails the test directly.
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        check_input(points, type_of_inputs='classic', preprocessor=None)
def print_line_and_file_at_callsite(indirection_number):
    """
    Prints the line and filename of a frame up the call stack.

    If 1 is passed it prints at function call site.
    If 2 is passed it prints at callsite of a function
    which called the function which contained this.

    Purpose:
        When you use VSCode it should jump you to the line from clicking on cmd
    """
    caller_frame_record = inspect.stack()[indirection_number]
    frame = caller_frame_record[0]
    info = inspect.getframeinfo(frame)
    # Strip the home-directory prefix so the printed path stays short.
    filename = info.filename.replace("/home/indy/", "")
    print("-------------------------------------------------------------------\n")
    # Fix: `filename` was computed but never used — the old code printed a
    # literal placeholder instead of the actual file.
    print(f"FILE: {filename}:{info.lineno}")
    print(f"FUNCTION: {info.function}\n")
    print("-------------------------------------------------------------------")
def runner_setup(loop) -> None:
    """Adds exit signals to the given asyncio loop on which the app exits gracefully"""
    # Each of these termination signals triggers the graceful-exit handler.
    for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
        loop.add_signal_handler(sig, raise_graceful_exit)
def test_event_loop_fixture(event_loop):
    """Test the injection of the event_loop fixture."""
    # The fixture must provide a truthy (usable) loop object.
    assert event_loop
    # Drive a coroutine to completion on the injected loop; async_coro is
    # defined elsewhere in the test suite and is expected to return 'ok'.
    ret = event_loop.run_until_complete(async_coro(event_loop))
    assert ret == 'ok'
def store_sourceip_telemetry():
    """
    route to handle inbound telemetry from daemonset ip collector agents

    Expects a JSON body shaped like {"ipaddr": {"host": ..., "external": ...}}
    and caches host -> external-ip pairs in the shared SourceIpTelemetry
    singleton. Returns 'OK' on success, 400 on empty host/ip values.
    """
    # load up some context so we can append to it
    context = SourceIpTelemetry.get_instance()
    if context.data:
        telemetry = context.get()
    else:
        telemetry = {}
    payload = request.get_json(force=True)
    log.debug(f'request_data = {payload}')
    # setup dict key of hostip with value of external ip
    # NOTE(review): a payload missing 'ipaddr'/'host'/'external' raises
    # KeyError here, *before* the 400 validation below — confirm whether a
    # 400 response is wanted for malformed payloads too.
    hostname = payload['ipaddr']['host']
    ipaddr = payload['ipaddr']['external']
    if not (hostname and ipaddr):
        log.error('ipaddr.host or ipaddr.external does not contain a valid value')
        return make_response(jsonify({'error': 'ipaddr.host or ipaddr.external seems to be missing in payload'}), 400)
    # Only record a host the first time it is seen; later updates are ignored.
    if not telemetry.get(hostname):
        log.info(f'new host saved in memory host={hostname} ipaddr={ipaddr}')
        telemetry[hostname] = ipaddr
    log.debug(f'context.set telemetry={telemetry}')
    context.set(telemetry)
    return 'OK'
def test_html_whitelist_h2_h3_h4_h5_h6():
    """hN elements represent headings for their sections in ranked order."""
    fragment = """
    <h2>Second level</h2>
    <h3>Third level</h3>
    <h2>Also second-level</h2>
    <h3>Third level</h3>
    <h4>Fourth level</h4>
    <h5>Fifth level</h5>
    <h6>Bottom level</h6>
    <h4>Also fourth-level</h4>
    <h5>Also fifth level</h5>
    """
    check_html_output_contains_text(fragment)
def sep_num(number, space=True):
    """
    Create a string representation of a number with a separator at each
    thousand: spaces when *space* is True, commas otherwise.

    Note
    ----
    Source: https://stackoverflow.com/questions/16670125/python-format-string-thousand-separator-with-spaces

    :param number: A number
    :type number: int | float
    :param space: Separates numbers with spaces if True, else with commas
    :type space: bool
    :return: string representation with separators
    :rtype: str
    """
    grouped = f'{number:,}'
    return grouped.replace(',', ' ') if space else grouped
def raw_smooth_l1_loss(diff, delta=1.0, max_val=10.0):
    """
    Creates smooth L1 loss. The regular version is sometimes unstable so here what we do is if the difference
    is > some value, we will return the log instead.
    So it's then
    0.5 * x^2                                               if |x| <= d
    0.5 * d^2 + d * (|x| - d)                               if max_val > |x| > d
    0.5 * d^2 + d * (max_val - d) + d*log(1.0+|x|-max_val)  if |x| > max_val
    :param diff: tensor of raw differences (prediction minus target)
    :param delta: quadratic/linear crossover point of the Huber loss
    :param max_val: It turns into log after here
    :return: tensor of elementwise losses, same shape as ``diff``
    """
    abs_diff = tf.abs(diff)
    # Standard Huber loss: quadratic inside |x| < delta, linear outside.
    huber_loss = tf.where(
        tf.math.less(abs_diff, delta),
        0.5 * tf.square(diff),
        0.5 * (delta ** 2) + delta * (abs_diff - delta),
    )
    # Cap the linear region: beyond max_val the loss grows only logarithmically.
    # NOTE: tf.where evaluates BOTH branches, so the inner tf.math.abs keeps
    # log1p's argument non-negative even for elements where this branch is
    # ultimately discarded (avoids NaN gradients).
    huber_loss_capped = tf.where(
        tf.math.less(abs_diff, max_val),
        huber_loss,
        0.5 * (delta ** 2) + delta * (max_val - delta) + delta * tf.math.log1p(tf.math.abs(abs_diff - max_val))
    )
    return huber_loss_capped
def model_f2_statistics(mode_path, val_index=1, save_dir=None, save_file=None):
    """
    Collect statistics from every file containing "evaluate" under the model
    directory, producing both an all-label summary and per-label summaries.

    :param mode_path: directory tree to scan for evaluation result files
    :param val_index: only files under a path containing "val<val_index>" are used
    :param save_dir: directory in which to write the summary (joined with save_file)
    :param save_file: output file name; if None nothing is written
    :return: (all_label, one_label, label_f2_threshold) where all_label is a list
        of (weight_file, f2) sorted by f2 desc, one_label maps label index to the
        same per-label, and label_f2_threshold maps label index to
        {weight_file: threshold}
    """
    # gather candidate evaluate files (skip "evaluate_revise" variants)
    evaluate_files = []
    for root, dirs, files in os.walk(mode_path):
        for file in files:
            if ("val%d" % val_index) not in root:
                continue
            if "evaluate" in file and "evaluate_revise" not in file:
                evaluate_files.append(os.path.join(root, file))
    all_label = {}
    one_label = {}
    # NOTE(review): hard-coded 13 — presumably the number of labels in this
    # competition; confirm against the training code.
    label_f2_threshold = [{} for i in range(13)]
    for file in evaluate_files:
        with open(file, "r") as f:
            weight_file = ""
            for i in f.readlines():
                if "Weight" in i:
                    # Models trained by different people have different root
                    # paths in weight_file, so normalize the path here.
                    weight_file = os.path.join(path.root_path,
                                               pathlib.Path(re.match(r"Weight:.*competition[\\/]*(.*)", i).group(1)))
                if "Greedy F2-Score is:" in i:
                    if weight_file == "":
                        print("file %s is abnormal" % file)
                    greedy_f2 = i.split(":")[-1].strip()
                    all_label[weight_file] = float(greedy_f2)
                if "[label" in i:
                    if weight_file == "":
                        print("file %s is abnormal" % file)
                    # lines look like "... label N ... greedy-f2=X[threshold]"
                    label = re.match(r".*label *([0-9]*)", i).group(1)
                    greedy_f2 = re.match(r".*greedy-f2=(.*)\[", i).group(1)
                    threshold = re.match(r".*greedy-f2=.*\[(.*)\]", i).group(1)
                    if one_label.get(int(label), None) is None:
                        one_label[int(label)] = {}
                    one_label[int(label)][weight_file] = float(greedy_f2)
                    label_f2_threshold[int(label)][weight_file] = float(threshold)
    # sort each summary by F2 score, best first
    all_label = sorted(all_label.items(), key=lambda x: x[1], reverse=True)
    for i in range(len(one_label)):
        one_label[i] = sorted(one_label[i].items(), key=lambda x: x[1], reverse=True)
    if save_file is not None:
        with open(os.path.join(save_dir, save_file), "w") as f:
            f.write("==========================All label==========================\n")
            for i in all_label:
                f.write("%f: %s\n" % (i[1], i[0]))
            for i in range(len(one_label)):
                f.write("\n\n\n\n\n==========================One label: %d==========================\n" % i)
                for j in one_label[i]:
                    f.write("%f: %s\n" % (j[1], j[0]))
    return all_label, one_label, label_f2_threshold
def read_parameters(request, view_kwargs):
    """Resolve the values of all declared operation parameters.

    :param request: HttpRequest with attached api_info
    :type request: HttpRequest
    :type view_kwargs: dict[str, object]
    :rtype: dict[str, object]
    :raises ErroneousParameters: if any parameter is required-but-missing or
        fails casting; carries both the errors and the successfully parsed params.
    """
    params = {}
    errors = {}
    for param in request.api_info.operation.parameters:
        try:
            value = get_parameter_value(request, view_kwargs, param)
        except KeyError:
            if 'default' in param:
                params[param['name']] = param['default']
            elif param.get('required'):  # Required but missing
                errors[param['name']] = MissingParameter('parameter %s is required but missing' % param['name'])
            # BUG FIX: always continue to the next parameter here. Previously a
            # missing *optional* parameter without a default fell through to the
            # cast below with `value` unbound (NameError on the first iteration)
            # or stale from a previous iteration.
            continue
        try:
            params[param['name']] = cast_parameter_value(request.api_info, param, value)
        except NotImplementedError:
            raise
        except Exception as e:
            errors[param['name']] = e
    if errors:
        raise ErroneousParameters(errors, params)
    return params
def box_off(ax):
    """Hide the top and right spines of *ax* (similar to Matlab's ``box off``)
    and keep ticks only on the bottom/left axes."""
    for side in ("top", "right"):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
def lookup_batch():
    """Look up parameters (mini-batched version of lookup).

    The resulting expression is a mini-batch of parameters, where the "i"th
    element of the batch corresponds to the parameters at the position
    specified by the "i"th element of "indices".

    NOTE(review): this is a stub — the documented arguments (p, indices,
    update) do not appear in the signature and the body is ``pass``;
    presumably the real implementation is generated or bound elsewhere.
    Confirm before calling directly.

    Args:
        p(LookupParameters): Lookup parameter to pick from
        indices(list(int)): Indices to look up for each batch element
    Keyword Args:
        update(bool): Whether to update the lookup parameter (default: (True))
    Returns:
        _lookupBatchExpression: Expression for the batched embeddings
    """
    pass
def handler(body, archiveFile=None):
    """ Execute the SED-ML files in a COMBINE/OMEX archive.

    Args:
        body (:obj:`dict`): dictionary with schema ``SimulationRun`` with the
            specifications of the COMBINE/OMEX archive to execute and the simulator to execute it with
        archiveFile (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX file

    Returns:
        :obj:`werkzeug.wrappers.response.Response`: response with the results and log of the run in the
            ``SimulationRunResults`` schema

    Raises:
        BadRequestException: if the ``Accept`` header is unsupported, if both
            ``archiveFile`` and ``archiveUrl`` are supplied, if the archive
            cannot be downloaded, or if the requested simulator is unknown.
    """
    archive_file = archiveFile
    archive_url = body.get('archiveUrl', None)
    simulator_id = body['simulator']
    env_vars = body.get('environment', {}).get('variables', [])

    # set up environment (i.e. options); variables on the ignore list are dropped
    env = {}
    for env_var in env_vars:
        key = env_var['key']
        if key not in IGNORED_ENV_VARS:
            env[key] = env_var['value']

    # default report format is HDF5
    if 'REPORT_FORMATS' not in env:
        env['REPORT_FORMATS'] = 'h5'

    # build the configuration with the caller's environment variables applied
    with mock.patch.dict('os.environ', env):
        config = get_config()

    # process requested return type (the Accept header selects how results are
    # serialized: in-memory JSON, an HDF5 report file, or a zip of all outputs)
    accept = connexion.request.headers.get('Accept', 'application/json')
    if accept in ['application/json']:
        # JSON: keep results in memory; write no report/viz files
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
        config.COLLECT_SED_DOCUMENT_RESULTS = True
        config.REPORT_FORMATS = []
        config.VIZ_FORMATS = []
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        config.LOG_PATH = ''
        return_type = 'json'

    elif accept in ['application/x-hdf', 'application/x-hdf5']:
        # HDF5: write report files only
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
        config.COLLECT_SED_DOCUMENT_RESULTS = False
        config.REPORT_FORMATS = [
            ReportFormat[format.strip().lower()]
            for format in env.get('REPORT_FORMATS', 'h5').split(',')
        ]
        config.VIZ_FORMATS = []
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        config.LOG_PATH = ''
        return_type = 'h5'

    elif accept in ['application/zip']:
        # zip: write both report and visualization files
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
        config.COLLECT_SED_DOCUMENT_RESULTS = False
        config.REPORT_FORMATS = [
            ReportFormat[format.strip().lower()]
            for format in env.get('REPORT_FORMATS', 'h5').split(',')
        ]
        config.VIZ_FORMATS = [
            VizFormat[format.strip().lower()]
            for format in env.get('VIZ_FORMATS', 'pdf').split(',')
        ]
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        return_type = 'zip'

    else:
        raise BadRequestException(
            title='`Accept` header must be one of `application/hdf5`, `application/json`, or `application/zip`.',
            instance=NotImplementedError(),
        )

    # get the COMBINE/OMEX archive: exactly one source must be given
    if archive_file and archive_url:
        raise BadRequestException(
            title='Only one of `archiveFile` or `archiveUrl` can be used at a time.',
            instance=ValueError(),
        )

    # get COMBINE/OMEX archive
    archive_filename = get_temp_file(suffix='.omex')

    if archive_file:
        archive_file.save(archive_filename)

    else:
        # download the archive from the given URL
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)

    # get the simulator
    simulator = next((simulator for simulator in get_simulators() if simulator['id'] == simulator_id), None)

    if simulator is None:
        raise BadRequestException(
            title='`{}` is not a BioSimulators id of a simulation tool that is available for execution.'.format(simulator_id),
            instance=ValueError(),
        )

    # execute the simulation in a subprocess, bounded by TIMEOUT
    out_dir = get_temp_dir()

    with mock.patch.dict('os.environ', env):
        results, log = exec_in_subprocess(use_simulator_api_to_exec_sedml_docs_in_combine_archive,
                                          simulator['api']['module'],
                                          archive_filename, out_dir,
                                          timeout=TIMEOUT,
                                          config=config)

    # transform the results
    if return_type == 'json':
        # re-extract the archive so the SED documents (output specs) can be read
        archive_dirname = get_temp_dir()
        with zipfile.ZipFile(archive_filename, 'r') as zip_file:
            zip_file.extractall(archive_dirname)

        outputs = []
        for sed_doc_location, sed_doc_outputs_results in (results or {}).items():
            sed_doc = SedmlSimulationReader().run(os.path.join(archive_dirname, sed_doc_location))
            for output in sed_doc.outputs:
                if output.id not in sed_doc_outputs_results:
                    continue
                output_results = sed_doc_outputs_results.get(output.id, None)

                # plots are serialized through their equivalent report form
                if isinstance(output, Report):
                    type = 'SedReport'
                    report = output
                elif isinstance(output, Plot2D):
                    type = 'SedPlot2D'
                    report = get_report_for_plot2d(output)
                elif isinstance(output, Plot3D):
                    type = 'SedPlot3D'
                    report = get_report_for_plot3d(output)
                else:  # pragma: no cover #
                    raise NotImplementedError('Outputs of type `{}` are not supported.'.format(output.__class__.__name__))

                data = []
                for data_set in report.data_sets:
                    if data_set.id not in output_results:
                        continue
                    data_set_results = output_results[data_set.id]

                    data.append({
                        '_type': 'SimulationRunOutputDatum',
                        'id': data_set.id,
                        'label': data_set.label,
                        'name': data_set.name,
                        'shape': '' if data_set_results is None else ','.join(str(dim_len) for dim_len in data_set_results.shape),
                        'type': '__None__' if data_set_results is None else data_set_results.dtype.name,
                        'values': None if data_set_results is None else data_set_results.tolist(),
                    })

                outputs.append({
                    '_type': 'SimulationRunOutput',
                    'outputId': sed_doc_location + '/' + output.id,
                    'name': output.name,
                    'type': type,
                    'data': data,
                })

        # return
        return {
            '_type': 'SimulationRunResults',
            'outputs': outputs,
            'log': log,
        }

    elif return_type == 'h5':
        # stream the HDF5 report file back to the client
        h5_filename = os.path.join(out_dir, get_config().H5_REPORTS_PATH)
        return flask.send_file(h5_filename,
                               mimetype=accept,
                               as_attachment=True,
                               attachment_filename='outputs.h5')

    else:
        # zip everything produced in the output directory
        zip_filename = get_temp_file()
        with zipfile.ZipFile(zip_filename, 'w') as zip_file:
            for root, dirs, files in os.walk(out_dir):
                for file in files:
                    zip_file.write(os.path.join(root, file),
                                   os.path.relpath(os.path.join(root, file), out_dir))

        return flask.send_file(zip_filename,
                               mimetype=accept,
                               as_attachment=True,
                               attachment_filename='outputs.zip')
def test_plot_html():
    """xfield and yfield defined at plot level"""
    config_name = "level_plot.yml"
    generate_html(config_name)
def get_passwd():
    """Prompt the user to enter and confirm a password.

    Raises:
        ValueError: if the password is deemed invalid (fewer than 6
            characters) or the confirmation does not match. (Previously a
            bare ``Exception`` was raised; ``ValueError`` is more specific
            and still caught by callers catching ``Exception``.)

    Returns:
        Password string entered by the user.
    """
    passwd = getpass.getpass("Enter password for new user: ")
    if not validate_password(passwd):
        raise ValueError("Invalid password: must be 6 or more characters")
    passwd2 = getpass.getpass("Confirm password: ")
    if passwd2 != passwd:
        raise ValueError("Passwords don't match")
    return passwd
def _render_footnote_block_open(self, tokens, idx, options, env):
    """Render the footnote opening, dropping the leading hr tag if present."""
    rendered = mdit_py_plugins.footnote.index.render_footnote_block_open(
        self, tokens, idx, options, env
    )
    first_line, _, remainder = rendered.partition("\n")
    if first_line.strip().startswith("<hr"):
        return remainder
    return rendered
def project_remove(flox: Flox, feature: str):
    """Remove plugin features from the active project."""
    enabled_features = flox.meta.features
    if feature not in enabled_features:
        raise PluginException(
            f"Plugin {feature} is not enabled for '{flox.meta.name}' project",
            extra=f"You can list installed plugins with `flox info`"
        )
    enabled_features.remove(feature)
    flox.meta._save()
    info_box(
        message=f"'{feature}' integration has been removed from current project",
        extra="Please note that flox only disabled any future plugin actions, it's your responsibility "
              "to remove / modify any relevant code from your project."
    )
def test_matcher_pipe_with_matches_and_context(nlp: Language) -> None:
    """It returns a stream of Doc objects and matches and context as tuples."""
    warnings.filterwarnings("ignore")
    stream = (
        (nlp("test doc 1: Corvold"), "Jund"),
        (nlp("test doc 2: Prosh"), "Jund"),
    )
    matcher = TokenMatcher(nlp.vocab)
    patterns = [[{"TEXT": {"FUZZY": "Korvold"}}], [{"TEXT": {"FUZZY": "Prossh"}}]]
    matcher.add("DRAGON", patterns)
    piped = matcher.pipe(stream, return_matches=True, as_tuples=True)
    expected = ([("DRAGON", 4, 5, None)], "Jund")
    assert [(item[0][1], item[1]) for item in piped] == [expected, expected]
def remove_shot_from_scene(scene, shot, client=default):
    """
    Remove link between a shot and a scene.
    """
    scene_id = normalize_model_parameter(scene)["id"]
    shot_id = normalize_model_parameter(shot)["id"]
    route = "data/scenes/%s/shots/%s" % (scene_id, shot_id)
    return raw.delete(route, client=client)
def all_results_failed(subsystems):
    """Check if all results have failed status (i.e. no subsystem is 'OK')."""
    return all(
        subsystem['subsystemStatus'] != 'OK'
        for subsystem in subsystems.values()
    )
def get_scene_id(current_scene_db):
    """Fetch the scene ID from the scene database."""
    from src.praxxis.sqlite import connection

    conn = connection.create_connection(current_scene_db)
    cursor = conn.cursor()
    query = 'SELECT ID FROM "SceneMetadata"'
    cursor.execute(query)
    scene_id = str(cursor.fetchone()[0])
    conn.close()
    return scene_id
def compile_playable_podcast1(playable_podcast1):
    """
    Build playable menu items from podcast dicts.

    @para: list containing dict of key/values pairs for playable podcasts
    """
    return [
        {
            'label': podcast['title'],
            'thumbnail': podcast['thumbnail'],
            'path': podcast['url'],
            # 'info': podcast['desc'],
            'is_playable': True,
        }
        for podcast in playable_podcast1
    ]
def mouseclick(pos):
    """
    Define "mouse click" event handler; implements game
    "state" logic. It receives a parameter; pair of screen
    coordinates, i.e. a tuple of two non-negative integers
    - the position of the mouse click.

    Reads/writes module globals: state, turn, deck_of_cards,
    deck_of_cards_exposed, index_of_cards_exposed_in_a_turn, label.
    Always returns None.
    """
    # User clicks on a "card" of the "deck" (grid of
    # evenly distributed cells - cards placeholders).
    # Compute the index of this "card", i.e. determine
    # which card have been clicked on with the mouse.
    # Recall that the sequence of cards entirely fills
    # the "canvas".
    clicked_card_index = int(math.floor(float(pos[0]) / CARD_PLACEHOLDER_WIDTH))
    # If user clicks on a card already "exposed"; ignore
    # event and "return" function immediately.
    if deck_of_cards_exposed[clicked_card_index]:
        return None
    # The counter of "turns" playing the game will be
    # updated as a global variable.
    global turn
    # The following block implements the game logic for
    # selecting two "cards" and determining if they match.
    # State 0 corresponds to the start of the game.
    # In state 0, if you click on a card, that card is
    # exposed, and you switch to state 1.
    # State 1 corresponds to a single exposed unpaired
    # card.
    # In state 1, if you click on an unexposed card, that
    # card is exposed and you switch to state 2.
    # State 2 corresponds to the end of a turn.
    # In state 2, if you click on an unexposed card, that
    # card is exposed and you switch to state 1.
    global state
    if state == 0:
        # Set the "status" of the clicked "card"
        # as "exposed".
        deck_of_cards_exposed[clicked_card_index] = True
        # Store the "index" of the "exposed" card.
        # This is the first card "exposed" in this "turn"
        # of the game.
        index_of_cards_exposed_in_a_turn[0] = clicked_card_index
        # Update "turn" counter; incremented after the
        # first "card" is flipped during a turn.
        turn += 1
        label.set_text("Turns = " + str(turn))
        # Switch to the next game "state".
        state = 1
    elif state == 1:
        # Set the "status" of the clicked "card"
        # as "exposed".
        deck_of_cards_exposed[clicked_card_index] = True
        # Store the "index" of the "exposed" card.
        # This is the second card "exposed" in this "turn"
        # of the game.
        index_of_cards_exposed_in_a_turn[1] = clicked_card_index
        # Switch to the next game "state".
        state = 2
    else:
        # Set the "status" of the clicked "card"
        # as "exposed".
        deck_of_cards_exposed[clicked_card_index] = True
        # Get the value of the cards exposed in the previous
        # "turn" of the game (taking advantage of the
        # "indexes" stored). Then determine if the previous
        # two "exposed" cards are paired or unpaired.
        # If unpaired then switch the "status" of these
        # cards back to "unexposed"; i.e. flip them back
        # over so that they are hidden before moving to
        # state 1.
        if deck_of_cards[index_of_cards_exposed_in_a_turn[0]] != deck_of_cards[index_of_cards_exposed_in_a_turn[1]]:
            deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[0]] = False
            deck_of_cards_exposed[index_of_cards_exposed_in_a_turn[1]] = False
        # Store the "index" of the "exposed" card.
        # This is the first card "exposed" in this "turn"
        # of the game, i.e. replace the "index" of the
        # first card "exposed" in the previous "turn" of
        # the game.
        index_of_cards_exposed_in_a_turn[0] = clicked_card_index
        # Update "turn" counter; incremented after the
        # first "card" is flipped during a turn.
        turn += 1
        label.set_text("Turns = " + str(turn))
        # Switch to the next game "state".
        state = 1
    return None
def __get_ip_udp_int_pkt(ip_pkt):
    """
    Retrieves the INT UDP packet.

    :param ip_pkt: scapy IP packet whose payload is inspected
    :return: (ip_pkt, udp_pkt) for plain INT packets; for Telemetry Report
        packets, the inner (IP, UDP) packets after unwrapping the report
        headers; (None, None) when the UDP dport matches neither
    :raises Exception: if a Telemetry Report carries an unknown eth type
    """
    logger.info('Obtaining INT data')
    udp_pkt = UDP(_pkt=ip_pkt.payload)
    logger.debug('UDP packet dport - [%s]', udp_pkt.dport)
    if udp_pkt.dport == trans_sec.consts.UDP_INT_DST_PORT:
        logger.debug('Packet is of type INT, returning UDP packet object')
        return ip_pkt, udp_pkt
    elif udp_pkt.dport == trans_sec.consts.UDP_TRPT_DST_PORT:
        logger.debug('Packet is of type Telemetry Report')
        # Unwrap the Telemetry Report and inner Ethernet header to reach the
        # encapsulated IP packet.
        trpt_pkt = TelemetryReport(_pkt=udp_pkt.payload)
        trpt_eth = EthInt(_pkt=trpt_pkt.payload)
        logger.debug('trpt_eth type - [%s]', trpt_eth.type)
        if trpt_eth.type == trans_sec.consts.IPV4_TYPE:
            trpt_ip_pkt = IP(_pkt=trpt_eth.payload)
            logger.debug('IPv4 src - [%s], dst - [%s]',
                         trpt_ip_pkt.src, trpt_ip_pkt.dst)
        elif trpt_eth.type == trans_sec.consts.IPV6_TYPE:
            trpt_ip_pkt = IPv6(_pkt=trpt_eth.payload)
            logger.debug('IPv6 src - [%s], dst - [%s]',
                         trpt_ip_pkt.src, trpt_ip_pkt.dst)
        else:
            raise Exception('Invalid eth type - [{}]'.format(trpt_eth.type))
        return trpt_ip_pkt, UDP(_pkt=trpt_ip_pkt.payload)
    else:
        logger.warn('Invalid INT packet received with dport - [%s]',
                    udp_pkt.dport)
        return None, None
def fixture_version_obj(bundle_data: dict, store: Store) -> models.Version:
    """Return a version object created by adding the bundle to the store."""
    added = store.add_bundle(bundle_data)
    return added[1]
def redirect_log(job, filename="run.log", formatter=None, logger=None):
    """Redirect all messages logged via the logging interface to the given file.

    This method is a context manager. The logging handler is removed when
    exiting the context.

    Parameters
    ----------
    job : :class:`signac.contrib.job.Job`
        The signac job whose workspace will store the redirected logs.
    filename : str
        File name of the log. (Default value = "run.log")
    formatter : :class:`logging.Formatter`
        The logging formatter to use, uses a default formatter if None.
        (Default value = None)
    logger : :class:`logging.Logger`
        The instance of logger to which the new file log handler is added.
        Defaults to the default logger returned by :meth:`logging.getLogger` if
        this argument is not provided.
    """
    if formatter is None:
        formatter = logging.Formatter(
            "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
        )
    if logger is None:
        logger = logging.getLogger()
    # BUG FIX: honor the ``filename`` argument; the handler was previously
    # hard-coded to job.fn("run.log"), silently ignoring the parameter.
    filehandler = logging.FileHandler(filename=job.fn(filename))
    filehandler.setFormatter(formatter)
    logger.addHandler(filehandler)
    try:
        yield
    finally:
        # Always detach the handler, even if the body raised.
        logger.removeHandler(filehandler)
def ranges_compute(x, n_bins):
    """Compute the borders of the histogram bins for *x*.

    Parameters
    ----------
    x: pd.DataFrame
        the data variable we want to obtain its distribution.
    n_bins: int
        the number of bins we want to use to plot the distribution.

    Returns
    -------
    ranges: np.ndarray
        the borders of the bins (``n_bins + 1`` evenly spaced values,
        NaNs in *x* are ignored).
    """
    values = np.array(x)
    low, high = np.nanmin(values), np.nanmax(values)
    return np.linspace(low, high, n_bins + 1)
def creation(basis_size: int, state_index: int) -> spr.csc_matrix:
    """
    Generate the matrix of the fermionic creation operator for a given
    single-particle state.

    :param basis_size: The total number of states in the single particle basis
    :param state_index: The index of the state to be created by the operator
    :return: The matrix of the many-body creation operator
        (2^basis_size x 2^basis_size sparse CSC matrix)
    """
    dim = 2 ** basis_size
    operator = spr.dok_matrix((dim, dim))
    occupation_mask = 1 << (state_index - 1)
    for source_state in range(dim):
        # Only act on basis states where the target orbital is empty;
        # creating on an occupied orbital annihilates the state.
        if ~source_state & occupation_mask:
            target_state = source_state | occupation_mask
            operator[target_state, source_state] = _anticommutation_factor(
                source_state, basis_size, state_index
            )
    return operator.tocsc()
def save_vis_performance_dfg(dfg: dict, start_activities: dict, end_activities: dict, file_path: str,
                             aggregation_measure="mean"):
    """
    Saves the visualization of a performance DFG.

    Parameters
    ----------------
    dfg
        DFG object
    start_activities
        Start activities
    end_activities
        End activities
    file_path
        Destination path (the extension selects the image format)
    aggregation_measure
        Aggregation measure (default: mean): mean, median, min, max, sum, stdev
    """
    image_format = os.path.splitext(file_path)[1][1:]
    from pm4py.visualization.dfg import visualizer as dfg_visualizer
    from pm4py.visualization.dfg.variants import performance as dfg_perf_visualizer
    keys = dfg_perf_visualizer.Parameters
    parameters = {
        keys.FORMAT: image_format,
        keys.START_ACTIVITIES: start_activities,
        keys.END_ACTIVITIES: end_activities,
        keys.AGGREGATION_MEASURE: aggregation_measure,
    }
    gviz = dfg_perf_visualizer.apply(dfg, parameters=parameters)
    dfg_visualizer.save(gviz, file_path)
def test_dont_merge_station_epochs():
    """
    Stations might have epochs with different information - don't merge these
    then.
    """
    filename = os.path.join(data_dir, "multi_station_epoch.xml")
    inv = obspy.read_inventory(filename)
    inv2 = isolate_and_merge_station(inv, network_id="XX", station_id="YYY")
    expected_extra = {
        "something": {"namespace": "https://example.com", "value": "test"}
    }
    # Station/channel counts and the foreign-namespace extras must survive
    # the isolate-and-merge operation unchanged.
    for inventory in (inv, inv2):
        contents = inventory.get_contents()
        assert len(contents["stations"]) == 3
        assert len(contents["channels"]) == 9
        assert inventory[0][0].extra == expected_extra
    # In this case nothing actually changed.
    assert inv == inv2
def LowercaseMutator(current, value):
    """Return *current* lowercased; *value* is accepted to satisfy the
    mutator signature but is not used."""
    return current.lower()
def gtf2gff(gtfname, gffname, memt=True):
    """Convert GTF to GFF.

    Builds transcript records by grouping exons on transcript id, then gene
    records by grouping transcripts on gene id, and writes the merged
    gene/transcript/exon table as GFF.

    Args:
        gtfname: path to GTF file
        gffname: path for converted GFF file
        memt: only select multiexon, multitranscript
    Returns:
        Pandas.DataFrame containing converted GFF data
    """
    eids = read_gtf(gtfname,
                    onlytypes=['exon'],
                    parseattrs=['gene_id','transcript_id','exon_number','gene_name'],
                    rename={'gene_id':'gid','transcript_id':'tid','gene_name':'gname','exon_number':'e#'})
    if N.sum(eids['e#']=='')>0: # recalculate exon_number when any are missing
        eids['e#'] = eids.groupby('tid')['gid'].transform(lambda x: N.arange(1,len(x)+1))
    else:
        eids['e#'] = eids['e#'].astype(int)
    # exon ID is "<transcript>:<exon number>"
    eids['ID'] = eids['tid']+':'+eids['e#'].astype(str)
    eids['attr'] = 'ID='+eids['ID']+';Parent='+eids['tid']
    # groupby tid and get transcript records
    LOG.debug( "calculating transcripts...")
    gtid = eids.groupby('tid')
    tids = gtid.first().copy() # in general get first record
    tids['typ'] = 'transcript' # fix typ
    tids['st'] = gtid['st'].min() # fix st
    tids['ed'] = gtid['ed'].max() # fix ed
    tids['#exons'] = gtid.size()
    if memt:
        tids = tids[tids['#exons']>1]  # keep multi-exon transcripts only
    tids = tids.reset_index()
    tids['e#'] = 0
    tids['attr'] = 'ID='+ tids['tid']+';Parent='+tids['gid']+\
                   ';num_exons='+tids['#exons'].astype(str)+\
                   ';gene_name='+tids['gname']
    # groupby gid and get gene records
    LOG.debug( "calculating genes...")
    ggid = tids.groupby('gid')
    gids = ggid.first().copy()
    gids['typ'] = 'gene'
    gids['st'] = ggid['st'].min()
    gids['ed'] = ggid['ed'].max()
    gids['#trans'] = ggid.size()
    gids = gids.reset_index()
    if memt:
        gids = gids[gids['#trans']>1] # multi transcript
        # restrict transcripts/exons to the surviving genes
        tids = tids[tids['gid'].isin(gids['gid'].values)]
        eids = eids[eids['tid'].isin(tids['tid'].values)]
    gids['tid'] = ''
    gids['e#'] = -1
    gids['attr'] = 'ID='+gids['gid']+';num_trans='+gids['#trans'].astype(str)
    LOG.debug( "merging exons, transcripts, genes...")
    gte = PD.concat([gids,tids,eids],ignore_index=True)
    # sort by gid,tid,st,ed — 'e#' of -1 (gene) / 0 (transcript) keeps parents
    # ahead of their exons within each transcript
    gte = gte.sort_values(['chr','gid','tid','e#'])
    # write out
    LOG.debug( "writing GFF...")
    write_gff(gte, gffname)
    return gte
def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, verbose_logging, logger):
    """Write final predictions to the json file.

    Decodes span predictions (start/end logits plus a per-feature rank logit)
    into answer texts per example.

    Returns:
        (all_predictions, all_nbest_json): best answer text per question id,
        and the full ranked n-best list per question id.
    """
    # group features by the example they were derived from
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
        "PrelimPrediction",
        ["feature_index", "start_index", "end_index", "start_logit", "end_logit", "rank_logit"])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
                    # that the start of the span is in the question. We throw out all
                    # invalid predictions.
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index],
                            rank_logit=result.rank_logit))
        # rank candidate spans by combined span + rank score, best first
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit + x.rank_logit),
            reverse=True)
        _NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
            "NbestPrediction", ["text", "start_logit", "end_logit", "rank_logit"])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging, logger)
            # de-duplicate by answer text
            if final_text in seen_predictions:
                continue
            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit,
                    rank_logit=pred.rank_logit))
        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
            nbest.append(
                _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, rank_logit=0.))
        assert len(nbest) >= 1
        span_scores = []
        rank_scores = []
        for entry in nbest:
            span_scores.append(entry.start_logit + entry.end_logit)
            rank_scores.append(entry.rank_logit)
        final_scores = [span_score + rank_score for span_score, rank_score
                        in zip(span_scores, rank_scores)]
        # re-sort the n-best list by the combined final score, best first
        nbest_indexes = np.argsort(final_scores)[::-1]
        nbest_json = []
        for index in nbest_indexes:
            entry = nbest[index]
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["final_score"] = final_scores[index]
            output["span_score"] = span_scores[index]
            output["rank_score"] = rank_scores[index]
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        all_predictions[example.qas_id] = nbest_json[0]["text"]
        all_nbest_json[example.qas_id] = nbest_json
    return all_predictions, all_nbest_json
def setup_view(view, request, *args, **kwargs):
    """Mimic ``as_view()``, but return the view instance itself.

    Use this function to get view instances on which you can run unit tests,
    by testing specific methods.
    See https://stackoverflow.com/a/33647251 and
    http://django-downloadview.readthedocs.io/en/latest/testing.html#django_downloadview.test.setup_view"""
    view.request, view.args, view.kwargs = request, args, kwargs
    return view
def get_density(molecule_name, temperature=273.15, pressure=101325,
                cycles=5000, init_cycles="auto",
                forcefield="CrystalGenerator"):
    """Calculates the density of a gas through an NPT ensemble.

    Args:
        molecule_name: The molecule to test for adsorption. A file of the same
            name must exist in `$RASPA_DIR/share/raspa/molecules/TraPPE`.
        temperature: (Optional) The temperature of the simulation, in Kelvin.
        pressure: (Optional) The pressure of the simulation, in Pascals.
        cycles: (Optional) The number of simulation cycles to run.
        init_cycles: (Optional) The number of initialization cycles to run.
            Defaults to the minimum of cycles / 2 and 10,000.
        forcefield: (Optional) The forcefield to use. Name must match a folder
            in `$RASPA_DIR/share/raspa/forcefield`, which contains the properly
            named `.def` files.
    Returns:
        The density, as a float, in kg/m^3.
    """
    # BUG FIX: the script template references {simulation_type}, but no such
    # local existed, so .format(**locals()) raised KeyError on every call.
    # RASPA NPT density runs are plain Monte Carlo simulations.
    simulation_type = "MonteCarlo"
    print_every = cycles // 10
    if init_cycles == "auto":
        init_cycles = min(cycles // 2, 10000)
    script = dedent("""
        SimulationType                {simulation_type}
        NumberOfCycles                {cycles}
        NumberOfInitializationCycles  {init_cycles}
        PrintEvery                    {print_every}
        Forcefield                    {forcefield}

        Box 0
        BoxLengths 30 30 30
        ExternalTemperature {temperature}
        ExternalPressure {pressure}
        VolumeChangeProbability 0.25

        Component 0 MoleculeName             {molecule_name}
                    MoleculeDefinition       TraPPE
                    TranslationProbability   0.5
                    ReinsertionProbability   0.5
                    CreateNumberOfMolecules  256
    """.format(**locals())).strip()
    output = parse(run_script(script))
    return output["Average Density"]["[kg/m^3]"][0]
def print_json(field):
    """Print the definition of one field, in JSON (4-space indented)."""
    serialized = json.dumps(field, indent=4)
    print(serialized)
def is_node_up(config, host):
    """
    Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool
    and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not
    accepting requests.

    :param config: configuration object; ``config.checks.health_check`` selects the check to run
        ('cql', 'thrift' or 'all' — any other value falls back to cql) and ``config.cassandra``
        describes the node, including whether it is a CCM-managed cluster.
    :param host: The target host on which to perform the check
    :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node
        must be ready to accept requests for both in order for the health check to be successful.
    """
    health_check = config.checks.health_check
    if int(config.cassandra.is_ccm) == 1:
        # CCM-managed local cluster: probe through `ccm node1 nodetool`.
        args = ['ccm', 'node1', 'nodetool']
        if health_check == 'thrift':
            return is_ccm_up(args, 'statusthrift')
        elif health_check == 'all':
            # list(args) hands each call its own copy of the argument list.
            return is_ccm_up(list(args), 'statusbinary') and is_ccm_up(list(args), 'statusthrift')
        else:
            # cql is the default check
            return is_ccm_up(args, 'statusbinary')
    else:
        # Real node: probe the relevant port(s) with `nc` under a timeout
        # instead of invoking nodetool.
        cassandra = Cassandra(config.cassandra)
        native_port = cassandra.native_port
        rpc_port = cassandra.rpc_port
        nc_timeout = 10
        args = ['timeout', str(nc_timeout), 'nc', '-zv', host]
        if health_check == 'thrift':
            return is_cassandra_up(args, rpc_port)
        elif health_check == 'all':
            return is_cassandra_up(list(args), rpc_port) and is_cassandra_up(list(args), native_port)
        else:
            # cql only
            return is_cassandra_up(args, native_port)
def audits(program):
    """Create 2 audits mapped to the program.

    :param program: program object the audits are created under
    :return: list of the two created audit objects
    """
    # BUG FIX: `xrange` is Python 2-only and raises NameError on Python 3;
    # `range` is the drop-in equivalent here.
    return [rest_facade.create_audit(program) for _ in range(2)]
def _create_run(uri, experiment_id, work_dir, entry_point):
    """
    Create a ``Run`` against the current MLflow tracking server, logging metadata (e.g. the URI,
    entry point, and parameters of the project) about the run. Return an ``ActiveRun`` that can be
    used to report additional data about the run (metrics/params) to the tracking server.
    """
    # For local URIs, prefer the project's git remote URL (when one exists)
    # as the recorded source name; otherwise record the expanded URI as-is.
    if _is_local_uri(uri):
        source_name = tracking.utils._get_git_url_if_present(_expand_uri(uri))
    else:
        source_name = _expand_uri(uri)
    # Nest under the currently active run (if any) so nested project runs
    # stay linked to their parent.
    existing_run = fluent.active_run()
    if existing_run:
        parent_run_id = existing_run.info.run_uuid
    else:
        parent_run_id = None
    active_run = tracking.MlflowClient().create_run(
        experiment_id=experiment_id,
        source_name=source_name,
        # git commit of the working directory, if it is a git repo
        source_version=_get_git_commit(work_dir),
        entry_point_name=entry_point,
        source_type=SourceType.PROJECT,
        parent_run_id=parent_run_id)
    return active_run
def delete(sock, buffer_size, param):
    """Send an HTTP DELETE request for one product and print the response.

    Args:
        sock(socket.socket): connected socket object
        buffer_size(int): maximum size of received message buffer
        param(str): product identifier appended to the request path
    """
    request = 'DELETE /products/%s HTTP/1.1\r\n' \
              'Host: localhost:8080\r\n' \
              '\r\n' % ( param )
    sock.send(request.encode())
    chunk = sock.recv(buffer_size)
    while chunk:
        print(chunk)
        chunk = sock.recv(buffer_size)
def _call_pt_lines_to_flows():
    """ Call directly ptlines2flows from sumo/tools. """
    # Build the option list exactly as the ptlines2flows CLI would parse it:
    # one full day (86400 s) of flows at a 600 s period, randomized begin
    # times with a fixed seed for reproducibility, reading the default
    # net/stops/lines files and writing the default flows file.
    pt_flows_options = ptlines2flows.get_options(['-n', DEFAULT_NET_XML,
                                                  '-e', '86400',
                                                  '-p', '600',
                                                  '--random-begin',
                                                  '--seed', '42',
                                                  '--ptstops', DEFAULT_PT_STOPS_XML,
                                                  '--ptlines', DEFAULT_PT_LINES,
                                                  '-o', DEFAULT_PT_FLOWS,
                                                  '--ignore-errors',
                                                  '--vtype-prefix', 'pt_',
                                                  '--verbose'])
    ptlines2flows.main(pt_flows_options)
def test_str_lower():
    """Test string conversion to lowercase using ``.str.lower()``."""
    names = [
        "Graham Chapman",
        "John Cleese",
        "Terry Gilliam",
        "Eric Idle",
        "Terry Jones",
        "Michael Palin",
    ]
    df = pd.DataFrame({"codes": range(1, 7), "names": names})
    # Expected frame: same data with the names column lowercased directly.
    expected = df.copy()
    expected["names"] = expected["names"].str.lower()
    result = df.process_text(column="names", string_function="lower")
    assert_frame_equal(result, expected)
def convertBoard(board):
    """
    Converts a chess board into a numerical representation.

    Each of the 64 squares maps to 0 (empty) or a piece code: white
    P/N/B/R/Q/K -> 1..6, black p/n/b/r/q/k -> 7..12.

    :param board: board object exposing ``piece_at(i)`` for squares 0-63,
        where each piece exposes ``symbol()`` (python-chess API shape)
    :return: numpy array of shape (64, 1, 1) holding the piece codes
    """
    # Build the symbol->code table once instead of recreating the dict
    # literal on every occupied square (the original did it inside the loop).
    piece_codes = {"P": 1, "N": 2, "B": 3, "R": 4, "Q": 5, "K": 6,
                   "p": 7, "n": 8, "b": 9, "r": 10, "q": 11, "k": 12}
    flatBoard = np.zeros(64)
    for i in range(64):
        piece = board.piece_at(i)
        if piece is not None:
            flatBoard[i] = piece_codes[piece.symbol()]
    return flatBoard.reshape(64, 1, 1)
def merge_optional(default_dict: Dict[str, Any], update_dict: Dict[str, Any], tpe: str):
    """
    Function to merge dictionaries to add set parameters from update dictionary into default dictionary.

    @param default_dict: Default configuration dictionary.
    @type default_dict: dict
    @param update_dict: Update configuration to be merged into default configurations.
    @type update_dict: dict
    @param tpe: String representation of type of learner (used only for log messages).
    @type tpe: str
    @return: Result of merged dictionaries.
    @rtype: dict
    """
    default_copy = default_dict.copy()
    # Recurse into keys whose values are mappings on BOTH sides so nested
    # defaults survive a partial override.
    for k, v in default_copy.items():  # pylint: disable=invalid-name
        if k in update_dict:
            if all(isinstance(e, MutableMapping) for e in (v, update_dict[k])):
                update_dict[k] = merge_optional(v, update_dict[k], tpe)
            else:
                # NOTE(review): this warns for every non-mapping override,
                # including ordinary scalar updates — confirm that's intended.
                logging.warning(f"Gotten unknown alternative mapping {k}:{v} for {tpe}")
    # Base case
    # Apply every explicitly-set (non-None) scalar from the update; nested
    # dicts were already merged into update_dict by the recursion above.
    update = list(filter(lambda item: item[1] is not None, update_dict.items()))
    for k, v in update:  # pylint: disable=invalid-name
        if not isinstance(v, dict):
            logging.info(f'Updating {k} from {default_copy[k]} to {v} for {tpe}')
            default_copy[k] = v
    return default_copy
def reference_pixel_map(dimensions, instrument_name):
    """Create a map that flags all reference pixels as such

    Parameters
    ----------
    dimensions : tup
        (y, x) dimensions, in pixels, of the map to create

    instrument_name : str
        Name of JWST instrument associated with the data

    Returns
    -------
    ref_map : numpy.ndarray
        2D map showing the locations of reference pixels (1)
    """
    yd, xd = dimensions
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    ref_map = np.zeros(dimensions).astype(int)
    # The outermost 4 columns are always reference pixels.
    ref_map[:, 0:4] = 1
    ref_map[:, xd-4:xd] = 1
    # MIRI has no reference rows; every other instrument also flags the
    # outermost 4 rows.
    if instrument_name.lower() != 'miri':
        ref_map[0:4, :] = 1
        ref_map[yd-4:yd, :] = 1
    return ref_map
def geocode(value, spatial_keyword_type='hostname'):
    """convenience function to geocode a value

    :param value: value to geocode (a URL whose hostname is looked up)
    :param spatial_keyword_type: lookup strategy; only 'hostname' is implemented
    :returns: (lat, lon) tuple of floats; (0.0, 0.0) when the lookup fails
              or the keyword type is unsupported
    """
    lat, lon = 0.0, 0.0
    if spatial_keyword_type == 'hostname':
        try:
            hostname = urlparse(value).hostname
            # NOTE(review): remote unauthenticated lookup with no timeout —
            # urlopen can block indefinitely if ip-api.com is unreachable.
            url = 'http://ip-api.com/json/%s' % hostname
            LOGGER.info('Geocoding %s with %s', hostname, url)
            content = json.loads(urlopen(url).read())
            lat, lon = content['lat'], content['lon']
        except Exception as err:  # skip storage
            msg = 'Could not derive coordinates: %s' % err
            LOGGER.exception(msg)
    return lat, lon
def deploy_contract(w3, document_page_url, secret_key):
    """
    Deploy the document contract and wait for it to be mined.

    :param w3: the w3 connection
    :param document_page_url: the document page url (passed to the contract constructor)
    :param secret_key: the operator secret key
    :return: the transaction receipt of the deployment
    """
    # 1. declare contract
    document_sc = w3.eth.contract(abi=abi, bytecode=bytecode)
    # 2. authenticate operator
    gas_required = settings.GAS_CONTRACT_DEPLOY
    gas_price = get_gas_price(w3)
    acct = w3.eth.account.from_key(secret_key)
    # Fail early if the operator cannot afford the worst-case deployment fee.
    check_balance(w3, acct.address, minimum_required=(gas_required * gas_price))
    # 3. create the constructor transaction
    construct_txn = document_sc.constructor(document_page_url).buildTransaction({
        'from': acct.address,
        'nonce': w3.eth.get_transaction_count(acct.address),
        'gas': gas_required,
        'gasPrice': gas_price})
    # 4. sign transaction
    signed = acct.sign_transaction(construct_txn)
    # 5. send signed transaction
    tx_hash = w3.eth.send_raw_transaction(signed.rawTransaction)
    # 6. block until mined, then return the receipt
    return w3.eth.wait_for_transaction_receipt(tx_hash)
def format_date(format_string=None, datetime_obj=None):
    """
    Format a datetime object with Java SimpleDateFormat's-like string.

    If datetime_obj is not given - use current datetime.
    If format_string is not given - return number of milliseconds since epoch.

    :param format_string: SimpleDateFormat-style pattern, or None for epoch millis
    :param datetime_obj: datetime to format; defaults to ``datetime.now()``
    :return: formatted timestamp
    :rtype: string
    """
    datetime_obj = datetime_obj or datetime.now()
    if format_string is None:
        # BUG FIX: strftime("%s") is a non-portable glibc extension (it is
        # not part of the C89 strftime set and is absent on Windows).
        # timestamp() yields the same local-time epoch seconds portably.
        seconds = int(datetime_obj.timestamp())
        milliseconds = datetime_obj.microsecond // 1000
        return str(seconds * 1000 + milliseconds)
    else:
        formatter = SimpleDateFormat(format_string)
        return formatter.format_datetime(datetime_obj)
def testdata(request):
    """
    Return the path of the '.testdata' folder that sits next to the calling
    test module, for tests that need expected-data fixtures.
    """
    from pathlib import Path
    return Path(request.fspath).parent / '.testdata'
def delete_user_pool_domain(Domain=None, UserPoolId=None):
    """
    Deletes a domain for a user pool.

    See also: AWS API Documentation

    Example:
        response = client.delete_user_pool_domain(
            Domain='string',
            UserPoolId='string'
        )

    :type Domain: string
    :param Domain: [REQUIRED] The domain string.
    :type UserPoolId: string
    :param UserPoolId: [REQUIRED] The user pool ID.
    :rtype: dict
    :return: {} (empty response structure on success)

    Raises (CognitoIdentityProvider.Client.exceptions):
        NotAuthorizedException, InvalidParameterException,
        ResourceNotFoundException, InternalErrorException
    """
    # Generated stub: no local implementation.
    pass
def set_price(location, algo, order, price):
    """
    Set a new price on an existing order via the NiceHash API.

    https://api.nicehash.com/api?method=orders.set.price&id=8&key=3583b1df-5e93-4ba0-96d7-7d621fe15a17&location=0&algo=0&order=1881&price=2.1

    :param location: marketplace location code
    :param algo: algorithm code
    :param order: order id
    :param price: new price to set
    :return: the 'result' section of the API response
    """
    params = {
        'location': location,
        'algo': algo,
        'order': order,
        'price': price,
    }
    response = query('orders.set.price', params)
    return response.json()['result']
def setFonts(typ):
    """
    Sets fonts for standard font-types

    :param typ: one of sans-serif-afm, serif (sans-serif is default on init)
    :type typ: str
    """
    if typ == 'sans-serif-afm':
        # Map Type-1 AFM metric names to their PFB glyph files and register
        # each face, then expose them under the four logical style names.
        baseNameDict = {
            'Helvetica': "_a______",
            'Helvetica-Bold': "_ab_____",
            'Helvetica-Oblique': "_ai_____",
            'Helvetica-BoldOblique': "_abi____"
        }
        for afm, pfb in baseNameDict.items():
            faceName = afm
            registerFont(faceName, afm, pfb)
        _baseFontNames.update({
            "normal": pdfmetrics.getFont('Helvetica').fontName
        })
        _baseFontNames.update({
            "bold": pdfmetrics.getFont('Helvetica-Bold').fontName
        })
        _baseFontNames.update({
            "italic": pdfmetrics.getFont('Helvetica-Oblique').fontName
        })
        _baseFontNames.update({
            "bold_italic": pdfmetrics.getFont('Helvetica-BoldOblique').fontName
        })
    elif typ == 'serif':
        # BUG FIX: the bold and italic faces were swapped — CALIBRIB.TTF is
        # the bold file and CALIBRII.TTF the italic one, but they were passed
        # to the opposite keyword arguments.
        setTtfFonts(
            'Calibri',
            __font_dir__,
            normal=('Calibri', 'CALIBRI.TTF'),
            italic=('CalibriIt', 'CALIBRII.TTF'),
            bold=('CalibriBd', 'CALIBRIB.TTF'),
            bold_italic=('CalibriBI', 'CALIBRIZ.TTF'))
def iterate_datalog_program(datalog_program):
    """
    Walk every rule in the AST generated from the datalog program and print
    the complete datalog program, one numbered rule at a time.
    """
    for index, datalog_rule in enumerate(datalog_program):
        print(f'{index}:')
        print(iterate_datalog_rule(datalog_rule))
def jaccard_coef_loss(y_true, y_pred):
    """
    Loss based on the jaccard coefficient, regularised with
    binary crossentropy

    Notes
    -----
    Found in https://github.com/ternaus/kaggle_dstl_submission
    """
    # NOTE(review): K.binary_crossentropy's argument order changed between
    # Keras versions (output-first in 1.x, target-first in 2.x); confirm the
    # (y_pred, y_true) order here matches the Keras version in use.
    return (-K.log(jaccard_coef(y_true, y_pred)) +
            K.binary_crossentropy(y_pred, y_true))
def __apply_rule_to_files_dataset_grouping(datasetfiles, locks, replicas, source_replicas, rseselector, rule, preferred_rse_ids=[], source_rses=[], session=None):
    """
    Apply a rule to files with DATASET grouping (RSEs are selected once per
    dataset and every file of the dataset is locked on the selected RSEs).

    :param datasetfiles: Dict holding all datasets and files.
    :param locks: Dict holding all locks.
    :param replicas: Dict holding all replicas.
    :param source_replicas: Dict holding all source replicas.
    :param rseselector: The RSESelector to be used.
    :param rule: The rule object.
    :param preferred_rse_ids: Preferred RSE's to select.
    :param source_rses: RSE ids of eligible source replicas.
    :param session: Session of the db.
    :returns: replicas_to_create, locks_to_create, transfers_to_create
    :raises: InsufficientQuota, InsufficientTargetRSEs
    :attention: This method modifies the contents of the locks and replicas input parameters.
    """
    locks_to_create = {}  # {'rse_id': [locks]}
    replicas_to_create = {}  # {'rse_id': [replicas]}
    transfers_to_create = []  # [{'dest_rse_id':, 'scope':, 'name':, 'request_type':, 'metadata':}]
    for dataset in datasetfiles:
        # Total size of the dataset drives quota/selection decisions.
        bytes_ = sum([file['bytes'] for file in dataset['files']])
        # How many bytes of this dataset each RSE already holds; used to
        # prefer RSEs that need the fewest transfers.
        rse_coverage = {}  # {'rse_id': coverage }
        blocklist = set()
        for file in dataset['files']:
            for replica in replicas[(file['scope'], file['name'])]:
                # An RSE currently deleting any file of the dataset is not a
                # valid placement target.
                if replica.state == ReplicaState.BEING_DELETED:
                    blocklist.add(replica.rse_id)
                    continue
                if replica.state in [ReplicaState.AVAILABLE, ReplicaState.COPYING, ReplicaState.TEMPORARY_UNAVAILABLE]:
                    if replica.rse_id in rse_coverage:
                        rse_coverage[replica.rse_id] += file['bytes']
                    else:
                        rse_coverage[replica.rse_id] = file['bytes']
        # Select target RSEs once for the whole dataset.
        if not preferred_rse_ids:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=[x[0] for x in sorted(rse_coverage.items(), key=lambda tup: tup[1], reverse=True)],
                                                blocklist=list(blocklist),
                                                prioritize_order_over_weight=True,
                                                existing_rse_size=rse_coverage)
        else:
            rse_tuples = rseselector.select_rse(size=bytes_,
                                                preferred_rse_ids=preferred_rse_ids,
                                                blocklist=list(blocklist),
                                                existing_rse_size=rse_coverage)
        for rse_tuple in rse_tuples:
            for file in dataset['files']:
                # Skip files that already have the full number of copies.
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]) == rule.copies:
                    continue
                if len([lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id and lock.rse_id == rse_tuple[0]]) == 1:
                    # Due to a bug a lock could have been already submitted for this, in that case, skip it
                    continue
                __create_lock_and_replica(file=file,
                                          dataset=dataset,
                                          rule=rule,
                                          rse_id=rse_tuple[0],
                                          staging_area=rse_tuple[1],
                                          availability_write=rse_tuple[2],
                                          locks_to_create=locks_to_create,
                                          locks=locks,
                                          source_rses=source_rses,
                                          replicas_to_create=replicas_to_create,
                                          replicas=replicas,
                                          source_replicas=source_replicas,
                                          transfers_to_create=transfers_to_create,
                                          session=session)
            # Add a DatasetLock to the DB
            if dataset['scope'] is not None:
                try:
                    session.query(models.DatasetLock).filter(models.DatasetLock.scope == dataset['scope'],
                                                             models.DatasetLock.name == dataset['name'],
                                                             models.DatasetLock.rule_id == rule.id,
                                                             models.DatasetLock.rse_id == rse_tuple[0]).one()
                except NoResultFound:
                    # Get dataset Information
                    is_open, bytes_, length = True, None, None
                    try:
                        is_open, bytes_, length = session.query(models.DataIdentifier.is_open,
                                                                models.DataIdentifier.bytes,
                                                                models.DataIdentifier.length).filter_by(scope=dataset['scope'], name=dataset['name']).one()
                    except NoResultFound:
                        pass
                    # Size/length are only recorded for closed datasets.
                    models.DatasetLock(scope=dataset['scope'],
                                       name=dataset['name'],
                                       rule_id=rule.id,
                                       rse_id=rse_tuple[0],
                                       state=LockState.REPLICATING,
                                       account=rule.account,
                                       length=length if not is_open else None,
                                       bytes=bytes_ if not is_open else None).save(flush=False, session=session)
            # Add a Dataset Replica to the DB
            if dataset['scope'] is not None:
                try:
                    session.query(models.CollectionReplica).filter(models.CollectionReplica.scope == dataset['scope'],
                                                                   models.CollectionReplica.name == dataset['name'],
                                                                   models.CollectionReplica.rse_id == rse_tuple[0]).one()
                except NoResultFound:
                    models.CollectionReplica(scope=dataset['scope'],
                                             name=dataset['name'],
                                             did_type=DIDType.DATASET,
                                             rse_id=rse_tuple[0],
                                             bytes=0,
                                             length=0,
                                             available_bytes=0,
                                             available_replicas_cnt=0,
                                             state=ReplicaState.UNAVAILABLE).save(session=session)
                    models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                    name=dataset['name'],
                                                    did_type=DIDType.DATASET).save(flush=False, session=session)
    return replicas_to_create, locks_to_create, transfers_to_create
def call_webhook(event, webhook, payload):
    """Build request from event, webhook, payload and parse response.

    Dispatches the webhook HTTP request, logs the request and response,
    reports failures to Sentry when a client is configured, and always fires
    the ``webhook_ran`` signal with the result dict (content on success,
    error message on failure) before returning that dict.
    """
    started_at = time()
    request = _build_request_for_calling_webhook(event, webhook, payload)
    logger.info('REQUEST %(uuid)s %(method)s %(url)s %(payload)s' % dict(
        uuid=str(event['uuid']),
        url=request['url'],
        method=request['method'],
        payload=payload,
    ))
    try:
        content = dispatch_webhook_request(**request)
        logger.debug('RESPONSE %(uuid)s %(method)s %(url)s %(data)s' % dict(
            uuid=str(event['uuid']),
            url=request['url'],
            method=request['method'],
            data=content,
        ))
        data = dict(
            parent=str(event['uuid']),
            content=content,
            started_at=started_at,
            ended_at=time()
        )
    except (FailureWebhookError, ConnectionError) as exception:
        # Only report to Sentry when a client is actually configured.
        if sentry.client:
            http_context = raven_context(**request)
            sentry.captureException(data={'request': http_context})
        logger.error('RESPONSE %(uuid)s %(method)s %(url)s %(error)s' % dict(
            uuid=str(event['uuid']),
            method=request['method'],
            url=request['url'],
            error=exception.message,))
        data = dict(
            parent=str(event['uuid']),
            error=exception.message,
            started_at=started_at,
            ended_at=time(),
        )
    # Fired on success and failure alike so listeners always see the outcome.
    webhook_ran.send(None, data=data)
    return data
def test_compute_min_dist():
    """Test computation of minimum distance between two molecules"""
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # Closest pair across the molecule sets is (1,1,1)-(2,2,2) -> sqrt(3).
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)
def safe_str(obj):
    """ return the byte string representation of obj """
    # NOTE(review): Python 2 idiom — `unicode` does not exist on Python 3,
    # where str() never raises UnicodeEncodeError; confirm the target
    # runtime before reusing this helper.
    try:
        return str(obj)
    except UnicodeEncodeError:
        # obj is unicode
        return unicode(obj).encode('unicode_escape')
def default_user_agent():
    """Return a string representing the default user agent."""
    return 'airslate/{} ({})'.format(__version__, __url__)
def join_complementary_byteblocks(block) -> int:
    """
    join_complementary_byteblocks used to combine low bit data and high bit data
    as the representation of complementary code

    Parameters
    ----------
    block : list
        Byte values (0-255), low byte first:
        [LowDigitBlock, HighDigitBlock, ...]

    Returns
    -------
    parsed : int
        low | high << 8 ... (complementary code)

    Example:
        LowDigitBlock = 0    # 0x00
        HighDigitBlock = 255 # 0xff
        block = [LowDigitBlock, HighDigitBlock]  # low first
        join_complementary_byteblocks(block)
        -> -256  # -0x100
    """
    # int.from_bytes performs the little-endian join (low byte first, matching
    # the documented `low | high << 8` layout) and the two's-complement sign
    # handling in one portable call, replacing the manual shift-and-subtract.
    # Assumes every entry is 0-255, as produced by byte reads.
    return int.from_bytes(bytes(block), byteorder="little", signed=True)
def seed(func):
    """Decorator that reseeds the NumPy RNG (seed 0) before each call."""
    @wraps(func)
    def seeded(*args, **kwargs):
        numpy.random.seed(0)
        return func(*args, **kwargs)
    return seeded
def trim_bandstructure(
    energy_cutoff: float, band_structure: BandStructure
) -> BandStructure:
    """
    Trim the number of bands in a band structure object based on a cutoff.

    Args:
        energy_cutoff: An energy cutoff within which to keep the bands. If the system
            is metallic then the bands to keep will fall within +/- the cutoff around
            the Fermi level. If the system has a band gap, the bands from the VBM -
            energy_cutoff to CBM + energy_cutoff will be kept.
        band_structure: A band structure.

    Returns:
        A trimmed band structure.
    """
    if band_structure.is_metal():
        # Metallic: window is symmetric around the Fermi level.
        min_e = band_structure.efermi - energy_cutoff
        max_e = band_structure.efermi + energy_cutoff
    else:
        # Gapped: window spans VBM - cutoff to CBM + cutoff.
        min_e = band_structure.get_vbm()["energy"] - energy_cutoff
        max_e = band_structure.get_cbm()["energy"] + energy_cutoff
    new_bands = {}
    for spin, bands in band_structure.bands.items():
        # Keep a band if ANY of its k-point energies falls in the window.
        ibands = np.any((bands > min_e) & (bands < max_e), axis=1)
        new_bands[spin] = bands[ibands]
    return BandStructure(
        np.array([k.frac_coords for k in band_structure.kpoints]),
        new_bands,
        lattice=band_structure.lattice_rec,
        efermi=band_structure.efermi,
        coords_are_cartesian=False,
        structure=band_structure.structure,
    )
def color_text(text: str, *colors: str):
    """
    Prepend the given color codes to *text* and append ``Colors.normal`` so
    following output returns to the default style.

    Parameters
    ----------
    text: str
        String to color
    colors: Tuple[str]
        Any amount of color codes

    Returns
    -------
    str
        The colored string, or *text* unchanged on Windows (no ANSI support).
    """
    if os.name == "nt":  # Windows (bleh)
        return text
    return "".join(colors) + text + Colors.normal
def _run_command(c: InvokeContext, cmd: str) -> CommandResult:
    """
    Run *cmd* through the invoke context and wrap the outcome.

    :argument c: InvokeContext
    :argument cmd: str the command to run
    :raises NonZeroExitException: when the command exits non-zero
    """
    try:
        outcome = c.run(cmd)
    except UnexpectedExit as err:
        # Translate invoke's failure into the project's own exception type.
        raise NonZeroExitException(
            exit_code=err.result.exited,
            message=err.result.stderr,
            command=cmd,
        )
    return CommandResult(
        exit_code=outcome.exited,
        message=outcome.stdout,
        command=cmd,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.