| content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def _analyse_operator(operator, param_name='operator'):
"""Checks the properties of an operator and extracts the number of qubits.
Args:
operator: the operator to be analysed.
param_name: the parameter name as displayed in potential error messages.
    Returns:
        The number of qubits the operator acts on.
    Raises:
        ValueError: if the operator is not a square 2D array whose dimension
            is a power of two.
"""
if operator.ndim != 2:
raise ValueError(
'%s must be a 2D array (found: ndim=%d)'
%(param_name, operator.ndim)
)
rows, cols = operator.shape
if rows != cols:
raise ValueError(
'%s must be a square matrix [found: shape=(%d, %d)]'
%(param_name, rows, cols)
)
num_qubits = rows.bit_length()-1
if rows != 2 ** num_qubits:
raise ValueError(
'dimension of %s must be a power of 2 (found: dim=%d)'
%(param_name, rows)
)
return num_qubits
|
b2725dcef28351918647188f57fd36d072c7ed0d
| 50,623
|
def get_data_directory_files(root_directory):
"""Recursively get all the files within the provided root directory.
Get all of the files below the specified directory and return a list of filepath strings in
the expected module manifest format.
Args:
root_directory: Root directory to recursively walk through.
Returns:
A list of properly formatted filepath strings.
"""
data_files = []
# Manifest files only contain .js or .txt files
for extension in ['js', 'txt']:
for file in root_directory.glob(f'**/*.{extension}'):
# Filter manifest or README files from the result set
if not file.stem or file.stem in ['manifest', 'README']:
continue
# Match the expected file format listed within the manifest
data_files.append(str(file.relative_to(root_directory)))
return data_files
|
246fa937a88ca4c022d62c7ac2193412ea9f31bb
| 50,624
|
def comma_separated_str_to_list(s: str) -> list:
"""
Convert a comma separated string to a list.
Args:
s: string to convert
Returns: list value
"""
return s.split(',')
|
9513ca572c50f2c01a3743e6b01ec3b9360bd677
| 50,628
|
def check_buzz(number: int) -> str:
"""If a integer is divisible by five function outputs buzz
Args:
number (int): integer to check if divisible by five
Returns:
str: returns buzz if divisible by five, else continues
Examples:
>>> check_buzz(3)
''
>>> check_buzz(5)
'buzz'
"""
return "buzz" if number % 5 == 0 else ""
|
f9d48422d2880508dd685450e811fbc3b4a3862d
| 50,631
|
def pick(y, column):
"""A helper method to pick a specific output from the simulator node output.
Allows one to create separate nodes for specific outputs, e.g. number of clusters.
"""
return y[column]
|
2b30beba249c4bdcddc89876076e278e4777ad23
| 50,632
|
def is_rule_in_set(rule, rule_list):
"""Check if the given rule is present in the rule_list
:param rule_list: list of existing rules in dictionary format
:param rule: new rule to be added
:return boolean:
"""
for old_rule in rule_list:
if rule['source'] == old_rule['source']\
and rule['destination'] == old_rule['destination']\
and rule['action'] == old_rule['action']\
and rule['priority'] == old_rule['priority']:
return True
return False
|
a2dbbf49d7e084204683045919da2e6e2515245b
| 50,635
|
def parse_word_comment_meta(text):
"""
>>> parse_word_comment_meta('antialkoholista')
('antialkoholista', '', '')
>>> parse_word_comment_meta('absztinens <em>val</em>')
('absztinens', '', '<em>val</em>')
>>> parse_word_comment_meta('bornemissza <em>reg</em>')
('bornemissza', '', '<em>reg</em>')
>>> parse_word_comment_meta('bornemissza (mn)')
('bornemissza', '', '(mn)')
>>> parse_word_comment_meta(' [anyag]: elettelen')
('elettelen', 'anyag', '')
>>> parse_word_comment_meta('')
('', '', '')
>>> parse_word_comment_meta(' ragaszkodik <vkihez>')
('ragaszkodik', 'vkihez', '')
"""
text = text.strip()
if text:
word = meta = comment = ''
if ']:' in text:
comment, text = text.split(']:')
comment = comment.replace('[', '').strip()
    elif '<' in text and '<em>' not in text:
text, comment = text.split('<')[:2]
comment = comment.replace('>', '').strip()
elif '</em>:' in text:
meta, text = text.split(':')
text = text.strip()
if text:
word = text.split()[0]
if text and not meta:
meta = text[len(word)+1:].strip()
return word.strip(), comment, meta
else:
return '', '', ''
|
63c55da1254ea9876f9c2d337feb45bc605d3dc3
| 50,636
|
def gen_locale(locale): # type: (str) -> str
"""Returns the generated code for a given locale in the list."""
# We assume that all locale codes have only letters, numbers and hyphens.
assert locale.replace('-', '').isalnum(), locale
# clang-format enforces a four-space indent for initializer lists.
return ' PLATFORM_LOCALE({locale})'.format(locale=locale)
|
747d8018d99b7e530b0fd4ac7476bc27df980270
| 50,637
|
def role_vars(host):
"""Loading standard role variables."""
defaults_files = "file=./defaults/main.yml name=role_defaults"
vars_files = "file=./vars/main.yml name=role_vars"
ansible_vars = host.ansible("include_vars", defaults_files)["ansible_facts"]["role_defaults"]
ansible_vars.update(
host.ansible("include_vars", vars_files)["ansible_facts"]["role_vars"]
)
return ansible_vars
|
19a874a46ff75de0513c2539d45597aeab2e8941
| 50,640
|
import torch
def bin_acc(y_true, y_pred, sigmoid: bool = False):
"""
Returns accuracy per batch (with sigmoid function), i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
# round predictions to the closest integer
if sigmoid:
round_pred = torch.round(torch.sigmoid(y_pred))
else:
round_pred = torch.round(y_pred)
correct = (round_pred == y_true).float()
acc = correct.sum() / len(correct)
return acc
|
6aeacc8e24312b96a5e9a2c405780e73e774f7a6
| 50,643
|
def getMolParamIDToAtomIndex( oemol, ff):
"""Take an OEMol and a SMIRNOFF forcefield object and return a dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
Parameters
----------
oemol : OEMol
OpenEye OEMol with the molecule to investigate.
ff : ForceField
SMIRNOFF ForceField object (obtained from an ffxml via ForceField(ffxml)) containing FF of interest.
Returns
-------
param_usage : dictionary
Dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
"""
labels = ff.labelMolecules([oemol])
param_usage = {}
for mol_entry in range(len(labels)):
for force in labels[mol_entry].keys():
for (atom_indices, pid, smirks) in labels[mol_entry][force]:
                if pid not in param_usage:
param_usage[pid] = (smirks, [atom_indices])
else:
param_usage[pid][1].append( atom_indices )
return param_usage
|
ed7335b7dc671cbbfa5292bc57889a485fcdb5a5
| 50,648
|
def rounded_down (value, granularity) :
"""Returns `value` rounded down to nearest multiple of `granularity`.
>>> rounded_down (3, 5)
0
>>> rounded_down (8, 5)
5
>>> rounded_down (5, 5)
5
>>> rounded_down (-3, 5)
-5
>>> rounded_down (-8, 5)
-10
>>> rounded_down (0.97, 0.01)
0.97
>>> rounded_down (0.971, 0.01)
0.97
>>> rounded_down (0.9699999999, 0.01)
0.96
>>> rounded_down (0.9799999999, 0.01)
0.97
>>> rounded_down (0.97, 0.05)
0.95
>>> rounded_down (-0.97, 0.01)
-0.97
>>> rounded_down (-0.971, 0.01)
-0.98
>>> rounded_down (-0.9699999999, 0.01)
-0.97
>>> rounded_down (-0.9799999999, 0.01)
-0.98
>>> rounded_down (-0.97, 0.05)
-1.0
"""
if 0 < granularity < 1 :
scale = 1.0 / granularity
result = rounded_down (value * scale, 1.0) / scale
else :
result = value - (value % granularity)
return result
|
e324bd512d58ff610444e138f958b47179053bff
| 50,657
|
def eliminateStopwordsDoc(document, stopList):
"""
Eliminate stopwords in a single document
"""
A = []
for word in document.split():
if word not in stopList:
A.append(word)
return ' '.join(A)
|
e9fb527a89f5723d6b7ea7731630109e9b896ebb
| 50,661
|
def bounds_contains(bounds, x):
"""
Returns True if `x` is contained in the bounds, and False otherwise.
Parameters
----------
bounds : numpy.ndarray
Array of shape (d, 2).
Bounds of each dimension [ [x0, y0], [x1, y1], ..., [xd, yd] ],
representing the following cartesian product in R^d:
[x0, y0] X [x1, y1] X ... X [xd, yd].
x : numpy.ndarray
Array of shape (d,)
"""
dim = x.shape[0]
for dd in range(dim):
if x[dd] < bounds[dd, 0] or x[dd] > bounds[dd, 1]:
return False
return True
|
098cf7c12ea75bd7222e20149f4fd0d4580054ce
| 50,664
|
def subreddit_search_key(sr):
"""Search key for subreddit."""
return sr['name']
# return '{} {}'.format(sr['name'], sr['title'])
|
d0d3300cf53d82111c9988930c9bf97ce27359f8
| 50,669
|
def getIndexOfMinVal(lst):
""" Find index of smallest value in a list """
#initialize current min value and index to first element
minIndex = 0 # index of current minimal value
val = lst[0] # current minimal value
# loop through all elements
for i in range(1, len(lst)):
# if current value is smaller than current minimum -> update values
if lst[i] < val:
minIndex = i
val = lst[i]
return minIndex
|
fca37e2a8fdb1a04160098f80544366432d2c61f
| 50,672
|
def safeint(value):
"""safely converts value to integer or none"""
try:
return int(float(value))
    except (ValueError, TypeError):
return None
|
8fe8573eb2d0cac83b0851af5d3d171e0855d06b
| 50,676
|
def get_extension(file_path: str):
"""
Returns a file's extension if any, else None
:param str file_path:
:return:
"""
split = file_path.rsplit('.', 1)
return split[1] if len(split) > 1 else None
|
41c91a0628345da0421c32aec77bbc02abb62607
| 50,683
|
def get_task_name(options):
"""
Given a dictionary of command options, return the name of the task
:param options: Options passed to the handle method of the management command
:return: The task name (str)
"""
    options_dict = dict(options)
    return [key for key, value in options_dict.items() if str(value) == 'True'][0]
|
e58dfc7a4f7c4c88f5850b8f341b031cfc279e0c
| 50,686
|
def _getRightmost(categories):
"""
Get rightmost toplevel category.
categories -- list of Category, all category from database.
"""
rightmost = None
for cat in categories:
if not rightmost or cat.getRight() > rightmost.getRight():
rightmost = cat
return rightmost
|
e3d7f835b4d85ecfdf8cc6e519b76661f3dac90c
| 50,688
|
def reraise(error):
"""Return a function that raises the given error when evaluated"""
def local_function(*args, **kwargs):
raise error
return local_function
|
22e2f206bcdbc6aae792eff1518507fdcf036cc1
| 50,689
|
import logging
def _num_groups(num_tokens: int,
max_group_size: int,
num_experts: int,
num_expert_replicas: int,
strict_group_size: bool = False) -> int:
"""Returns the number of token routing groups.
Note: For pjit-based training, all quantities are global.
We select the smallest num_groups such that:
- num_groups >= num_tokens / max_group_size (ensuring the group size is no
larger than max_group_size),
- num_tokens % num_groups = 0 (ensuring that the group size evenly divides
into the num_tokens),
- num_groups % (num_expert_replicas * num_experts) = 0 (ensuring that number
of groups can be split across the total number of experts).
Args:
num_tokens: Number of tokens from input batch.
max_group_size: Maximum size of each token routing group. Actual group size
may end up being smaller.
num_experts: Total number of unique experts.
num_expert_replicas: Number of copies of each expert.
strict_group_size: If True, fail if unable to set the token group size equal
to max_group_size.
Returns:
Number of token routing groups.
Raises:
ValueError if we cannot find a group_size satisfying the above requirements.
"""
  # For pjit-based partitioning, we manipulate arrays globally. The number of
# experts must evenly divide the number of (global) groups.
min_num_groups = num_tokens // max_group_size
min_num_groups = max(min_num_groups, num_expert_replicas * num_experts)
def viable(n):
"""Returns true iff n is a viable number of groups."""
return num_tokens % n == 0 and n % (num_expert_replicas * num_experts) == 0
# Increase the number of groups (and decrease the group size) until we have
# a viable number of groups.
num_groups = min_num_groups
while num_groups < num_tokens and not viable(num_groups):
num_groups += 1
if num_tokens % num_groups > 0:
raise ValueError(
'Group size and the number of experts must divide evenly into the '
f'global number of tokens, but num_tokens={num_tokens}, while '
f'num_groups={num_groups} for max_group_size={max_group_size} '
f'and num_experts={num_experts}, each with {num_expert_replicas} '
'replicas')
group_size = num_tokens // num_groups
logging.info(
'Selected group_size=%d and num_groups=%d for input num_tokens=%d, '
'max_group_size=%d, num_experts=%d and num_expert_replicas=%d',
group_size, num_groups, num_tokens, max_group_size, num_experts,
num_expert_replicas)
if strict_group_size and group_size != max_group_size:
raise ValueError(
f'Selected group_size={group_size} is less than the '
f'max_group_size={max_group_size}. Exiting because strict mode is '
'active (strict_group_size=True)')
return num_groups
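# Illustrative worked example (values assumed for demonstration): with 512
# tokens, max_group_size=100, 4 experts and 1 replica, the smallest viable
# count is 8 groups (512 % 8 == 0 and 8 % 4 == 0), i.e. a group size of 64.
assert _num_groups(num_tokens=512, max_group_size=100, num_experts=4,
                   num_expert_replicas=1) == 8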
|
e3075322c50a635fa60a6b6c8b7d0aba69b04e95
| 50,696
|
def str_to_bool(s):
""" String to bool used to read config file
Parameters
----------
s : str
String to convert
Returns
-------
s : bool
Boolean value of input string
"""
if s == 'True':
return True
elif s == 'False':
return False
else:
        raise ValueError(f"cannot convert {s!r} to bool")
|
1ccfd2b39ef298fb7b02925fe667cc6d38398614
| 50,698
|
def IR(spot:list, m=1):
"""
IR(): A function to calculate Single Effective Interest Rate from an array of spot rates.
:param spot: An array/List of Spot rates
:type spot: list
:param m: Frequency of Interest Calculation (eg: 2 for Semi-annually), defaults to 1.
:type m: float
    :return: float, or None for non-positive values of m or an empty spot list.
:rtype: float
"""
if(m<=0 or len(spot)==0):
return None
return spot[-1]
|
c6c0c8cd221ab5fa52949693f44451576208bff1
| 50,703
|
def Gamma_phi_fn(site,p):
"""
Calculates the value of Gamma and phi given the input site and the input parameters.
Parameters
----------
    site : site object; ``site.pos[0]`` gives the x coordinate of the site
    p : Namespace class containing the parameters of the system
    Notes
    -----
    site.pos[0] is the x value of the position. !!! Could also implement as Heaviside step function, but this works as it is.
"""
if site.pos[0] <= p.left[-1]: #
Gamma = p.GammaL; phi = p.phiL
elif p.middle[0] <= site.pos[0] <= p.middle[-1]:
Gamma = 0; phi = 0
elif p.right[0] <= site.pos[0] <= p.right[-1]:
Gamma = p.GammaR; phi = p.phiR
else:
raise ValueError("In Gamma_phi_fn: site.pos[0] was in neither parts of the system. Cannot assign Gamma- and phi-values.")
return [Gamma,phi]
|
514dc0159d63fcd8f2c476605d06355bb3c66172
| 50,704
|
import inspect
def is_rpc_method(object):
""" Returns true if the given object is a method marked with @rpc """
if not inspect.ismethod(object):
return False
return hasattr(object, 'rpc')
|
2aefabf684cdde1f9b73e5ec3879d21d8f8ad7d0
| 50,714
|
def extrep_frac(lst1, lst2):
"""Returns the fraction of items in lst1 that are in lst2"""
if len(lst1) == 0:
return 0
num_rep = len([x for x in lst1 if x in lst2])
return num_rep / len(lst1)
|
02ee94eff05d60bab7035480bb94ea60212c1e1f
| 50,718
|
import string
def tensor_for_label(label):
"""Fake embedding based on occurrence of 26 ASCII letters in the label."""
return tuple(0.1 if c in label else -0.1 for c in string.ascii_lowercase)
|
4a723d6af26183c40cea48b9cd33eb6d0d7847fd
| 50,723
|
from pathlib import Path
def development_parse_input(
path_project, name_method,
path_database_literatur, path_database_inorganics, path_database_predictions,
path_database_metabolites, path_database_pathways,
organisms, pathways, drop_sumpeaks, drop_sumpeaks_no
):
"""
Parse input.
Parse user input and generate dictionary for easy access.
Parameters
----------
path_project : str
        Raw string to results folder.
    name_method : str
        Name of the method.
path_database_literatur : str
Raw string to literature data.
path_database_inorganics : str
Raw string to inorganics data.
path_database_predictions : str
Raw string to predictions folder.
path_database_metabolites : str
Raw string to metabolite information file.
path_database_pathways : str
Raw string to pathway folder.
organisms : list
        List of KEGG organism identifier.
    pathways : list
        List of pathways to include.
drop_sumpeaks : bool, default False
Drop convoluted metabolite mass transitions.
drop_sumpeaks_no : int, default 3
If drop_sumpeaks == True, drop convoluted mass transitions greater equal than int
Returns
-------
inp : dict
Dictionary with user input.
"""
inp = {}
# Set paths
inp['path_project'] = Path(path_project)
inp['name_method'] = name_method
inp['path_literature'] = Path(path_database_literatur)
inp['path_inorganics'] = Path(path_database_inorganics)
inp['path_predictions'] = Path(path_database_predictions)
inp['path_metabolites'] = Path(path_database_metabolites)
inp['path_pathways'] = Path(path_database_pathways)
# Set parameter
inp['organisms'] = organisms
inp['pathways'] = pathways
inp['drop_sumpeaks'] = drop_sumpeaks
inp['drop_sumpeaks_no'] = drop_sumpeaks_no
# Set plotting parameter
inp['figsize'] = (6,5)
inp['labelsize'] = 14
return inp
|
194b35e412c9a929095015c062f3b9c766c22e93
| 50,724
|
def OmahaCertificateTag(env, target, source):
"""Adds a superfluous certificate with a magic signature to an EXE or MSI.
The file must be signed with Authenticode in order for Certificate Tagging to
succeed.
Args:
env: The environment.
target: Name of the certificate-tagged file.
source: Name of the file to be certificate-tagged.
Returns:
Output node list from env.Command().
"""
certificate_tag = ('"' + env['ENV']['GOROOT'] + '/bin/go.exe' + '"' +
' run ' +
'$MAIN_DIR/../common/certificate_tag/certificate_tag.go')
magic_bytes = 'Gact2.0Omaha'
padded_length = len(magic_bytes) + 2 + 8192
certificate_tag_cmd = env.Command(
target=target,
source=source,
action=certificate_tag + ' -set-superfluous-cert-tag=' + magic_bytes +
' -padded-length=' + str(padded_length) + ' -out $TARGET $SOURCE',
)
return certificate_tag_cmd
|
bceee2c90d593b84cd7b95413110f23c36fc7246
| 50,728
|
def abbrv(num):
"""
Shortens the amount so it will have a letter at the end to indicate the place value of the number (e.g. 1,500 -> 1.5K)
    This goes up to trillion.
    """
    abbrv = {"T": 1_000_000_000_000, "B": 1_000_000_000, "M": 1_000_000, "K": 1000}
    for key, abbrv_value in abbrv.items():
        if num / abbrv_value >= 1:
            # '%g' drops trailing zeros without eating significant digits;
            # str.strip(".0") strips characters from both ends and would turn "10.0" into "1".
            return ('%g' % round(num / abbrv_value, 2)) + key
|
19955aa5eab46af33c0422ed32c42f572b13cd12
| 50,730
|
def set_params(object,kw,warn=1):
"""Given an object and a dictionary of keyword arguments,
set only those object properties that are already instance
variables of the given object. Returns a new dictionary
without the key,value pairs that have been used. If
all keywords have been used, afterwards, len(kw)==0."""
kw = kw.copy()
    for k, v in list(kw.items()):
if hasattr(object,k):
setattr(object,k,v)
del kw[k]
return kw
|
a4f9286a331478de2b93be9b86e79038d75c754c
| 50,733
|
def merge_traces(mtraces):
"""Merge MultiTrace objects.
Parameters
----------
mtraces : list of MultiTraces
Each instance should have unique chain numbers.
Raises
------
A ValueError is raised if any traces have overlapping chain numbers.
Returns
-------
A MultiTrace instance with merged chains
"""
base_mtrace = mtraces[0]
for new_mtrace in mtraces[1:]:
for new_chain, strace in new_mtrace._straces.items():
if new_chain in base_mtrace._straces:
raise ValueError("Chains are not unique.")
base_mtrace._straces[new_chain] = strace
return base_mtrace
|
d94bf6eec17445640a4ad3077e17380793d8233b
| 50,741
|
def get_resource_dict(package_id, resource_id, acl_xml):
"""
Derives a resource_dict dictionary from the supplied package ID,
resource ID, and access control XML values
"""
resource_dict = {"package_id" : package_id,
"resource_id" : resource_id,
"acl_xml" : acl_xml
}
return resource_dict
|
b0b375e136f66d0c1dfaeff3519ef92d82864832
| 50,746
|
def get_bags_contained(rules, color):
""" Counts the bags that are in the specified bag.
Solved with recursion.
:param rules: rules
:param color: color of bag
:return: int
"""
counter = 0
# if no bags contained return 0
if rules[color] is None:
return counter
# iterate over bags that are directly in the current bag
for bag, count in rules[color].items():
# add amount of that bag
counter += count
# add amount of that bag * bags in that bag
counter += count * get_bags_contained(rules, bag)
return counter
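# Illustrative usage sketch with an assumed rules dict: a bag holding 1 olive
# bag and 2 plum bags (both empty) contains 3 bags in total.
example_rules = {'shiny gold': {'dark olive': 1, 'vibrant plum': 2},
                 'dark olive': None, 'vibrant plum': None}
assert get_bags_contained(example_rules, 'shiny gold') == 3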
|
95c605f44a1178fd28be13206fdca33232418596
| 50,747
|
def walk_down(subgraph, node):
""" return all the nodes below node including node """
res = [node]
for edge in subgraph.out_edges(node.name):
res.extend(walk_down(subgraph, edge.to_node))
return res
|
4213c2dcb9d5b84ec77d9d7b923b9ffb54faf98d
| 50,752
|
def create_grid_string(dots, xsize, ysize):
"""
    Creates a grid of size (xsize, ysize)
with the given positions of dots.
"""
grid = ""
for y in range(ysize):
for x in range(xsize):
grid += "." if (x, y) in dots else "#"
grid += "\n"
return grid
|
b1653b8e710edfe49b3d54e113b6709e51fb92b7
| 50,755
|
def dni_to_value(scale_value):
"""
This method will transform a string value from the DNI scale to its
confidence integer representation.
The scale for this confidence representation is the following:
.. list-table:: DNI Scale to STIX Confidence
:header-rows: 1
* - DNI Scale
- STIX Confidence Value
* - Almost No Chance / Remote
- 5
* - Very Unlikely / Highly Improbable
- 15
* - Unlikely / Improbable
- 30
* - Roughly Even Chance / Roughly Even Odds
- 50
* - Likely / Probable
- 70
* - Very Likely / Highly Probable
- 85
* - Almost Certain / Nearly Certain
- 95
Args:
scale_value (str): A string value from the scale. Accepted strings are
"Almost No Chance / Remote", "Very Unlikely / Highly Improbable",
"Unlikely / Improbable", "Roughly Even Chance / Roughly Even Odds",
"Likely / Probable", "Very Likely / Highly Probable" and
"Almost Certain / Nearly Certain". Argument is case sensitive.
Returns:
int: The numerical representation corresponding to values in the DNI
scale.
Raises:
ValueError: If `scale_value` is not within the accepted strings.
"""
if scale_value == 'Almost No Chance / Remote':
return 5
elif scale_value == 'Very Unlikely / Highly Improbable':
return 15
elif scale_value == 'Unlikely / Improbable':
return 30
elif scale_value == 'Roughly Even Chance / Roughly Even Odds':
return 50
elif scale_value == 'Likely / Probable':
return 70
elif scale_value == 'Very Likely / Highly Probable':
return 85
elif scale_value == 'Almost Certain / Nearly Certain':
return 95
else:
raise ValueError("STIX Confidence value cannot be determined for %s" % scale_value)
|
13b80d6b0c12fbc36d59db9d6626cf747beca627
| 50,756
|
from typing import Sequence
from typing import Generator
def dictionize(fields: Sequence, records: Sequence) -> Generator:
"""Create dictionaries mapping fields to record data."""
return (dict(zip(fields, rec)) for rec in records)
|
ca179ef5d03e8d5860c36872a61405b6a33ad463
| 50,758
|
import warnings
def ratio(class_label, data):
"""Compute ratio of all data that is `class_label`."""
try:
return float(data.count(class_label)) / len(data)
except ZeroDivisionError:
warnings.warn("data {} is empty".format(id(data)))
return 0.
|
7742804261d3aca98dd9ba19e083fd5c158310b9
| 50,760
|
def _cast_to_int(integer_string):
"""Cast a string to a integer."""
ret = int(integer_string)
if ret < -1:
raise ValueError()
return ret
|
3eb4796f19467f95030ffdc8eb18eb7489329e98
| 50,765
|
def _add_system(query, data):
"""Add data from successful system MIB query to original data provided.
Args:
query: MIB query object
data: Three keyed dict of data
Returns:
data: Aggregated data
"""
# Process query
result = query.system()
# Add tag
for primary in result.keys():
for secondary in result[primary].keys():
for tertiary, value in result[primary][secondary].items():
data[primary][secondary][tertiary] = value
# Return
return data
|
cb37d05ac484f9a8f57b7a61afd9a40a971a23d0
| 50,770
|
import re
def unflat(data, separator="_", lseparator=('[', ']')):
"""
Unflat the dict
Args:
data (dict): the dict to unflat. Must be a key/value dict.
        separator (:obj:`str`, optional): key separator.
        lseparator (:obj:`tuple`, optional): pair of characters delimiting a list index in a key, default ``('[', ']')``.
Returns:
dict: Unflatted dict
"""
unflat_dict = {}
for k in sorted(data.keys()):
context = unflat_dict
for sub_key in k.split(separator)[:-1]:
if sub_key not in context:
context[sub_key] = {}
context = context[sub_key]
key = k.split(separator)[-1]
regex = r'(\w*)\{0}(\d)\{1}'.format(lseparator[0], lseparator[1])
match = re.match(regex, key)
if match:
lkey = match.group(1)
lpos = int(match.group(2))
            if lkey not in context:
context[lkey] = []
context[lkey].insert(lpos, data[k])
else:
context[key] = data[k]
return unflat_dict
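# Illustrative usage sketch (keys assumed for demonstration): nested keys are
# joined with '_' and list positions are written as 'name[index]'.
assert unflat({'a_b': 1, 'a_c[0]': 2}) == {'a': {'b': 1, 'c': [2]}}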
|
dd39961dca2ad7052c72e46cbd313ca6b45ac7f4
| 50,773
|
def load_org2_gateway(network_config):
""" Loads the `org2_gw` Gateway """
return network_config.get_gateway('org2_gw')
|
55f1770759f71a1ab730689920e17eec549cd95f
| 50,782
|
def int_addr(addr):
"""Gets the integer representation of an address"""
return int(addr[1:])
|
3f00ae151bff20516fbaddda73feb147aa1c8424
| 50,783
|
def val_default(val, default=True):
"""Value or default (for attributes)
"""
if val is None:
return default
return val
|
6a7bb5a475428d36ad046b4e7a7099f2573eae76
| 50,784
|
def cs_water_Tp(T=298, p=1):
"""
    Calculates chemical shift of water based on Temperature and pressure (according to Maciej).
"""
# according to Maciej
cs = 5.945e-7*p*T -1.612e-4*p -1.025e-2*T + 7.866
return(cs)
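# Quick numerical check with the default arguments: the polynomial evaluates
# to roughly 4.81 for T=298, p=1 (presumably ppm).
assert abs(cs_water_Tp() - 4.81) < 0.01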
|
de450a97255cd245ae02f2219bddc0b5f201bfec
| 50,793
|
from typing import Tuple
from typing import List
def read_network(file: str) -> Tuple[int, int, List[int]]:
"""
Read a Boolean network from a text file:
Line 1: number of state variables
Line 2: number of control inputs
Line 3: transition matrix of the network (linear representation of a logical matrix)
:param file: a text file
:return: (n, m, L), where
n: number of state variables
m: number of control inputs
L: network transition matrix
"""
with open(file, 'r') as f:
n = int(f.readline().strip())
m = int(f.readline().strip())
N = 2 ** n
M = 2 ** m
line = f.readline().strip()
assert line, f'network transition matrix must be provided!'
numbers = line.split()
assert len(numbers) == M * N, f'The transition matrix must have {M * N} columns'
L = [int(num) for num in numbers]
for i in L:
assert 1 <= i <= N, f'All integers in the network transition matrix must be in range [1, {N}]'
return n, m, L
|
16ef7751cbd4af000ed33de7b065273225e35d76
| 50,798
|
def remove_common_molecules(reactants, products):
"""
Removes common species between two lists leaving only reacting species.
Parameters
----------
reactants, products : list of str
List containing strings all molecular species.
Returns
-------
tuple of str
Reduced lists for both reactants and products such that only species
that participate in the reaction remain.
"""
reduced_react = reactants.copy()
reduced_prod = products.copy()
reduced_react.sort()
reduced_prod.sort()
if reduced_react == reduced_prod:
raise Exception("Reactants and products are the same.")
else:
pass
for mol in reactants:
if mol in reduced_prod:
reduced_prod.remove(mol)
reduced_react.remove(mol)
return (reduced_react, reduced_prod)
|
333b29afdd251bc6ad80910902fa7cb1433e493c
| 50,805
|
import random
def random_string(length=6, password_safe=False):
"""Returns a random string with `length` characters of a-z, A-Z
and the digits 0-9.
:param length: number of characters to randomize (default 6 characters).
    :param password_safe: set to `True` to exclude o, O and 0.
"""
if password_safe:
CHOICES = 'aA1bB2cC3dD4eE5fF6gG7hH8iI9jJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ'
else:
CHOICES = '0aA1bB2cC3dD4eE5fF6gG7hH8iI9jJkKlLmMnNoOpPqQrRsStTuUvVwW' \
'xXyYzZ'
return ''.join(random.choice(CHOICES) for i in range(length))
|
7d38429fdf0dd11290077558f7eb598d4a43d01e
| 50,806
|
def _FindAndRemoveArgWithValue(command_line, argname):
"""Given a command line as a list, remove and return the value of an option
that takes a value as a separate entry.
Modifies |command_line| in place.
"""
if argname not in command_line:
return ''
location = command_line.index(argname)
value = command_line[location + 1]
command_line[location:location + 2] = []
return value
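# Illustrative usage sketch (command line assumed): '-o' and its value are
# removed from the list in place and the value is returned.
cmd = ['compile', '-o', 'out.bin', 'main.c']
assert _FindAndRemoveArgWithValue(cmd, '-o') == 'out.bin'
assert cmd == ['compile', 'main.c']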
|
07dc13e9ebfadd37ca0ff24de1407869b8667508
| 50,813
|
def rank(p1, p2):
"""
Ranks two paths
:param p1: First path object
:param p2: Second path object
:return: if p1 is ranked more than p2, then 1; if equal, then 0; else -1
"""
if len(p1.relationships) > len(p2.relationships):
return 1
elif len(p1.relationships) == len(p2.relationships):
return 0
else:
return -1
|
666a08834243c87458f28fca4936dcde67165935
| 50,818
|
import platform
def get_os_name() -> str:
"""Get operating system name that Node.js repos know about."""
return platform.system().lower()
|
b70c031670b27ddf2444d76c7495311a6f9e7eee
| 50,828
|
def get_options(options):
"""
Get options for dcc.Dropdown from a list of options
"""
opts = []
for opt in options:
opts.append({'label': opt.title(), 'value': opt})
return opts
|
61df747bf10a08aff481c509fbc736ef406d006f
| 50,832
|
def get_celery_worker_module_name(app_id):
""" Returns the python module name of the queue worker script.
Args:
app_id: The application ID.
Returns:
A string of the module name.
"""
return 'app___' + app_id
|
52b79ec69da4c064062fcfb5dcd7edb94a095809
| 50,835
|
def solution(l):
"""
Solution 4 again passes all but the last test case. Try to speed things
up some using a dynamic programming-like approach.
This solution wound up passing all of the test cases -- the key here is to
    use a memoization/dynamic programming approach. A core component of this
problem involves finding all multiples of a number after a given number in
the list. In the brute force approach, we do the following:
0: for each li:
1: for each lj such that j > i:
2: if li divides lj:
3: for each lk such that k > j:
4: if lj divides lk:
(li, lj, lk) is a valid solution
Note that steps 3 and 4 involve counting the number of valid values of lk
for a given lj. Since we are evaluating all possible values of lj for each
possible value of li, this means that we would potentially repeat steps 3
and 4 multiple times for the *same value of lj*.
Take the example:
l = [1, 1, 1, 1, 3]
In this case we would evaluate the number of valid lks for the final '1'
3 times. In the worst case, where l is of length N and consists of
all 1's, we would be finding the valid lks for the penultimate lj (N-2)
times.
    To improve on this, we can cache/memoize the values as we compute them.
We'll store the smallest computation -- the number of possible values of lk
for a given lj. Then, as we traverse the list, if we have already
computed the values of lk for a given lj, we just use the value that we
previously computed. This touches on the concept of Dynamic Programming.
"""
# Make sure no numbers are less than 1 or greater than 999999
for li in l:
if li > 999999 or li < 1:
return 0
# Get number of elements in the list
n_l = len(l)
# If there are fewer than 3 elements in the list, then there
# can't be any lucky triples, so return 0
if n_l < 3 or n_l > 2000:
return 0
# Initialize counts -- d_cts[j] corresponds to the number of valid values
# of l[k] for l[j].
d_cts = [-1] * n_l
ctr = 0
# First iterate over i
for i in range(n_l-2):
for j in range(i+1, n_l-1):
if l[j] % l[i] == 0:
# Check to see if we already computed this
if d_cts[j] == -1:
# Count the number of valid divisors for l[j]
d_ctr = 0
for k in range(j+1, n_l):
if l[k] % l[j] == 0:
d_ctr += 1
d_cts[j] = d_ctr
# Add the pre-computed value
ctr += d_cts[j]
return ctr
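# Quick sanity check using the docstring's example list: every index triple
# (i, j, k) with i < j < k forms a divisibility chain here, so the answer is
# C(5, 3) = 10.
assert solution([1, 1, 1, 1, 3]) == 10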
|
0387a39e6d087f381aece01db2718a9e90ba642c
| 50,836
|
import torch
def fista_momentum(cur_Z, prev_Z, momentum):
"""
Calculates a linear combination of the last two sparse codings with a momentum term
:param cur_Z: Sparse code found in current step
:param prev_Z: Sparse code found in previous step
:param momentum: float. Momentum term.
:return: Updated sparse code to be used in next step.
"""
next_momentum = (1 + torch.sqrt(1+4*(momentum**2)))/2
momentum_ratio = (momentum - 1) / next_momentum
pushed_Z = (cur_Z - prev_Z) * momentum_ratio
next_Z = cur_Z + pushed_Z
return next_Z, next_momentum
|
0b8435cae2b006eb18604d71933d46a4c8e3c5d6
| 50,838
|
def blazar_find_old_host_alloc(db):
"""Find computehost allocations tied to expired leases"""
sql = '''\
SELECT ca.id, l.id AS lid, ch.hypervisor_hostname
FROM blazar.computehost_allocations ca
JOIN blazar.reservations r
ON ca.reservation_id=r.id
JOIN blazar.leases l
ON r.lease_id=l.id
JOIN blazar.computehosts ch
ON ca.compute_host_id=ch.id
WHERE ca.deleted is Null
AND l.end_date < curdate()
'''
return db.query(sql, limit=None)
|
717bcb2d788be24d8c1b96bab62d60c54e196a25
| 50,839
|
def connect_streets(st1, st2):
"""
Tells if streets `st1`, `st2` are connected.
@param st1 street 1
@param st2 street 2
    @return tuple of tuples (0 or 1, 0 or 1), or None if the streets are not connected
    Each tuple means:
    * 0 or 1 means the first or last extremity of the first street
    * 0 or 1 means the first or last extremity of the second street
``((0, 1),)`` means the first point of the first street is connected
to the second extremity of the second street.
"""
a1, b1 = st1[0], st1[-1]
a2, b2 = st2[0], st2[-1]
connect = []
if a1 == a2:
connect.append((0, 0))
if a1 == b2:
connect.append((0, 1))
if b1 == a2:
connect.append((1, 0))
if b1 == b2:
connect.append((1, 1))
return tuple(connect) if connect else None
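# Illustrative usage sketch (streets assumed as lists of points): the last
# point of the first street meets the first point of the second street.
assert connect_streets([(0, 0), (1, 1)], [(1, 1), (2, 2)]) == ((1, 0),)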
|
77ee8f4c344277b09340cc3a38e16ee5ae11f702
| 50,842
|
def sum_of_n(n):
"""Sum all numbers from 0 to N, recursively."""
if n == 0:
return 0
return n + sum_of_n(n - 1)
|
cc804ad0fb745ebdc6badfb7d50c1d0fac2e7c5b
| 50,843
|
def get_nested_serializer(instance, key):
"""Returns the instance's nested serializer under the 'key' field with its data filled out
"""
serializer_class = instance._declared_fields.get(key).__class__
serializer = serializer_class(data=instance.validated_data[key])
serializer.is_valid(raise_exception=True)
return serializer
|
2423289a4455783880f83d0804f74d0ceded03ce
| 50,846
|
def _highlight(timing: float) -> str:
"""Highlights a timing based on whether it is under 60 fps."""
if timing < 1 / 60:
return f"[#57A773] {timing}"
return f"[red] {timing}"
|
e4481d0631394850d00900edec3acd5c7f5b0cd6
| 50,848
|
import itertools
def repeat(value, times=-1):
""":yaql:repeat
Returns collection with value repeated.
:signature: value.repeat(times => -1)
:receiverArg value: value to be repeated
:argType value: any
:arg times: how many times repeat value. -1 by default, which means that
returned value will be an iterator to the endless sequence of values
:argType times: int
:returnType: iterable
.. code::
yaql> 1.repeat(2)
[1, 1]
yaql> 1.repeat().take(3)
[1, 1, 1]
"""
if times < 0:
return itertools.repeat(value)
else:
return itertools.repeat(value, times)
|
1554022dbc709ff40e1a600cd892c8a07782c6e1
| 50,849
|
def solution(pence: int) -> int:
"""Returns the number of different ways to make X pence using any number of coins.
The solution is based on dynamic programming paradigm in a bottom-up fashion.
>>> solution(500)
6295434
>>> solution(200)
73682
>>> solution(50)
451
>>> solution(10)
11
"""
coins = [1, 2, 5, 10, 20, 50, 100, 200]
number_of_ways = [0] * (pence + 1)
number_of_ways[0] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(coin, pence + 1, 1):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
|
d7670650fe734b432bbfb55f13c545905fee3bce
| 50,850
|
def remove_nans(frame):
""" Remove rows with nan values
Args:
frame (pandas Series or Dataframe): column(s)
Returns:
frame: the same data structure without nans
"""
frame = frame.dropna()
return frame
|
b6829f06c5fa130f1c42590359e0dc8e15da4e7b
| 50,851
|
def filtered_dict(dictionary: dict, threshold, invert: bool = False):
"""
Removes all keys from a dictionary whose value is less than a given threshold
:param dictionary: The dictionary to filter
:param threshold: The threshold below which to remove elements
:param invert: Whether to invert the threshold (remove elements above threshold)
:return: The filtered dictionary
"""
return {key: value for (key, value) in dictionary.items() if (value < threshold) == invert}
|
de5687a625fd1d5dfa86e9fe06a8cd85b8a899d9
| 50,857
|
def dict_with_keys(dictionary, keys):
"""
Returns a new dictionary including only the specified keys
Args:
dictionary(dict): dictionary to filter keys
keys(iterable): iterable of keys to filter to
Returns:
dict: copy of original dictionary inclusive only of specified keys
"""
return {key: dictionary[key] for key in keys}
|
e93d547b515269c9e2320e41d15ab39ae363c209
| 50,862
|
def iscc_clean(i):
"""Remove leading scheme and dashes"""
return i.split(":")[-1].strip().replace("-", "")
|
66a6705d48528b7c182fc051c9793bae3506f0dc
| 50,864
|
def reverse_range(object):
"""Yields reverse range of object: list(reverse_range([1, 2, 3])) -> [2, 1, 0]."""
return range(len(object) - 1, -1, -1)
|
9bacca7d17e3ae5678b45c37eec1ea8d0fc78cf6
| 50,865
|
import threading
def start_agent_thread(cls, **kwargs):
"""Instantiate an agent class and run it in a new daemon thread.
Returns the thread object.
"""
agent = cls(**kwargs)
thread = threading.Thread(target=agent.run)
thread.daemon = True
thread.start()
return thread
|
2f7adfe81df347ee83932293b3026ab285c0b516
| 50,866
|
def combine_quality( dqarr1, dqarr2 ):
"""
Combines two data quality arrays to make a third.
The bitwise nature of the data quality flags means that two
arrays can be combined without needing to know the meaning
of the underlying flags.
:Parameters:
dqarr1: numpy array or None
numpy array which represents a dq plane (or part of it).
Can be None if there is no data quality information.
dqarr2: numpy array or None
numpy array which represents another dq plane (or part of it).
Can be None if there is no data quality information.
:Returns:
newdq: numpy array
numpy array containing a combination of both DQ arrays.
Can be None if both of the input arrays are None.
"""
# Check which of the arrays are defined.
if dqarr1 is not None and dqarr2 is not None:
# There are two quality arrays - merge them.
# The bitwise OR operation combines the effect of the flags
# without the need to know what they mean.
newdq = dqarr1 | dqarr2
elif dqarr1 is not None:
# Only array 1 is defined - return it.
newdq = dqarr1
elif dqarr2 is not None:
# Only array 2 is defined - return it.
newdq = dqarr2
else:
# Neither array is defined - return None.
newdq = None
return newdq
|
0d24c74309b761d054455b5d77061330d48e20e2
| 50,867
|
def list_to_string(a_list, quote=False):
"""
Converts a list to a set of strings separated by a comma.
:param a_list: The list to convert.
:type a_list: list
:param quote: If true, then surround each item with quotes.
:type quote: bool
:return: The string version of the list.
"""
if quote:
string = ', '.join(('"{0}"'.format(v) for v in a_list))
else:
string = ', '.join(('{0}'.format(v) for v in a_list))
return string
|
d7a517225e1a5a70320f05b0c7c69e3b5c558f4c
| 50,872
|
def argtopk_preprocess(a, idx):
""" Preparatory step for argtopk
Put data together with its original indices in a tuple.
"""
return a, idx
|
55e7566a5a88bf45a0eda76513a10528180dc8a9
| 50,876
|
import hashlib
def sha1_hasher(s):
""" A simple utility function for producing a sha1 digest of a string. """
return hashlib.sha1(s).digest()
|
a214bc5cc78e7398373f7dd7c50d1e3ef59d1f8f
| 50,879
|
def generate_stack_id(stack_name: str) -> str:
"""Generate a stack ID from the stack name"""
return (
f"arn:aws:cloudformation:ap-southeast-2:123456789012:stack/{stack_name}/"
"bd6129c0-de8c-11e9-9c70-0ac26335768c"
)
|
130749de4bf983b22a26aaac5412c55c5baf5e37
| 50,883
|
def _mock_kernel(x1, x2, history):
"""A kernel that memorizes its calls and encodes a fixed values for equal/unequal
datapoint pairs."""
history.append((x1, x2))
if x1 == x2:
return 1
else:
return 0.2
|
d6319a63b987a162d99d810aebabb737328c2d9e
| 50,884
|
import math
def rotate_point(centerPoint,point,angle):
"""Rotates a point around another centerPoint. Angle is in degrees.
Rotation is counter-clockwise"""
angle = math.radians(angle)
# print(centerPoint, point)
temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
temp_point = ( temp_point[0]*math.cos(angle)-temp_point[1]*math.sin(angle) , temp_point[0]*math.sin(angle)+temp_point[1]*math.cos(angle))
temp_point = int(temp_point[0]+centerPoint[0]) , int(temp_point[1]+centerPoint[1])
return temp_point
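# Quick sanity check: rotating (1, 0) by 90 degrees counter-clockwise about the
# origin lands on (0, 1) (coordinates are truncated to ints by the function).
assert rotate_point((0, 0), (1, 0), 90) == (0, 1)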
|
0c6635653d1b9dad1269bb69321cea3f859713a5
| 50,885
|
def read_var_int(handle):
"""
Read a variable-length integer from the given file handle.
"""
res = 0
shift = 1
while True:
byte = handle.read(1)[0]
res += (byte & 0x7f) * shift
if byte & 0x80: break
shift <<= 7
res += shift
return res
|
380a708925bf1d07fb0a81de073b559aed16eb0c
| 50,892
|
def cleanHTML(html, lowercase=False, removeNonAscii=False, cutoffFooter=False, descriptionOnly=False, replaceDict=None):
"""
Args:
html: html string
lowercase: boolean indicating if the html should be sent to lowercase
removeNonAscii: boolean indicating if the html should have non ascii characters removed
replaceDict: a dictionary where keys represent words to be replaced by their values
Returns:
The html with the following adjustments:
Replace following with spaces: ( ) /
send to lowercase
remove non ascii
"""
if lowercase:
html = html.lower()
if removeNonAscii:
html = ''.join([i if ord(i) < 128 else ' ' for i in html])
if replaceDict:
for word, replaceWord in replaceDict.items():
html = html.replace(word.lower(), replaceWord)
if cutoffFooter:
html = html.split("html = html.split(" ")")
html = html[0]
# if descriptionOnly:
# "jobsearch-JobComponent-description"
return html
|
6b6efa1855cd98ab7ae4f6a70c8abb1ca35705af
| 50,893
|
import re
def hump2underline(hump_str):
"""
    Convert a camelCase ("hump"-form) string to snake_case
    :param str hump_str: camelCase string
    :return: all-lowercase string with underscores inserted between words
    :rtype: str
"""
patt = re.compile(r'([a-z]|\d)([A-Z])')
underline_str = re.sub(patt, r'\1_\2', hump_str).lower()
return underline_str
|
624ccb5538743d0706619b3768f301bf276b8cc2
| 50,894
|
def get_spawning_profile_list(intersection):
"""
Returns the spawning profile list of the intersection
:param intersection: intersection
:type intersection: Intersection
:return: The spawning profile list of the intersection
"""
return intersection.get_spawning_profile_list()
|
1239299901bb8109d264a948d1d47d8d02ff19b9
| 50,896
|
def process_fields(tag):
"""
Process the 'field' element of a tag dictionary.
    Process the fields string - a tab-separated string of "key:value" pairs
- by generating key-value pairs and appending them to the tag dictionary.
Also append a list of keys for said pairs.
:param tag: dict containing a tag
:returns: dict containing the key-value pairs from the field element, plus
a list of keys for said pairs
"""
fields = tag.get('fields')
if not fields: # do nothing
return {}
# split the fields string into a dictionary of key-value pairs
result = dict(f.split(':', 1) for f in fields.split('\t'))
# append all keys to the dictionary
result['field_keys'] = sorted(result.keys())
return result
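# Illustrative usage sketch with an assumed tag dict: fields are tab-separated
# 'key:value' pairs, and 'field_keys' lists the parsed keys in sorted order.
assert process_fields({'fields': 'a:1\tb:2'}) == {'a': '1', 'b': '2',
                                                  'field_keys': ['a', 'b']}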
|
41e7e958fe350135559673ae7877ffb1f254caee
| 50,897
|
def join_with_function(func, values1, values2):
"""Join values using func function."""
return [
func(value1, value2)
for value1, value2 in zip(values1, values2)
]
|
6548c0777c14d2e02a2fe3579e0cc368a9729a32
| 50,898
|
def A_int(freqs,delt):
"""Calculates the Intermediate Amplitude
Parameters
----------
freqs : array
The frequencies in Natural units (Mf, G=c=1) of the waveform
delt : array
Coefficient solutions to match the inspiral to the merger-ringdown portion of the waveform
"""
return (delt[0]+delt[1]*freqs+delt[2]*freqs**2+delt[3]*freqs**3+delt[4]*freqs**4)
|
8fcdb60271e5938faad7f89d3d08bc78281a88f6
| 50,904
|
import math
def tangent_points_to_circle_xy(circle, point):
"""Calculates the tangent points on a circle in the XY plane.
Parameters
----------
circle : tuple
center, radius of the circle in the xy plane.
point : tuple
XY(Z) coordinates of a point in the xy plane.
Returns
-------
points : list of tuples
the tangent points on the circle
Examples
--------
>>> circle = (0, 0, 0), 1.
>>> point = (2, 4, 0)
>>> t1, t2 = tangent_points_to_circle_xy(circle, point)
>>> Point(*t1), Point(*t2)
(Point(-0.772, 0.636, 0.000), Point(0.972, -0.236, 0.000))
"""
m, r = circle[0], circle[1]
cx, cy = m[0], m[1]
px = point[0] - cx
py = point[1] - cy
a1 = r*(px*r - py*math.sqrt(px**2 + py**2 - r**2))/(px**2 + py**2)
a2 = r*(px*r + py*math.sqrt(px**2 + py**2 - r**2))/(px**2 + py**2)
b1 = (r**2 - px*a1)/py
b2 = (r**2 - px*a2)/py
p1 = (a1 + cx, b1 + cy, 0)
p2 = (a2 + cx, b2 + cy, 0)
return p1, p2
|
a5d09b4622a043ea12330c8516e6d7f3fc591aa2
| 50,907
|
import re
def ends_in_file(path):
""" Return True when path ends with '.%ext' or '%fn' """
_RE_ENDEXT = re.compile(r"\.%ext[{}]*$", re.I)
_RE_ENDFN = re.compile(r"%fn[{}]*$", re.I)
return bool(_RE_ENDEXT.search(path) or _RE_ENDFN.search(path))
|
b7087c407a474e9705aebe487a73a2daad124599
| 50,908
|
import re
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
return float(re.sub("\(.+\)", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub("\(.+\)", "", text[0]))
|
d97443664e1beb3535d9b1a162a19db2f3e6ed17
| 50,909
|
from pathlib import Path
def prov_data_paths(data_dir: Path) -> dict:
"""Build a dictionary to map a province to it's GeoPackage file."""
paths = {}
for item in data_dir.rglob('*NRN_*.gpkg'):
parts = item.name.split('_')
prcode = parts[1]
major = parts[2]
minor = parts[3]
if '.' in minor:
minor = minor.split('.')[0]
paths[prcode] = {'path': item, 'major': major, 'minor': minor}
return paths
|
db1e1e7024f8f2138b93c79811da230858246ea3
| 50,912
|
import difflib
def get_best_match(texts, match_against, ignore=' ', treshold=0.9):
"""Get the best matching from texts, none if treshold is not reached
texts: list of texts to choose from
match_against: text wanted
ignore: junk characters (set eg to "_ ")
treshold: best match must be at least this value.
"""
# JUNK = space _
    # now time to figure out the matching
ratio_calc = difflib.SequenceMatcher(lambda x: x in ignore)
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
for text in texts:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
if best_ratio > treshold:
return best_text
|
74fffbd7ed3f0a90594bf61604fa25962d72cc49
| 50,921
|
from datetime import datetime
import pytz
def get_date(prompt: str, timezone: str) -> datetime:
""" Obtains a date from user input. """
date_str = input(f'Enter date of {prompt} (yy-mm-dd hh:mm): ')
date = datetime.strptime(date_str, "%y-%m-%d %H:%M")
print(f'The date you entered is: {date}')
    return pytz.timezone(timezone).localize(date)
|
26dca58b6cb4edc3fd61032ed931aa3963efc63b
| 50,923
|
def tmp_config(transformations=None, remove_transactions=None, custom_category_map=None):
"""Helper to easily initialize a config"""
return {
"settings": dict(
transformations=transformations,
remove_transactions=remove_transactions,
custom_category_map=custom_category_map,
)
}
|
e62d0187c0dd5b8b62441e8c8adf4a7d948aacc5
| 50,930
|
import math
def pseudo_root_search_mathias(eos, r, temp, a_mix, b_mix,
desired_phase,
kappa,
search_iterations=10):
"""
Solves the Mathias constraint given by d_press_d_rho - 0.1*r*temp == 0
for SRK and PR equations of state. This method is technically independent of the EOS
provided the relevant derivatives exist.
Input parameters are the eos, gas_constant as r, temp, a_mix and b_mix which are
from the eos mixture calculation.
The desired phase is either 'vap' or 'liq' indicated a vapor like phase or a liquid like phase
to be returned. This setting also determines the interval for the root search
search iterations is how long newton search continues before terminating, default is 10.
Search should conclude in no more than 4-5 iterations.
"""
# 0 is SRK
# 1 is PR
# Bounds for the rho for a given eos
    # This method is independent of the actual EOS,
# But need rho_lo, rho_hi, rho_mc, temp_mc and dpress_drho and d2press_drho2 for each each of equation
# Only SRK and PR are implemented for now.
# Only the rho changes for these equations, so no new mixing terms need to be calculated
# Should converge very quickly
SQRT_2 = math.sqrt(2)
converged = False
if eos == 0:
u, w = 1, 0
rho_lo, rho_hi = -1 / b_mix, 1 / b_mix
# From original Mathias paper
rho_mc = 0.25599 / b_mix
temp_mc = 0.20268 * a_mix / (r * b_mix)
elif eos == 1:
u, w = 2, -1
rho_lo, rho_hi = (1 - SQRT_2) / b_mix, 1 / b_mix
# From Watson
rho_mc = 0.25308 / b_mix
temp_mc = 0.17014 * a_mix / (r * b_mix)
else:
return False, -1, -1, -1, -1, -1
if desired_phase == 'vap':
rho_interval_lo, rho_interval_hi = rho_lo, kappa * rho_mc
elif desired_phase == 'liq':
rho_interval_lo, rho_interval_hi = rho_mc, rho_hi
else:
return False, -1, -1, -1, -1, -1
scaling = 1 / (r * temp)
# scaling = 1
if desired_phase == 'liq':
# initial_estimate - given by Mathias
rho_test = rho_hi - 0.4 * (rho_hi - rho_mc)
else:
rho_test = (rho_interval_lo + rho_interval_hi) * 0.5
# rho_test = rho_hi - 0.4*(rho_hi - rho_mc)
for j in range(search_iterations):
# EOS in terms of rho (which 1/vol)
# press = r*temp/(-b_mix + 1/rho) - a_mix/(w*b_mix**2 + u*b_mix/rho + rho**(-2))
# Derivative of the EOS in terms of rho_test
d_press_d_rho = r * temp / (rho_test ** 2 * (-b_mix + 1 / rho_test) ** 2) - (
u * b_mix / rho_test ** 2 + 2 / rho_test ** 3) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2
f = (d_press_d_rho - 0.1 * r * temp)
f *= scaling
if f < 1e-6:
converged = True
break
# 2nd Derivative of the EOS in terms of rho root
d2_press_d_rho_2 = 2 * (
-r * temp / (b_mix - 1 / rho_test) ** 2 - r * temp / (rho_test * (b_mix - 1 / rho_test) ** 3) + (
u * b_mix + 3 / rho_test) * a_mix / (
w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 2 - (
u * b_mix + 2 / rho_test) ** 2 * a_mix / (
rho_test * (w * b_mix ** 2 + u * b_mix / rho_test + rho_test ** (-2)) ** 3)) / rho_test ** 3
d2_press_d_rho_2 *= scaling
df_drho = d2_press_d_rho_2
rho_test_new = rho_test - f / df_drho
if rho_test_new < rho_interval_lo:
rho_test_new = (rho_test + rho_interval_lo) / 2
elif rho_test_new > rho_interval_hi:
rho_test_new = (rho_test + rho_interval_hi) / 2
rho_test = rho_test_new
# if not converged:
# print('press_rho did not converge')
return converged, rho_mc, rho_lo, rho_hi, rho_test, temp_mc
|
d2a536dbe3b570492fb523f042098576187942f0
| 50,934
|
def _mocked_handle_column(mocked_handle_column):
"""Alias of mocked_handle_column to suppress unused argument."""
return mocked_handle_column
|
292017b0d6ef76b1ac657390c20da21021f34746
| 50,950
|
def dim(s,i):
"""Dimension of the slice list for index i."""
return s[i].stop-s[i].start
|
9ed12f89f5a75a84bd92bb903c6d4ff018e89aaa
| 50,952
|
def get_value_using_path(obj, path):
"""Get the attribute value using the XMLpath-like path specification.
Return any attribute stored in the nested object and list hierarchy using
the 'path' where path consists of:
keys (selectors)
indexes (in case of arrays)
separated by slash, ie. "key1/0/key_x".
Usage:
get_value_using_path({"x" : {"y" : "z"}}, "x")) -> {"y" : "z"}
get_value_using_path({"x" : {"y" : "z"}}, "x/y")) -> "z"
get_value_using_path(["x", "y", "z"], "0")) -> "x"
get_value_using_path(["x", "y", "z"], "1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key1/1")) -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key2/1")) -> "b"
"""
keys = path.split("/")
for key in keys:
if key.isdigit():
obj = obj[int(key)]
else:
obj = obj[key]
return obj
|
92a1d0069a0651151332fd890901b603b242064d
| 50,954
|
def build_http_request(method: str, path: str, host: str, extra_headers=[], body: str = "") -> str:
"""
Returns a valid HTTP request from the given parameters.
Parameters:
- `method` - valid HTTP methods (e.g. "POST" or "GET")
- `path` - the path part of a URL (e.g. "/" or "/index.html")
- `host` - the host of the endpoint (e.g. "google.com" or "ualberta.ca")
- `extra_headers` - an optional list of strings to be included as part
of the request headers (e.g. ["Content-Type": "application/json"])
- `body` - the optional body of the request (if any)
Returns:
A string representation of a valid HTTP request
"""
status_line = f"{method} {path} HTTP/1.1"
headers = [
f"Host: {host}",
"Connection: close",
"User-Agent: sumitro-client/1.0"
]
if len(extra_headers) > 0:
headers.extend(extra_headers)
payload = "\r\n"
if len(body) > 0 or method == "POST":
payload += body
headers.append(f"Content-Length: {len(body)}")
request_body = "\r\n".join([status_line, "\r\n".join(headers), payload])
return request_body
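# Illustrative usage sketch (host and path assumed): a minimal GET request with
# the default headers and an empty body.
req = build_http_request("GET", "/", "example.com")
assert req == ("GET / HTTP/1.1\r\n"
               "Host: example.com\r\n"
               "Connection: close\r\n"
               "User-Agent: sumitro-client/1.0\r\n"
               "\r\n")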
|
4cc0bf8f5ed73788eb3e7a240bbc73f0a5a73d24
| 50,956
|
def filter_keys(func, a_dict):
    """Return a copy of a_dict with only entries where func(key) is True.
Equivalent to the following in Python 3:
{k:v for (k, v) in a_dict.items() if func(k)}
"""
return dict((k, v) for (k, v) in a_dict.items() if func(k))
|
b06ec1803c6176e313fd1568df7fd9199f846554
| 50,958
|
def get_unused_var_name(prefix, var_table):
""" Get a new var name with a given prefix and
make sure it is unused in the given variable table.
"""
cur = 0
while True:
var = prefix + str(cur)
if var not in var_table:
return var
cur += 1
|
bdec5b16d3fc018e8c8efa13794fe55f71ac3702
| 50,959
|
import time
def convert_readable_time(longTime):
"""
Convert date and output into human readable format.
2018-01-03 --> Jan 03, 2018
"""
try:
t = time.strptime(longTime, "%Y-%m-%d")
except ValueError:
return longTime
ret = time.strftime("%b %d, %Y", t)
return ret
|
4cfa34da1eb131823a8019dbd0023d64f44d9f05
| 50,961
|
import re
def extract_pull_request_number(commit_logs):
"""Extract first occurrence of pull request number from commit logs.
:param str commit_logs: oneline commit logs
:return: pull request number
:rtype: str
"""
m = re.search(r'pull request #(\d+)', commit_logs)
if not m:
raise Exception(
'cannot detect pull request number from\n{}'.format(commit_logs))
return m.group(1)
|
3d2be6f8610971c0d1f7fdc189f272f32b9716f7
| 50,963
|