content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def parse_word_expression(expr):
    """
    Parse a word expression such as "thermoelectric - PbTe + LiFePO4" into positive and negative words.

    The sign of each " +"/" -" separator applies to the word that follows it;
    the leading word is positive.

    :param expr: a string expression, with " +" and " -" strings separating the words in the expression.
    :return: Returns a tuple of lists (positive, negative)
    """
    positive, negative = [], []
    current, is_positive = "", True
    i = 0
    while i < len(expr):
        sep = expr[i:i + 2]
        if sep in (" +", " -"):
            word = current.strip()
            if word:
                # Append to the bucket selected by the sign of the previous separator.
                (positive if is_positive else negative).append(word)
            is_positive = sep == " +"
            current = ""
            i += 2  # skip both separator characters
        else:
            current += expr[i]
            i += 1
    word = current.strip()
    if word:
        (positive if is_positive else negative).append(word)
    return positive, negative
|
9872e05f7ce86f2973efec8c5e0d5306d718419a
| 46,162
|
import random
import string
def rand_string(count=12):
    """Return a random mixed-case alphanumeric string of length *count*.

    Uses the non-cryptographic ``random`` module (not suitable for tokens).
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=count))
|
bc800528c6b45a7cc468ec2727ee1ce00136238d
| 46,164
|
def setBits( lst ):
    """
    Return an int with every bit listed in *lst* set to 1.

    Bits 18, 20, 22, 24 and 28 are always set as well (they should always
    be on); every other bit is 0.
    """
    always_on = [18, 20, 22, 24, 28]
    mask = 0
    for bit in lst + always_on:
        mask |= 1 << bit
    return mask
|
645509278a7f974c6da163935b3ee3e2e3724a06
| 46,168
|
from typing import List
from typing import Callable
def create_matcher(queries: List[str], query_type: str) -> Callable[[str], bool]:
    """
    Build a case-insensitive substring matcher.

    Parameters
    ----------
    queries : list
        Substrings to look for.
    query_type: str
        "or" matches when any query occurs; anything else requires all.

    Returns
    -------
    Matcher function taking a string and returning bool.
    """
    lowered = [query.lower() for query in queries]

    def match_any(text: str) -> bool:
        # True when at least one query is a substring of text.
        return any(q in text.lower() for q in lowered)

    def match_all(text: str) -> bool:
        # True only when every query is a substring of text.
        return all(q in text.lower() for q in lowered)

    return match_any if query_type == "or" else match_all
|
cc7cb37cab30728a70ac03446ed985ed0f71b9fc
| 46,174
|
def reverseSentence(s):
    """Reverse word order: "i am a student." -> "student. a am i"."""
    # Splitting on a single space and reversing the word list is equivalent
    # to reversing the whole string and then reversing each word.
    return " ".join(s.split(' ')[::-1])
|
25e74f96182d94dd9031627e46d0a89235ffb796
| 46,175
|
def remove_suffix(s: str, suffix: str) -> str:
    """Remove the suffix from the string. I.e., str.removesuffix in Python 3.9."""
    if suffix and s.endswith(suffix):
        # Positive slice end; also keeps suffix="" from producing s[:-0] == "".
        return s[: len(s) - len(suffix)]
    return s
|
17474d37726249dc84aa89f0861fe43db43bf1e9
| 46,177
|
import re
def split_camel_cased(text: str) -> str:
    """Split camelCased elements with a space.
    Arguments:
        text:
            The text to be processed.
    Returns:
        The text with all camelCased elements split into different elements.
    """
    # Capitalized word not at the start of the string gets a leading space.
    pattern = "(?!^)([A-Z][a-z]+)"
    return re.sub(pattern, r" \1", text)
|
4ecc9e30bd50fb9898c23955917c953b3827b9a3
| 46,178
|
import math
def triangle(v):
    """
    Map *v* onto a triangle wave with period 1 and range [-1, 1].
    """
    nearest_half_up = math.floor(v + (1/2))
    return 2 * abs(v - nearest_half_up) - 1
|
85b5824a5686b56a0ab2455d8aaba60ee0c45d6d
| 46,182
|
def filter_tagged_vocabulary(tagged_vocabulary, vocabulary, split="|"):
    """Keep tagged tokens whose bare token occurs in *vocabulary*.
    Parameters
    ----------
    tagged_vocabulary : collection
        vocabulary of tokens (can be merged with tags, e.g. "word|TAG")
    vocabulary : collection
        target vocabulary of tokens without tags
    split : str
        string delimiting tags and tokens in tagged_vocabulary
    """
    # The bare token is everything before the first delimiter.
    return {
        tagged for tagged in tagged_vocabulary
        if tagged.split(split)[0] in vocabulary
    }
|
a84ded5b44db2a4075591fd56847dc9529eefc7a
| 46,184
|
def _f(X, y, clf):
    """Fit *clf* on (X, y) and return its flattened coefficients.
    This function exists at the module level instead of as an anonymous or
    subordinate function so that it is importable by `multiprocessing`.
    Parameters
    ----------
    X : array
        Design matrix.
    y : array
        Response vector.
    clf : sklearn.base.BaseEstimator
        A scikit-learn estimator with `fit` and `predict` methods and a
        `coef_` attribute.
    Returns
    -------
    coef : array
        The flattened coefficients of the fitted classifier.
    """
    fitted = clf.fit(X, y)
    return fitted.coef_.flatten()
|
209d60f77168d39b541e3b091a8d342ed8c7fead
| 46,185
|
def load_languages(language, languages_dict):
    """loads the language from the string-dict loaded from the config
    Args:
        language (str): selectbox choice for the language model from the user
        languages_dict (dict): dict containing to-evaluate strings of the language
    Returns:
        tfhub language or transformers language or whatlies language: language model to encode texts
    """
    # SECURITY NOTE(review): eval() executes arbitrary code taken from the
    # config dict. This is safe only if the config file is fully trusted;
    # prefer a registry of constructor callables over eval.
    return eval(languages_dict[language])
|
3f7d945ab5154a44249755a067fc53d7ff7c4d9f
| 46,187
|
def inputthis(question='-> ', expected_tuple=('Y', 'N'), error='ERRO! Resposta inesperada!'):
    """
    Prompt repeatedly until the user enters one of the accepted options.

    :param question: Input text
    :param expected_tuple: Tuple containing all the options from wich the user should choose from
    :param error: Error message for when the input isn't cointained in the tuple
    :return: The user's answer
    """
    answer = str(input(question)).strip()
    while answer not in expected_tuple:
        print(error, '\n')
        answer = str(input(question)).strip()
    return answer
|
36c3d47f0ba0c73d12a1323241e1d6173aecd621
| 46,189
|
def _find_match(needle: dict, haystack: list, keys: list):
    """Return the first dict in *haystack* agreeing with *needle* on every key in *keys*, else None."""
    for candidate in haystack:
        if all(candidate.get(key) == needle[key] for key in keys):
            return candidate
    return None
|
4cc009583cd3238bba3b4e3da94257229ee37894
| 46,194
|
def get_cached_stickers(context, fuzzy=False):
    """Return one 50-item page of cached results from a previous inline query request."""
    cache = context.tg_context.bot_data[context.inline_query_id]
    if fuzzy:
        results, offset = cache["fuzzy"], context.fuzzy_offset
    else:
        results, offset = cache["strict"], context.offset
    return results[offset : offset + 50]
|
c3399ac727d4f412f35b4442390c10abc7eb1ad5
| 46,199
|
from typing import List
from typing import Tuple
def find_two_smallest(L:List[float]) -> Tuple[int, int]:
    """Return the indices of the two smallest values in L (earliest index wins ties)."""
    # A stable argsort puts the indices of the two smallest values first,
    # exactly matching the remove/re-insert bookkeeping of the classic
    # implementation, and without mutating L at all.
    order = sorted(range(len(L)), key=lambda idx: L[idx])
    return (order[0], order[1])
|
6847fc028d01fa5539b3fd4a3e8de416089150ab
| 46,201
|
def get_attribute(name):
    """ Return an evaluator that fetches the element's attribute named ``name`` (None if absent). """
    return lambda element: element.attrib.get(name)
|
8ffc06ae088f09284509d3a47ef85f32175385f3
| 46,202
|
from typing import Counter
def unique_words_by_tag(tokens, tag_value='N/A'):
    """Count lowercased tokens whose tag starts with *tag_value* ('N/A' counts every token)."""
    return Counter(
        word.lower()
        for word, tag in tokens
        if tag_value == 'N/A' or tag.startswith(tag_value)
    )
|
b56dc9dbcdcb25b8d81fe2ebc79784008d848d23
| 46,204
|
def total_per_person(single_plan):
    """Sum the rounded per-person share of each cost attached to the plan.
    Parameter
    single_plan: single plan from database
    Returns: total cost per person (0 when the plan has no costs)"""
    costs = single_plan.cost_set.all()
    if not costs:
        return 0
    return sum(
        round(cost.cost / cost.number_of_members, 2) for cost in costs
    )
|
0019267cfa754e4a64631b812f026fcd8534f8b8
| 46,205
|
def writeable(doc):
    """
    Build the writeable 2-tuple (`output_path`, `bytes`) for doc.
    `lettersmith.write` knows how to write these tuples to disk.
    """
    encoded = doc.content.encode()
    return (doc.output_path, encoded)
|
186580eee94dc537968b3c6edac1f5a755f858ed
| 46,207
|
def fsm_submit_button(transition):
    """
    Build the template context for a submit button that requests one fsm
    state transition.
    """
    fsm_field_name, button_value, transition_name = transition
    return dict(
        button_value=button_value,
        fsm_field_name=fsm_field_name,
        transition_name=transition_name,
    )
|
e4fedef1a489dd96e573027639f0eec2cbf13711
| 46,209
|
def pointOnTrianglePlane(P, A, B, C):
    """
    Project the point P onto the plane containing triangle A, B, C.
    """
    # Unit normal of the plane spanned by edges AB and AC.
    # NOTE(review): assumes the vector type provides cross/dot/normalize and
    # that A, B, C are not collinear (non-zero cross product) — confirm.
    N = (B - A).cross(C - A).normalize()
    # Move P along -N by its signed distance to the plane.
    return P + (-(P - A).dot(N))*N
|
c70c02c31905208fa8a0942df7a12d33316f0afa
| 46,212
|
from typing import Dict
from typing import Tuple
from typing import List
from typing import Any
def _traverse_foreign_key_tree(
    tree: Dict[str, Dict[Tuple[str, ...], dict]], name: str, fields: Tuple[str, ...]
) -> List[Dict[str, Any]]:
    """Traverse foreign key tree.
    Args:
        tree: Foreign key tree (see :func:`_build_foreign_key_tree`).
        name: Local resource name.
        fields: Local resource fields.
    Returns:
        Sequence of foreign keys starting from `name` and `fields`:
        * `fields` (List[str]): Local fields.
        * `reference['resource']` (str): Reference resource name.
        * `reference['fields']` (List[str]): Reference primary key fields.
    """
    keys = []
    # No foreign key registered for this resource/field combination.
    if name not in tree or fields not in tree[name]:
        return keys
    ref = tree[name][fields]
    keys.append({"fields": list(fields), "reference": ref})
    # Stop when the referenced resource has no outgoing foreign keys itself.
    if ref["resource"] not in tree:
        return keys
    for next_fields in tree[ref["resource"]]:
        # Only follow keys whose fields are fully contained in the
        # reference's primary key, so they can be mapped back onto ours.
        if set(next_fields) <= set(ref["fields"]):
            for key in _traverse_foreign_key_tree(tree, ref["resource"], next_fields):
                # Translate the referenced resource's field names back into
                # our local field names by matching positions in ref["fields"].
                mapped_fields = [
                    fields[ref["fields"].index(field)] for field in key["fields"]
                ]
                keys.append({"fields": mapped_fields, "reference": key["reference"]})
    return keys
|
37bafd03fed849031939159fab03bf5708ebffb7
| 46,219
|
from typing import List
from typing import Dict
def generate_currency_endowments(
    agent_addresses: List[str], currency_ids: List[str], money_endowment: int
) -> Dict[str, Dict[str, int]]:
    """
    Compute the initial money amounts for each agent.
    :param agent_addresses: addresses of the agents.
    :param currency_ids: the currency ids.
    :param money_endowment: money endowment per agent.
    :return: the nested dict of currency endowments
    """
    # Build a fresh inner dict per agent. The original shared one dict object
    # across all agents, so mutating one agent's endowment would have
    # silently changed every agent's.
    return {
        agent_addr: {currency_id: money_endowment for currency_id in currency_ids}
        for agent_addr in agent_addresses
    }
|
0dbf36a09c88eb3cdb1278862bdb37b2927fcec0
| 46,220
|
def sort_list_by_other(to_sort, other, reverse=True):
    """Order *to_sort* by the parallel values in *other* (descending by default)."""
    paired = sorted(zip(other, to_sort), reverse=reverse)
    return [item for _, item in paired]
|
9672bd28349fe0f3cc0d8d122f59724965d53f35
| 46,224
|
def update_analyze_button(disabled):
    """
    Build the style dict of the analyze button for its disabled status;
    an enabled button additionally gets a green background and outline.
    :param disabled: if the button is disabled
    """
    style = {"width": "100%", "text-transform": "uppercase",
             "font-weight": "700"}
    if not disabled:
        style["background"] = "green"
        style["outline"] = "green"
    return style
|
5a0056879fd5ecde05b7b4fbadbeb7e3aeb1d679
| 46,228
|
def format_id(ident):
    """
    Convert a message ID to its canonical 16-digit uppercase hex string.
    """
    return format(ident, '016X')
|
633ce965ca74ff3dac2b7bf9910186c88e892111
| 46,232
|
def read_classes(classes_path):
    """Reads class names from file.
    Args:
        classes_path (str):
            Path to file containing names of all classes, one per line.
    Returns:
        list: List containing names of all classes, whitespace-stripped.
    """
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
|
fcfcd3f7391e096fd71a7091eb33e4a9c8a21581
| 46,235
|
import torch
def generate_sparse_one_hot(num_ents, dtype=torch.float32):
    """ Sparse (num_ents, num_ents) identity matrix, i.e. one-hot encoding along the diagonal. """
    diag = torch.arange(num_ents)
    # Indices are the (row, col) pairs of the diagonal.
    return torch.sparse_coo_tensor(
        indices=torch.stack([diag, diag]),
        values=torch.ones(num_ents, dtype=dtype),
        size=(num_ents, num_ents))
|
3a71c72acfca4fe9fbfe1a6848519e9726fe72d9
| 46,237
|
def split_string_by_fields(string, fields):
    """
    Split a string by a set of ordered field markers, primarily used for
    Maccor metadata parsing.
    >>>split_string_by_fields("first name: Joey last name Montoya",
    >>>                       ["first name:", "last name"])
    [" Joey ", " Montoya"]
    Args:
        string (str): string input to be split
        fields (list): ordered list of fields to split input string by.
    Returns:
        list: substrings between consecutive markers (text before the first
        marker is discarded; surrounding whitespace is kept).
    """
    substrings = []
    # maxsplit=1 so that a marker occurring again later in the string
    # (e.g. inside a value) cannot break the 2-tuple unpacking.
    _, leftovers = string.split(fields[0], 1)
    for field in fields[1:]:
        chunk, leftovers = leftovers.split(field, 1)
        substrings.append(chunk)
    substrings.append(leftovers)
    return substrings
|
7722f5fd80a581a113c18495d2ad21d1af41748d
| 46,250
|
def footer(id1, id2 = None):
    """
    Build SMT formula footer
    Args:
        id1 (str): ID of policy 1 in SMT formula
        id2 (str, optional): ID of policy 2 in SMT formula. Defaults to None.
    Returns:
        str: SMT footer
    """
    lines = ['(assert {}.allows)\n'.format(id1)]
    if id2:
        lines.append('(assert (or {0}.denies {0}.neutral))\n'.format(id2))
    lines.append('(check-sat)\n')
    lines.append('(get-model)\n')
    return ''.join(lines)
|
c9af2ad7453282e1611e7b2010f4269ca8ac0bc0
| 46,252
|
def area_square(length):
    """
    Calculates the area of a square.
    Parameters
    ----------
    length (float or int) length of one side of a square
    Returns
    -------
    area (float) - area of the square
    """
    return length * length
|
122ce824bc47651aa56f424618b1b18c7fd0aa19
| 46,253
|
def calculate_time(cents_per_kWh, wattage, dollar_amount):
    """Hours a *wattage*-watt device can run before costing *dollar_amount* dollars.

    Derivation: cost_cents = cents_per_kWh * wattage * hours / 1000 and
    dollars = cents / 100, hence hours = 1e5 * dollars / (rate * watts).
    """
    # Evaluation order kept identical to the original for bit-equal floats.
    hours = 1 / cents_per_kWh * 1e5 * dollar_amount / wattage
    return hours
|
86c261d8b72089d39935468dd349eef8e9611cdd
| 46,257
|
def flatten(filenames):
    """Collapse a list of lists into a single flat list.
    Args:
        filenames (list): list of filename lists
    Returns:
        flattened list of filenames
    """
    flat = []
    for sublist in filenames:
        flat.extend(sublist)
    return flat
|
5a81b0c3d9395142c4991052cf494226b3516bc3
| 46,258
|
def vals_vec_from_lmfit(lmfit_params):
    """Return the list of parameter values from an LMFIT Parameters object."""
    return [param.value for param in lmfit_params.values()]
|
6fac3ac8dd364dca3ae19a6cac36be072e32fdb7
| 46,259
|
def _get_yaml_path(path, parameter):
    """Compose the parameter path following the YAML Path standard.
    Standard: https://github.com/wwkimball/yamlpath/wiki/Segments-of-a-YAML-Path#yaml-path-standard
    """
    segments = list(path) if path else []
    if parameter:
        segments.append(parameter)
    return ".".join(segments)
|
7b4c8807cbc8a030ad2b541bb21dc59e3204324f
| 46,260
|
def strip_yaml(text):
    """
    Strip the leading YAML front matter, between the first '---' line and
    the next '---' line, from text.
    :param text: input string, possibly starting with '---'
    :return: text with the front matter removed; unchanged when it does not
        start with '---'
    """
    if text[:3] != '---':
        return text
    lines = text.split('\n')
    lines.pop(0)  # drop the opening '---' line
    n = 0
    for line in lines:
        n += 1
        # BUG FIX: the original incremented n in both branches and never
        # broke out, so it always discarded the entire remaining document.
        if line == '---':
            break
    return '\n'.join(lines[n:])
|
288ed60333b90acb6387746760415aa281fb2dd0
| 46,264
|
def unique_test_name(request):
    """Generate a unique test name by prepending the class name (when any) to the method name."""
    node = request.node
    if node.cls is None:
        return node.name
    return node.cls.__name__ + "__" + node.name
|
c7d0f732ad69c5a1e528cf7f2a64789c3080f215
| 46,266
|
def accidental2string(acc_number):
    """Render an accidental count: '#'*n for sharps, 'b'*n for flats, 'n' for natural, '' for None."""
    if acc_number is None:
        return ""
    magnitude = int(abs(acc_number))
    if acc_number > 0:
        return "#" * magnitude
    if acc_number < 0:
        return "b" * magnitude
    return "n"
|
622b0952ba9c9bcc2428761da8b44dbef991911c
| 46,268
|
def _IsRemoteBetter(new_name, old_name):
    """Decide whether remote *new_name* should replace *old_name*.

    Ranking: "origin" always wins; otherwise the alphabetically-last name
    wins. The alphabetical ordering is arbitrary but stable. "origin" is
    preferred because it is the standard name for the origin of cloned
    repos.

    Args:
        new_name: The name to be evaluated.
        old_name: The name to compare against.
    Returns:
        True iff new_name should replace old_name.
    """
    if not new_name or old_name == 'origin':
        return False
    return bool(not old_name or new_name == 'origin' or new_name > old_name)
|
49ca158c596a173b881db169fe30531d8a77b9ae
| 46,271
|
from typing import Tuple
def get_chunk_type(tok: str) -> Tuple[str, str]:
    """
    Split an IOB label into class and type, e.g. "B-DRUG" -> ("B", "DRUG").
    Args:
        tok: Label in IOB format
    Returns:
        tuple: ("B", "DRUG")
    """
    tag_class = tok.partition('-')[0]
    tag_type = tok.rpartition('-')[2]
    return tag_class, tag_type
|
67a112c4f3f5c3d9594572a4f28ad83dcb53404a
| 46,276
|
def el(name, content):
    """Write an XML element; *name* may include attributes, the closing tag keeps only the tag name."""
    tag = name.partition(" ")[0]
    return "<%s>%s</%s>" % (name, content, tag)
|
8100779aa7935eb3ca0ea0c720508c1728c1874d
| 46,287
|
import math
def add_radians(a, b):
    """Add two radian angles and wrap the result into [-pi, pi).
    Parameters:
    :param float a: Angle a
    :param float b: Angle b
    Returns: The resulting angle in radians, with sign adjusted
    :rtype: float
    """
    shifted = a + b + math.pi
    return shifted % (2 * math.pi) - math.pi
|
43f33ba646899e2431fee4967335b8a724cad194
| 46,289
|
def str2int(string):
    """Parse *string* as an integer, returning 0 when it is not a valid int literal."""
    try:
        return int(string)
    except ValueError:
        return 0
|
20b370a9243b05240ffb39b968d139d8c67c7fea
| 46,296
|
import torch
def fft_rec_loss2(tgt, src, mask):
    """FFT loss between a pair of images: L1 amplitude + L1 phase differences.

    Args:
        tgt (torch.Tensor): Target image. Shape [B,C,H,W]
        src (torch.Tensor): Source image. Shape [B,C,H,W]
        mask (torch.Tensor): Boolean mask for valid points.
            NOTE(review): the mask is currently never applied, despite the
            original docstring; kept in the signature for compatibility.
    Returns:
        torch.Tensor: Scalar sum of phase and (scaled) amplitude losses.
    """
    # 2D FFT over the spatial dims [H, W] with "forward" normalization.
    fft_tgt = torch.fft.rfftn(
        tgt, s=tgt.shape[-2:], dim=[-2, -1], norm="forward")
    fft_src = torch.fft.rfftn(
        src, s=tgt.shape[-2:], dim=[-2, -1], norm="forward")
    # Amplitude difference of the complex spectra.
    mag_diff = torch.abs(fft_tgt.abs() - fft_src.abs())
    # Phase via atan2 on the real view: complex angle() has no derivative
    # implemented in the targeted torch versions.
    fft_tgt = torch.view_as_real(fft_tgt)
    fft_src = torch.view_as_real(fft_src)
    pha_tgt = torch.atan2(fft_tgt[..., 1], fft_tgt[..., 0])
    pha_src = torch.atan2(fft_src[..., 1], fft_src[..., 0])
    pha_diff = torch.abs(pha_tgt - pha_src)
    # The amplitude term is scaled down so it does not dominate the phase term.
    l = 1e-4*mag_diff.sum() + pha_diff.sum()
    return l
|
5ab462edb99cab347bc7e57c66a604d10dd3e98f
| 46,302
|
def area_to_capacity(statistical_roof_model_area_based, power_density_flat, power_density_tilted):
    """Map area shares of the statistical roof model to capacity shares.
    The statistical roof model defines roof categories (e.g. south-facing with tilt 10°) and their
    shares in a population of roofs. This function maps area shares to shares of installable pv
    capacity. Flat and tilted roofs may have different pv power densities, so their area shares
    are weighted differently before renormalizing.
    Parameters:
    * statistical_roof_model_area_based: model as described above, values are shares of total roof area
    * power_density_flat: power density of flat pv installations, unit must be consistent with next
    * power_density_tilted: power density of tilted pv installations, unit must be consistent with previous
    Returns:
    * statistical roof model, capacity based: values are shares of total installable capacity
    """
    capacity_based = statistical_roof_model_area_based.copy()
    is_flat = capacity_based.index.get_level_values(0) == "flat"
    capacity_based[is_flat] = capacity_based[is_flat] * power_density_flat
    capacity_based[~is_flat] = capacity_based[~is_flat] * power_density_tilted
    # Renormalize so the weighted shares sum to 1 again.
    return capacity_based / capacity_based.sum()
|
0e57c01bfa7c44743edb260b6a1b406ebf0fb82b
| 46,305
|
import torch
def kron(A, B):
    """
    Kronecker product.
    Supports (shared) leading batch dimensions — A and B must have the same
    batch dims.
    """
    batch_A, batch_B = A.shape[:-2], B.shape[:-2]
    assert batch_A == batch_B
    (rows_a, cols_a), (rows_b, cols_b) = A.shape[-2:], B.shape[-2:]
    # Outer product over the last two dims gives one B-sized block per A entry.
    blocks = torch.matmul(A[..., :, None, :, None], B[..., None, :, None, :])
    return blocks.reshape(batch_A + (rows_a * rows_b, cols_a * cols_b))
|
065b5c5b49c509f74733b5af6dfd7e76649cadfd
| 46,310
|
def get_all_model_fields(connector, model_name):
    """Utility function to get the full list of db field names for a model,
    INCLUDING field names derived from the connector's UNPACKING list in the
    manifest."""
    model_fields = connector.MODELS[model_name]['fields']
    unpacking = connector.UNPACKING
    full_fields = {spec['dbname'] for spec in model_fields.values()}
    # Fields present in this model that the manifest may unpack further.
    for field_name in set(unpacking) & set(model_fields):
        unpack = unpacking[field_name]
        if unpack:  # a falsy entry means the field is not actually unpacked
            full_fields.add(unpack['dbname'])
    return full_fields
|
4985f67ce565ed6d0f309387d4123ce9a594dc61
| 46,316
|
def j_map_to_dict(m):
    """Converts a java map to a python dictionary; falsy input yields None."""
    if not m:
        return None
    return {entry.getKey(): entry.getValue() for entry in m.entrySet().toArray()}
|
401bc3ceeafbbdd6f07b309cdef227caf56680d4
| 46,317
|
def _create_union_types_specification(schema_graph, graphql_types, hidden_classes, base_name):
    """Return a function that gives the types in the union type rooted at base_name."""
    # When edges point to vertices of type base_name, and base_name is both non-abstract and
    # has subclasses, we need to represent the edge endpoint type with a union type based on
    # base_name and its subclasses. This function calculates what types that union should include.
    # NOTE: the returned closure captures its arguments, so the union member
    # list is computed lazily each time types_spec() is called.
    def types_spec():
        """Return a list of GraphQL types that this class' corresponding union type includes."""
        # Sorted for deterministic ordering; hidden classes are excluded.
        return [
            graphql_types[x]
            for x in sorted(list(schema_graph.get_subclass_set(base_name)))
            if x not in hidden_classes
        ]
    return types_spec
|
3b72a6865982638a280d0678f7092f564180ccb9
| 46,319
|
def _ends_in_by(word):
    """
    Check whether a filename ends with the ".by" extension.
    Args:
        word (str): Filename to check
    Returns:
        boolean: Whether 'word' ends with '.by' or not
    """
    return word.endswith(".by")
|
d6a080f8d3dcd5cab6ad6134df3dd27b3c2ceeea
| 46,321
|
def is_subnet_of(a, b):
    """
    Check if network-b is subnet of network-a.

    b is a subnet of a when b's whole address range lies inside a's range.
    The original implementation required the two network addresses to be
    equal (missing e.g. 10.0.1.0/24 inside 10.0.0.0/16) and compared prefix
    lengths in the direction opposite to its own docstring.
    """
    return (b.network_address >= a.network_address
            and b.broadcast_address <= a.broadcast_address)
|
32ae825937aa4e48098884e121f9238fbbd2ffec
| 46,322
|
def str_join(lst, sep=' '):
    """join(list, [sep]) -> string
    Stringify each item and join with *sep*, like string.join from Python 2."""
    return sep.join(map(str, lst))
|
145520980df426fc84bda1eec0ef0eadcdaeaaec
| 46,323
|
def get_slots(slot_line, utterance, slot_dict):
    """
    Formats slot labels for an utterance. Ensures the multiword
    slot labels are grouped together. For example the words
    'birthday party' should be grouped together under the
    same event_name label like event_name(birthday party)
    instead of event_name(birthday), event_name(party).

    :param slot_line: sequence of slot-label indices, one per utterance word
        (possibly with one extra leading entry, see below)
    :param utterance: the raw utterance string (whitespace-tokenized here)
    :param slot_dict: mapping from label index (int) to label name (str)
    :return: "label(words)" entries joined by ", ", or "None" if no slots
    """
    # Get slots and their labels
    utterance_words = utterance.split()
    slots_and_labels = []
    prev_slot_label = 'O'
    prev_word_idx = 0
    current_word = ""
    # Length mismatch: drop the first label — presumably a special-token
    # (e.g. [CLS]-style) prefix in slot_line; TODO confirm against caller.
    if len(utterance_words) != len(slot_line):
        slot_line = slot_line[1:]
    for word_idx, slot_label_idx in enumerate(slot_line):
        word = utterance_words[word_idx]
        slot_label = slot_dict[int(slot_label_idx)].strip()
        # Only care about words with labels
        if slot_label != 'O':
            # Keep multiword answers together
            # (same label AND directly adjacent to the previous labeled word)
            if prev_slot_label == slot_label and prev_word_idx == word_idx - 1:
                current_word += " " + word
            # Previous answer has ended and a new one is starting
            else:
                if current_word != "":
                    slots_and_labels.append(f"{prev_slot_label}({current_word})")
                current_word = word
            prev_word_idx = word_idx
            prev_slot_label = slot_label.strip()
    # Add last labeled word to list of slots and labels if the utterance is over
    if current_word != "" and prev_slot_label != 'O':
        slots_and_labels.append(f"{prev_slot_label}({current_word})")
    # Format slot labels
    if not slots_and_labels:
        slot_labels = "None"
    else:
        slot_labels = ", ".join(slots_and_labels)
    return slot_labels
|
52b68870d51ef394871ea77470fd10a0fddeea3a
| 46,324
|
def get_current_period(root_arr: list):
    """
    Return the next simulation period: 1 plus the highest 'period' attribute
    across the given xml roots (uploading the result of period `x` indicates
    that the user wants to simulate period `x+1`). Defaults to 1 for an
    empty list.
    Parameters
    -----------
    `root_arr`: list
        List of xml root objects that is retrieved by `parse_all_xml()` method.
    """
    if not root_arr:
        return 1
    return max(int(root.get('period')) + 1 for root in root_arr)
|
7c31f6943d4a79b8b80d02d0b132b21b13581794
| 46,328
|
def get_labels(labels_path):
    """
    Extract classes from a labels file.

    Each line has the form "<index> <label>"; everything after the first
    space is kept, with trailing whitespace stripped.

    :param labels_path: str, path to file with labels
    :return : list with labels
    """
    # `with` guarantees the file is closed even on a malformed line; the
    # original never closed the handle on an exception path.
    with open(labels_path, 'r') as labelsfile:
        return [line.split(' ', 1)[1].rstrip() for line in labelsfile]
|
09ac5ed89733c1454874816fb353f2f00ae143f1
| 46,330
|
def split(target_list, num_anchor_list):
    """Split each per-level target along dim 0 into per-anchor chunks."""
    return [
        targets.split(anchor_count, 0)
        for targets, anchor_count in zip(target_list, num_anchor_list)
    ]
|
3c8e834e2f654c1c434f5f4ecd43b5ed2f54450d
| 46,333
|
def epilog(text):
    """Decorator factory adding an 'epilog' attribute to a CMD function.

    It will be shown in the epilog. Usually useful for examples.
    """
    def decorator(fn):
        fn.epilog = text
        return fn
    return decorator
|
87e8b23f319a73b8568d9290e9c366893a602b7b
| 46,338
|
from pathlib import Path
def doRelocateFile(src, dst, overwrite=False):
    """
    Relocate file `src` to the path `dst`.
    If dirname(dst) does not exist yet, it is mkdir'ed silently.
    If a file already exists at `dst`, raise ``FileExistsError``, unless
    ``overwrite=True``.
    Parameter
    ---------
    s : path-like: source file
    d : path-like: destination file
    overwrite(optional) : logical : overwrite file in destination, or not.
    Return
    ------
    path-like : pathname of relocated file.
    """
    source = Path(src)
    destination = Path(dst)
    if destination.is_file() and not overwrite:
        raise FileExistsError(f'destination is existing file: {destination}')
    print(source, '->', destination)
    destination.parent.mkdir(parents=True, exist_ok=True)
    source.replace(destination)
    return destination
|
380c3d2ad8f95eb4e4065a8a49699705c655eef7
| 46,339
|
async def health():
    """Check integrity of the server."""
    # Template literal: `cookiecutter.library_name` is substituted at
    # project-generation time, not at runtime.
    return "The {{ cookiecutter.library_name }} API server is healthy."
|
a9140a5183cbb331700a5df05e789df7900298a9
| 46,341
|
def preprocess_call_name(name):
    """ Map an event name to its preprocess hook name. """
    return f'preprocess_{name}'
|
b6e24ab1403f0b17d1970a6d2b8abacc9df094ea
| 46,342
|
def get_catalog_path(layer):
    """Get the catalog path for the designated layer if possible. Ensures we can pass map layers to the subprocess.
    If it's already a string, assume it's a catalog path and return it as is.
    Args:
        layer (layer object or string): Layer from which to retrieve the catalog path.
    Returns:
        string: Catalog path to the data
    """
    # Layer objects expose `dataSource`; anything else is returned unchanged.
    return getattr(layer, "dataSource", layer)
|
c32bf25878d3e2633461549e9a762997a5cbc1ab
| 46,345
|
def get_instance_fields(instance):
    """Return the django-algolia managed fields of a class or instance.

    Works with both classes and instances; objects without an
    ``ALGOLIA_INDEX_FIELDS`` attribute yield an empty list.

    >>> class Managed(object): ALGOLIA_INDEX_FIELDS = ['some', 'fields']
    >>> get_instance_fields(Managed)
    ['some', 'fields']
    >>> get_instance_fields(Managed())
    ['some', 'fields']
    >>> get_instance_fields(object())
    []
    """
    try:
        return instance.ALGOLIA_INDEX_FIELDS
    except AttributeError:
        return []
|
989ff1a1fe59ad63397151832be2416acff2e4c8
| 46,347
|
import math
def round_repeats(depth_coefficient, repeats):
    """ Round up the number of block repeats scaled by the depth coefficient.
    Args:
        depth_coefficient: Coefficient to scale number of repeats.
        repeats: Number to repeat mb_conv_block.
    From tensorflow implementation:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet
    /efficientnet_model.py
    """
    # math.ceil already returns an int in Python 3.
    return math.ceil(depth_coefficient * repeats)
|
bafa99e8a406e068406806703be122573c68b084
| 46,348
|
from datetime import datetime
def timestamp(dt:int, fmt:str):
    """Format a unix timestamp as a string (local time).
    Arguments:
        dt {int} -- timestamp, seconds since epoch
        fmt {str} -- strftime datetime format
    Returns:
        str -- formatted datetime string
    """
    moment = datetime.fromtimestamp(int(dt))
    return moment.strftime(fmt)
|
49b54e934a90eaac415738bc609cd129fbfbc002
| 46,349
|
import torch
def build_loss(property_map, loss_tradeoff):
    """
    Build the (optionally weighted) MSE loss function.
    Args:
        property_map (dict): mapping between the model properties and the
            dataset properties (None values are skipped)
        loss_tradeoff (dict): contains tradeoff factors for properties,
            if needed
    Returns:
        loss function taking (batch, result)
    """
    def loss_fn(batch, result):
        total = 0.
        for prop, target in property_map.items():
            if target is None:
                continue
            # Mean squared error for this property.
            err_sq = torch.mean((batch[target] - result[prop]) ** 2)
            if prop in loss_tradeoff:
                err_sq = err_sq * loss_tradeoff[prop]
            total = total + err_sq
        return total
    return loss_fn
|
0aa23419d845df460f3ef534eff304de4af436aa
| 46,353
|
def get_objects(graph, predicate, subject=None):
    """Return a set of all the objects that match a predicate (and subject).
    :graph: The policy graph.
    :predicate: The predicate of the rules to match.
    :subject: The subject of the rules to match (defaults to any).
    :return: A set of all the objects that match the parameters in the graph.
    """
    return {obj for _, _, obj in graph.triples((subject, predicate, None))}
|
0da1a08e4da38e2de05920d92f65ecbc627bc06f
| 46,354
|
def twoNumberSum(array : list, targetSum : int) -> list:
    """Find the two numbers in the array needed to get targetSum.

    Genuinely O(n) time | O(n) space: the original tested membership in a
    plain list, making it O(n^2) despite its docstring (it also shadowed the
    builtin ``sum``).

    Args:
        array: A list containing all the candidate numbers
        targetSum: The target number we want to get by adding two numbers from the array
    Returns:
        [later_element, earlier_element] for the first matching pair, in the
        same order the original produced; [] if no pair adds to targetSum.
    """
    # complement value -> index of the earlier element that needs it
    needed = {}
    for idx, value in enumerate(array):
        if value in needed:
            return [value, array[needed[value]]]
        # setdefault keeps the FIRST element wanting this complement,
        # matching the list.index() semantics of the original.
        needed.setdefault(targetSum - value, idx)
    return []
|
ed0ffe1efeb2072b2f96f231bfc1d396723c98ec
| 46,359
|
def freqs2probs(freqs):
    """Convert frequencies (iterable of numbers) into probabilities.
    This just normalizes them so they sum to 1."""
    values = list(freqs)
    total = float(sum(values))
    return [value / total for value in values]
|
6d82756e428f289b804ebeed68262b34568b2720
| 46,366
|
import re
def get_info(var, entry):
    """Return a value for a user selected field in a line from a vcf (provided as a list split by whitespace)"""
    ret_val = None
    try:
        #Loop through vcf fields backwards
        for field in var[::-1]:
            #First try fields seperated with an ':' e.g. GT:AO 0/1:23
            # NOTE(review): "['\t', ':']" is a regex character class matching
            # any of ' \t ', quote, comma, space or colon — likely intended
            # as [\t:]; kept as-is to preserve behavior. Confirm with callers.
            found = re.search("['\t', ':']{}['\t', ':']".format(entry), field)
            if found:
                field_split = field.split(':')
                entry_index = field_split.index('{}'.format(entry))
                field_index = var.index(field)
                # Value is in the NEXT vcf column, at the same ':'-position
                # as the entry name in this column.
                ret_val = var[field_index+1].split(':')[entry_index]
                break
            #Second try fields with an '=' e.g. AO=23;RO=45
            found = re.search("['\t', ';']{}=".format(entry), field)
            if found:
                ret_val = re.split("['\t', ';']{}=".format(entry), field)[1].split(';')[0]
                break
    # NOTE(review): bare except silently maps any parsing failure
    # (missing entry, short rows, regex issues) to None — deliberate
    # best-effort lookup; do not let it hide new bugs.
    except:
        pass
    return ret_val
|
4d8e785a8e3576f81bdfbd436af4a7f8239ca1cc
| 46,370
|
def trim_from_start(s, substring):
    """Remove `substring` from the front of `s` when present; otherwise
    return the original string unchanged."""
    if not s.startswith(substring):
        return s
    return s[len(substring):]
|
a4d68169a50672159af939855734fabff1fd8426
| 46,373
|
def nz(i:int, y=0):
    """ Same as the nz() function of Pinescript, for ints:
    Returns y if i is None, or 0 if y is None too """
    if i is not None:
        return i
    return 0 if y is None else y
|
b6331d4ef5030968b2df188ff649d4eb695de30f
| 46,376
|
def subject(headers):
    """
    Scan email headers for the 'Subject' entry and return its value
    (the email subject title); returns None when no such header exists.
    """
    matches = (h['value'] for h in headers if h['name'] == 'Subject')
    return next(matches, None)
|
8ef3f1b9cd105f6d3fb9e0a739d07bfcbf510ce4
| 46,377
|
def join_str_list(str_list):
    """Join a list of strings, handling spaces appropriately.

    Word-piece tokens starting with "##" are glued to the previous token;
    all other tokens are preceded by a single space.
    """
    pieces = []
    for token in str_list:
        if token.startswith("##"):
            pieces.append(token[2:])
        else:
            pieces.append(" " + token)
    return "".join(pieces)
|
c25dcc78bbb94767a02a33fe54439289f23975de
| 46,384
|
import torch
def get_mention_token_dist_tensors(m1, m2):
    """ Returns distance in tokens between two mentions """
    # Order the mentions so `first` starts no later than `second`.
    if m1[0] < m2[0]:
        first, second = m1, m2
    else:
        first, second = m2, m1
    gap = second[0] - (first[1] - 1)
    # Overlapping/adjacent mentions are reported as distance zero.
    if gap < 0:
        return torch.tensor(0, dtype=torch.long, device=m1.device)
    return gap
|
f2bf78ed5d2f1c743aeda67a2cb19e1029991407
| 46,391
|
def _lenlastline(s):
"""Get the length of the last line. More intelligent than
len(s.splitlines()[-1]).
"""
if not s or s.endswith(('\n', '\r')):
return 0
return len(s.splitlines()[-1])
|
68b425e28818fccabac61803b05a2967a2ccc93b
| 46,392
|
def compile_krass_conditional(krass_conditional):
    """
    Compile Krass conditional statements to Python conditional statements.

    Translates boolean literals (true/false) and the operators &&, ||
    and ! into their Python spellings. `!=` is left untouched: it is
    already valid Python, and naively replacing every `!` (as the old
    version did) corrupted it into ` not =`.
    """
    # change true to True, && to and etc.
    changes = [
        ("true", "True"),
        ("false", "False"),
        ("&&", " and "),
        ("||", " or "),  # keep an eye on this one, for regex or non
    ]
    for old, new in changes:
        krass_conditional = krass_conditional.replace(old, new)
    # Negate only a bare "!" — the lookahead skips the "!" of "!=".
    import re
    krass_conditional = re.sub(r"!(?!=)", " not ", krass_conditional)
    return krass_conditional
|
b29c01f4e62cfe4b1ffcbde37e3d0599db1565d3
| 46,395
|
def get_exception_kwargs(e):
    """ Extracts extra info (attributes) from an exception object,
    skipping private names and the standard 'args'/'message' fields. """
    return {
        attr: value
        for attr, value in vars(e).items()
        if not attr.startswith('_') and attr not in ('args', 'message')
    }
|
45b5a78c766c02ee49a3af4ed793a61025e9424a
| 46,396
|
def is_valid_name(name: str) -> bool:
    """Returns True if a given string represents a valid name (e.g., for a
    dataset). Valid names contain only letters, digits, hyphen, underscore,
    or blank. A valid name has to contain at least one digit or letter.
    """
    seen_alnum = False
    for ch in name:
        if ch.isalnum():
            seen_alnum = True
        elif ch not in ('_', '-', ' '):
            return False
    return seen_alnum
|
ef59211145ea8172b2a8796ca4ce2b204f200c53
| 46,398
|
def name(obj):
    """
    Return a human-readable name for the type of `obj`, inserting a
    space before any 'Serv' substring in the class name.

    Args:
        obj (any): object whose type name is rendered
    Returns:
        str: the type name with ' Serv' spaced out
    """
    cls = type(obj)
    return cls.__name__.replace("Serv", " Serv")
|
424772f9f3680c091865909f8b5c6856ddcb9a0f
| 46,416
|
def val2bits(val, nbits):
    """Convert decimal integer to list of {0, 1}."""
    # Bits come out high to low: the value 6 is returned as [1, 1, 0].
    # format() zero-pads to at least `nbits` digits; wider values keep
    # all their bits.
    bits = format(val, '0%db' % nbits)
    return list(map(int, bits))
|
c1ceaf9f65c3115260943996737020d6cea1fe89
| 46,418
|
import torch
def batch_linear(x, W, b=None):
    """Computes y_i = x_i W_i + b_i where i is each observation index.

    This is similar to `torch.nn.functional.linear`, but a version that
    supports a different W for each observation.

    x: has shape [obs, in_dims]
    W: has shape [obs, out_dims, in_dims]
    b: has shape [out_dims]

    Returns a tensor of shape [obs, out_dims].
    Raises ValueError when the dimensions of x and W do not line up.
    """
    if x.size()[1] != W.size()[-1]:
        raise ValueError(
            f'the in_dim of x ({x.size()[1]}) does not match in_dim of W ({W.size()[-1]})')
    if x.size()[0] != W.size()[0]:
        raise ValueError(
            f'the obs of x ({x.size()[0]}) does not match obs of W ({W.size()[0]})')
    obs = x.size()[0]
    in_dims = x.size()[1]
    out_dims = W.size()[1]
    x = x.view(obs, 1, in_dims)
    W = W.transpose(-2, -1)
    if b is None:
        return torch.bmm(x, W).view(obs, out_dims)
    else:
        b = b.view(1, 1, out_dims)
        # The legacy positional form torch.baddbmm(1, b, 1, x, W) was
        # removed from torch; beta/alpha default to 1, so the plain
        # call is numerically equivalent.
        return torch.baddbmm(b, x, W).view(obs, out_dims)
|
1cac8de9ad6b0941149f254a925da310f2c67fc6
| 46,428
|
import re
def timedur_standardize(timedur: str) -> str:
    """
    Convert a user-input ambiguous time duration string to standard
    abbreviations, following the rules:
    1. No space.
    2. One letter represents unit.
    3. s,m,h,d,W,M for seconds, minutes, hours, days, weeks and months.
    :param timedur: A user-input ambiguous time duration string,
        like '1 min', '5days',etc.
    :returns: standardized time duration string.
    :raises IndexError: when the input contains no digits.
    """
    # NOTE(review): '\d+' should be a raw string (r'\d+') to avoid a
    # DeprecationWarning on newer Pythons; behavior is unchanged here.
    timedur_num = re.findall('\d+', timedur)[0] # find all digits
    timedur_strs = re.findall('[a-zA-Z]', timedur) # find all letters
    if len(timedur_strs) == 1:
        # If only one letter, lower/upper case "m"/"M" to diff min and month
        timedur_unit = timedur_strs[0].lower()
        # NOTE(review): 'y' is accepted here although the docstring lists
        # only s,m,h,d,W,M — confirm whether years are intended.
        if timedur_unit not in ('s', 'm', 'h', 'd', 'w', 'y'):
            raise Exception(
                'Invalid input time duration unit: {}!'.format(timedur))
        if timedur_strs[0] in ('w', 'W', 'M', 'y', 'Y'): # Upper case for week/month/year
            timedur_unit = timedur_unit.upper()
    else:
        # Multi-letter unit: map common spellings onto one-letter codes.
        unit_map = {
            'sec': 's',
            'min': 'm',
            'hour': 'h',
            'hr': 'h',
            'day': 'd',
            'wk': 'W',
            'week': 'W',
            'mo': 'M',
            'mon': 'M',
            'yr': 'Y',
            'year': 'Y'
        }
        timedur_unit = ''
        # First key found anywhere in the input wins, so insertion order
        # of unit_map matters (e.g. 'mo' matches before 'mon' is tried).
        for k in unit_map.keys():
            timedur_strs = re.findall(k, timedur)
            if timedur_strs:
                timedur_unit = unit_map[k]
                break
        if not timedur_unit:
            # NOTE(review): this branch raises TypeError while the
            # single-letter branch raises plain Exception — inconsistent.
            raise TypeError(
                "Invalid input time duration unit: {}!".format(timedur))
    return timedur_num + timedur_unit
|
c3d3922fa11ef7b2af3d1d1395d17c300400f473
| 46,429
|
def hr_button_id(button_id):
    """Convert a button identifier to human readable format."""
    # Indexing (not .get) so an unknown id still raises KeyError.
    return {1: 'LEFT', 2: 'MIDDLE', 3: 'RIGHT'}[button_id]
|
1ca6e943e20856c7a570972d9b3f9299c59e4b49
| 46,431
|
def ql_patchplot(ax,vals,plottitle,grid,heatmap=None):
    """
    Make a patch plot of specific metrics provided in a configuration file.

    Args:
        ax: matplotlib subplot
        vals: QA metric to be plotted (reshaped to ``grid``)
        plottitle: plot title from configuration file
        grid: (rows, cols) shape of the patch plot
    Optional:
        heatmap: colormap name (must conform to matplotlib)
    Returns:
        the patch collection produced by ``ax.pcolor``
    """
    #- Setup title and tick parameters
    ax.set_title(plottitle, fontsize=10)
    ax.tick_params(axis='x', labelsize=10, labelbottom=False)
    ax.tick_params(axis='y', labelsize=10, labelleft=False)
    #- Fall back to the default colormap when none was requested
    cmap = heatmap if heatmap else 'OrRd'
    #- Generate patch plot
    return ax.pcolor(vals.reshape(grid[0], grid[1]), cmap=cmap)
|
6dfc2001805ebe3ec68e5ed05a9c9eb971f55aad
| 46,441
|
import re
def note_filename_from_query(fn: str) -> str:
    """
    Remove characters from a note title that could cause filename problems.

    Keeps letters, digits, spaces, underscores, hyphens and slashes,
    collapses runs of whitespace to single spaces, and strips the ends.
    """
    cleaned = re.sub(r"[^a-zA-Z0-9 _\-\/]", "", fn)
    collapsed = re.sub(r"\s+", " ", cleaned)
    return collapsed.strip()
|
c660631b9b76fc38744f78b9e898ebf5e563f15e
| 46,443
|
import uuid
def get_client_token(**_):
    """Generate a random client token (a UUID4 string).

    Any keyword arguments are accepted and ignored.
    """
    token = uuid.uuid4()
    return str(token)
|
6125447f3f4d7fc4d0b3efef743c272acf9e186a
| 46,445
|
def groups_per_user(group_dictionary):
    """Invert a mapping of group name -> list of users into a mapping of
    user -> list of groups. Users can belong to multiple groups."""
    inverted = {}
    for group_name, members in group_dictionary.items():
        for member in members:
            inverted.setdefault(member, []).append(group_name)
    return inverted
|
5a48a376c8489024747af12609303617d5a57843
| 46,447
|
def save_fill(C, J):
    """Fill question marks at beginning, up to one before the first digit.

    Scans C and J in lockstep for the first position where either string
    holds a real digit, then replaces every '?' strictly before that
    position minus one with '0'.

    Args:
        C, J: equal-length strings of digits and '?' placeholders.
    Returns:
        The (C, J) pair of strings with leading placeholders filled in.
    """
    first_digit = 0
    for c, j in zip(C, J):
        if c != '?' or j != '?':
            break
        first_digit += 1
    # Convert to lists once instead of rebuilding the strings inside the
    # loop, which made the fill quadratic in the prefix length.
    C_chars, J_chars = list(C), list(J)
    for i in range(first_digit - 1):
        if C_chars[i] == '?':
            C_chars[i] = "0"
        if J_chars[i] == '?':
            J_chars[i] = "0"
    return "".join(C_chars), "".join(J_chars)
|
2b7e22027b90c32b104cd2471bc57b64f630a6c7
| 46,451
|
def set_firewall_fail_open_behavior(api, configuration, api_version, api_exception, fail_open, policy_id):
    """ Configures Firewall to operate in fail open or fail closed mode for a policy. Demonstrates how to configure multiple policy settings.
    :param api: The Deep Security API modules.
    :param configuration: Configuration object to pass to the api client.
    :param api_version: The version of the API to use.
    :param api_exception: The Deep Security API exception module.
    :param fail_open: Set to True for fail open mode, False for fail closed.
    :param policy_id: The id of the policy to modify.
    :return: A Policies object with the modified policy, or an error string on failure.
    """
    # Both failure responses share the same mode string.
    mode = "Fail open" if fail_open else "Fail closed"
    failure_response_engine_system = api.SettingValue()
    failure_response_engine_system.value = mode
    failure_response_packet_sanity_check = api.SettingValue()
    failure_response_packet_sanity_check.value = mode
    # Attach both setting values to a fresh policy object.
    policy_settings = api.PolicySettings()
    policy_settings.firewall_setting_failure_response_engine_system = failure_response_engine_system
    policy_settings.firewall_setting_failure_response_packet_sanity_check = failure_response_packet_sanity_check
    policy = api.Policy()
    policy.policy_settings = policy_settings
    try:
        # Modify the policy on the Deep Security Manager.
        policies_api = api.PoliciesApi(api.ApiClient(configuration))
        return policies_api.modify_policy(policy_id, policy, api_version, overrides=False)
    except api_exception as e:
        return "Exception: " + str(e)
|
6770eb4f32f7b436d1a01dc39a5c75f0ad8cfc7c
| 46,452
|
from pathlib import Path
def dir_is_empty(dir_path, start_pattern="."):
    """
    Check that the directory at `dir_path` contains no files, except
    possibly entries whose names begin with `start_pattern`.

    Raises FileExistsError listing the offending files otherwise;
    returns True when the directory passes the check.
    """
    dir_path = Path(dir_path)  # accept either str or Path input
    offenders = list(dir_path.rglob(f'[!{start_pattern}]*'))
    if offenders:
        names = [str(offender) for offender in offenders]
        raise FileExistsError(
            f"\n\tDirectory '{dir_path}' is not empty"
            "\n\tPlease delete the following files before proceeding:"
            f"\n\t{names}"
        )
    return True
|
6d5d863483d4c3ce5fca9cf2f96273744f0a71ef
| 46,453
|
def get(attr):
    """ record.__getitem__(attr) — returns a callable that looks up
    `attr` on whatever record it is given. """
    def lookup(record):
        return record[attr]
    return lookup
|
6184bf18b23cef43a9f31ce37f49158ba96b701f
| 46,456
|
import requests
def is_website_online(path, timeout):
    """
    Checks whether a given website is currently online. To do that, it reads
    the HTTP status code.

    Parameters
    ----------
    - path : str
        exact path which should be checked
    - timeout : float
        after `timeout` seconds without answer, a website is considered offline

    Returns
    -------
    `True` if the website could be reached with the given path and returned
    HTTP Code `200`; `False` on any request failure.
    """
    try:
        response = requests.get(path, timeout=timeout)
        return response.status_code == 200
    # The previous bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; only request-level failures mean "offline".
    except requests.RequestException:
        return False
|
066804d8d93db69861dcb80134a237ab7edadbab
| 46,467
|
def lightness_correlate(Y_b, Y_w, Q, Q_w):
    """
    Returns the *Lightness* correlate :math:`J`.

    Parameters
    ----------
    Y_b : numeric
        Tristimulus values :math:`Y_b` of the background.
    Y_w : numeric
        Tristimulus values :math:`Y_w` of the reference white.
    Q : numeric
        *Brightness* correlate :math:`Q` of the stimulus.
    Q_w : numeric
        *Brightness* correlate :math:`Q` of the reference white.

    Returns
    -------
    numeric
        *Lightness* correlate :math:`J`.

    Examples
    --------
    >>> Y_b = 100.0
    >>> Y_w = 100.0
    >>> Q = 22.209765491265024
    >>> Q_w = 40.518065821226081
    >>> lightness_correlate(Y_b, Y_w, Q, Q_w)  # doctest: +ELLIPSIS
    30.0462678...
    """
    exponent = 1 + (Y_b / Y_w) ** 0.5
    return 100 * (Q / Q_w) ** exponent
|
854785cf9de0a03dcfc16a4e094dd39298f9abd7
| 46,469
|
def ScalarProperty(cdescriptor):
    """Returns a scalar property for the given descriptor.

    The property reads and writes through the owner's `_cmsg` message,
    using GetScalar/SetScalar with the captured descriptor.
    """
    def _get(self):
        return self._cmsg.GetScalar(cdescriptor)

    def _set(self, value):
        self._cmsg.SetScalar(cdescriptor, value)

    return property(_get, _set)
|
d27505a9038503a4e567ba52b5792aac56e5170b
| 46,475
|
from typing import Optional
from typing import Union
from typing import List
from typing import Set
from typing import Tuple
def enforce_list(scope: Optional[Union[str, List, Set, Tuple]]) -> List:
    """
    Converts a space separated string to a list of scopes.

    Note:
        If an iterable is passed to this method it will return a list
        representation of the iterable. Use :py:func:`enforce_str` to
        convert iterables to a scope string.

    Args:
        scope: An iterable or string that contains scopes, or None.

    Returns:
        A list of scopes (empty for None input).
    """
    if scope is None:
        return []
    if isinstance(scope, (tuple, list, set)):
        return [str(item) for item in scope]
    # Anything else is treated as a space-separated scope string.
    return scope.strip().split(" ")
|
e87e85b7be9f964a2f8afc112b937175283c11da
| 46,479
|
def bani(registers, opcodes):
    """bani (bitwise AND immediate) stores into register C the result of the
    bitwise AND of register A and value B."""
    a_value = registers[opcodes[1]]
    immediate = opcodes[2]
    return a_value & immediate
|
dfc5ba9c19d53aa11c5a0a37bbaf6f09632d729d
| 46,480
|
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
|
21c101b32caadd208dcbeccfc7ef84ace11d848c
| 46,484
|
def F(p, d=2):
    """
    Given the depolarizing probability of a twirled channel in :math:`d`
    dimensions, returns the fidelity of the original gate.

    :param float p: Depolarizing parameter for the twirled channel.
    :param int d: Dimensionality of the Hilbert space on which the gate acts.
    """
    depolarized_fraction = (1 - p) * (d - 1) / d
    return 1 - depolarized_fraction
|
05dd2a0f9f1a00e3e62a9d00e5bda4998caadb28
| 46,490
|
def get_role_features_from_annotations(role_annotations):
    """Splits the verb and role information (in original annotations file)
    into separate values: (head word-form, head POS, span, label, tokens)."""
    head_part, role_part = role_annotations.split(")] ")
    head_pos, head_wf = head_part.lstrip("[(").split()
    span_part, tokens = role_part.split(maxsplit=1)
    span, label = span_part.rstrip(":").split(":")
    return (head_wf, head_pos, span, label, tokens)
|
2d7f3c012c9469ec9e492063237e8ce54a1b9d41
| 46,494
|
def filter_song_md(song, md_list=('id',), no_singletons=True):
    """Returns a list of desired metadata from a song.

    Does not modify the given song.

    :param song: Dictionary representing a GM song.
    :param md_list: (optional) the ordered list of metadata to select.
        The default is an immutable tuple rather than a list, avoiding
        the shared mutable-default-argument pitfall.
    :param no_singletons: (optional) if md_list is of length 1, return the
        data itself, not a singleton list.
    """
    filtered = [song[md_type] for md_type in md_list]
    if len(md_list) == 1 and no_singletons:
        return filtered[0]
    return filtered
|
f592c0dd422e16868cb565395237c5da333be1a7
| 46,499
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.