content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def prefix_value(recipe, key, prefix):
    """Prefix the value that follows a ``:key`` marker in a recipe string.

    e.g. prefix_value('... :name analysis', 'name', 42)
    -> '... :name 42_analysis'

    :param recipe: whitespace-separated recipe string
    :param key: keyword to look for (matched as substring ':key')
    :param prefix: value prepended to the keyword's value with '_'
    :return: the rewritten recipe, or None when the key is absent or has
        no following value (the original raised IndexError in that case)
    """
    words = recipe.split()
    marker = ':{}'.format(key)
    for i, word in enumerate(words):
        # Guard i + 1 so a marker that is the last word no longer raises
        # IndexError; we simply return None as for a missing key.
        if marker in word and i + 1 < len(words):
            value = words[i + 1]
            return recipe.replace(value, '{}_{}'.format(prefix, value))
from typing import Optional
def is_google_registry_domain(domain: Optional[str]) -> bool:
    """Return True when *domain* points at the Google Container Registry
    ('gcr.io') or an Artifact Registry host (ending in 'docker.pkg.dev')."""
    if domain is None:
        return False
    is_gcr = domain == 'gcr.io'
    is_artifact_registry = domain.endswith('docker.pkg.dev')
    return is_gcr or is_artifact_registry
import sys
def is_houdini():
    """Return True when the current interpreter is Houdini's bundled Python
    (detected by 'houdini' appearing in the executable path).

    :return: bool
    """
    executable_path = sys.executable
    return 'houdini' in executable_path
def is_goal_attained(character: dict) -> bool:
    """
    Check if goal is attained.
    :param character: a dictionary
    :precondition: character must be a dictionary
    :postcondition: returns True if character has a key Artifact, else returns False
    :return: True if goal is attained, otherwise False
    >>> is_goal_attained({"Artifact": "Necronian Servo-Skull"})
    True
    >>> is_goal_attained({"Max wounds": 1000000000})
    False
    """
    # Membership tests apply to dict keys directly; calling .keys() is redundant.
    return "Artifact" in character
def reindent(s, numSpaces):
    """Useful for pretty printer, appends spaces in front of children nodes
    to visualize the breadth first hierarchy.

    Each line of *s* is stripped and re-indented by *numSpaces* spaces.
    Lines are re-joined with newlines: the original ``''.join`` collapsed
    every line onto one, destroying the structure it had just indented.
    """
    leading_space = numSpaces * ' '
    lines = [leading_space + line.strip()
             for line in s.splitlines()]
    return '\n'.join(lines)
def deep_list(x):
    """fully copies trees of tuples to a tree of lists.
    deep_list( (1,2,(3,4)) ) returns [1,2,[3,4]]

    Under Python 3 ``map`` is lazy, so the original returned a map object
    instead of the documented list; a comprehension materialises it.
    """
    if not isinstance(x, tuple):
        return x
    return [deep_list(item) for item in x]
import os
def collect_files(root_dir, *extensions):
    """Finds files with the given extensions in the root_dir.
    Args:
        root_dir: The directory from which to start traversing.
        *extensions: Filename extensions (including the leading dot) to find.
    Returns:
        A list of filenames, all starting with root_dir, that have one of the given
        extensions. Each file appears at most once, even when several of the
        given extensions match it (the original appended it once per match).
    """
    suffixes = tuple(extensions)
    result = []
    for root, _, files in os.walk(root_dir):
        for basename in files:
            # str.endswith accepts a tuple of suffixes: one test per file.
            if basename.endswith(suffixes):
                result.append(os.path.join(root, basename))
    return result
def extract_characteristics(spe):
    """
    Extracts all the characteristics from a species and it's references
    Parameters:
        spe (meta-species object)
    """
    # One characteristics list per reference, in reference order.
    return [reference.get_characteristics() for reference in spe.get_references()]
import codecs
import sys
def _get_startup_screen_specs():
    """Get specs for displaying the startup screen on stdout.
    This is so we don't get encoding errors when trying to print unicode
    emojis to stdout (particularly with Windows Command Prompt).
    Returns
    -------
    `tuple`
        Tuple in the form (`str`, `str`, `bool`) containing (in order) the
        on symbol, off symbol and whether or not the border should be pure ascii.
    """
    # Probe stdout's own encoding: each encode attempt below tells us
    # whether the terminal can actually render the glyph.
    # NOTE(review): sys.stdout.encoding can be None when stdout is replaced
    # (e.g. under test capture) — codecs.getencoder would then raise.
    encoder = codecs.getencoder(sys.stdout.encoding)
    check_mark = "\N{SQUARE ROOT}"
    try:
        encoder(check_mark)
    except UnicodeEncodeError:
        # Terminal cannot show the check mark: fall back to ASCII markers.
        on_symbol = "[X]"
        off_symbol = "[ ]"
    else:
        on_symbol = check_mark
        off_symbol = "X"
    try:
        encoder("┌┐└┘─│") # border symbols
    except UnicodeEncodeError:
        # Box-drawing characters unavailable: caller should draw pure ASCII.
        ascii_border = True
    else:
        ascii_border = False
    return on_symbol, off_symbol, ascii_border
def get_groups_by_identifier(database, group_identifiers, identifier_name):
    """Returns a list of (group, group_identifier) tuples based a previously made grouping"""
    groups = []
    for group_identifier in group_identifiers:
        # Gather every section whose identifier field matches this group.
        members = [section for section in database["sections"]
                   if section[identifier_name] == group_identifier]
        groups.append((members, group_identifier))
    return groups
def _setup_genome_annotations(g, args, ann_groups):
    """Configure genome annotations to install based on datatarget.

    g: genome config dict; mutated in place ("annotations" rewritten,
       "annotations_available" popped, "validation" possibly removed).
    args: parsed CLI args; args.datatarget lists requested targets/groups.
    ann_groups: mapping of group name -> list of annotation targets.
    Returns the (mutated) genome dict.
    """
    # Merge currently-installed and available annotations; pop removes the
    # "annotations_available" key from g as a deliberate side effect.
    available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
    anns = []
    for orig_target in args.datatarget:
        # A datatarget may name a whole group; expand it, otherwise treat
        # it as a single annotation target.
        if orig_target in ann_groups:
            targets = ann_groups[orig_target]
        else:
            targets = [orig_target]
        for target in targets:
            # Keep only targets this genome actually offers.
            if target in available_anns:
                anns.append(target)
    g["annotations"] = anns
    # Validation data is only meaningful when variation data was requested.
    if "variation" not in args.datatarget and "validation" in g:
        del g["validation"]
    return g
def get_important_vals(labeled, unlabeled):
    """
    Get values that are important for later
    Args:
        labeled (np.array or list of floats)
            catastrophe times
        unlabeled (np.array or list of floats)
            catastrophe times
    Returns:
        [labeled_len, unlabeled_len, high]: max indexable index of each
        list, plus one more than the larger of the two final entries.
    """
    last_labeled = len(labeled) - 1
    last_unlabeled = len(unlabeled) - 1
    # NOTE: assumes the inputs are sorted ascending, so the final entry of
    # each list is its largest value (matches the original comparison).
    high = max(labeled[last_labeled], unlabeled[last_unlabeled]) + 1
    return [last_labeled, last_unlabeled, int(high)]
import random
def make_vertices(nr, max_val=1024):
    """Generates a set of vertices as nr-tuple of 2-tuple of integers
    @return: tuple of 2-tuple
    """
    vertices = []
    for _ in range(nr):
        # Draw x then y, matching the original call order so seeded runs
        # produce identical coordinates.
        x = random.randint(0, max_val)
        y = random.randint(0, max_val)
        vertices.append((x, y))
    return tuple(vertices)
def sequence_pad(sequence, maxlen, dtype='int32', padding='pre', truncating='pre', value=0):
    """Pads sequences to the same length.
    Args:
        sequence: a single sequence (list) or list of lists.
        maxlen: Int, maximum length of all sequences.
        dtype: unused here; kept for interface compatibility.
        padding: 'pre' or 'post' — pad before or after each sequence.
        truncating: 'pre' or 'post' — drop values from the start or the end
            of sequences longer than `maxlen`.
        value: padding value.
    Returns:
        List of lists with shape `(len(sequences), maxlen)`
    """
    if not isinstance(sequence[0], list):
        sequence = [sequence]
    if padding not in ('pre', 'post') or truncating not in ('pre', 'post'):
        raise ValueError('Padding type "%s" not understood or Truncating type "%s" not understood' % (padding, truncating))

    def _fit(seq):
        # Truncate when too long, otherwise pad on the requested side.
        if len(seq) > maxlen:
            return seq[:maxlen] if truncating == 'post' else seq[-maxlen:]
        filler = [value] * (maxlen - len(seq))
        return seq + filler if padding == 'post' else filler + seq

    return [_fit(seq) for seq in sequence]
def select_named_pairs(pair_data):
    """Returns a list of (name, base id, target id) tuples for the codepoint
    and primary pairs in pair_data.

    Codepoint-matched pairs are named after the 'u(ni)XXXX' form of the
    codepoint; primary pairs are named from their glyph id(s), including the
    unmatched cases where one glyph id is -1."""
    named_pairs = []
    cp_pairs = pair_data.cp_pairs
    if cp_pairs is not None:
        for base, target, cp in cp_pairs:
            prefix = "uni" if cp < 0x10000 else "u"
            named_pairs.append(("%s%04X" % (prefix, cp), base, target))
    pri_pairs = pair_data.pri_pairs
    if pri_pairs is not None:
        for base, target, _ in pri_pairs:
            if base == target:
                name = "g_%05d" % base
            elif target == -1:
                name = "g_b%05d" % base
            elif base == -1:
                name = "g_t%05d" % target
            else:
                name = "g_b%05d_t%05d" % (base, target)
            named_pairs.append((name, base, target))
    return named_pairs
def deduplicate(list_object):
    """Rebuild `list_object` removing duplicated and keeping order"""
    result = []
    for element in list_object:
        if element in result:
            continue
        result.append(element)
    return result
def find_chains(graph_entry):
    """
    Retrieve chains of nodes from an igraph graph.
    Here we essentially extract all chains and return their vertices indices
    E.G Graph:
    M -v    ,-> K
    A -> M -> I -> N -> O
    K -^    `-> P
    (9 Nodes, 8 Edges)
    would give us the node indices of [A M I N O] in a list (and all other available
    chains in the graph)
    """
    # The graph is expected to contain exactly one vertex whose 'aminoacid'
    # attribute is '__start__'; the unpacking asserts that.
    [__start_node__] = graph_entry.vs.select(aminoacid="__start__")
    def traverse_to_end(graph_entry, complete_chain, single_nodes, next_node, c):
        # NOTE: mutates single_nodes/single_in (the latter via the enclosing
        # scope) while walking, so each node joins at most one chain.
        # iterate as long as possible.
        while next_node in single_nodes:
            single_nodes.remove(next_node)
            c.append(next_node)
            next_node = graph_entry.vs[next_node].neighbors(mode="OUT")[0].index
        # Special case for single in
        if next_node in single_in:
            single_in.remove(next_node)
            c.append(next_node)
            next_node = graph_entry.vs[next_node].neighbors(mode="OUT")[0].index
        # Skip chains containing only 1 element
        if len(c) != 1:
            complete_chain.append(c)
    # Sort all nodes into 3 possible bins
    single_nodes = set()
    single_out = set()
    single_in = set()
    for idx, (a, b) in enumerate(zip(graph_entry.vs.indegree(), graph_entry.vs.outdegree())):
        if a == 1 and b == 1:
            single_nodes.add(idx) # case single
        if a == 1 and b > 1:
            single_in.add(idx) # case one in but multiple out
        if a > 1 and b == 1:
            single_out.add(idx) # case one out but multiple in
    # save all chains in this list
    complete_chain = []
    # CASE 1: Chain is starting with a single out node
    for so in single_out:
        c = [so]
        next_node = graph_entry.vs[so].neighbors(mode="OUT")[0].index
        traverse_to_end(graph_entry, complete_chain, single_nodes, next_node, c)
    # CASE 2: it may happen that the start node is at a beginning of chain.
    # Here we do a intersection of remaining nodes in single_nodes with the
    # nodes having a direct connection to start
    start_set = {x.index for x in __start_node__.neighbors(mode="OUT")}
    single_start_points = single_nodes.intersection(start_set)
    for sn in single_start_points:
        c = [sn]
        next_node = graph_entry.vs[sn].neighbors(mode="OUT")[0].index
        single_nodes.remove(sn)
        traverse_to_end(graph_entry, complete_chain, single_nodes, next_node, c)
    # return complete chain
    return complete_chain
def lacosmic_param_dictionary():
    """Holds the best LACosmic parameters for each filter, primarily
    for white dwarf standard GRW+70. Also works well on other white
    dwarfs and red star P330E.
    If running over a field of stars instead of a single standard, will
    likely need increase 'objlim' param.
    Add filters as needed!
    These are determined using :mod:`run_lacosmic_tester`.
    Parameters:
        nothing
    Returns:
        param_dict : dictionary
            Parameters to be used in LACosmic.
            {'Filter':[sigclip, sigfrac, objlim, niter, sigclip_pf]}
    Outputs:
        nothing
    """
    # Hand-tuned per-filter values; each list is
    # [sigclip, sigfrac, objlim, niter, sigclip_pf].
    param_dict = {'F200LP':[5.5, 0.05, 7, 5, 9.5],
                  'F218W':[5.5, 0.3, 2, 5, 9.5],
                  'F225W':[5.0, 0.25, 2, 5, 9.5],
                  'F275W':[5.5, 0.3, 2, 4, 9.5],
                  'F280N':[5.0, 0.3, 2, 5, 9.5],
                  'F300X':[5.0, 0.3, 2, 5, 9.5],
                  'F336W':[6.5, 0.3, 5, 5, 9.5],
                  'F365N':[4.5, 0.3, 2, 5, 9.5],
                  'F390M':[5.0, 0.3, 2, 5, 9.5],
                  'F390W':[5.5, 0.25, 2, 5, 9.5],
                  'F395N':[4.5, 0.3, 5, 5, 9.5],
                  'F410M':[5.0, 0.3, 2, 5, 9.5],
                  'F438W':[5.0, 0.3, 2, 5, 9.5],
                  'F343N':[5.0, 0.3, 2, 5, 9.5],
                  'F373N':[5.0, 0.3, 2, 4, 9.5],
                  'F467M':[5.0, 0.3, 2, 5, 9.5],
                  'F469N':[4.5, 0.3, 5, 5, 9.5],
                  'F475W':[5.0, 0.3, 2, 5, 9.5],
                  'F502N':[5.0, 0.3, 2, 5, 9.5],
                  'F547M':[6.5, 0.3, 2, 5, 9.5],
                  'F555W':[4.5, 0.3, 5, 5, 9.5],
                  'F606W':[5.0, 0.3, 2, 5, 9.5],
                  'F631N':[4.5, 0.3, 5, 5, 9.5],
                  'F645N':[4.5, 0.3, 5, 5, 9.5],
                  'F656N':[4.5, 0.3, 5, 5, 9.5],
                  'F657N':[4.5, 0.3, 5, 5, 9.5],
                  'F658N':[4.5, 0.3, 5, 5, 9.5],
                  'F665N':[4.5, 0.3, 5, 5, 9.5],
                  'F673N':[4.5, 0.3, 5, 5, 9.5],
                  'F680N':[4.5, 0.3, 5, 5, 9.5],
                  'F689M':[8.5, 0.3, 5, 5, 9.5],
                  'F763M':[5.0, 0.3, 5, 5, 9.5],
                  'F775W':[5.5, 0.3, 5, 5, 9.5],
                  'F814W':[5.5, 0.3, 5, 5, 9.5],
                  'F845M':[5.0, 0.3, 5, 5, 9.5],
                  'F850LP':[7.5, 0.3, 2, 5, 9.5]}
    return param_dict
from typing import Iterable
from typing import Tuple
from typing import List
def _get_headers(table: Iterable[Tuple[int, List]]) -> List[List[str]]:
"""
return a list of all of the header rows (which are lists of strings.
[ # headers
['str', 'str', 'str'], # header rows
['str', 'str', 'str']
]
"""
result = []
for row_num, line in table:
if row_num == 0:
result.append(line)
return result | 4dc1cee728cf4b262d579afad512edc31843f849 | 51,443 |
def get_lastlayer_params(net):
    """get last trainable layer of a net
    Args:
        net: network architecture exposing named_parameters()
    Returns:
        last layer weights and last layer bias (the final parameters whose
        names contain 'weight' / 'bias')
    """
    weights, bias = None, None
    for name, param in net.named_parameters():
        # Later matches overwrite earlier ones, so the last layer wins.
        if 'weight' in name:
            weights = param
        if 'bias' in name:
            bias = param
    return weights, bias
import yaml
def reformat_yaml(yaml_data):
    """Normalize YAML to a common format"""
    # Round-trip through the parser so output formatting is canonical;
    # sort_keys=False preserves the original key order.
    parsed = yaml.safe_load(yaml_data)
    normalized = yaml.dump(parsed, sort_keys=False)
    return normalized
import re
import string
def stripLeadingTrailingWhitespace(text):
    """Given a string, remove any leading or trailing whitespace"""
    # str.strip with an explicit character set is equivalent to the two
    # original regex substitutions (same string.whitespace set) without re.
    return text.strip(string.whitespace)
from typing import List
from typing import Tuple
def char_span_to_token_span(spans: List[Tuple[int, int]],
                            char_start: int, char_end: int) -> Tuple[int, int]:
    """ Map a character span to the minimal containing token span.
    Args:
        spans: a list of end-exclusive character spans for each token
        char_start: starting character offset
        char_end: ending character offset (exclusive)
    Returns:
        (start, end) token indices, end-exclusive
    """
    # Earliest token whose span ends at or after the target start.
    starting_candidates = [i for i, (_, end) in enumerate(spans) if end >= char_start]
    tok_start = min(starting_candidates)
    # Latest token whose span begins at or before the target end.
    ending_candidates = [i for i, (begin, _) in enumerate(spans) if begin <= char_end]
    tok_end = max(ending_candidates)
    return (tok_start, tok_end + 1)
def wrong_message():
    """
    Fallback reply sent when the incoming message could not be interpreted.
    :return: the user-facing "not understood" string
    """
    # Fixed grammar of the user-facing reply ("have no understood" ->
    # "have not understood").
    return "I have not understood your message :("
def pre_hash(s):
    """
    Prepends a string with its length.
    EXAMPLES::
        sage: from sage.doctest.parsing import pre_hash
        sage: pre_hash("abc")
        '3:abc'
    """
    length = len(s)
    return "{}:{}".format(length, s)
def _color_bool(series, color, bg_color):
"""Converts a boolean annotation column to colors."""
mapped = series.map({True: color, False: bg_color}, na_action='ignore')
return mapped, color | d5bd01d93c213e8df3544ec848736ee619ad08f2 | 51,456 |
def get_edf_signal_indices(f, signals):
    """Read EDF signals with the given names into a matrix"""
    # Case-insensitive lookup of each requested signal in the file's labels.
    available = [label.lower() for label in f.getSignalLabels()]
    return [available.index(name.lower()) for name in signals]
import argparse
def get_parser(parser=None):
    """ Returns an argument parser that includes host and token.
    :returns: :class:`argparse.ArgumentParser` object that includes host and token arguments.
    """
    target = parser if parser is not None else argparse.ArgumentParser(
        description="Creates a test project.")
    target.add_argument('--host', default='https://www.tatorapp.com')
    target.add_argument('--token', help="Your API token.")
    return target
import io
def read_file(file):
    """Open and read file input.

    Uses a ``with`` block so the handle is closed even if ``read`` raises
    (the original leaked the open file on error).
    """
    with io.open(file, 'r', encoding='utf-8') as f:
        return f.read()
def matches_n_consecutive_words(text: str, database: set, consecutive_n: int):
    """Check whether a phrase (one or more words separated by whitespace
    characters) from the given database (set of phrases) appears in *text*.
    Returns every matching phrase, shortest spans first.
    """
    words = text.split()
    total = len(words)
    found = []
    for width in range(1, consecutive_n + 1):
        # All start offsets where a window of this width still fits.
        for begin in range(total - width + 1):
            phrase = ' '.join(words[begin:begin + width])
            if phrase in database:
                found.append(phrase)
    return found
def split_into_integers(coordinate):
    """Get individual parts of a float and transform into integers
    :coordinate: float value
    :returns: list of integers
    """
    parts = str(coordinate).split('.')
    return [int(part) for part in parts]
def unique_char(string):
    """Return True when, ignoring case and spaces, no character repeats."""
    condensed = ''.join(string.lower().split(' '))
    # Distinct-count equals length exactly when there are no repeats.
    return len(set(condensed)) == len(condensed)
def get_value(dictionary, key):
    """Django Template filter to get dictionary value based on the given key"""
    value = dictionary.get(key)
    return value
import logging
def get_broadcasts(resp):
    """Parses an NHL schedule response to get broadcast information.
    Args:
        resp: JSON response from NHL Schedule API call.
    Returns:
        broadcasts: Dictionary of home & away broadcasts ('TBD' for both
        when the response has no broadcast data).
    """
    channels = {}
    try:
        for item in resp["broadcasts"]:
            side = item["type"]
            if side == "national":
                # A national feed covers both clubs; no need to look further.
                channels["away"] = item["name"]
                channels["home"] = item["name"]
                break
            channels[side] = item["name"]
    except KeyError:
        # Missing 'broadcasts'/'type'/'name' key: fall back to placeholders.
        logging.warning("Broadcasts not available - setting them to TBD.")
        channels["home"] = "TBD"
        channels["away"] = "TBD"
    return channels
def get_verified_emails(user):
    """
    Get a list of non-primary, verified email addresses.
    """
    emails = user.get_emails(is_verified=True, include_primary=False)
    return emails
def str_input(prompt="", max_length=0):
    """Uses `input(prompt)` to request a str value from the user, retrying if the user doesn't enter anything or
    only enters whitespace.
    @param str prompt: The prompt to display.
    @param int max_length: The maximum length of the string. Defaults to no length limit.
    @return str: The entered value.
    """
    while True:
        string = input(prompt)
        # Re-prompt on empty or whitespace-only input.
        if not len(string.strip()):
            continue
        if max_length != 0 and len(string) > max_length:
            # NOTE(review): the warning is printed but the overlong string is
            # still returned below — confirm whether this should re-prompt.
            print("Your text is too long. It must not exceed a length of %i characters." % max_length)
        return string
import inspect
import textwrap
def func_to_source_code(function, dedent=True):
    """
    Transforms function into raw string of source code.
    """
    if not (inspect.isfunction(function) or inspect.ismethod(function)):
        raise TypeError(
            "The type of 'function' should be a function or method, but received {}."
            .format(type(function).__name__))
    lines, _ = inspect.getsourcelines(function)
    # Blank out comment-only lines (instead of deleting them) so error
    # messages keep their original line numbers.
    cleaned = []
    for line in lines:
        cleaned.append('\n' if line.lstrip().startswith('#') else line)
    code = ''.join(cleaned)
    return textwrap.dedent(code) if dedent else code
import os
def get_env_val(key):
    """get environmental variable
    Returns:
        env variable (str)
    Raises:
        Exception: when the variable is not set.
    """
    try:
        return os.environ[key]
    except KeyError:
        raise Exception("Did not find env variable for " + str(key))
def fill_tabs(string: str):
    """Replaces every occurence of \\t with four spaces"""
    four_spaces = " " * 4
    return string.replace("\t", four_spaces)
def calculate_mins(time_string):
    """Convert a time string like '1h 30min' into total minutes."""
    hour_part, minute_part = time_string.split()
    hours = int(hour_part.replace('h', ''))
    minutes = int(minute_part.replace('min', ''))
    return hours * 60 + minutes
import argparse
from pathlib import Path
def _parse_args() -> argparse.Namespace:
    """Parses arguments for this script, splitting out the command to run.

    Reads sys.argv; the trailing positional 'original_cmd' captures the
    wrapped command via argparse.REMAINDER.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    # GN build-context paths/toolchains forwarded by the build system.
    parser.add_argument('--gn-root',
                        type=Path,
                        required=True,
                        help=('Path to the root of the GN tree; '
                              'value of rebase_path("//")'))
    parser.add_argument('--current-path',
                        type=Path,
                        required=True,
                        help='Value of rebase_path(".")')
    parser.add_argument('--default-toolchain',
                        required=True,
                        help='Value of default_toolchain')
    parser.add_argument('--current-toolchain',
                        required=True,
                        help='Value of current_toolchain')
    parser.add_argument('--directory',
                        type=Path,
                        help='Execute the command from this directory')
    parser.add_argument('--module', help='Run this module instead of a script')
    parser.add_argument('--env',
                        action='append',
                        help='Environment variables to set as NAME=VALUE')
    # Stamp file: touched after a successful run so the build can track it.
    parser.add_argument(
        '--touch',
        type=Path,
        help='File to touch after the command is run',
    )
    parser.add_argument(
        '--capture-output',
        action='store_true',
        help='Capture subcommand output; display only on error',
    )
    parser.add_argument(
        'original_cmd',
        nargs=argparse.REMAINDER,
        help='Python script with arguments to run',
    )
    return parser.parse_args()
def f(x):
    """ int -> int """
    #y : int
    y = 1
    #a : int
    # NOTE(review): `a` is computed but never used; the function always
    # returns 1 regardless of x — confirm this example code is intentional.
    a = x + 1
    return y
def dedefaultdictize(d):
    """Recursively convert (default)dicts into plain dicts.

    >>> vivify = lambda: defaultdict(vivify)
    >>> d = vivify()
    >>> d['foo'][0]['bar'] = 'baz'
    >>> d['foo'][1] = 'quux'
    >>> dedefaultdictize(d)
    {'foo': {0: {'bar': 'baz'}, 1: 'quux'}}
    """
    if isinstance(d, dict):
        return {key: dedefaultdictize(value) for key, value in d.items()}
    return d
def _update_deletion_data(variants, deletions):
    """
    Compare new deletion with previously found deletions and update variants with the new deletion if
    has a better allele ratio
    :param variants: (dict) previously found deletions, mutated in place
    :param deletions: (dict) new deletions keyed like variants
    :return: the (mutated) variants dict
    """
    def extract_info(info):
        # The last tab-separated field is a space-separated list of
        # "key=value" annotations; 'comment...' entries are skipped.
        return dict(tuple(comment.split("=")) for comment in info.split("\t")[-1].split(" ")
                    if not comment.startswith("comment"))
    for key, info in deletions.items():
        if key in variants:
            existing_variant_info = extract_info(variants[key])
            new_variant_info = extract_info(info)
            # Replace the stored record only when the new allele ratio
            # (alt count / read depth) beats the recorded one.
            if int(new_variant_info['alleleFreq'].split(",")[1])/int(new_variant_info['readDepth']) > float(existing_variant_info['variantAlleleRatio']):
                variants[key] = info
        else:
            variants[key] = info
    return variants
import string
def split_into_words(speech):
    """Split paragraphs into words
    Arg:
        speech: A string
    Returns:
        A list of words (punctuation and whitespace act as separators)
    """
    word = []
    words = []
    for letter in speech:
        if letter in string.punctuation or letter in string.whitespace:
            if word:
                words.append(''.join(word))
                word = []
        else:
            # Anything that is not punctuation/whitespace extends the
            # current word (the original re-tested this redundantly).
            word.append(letter)
    # Flush the final word: the original silently dropped a word that ran
    # to the end of the string with no trailing separator.
    if word:
        words.append(''.join(word))
    return words
def find(r):
    """around 0.3ms"""
    # bytes membership is equivalent to find(...) != -1.
    return b"response" in r
from typing import Sequence
def remove_reps(
    seq: Sequence,
) -> Sequence:
    """Remove consecutive equal elements from a given sequence.

    Returns a list; an empty input yields an empty list instead of raising
    IndexError on ``seq[0]`` as the original did.
    """
    result = []
    for el in seq:
        # Keep el only when it differs from the last kept element.
        if not result or el != result[-1]:
            result.append(el)
    return result
def linear_trans(u0, u1, t, tc, teps):
    """
    Linear transition from u0 to u1 in interval `tc - teps < t < tc + teps`.
    """
    lo = tc - teps
    hi = tc + teps
    if t <= lo:
        return u0
    if t >= hi:
        return u1
    # Linear interpolation inside the transition window.
    fraction = (t - lo) / (hi - lo)
    return u0 + (u1 - u0) * fraction
from typing import List
from typing import Counter
def extract_word_ngrams(tokens: List[str], n: int) -> Counter:
    """Extracts n-grams with order `n` from a list of tokens.
    :param tokens: A list of tokens.
    :param n: The order of n-grams.
    :return: a Counter object with n-grams counts.
    """
    ngrams = Counter()
    for start in range(len(tokens) - n + 1):
        ngrams[' '.join(tokens[start:start + n])] += 1
    return ngrams
def terminal_side_effect(state, depth_remaining, time_remaining):
    """ Side effect returns true if we reach a terminal state
    :param state: the state of the game to evaluate
    :param depth_remaining: true if there is depth remaining
    :param time_remaining: true if there is time remaining (currently unused)
    :return: true if we are terminating
    """
    if not depth_remaining:
        return True
    # End states are the single letters 'e'..'m' (codes 101..109); build
    # the set directly instead of looping to fill a list.
    end_state_nodes = {chr(code) for code in range(101, 110)}
    return state in end_state_nodes
import argparse
def parse_cmdline(argv):
    """Parse the command line.

    :param argv: argument list (without the program name)
    :return: dict of parsed arguments on success.
    NOTE(review): on validation failure this returns the int 1 rather than
    a dict — callers must handle both; confirm this is intended.
    """
    parser = argparse.ArgumentParser(
        description='Ingest metadata for files generated outside DESDM framework')
    parser.add_argument('--des_services', action='store', help='')
    parser.add_argument('--section', action='store',
                        help='Must be specified if not set in environment')
    parser.add_argument('--provmsg', action='store', required=True)
    parser.add_argument('--wclfile', action='store')
    parser.add_argument('--outcfg', action='store')
    parser.add_argument('--classmgmt', action='store')
    parser.add_argument('--classutils', action='store')
    parser.add_argument('--no-commit', action='store_true', default=False)
    parser.add_argument('--list', action='store', help='format: fullname, filetype')
    parser.add_argument('--archive', action='store', dest='archive_name',
                        help='archive name, single value', required=True)
    parser.add_argument('--filetype', action='store',
                        help='single value, must also specify search path')
    parser.add_argument('--path', action='store',
                        help='single value, must also specify filetype')
    parser.add_argument('--version', action='store_true', default=False)
    args = vars(parser.parse_args(argv))  # convert to dict
    # Cross-argument validation: filetype and path must be single values
    # and must be given together; otherwise a --list file is required.
    if args['filetype'] and ',' in args['filetype']:
        print("Error: filetype must be single value\n")
        parser.print_help()
        return 1
    if args['path'] and ',' in args['path']:
        print("Error: path must be single value\n")
        parser.print_help()
        return 1
    if args['filetype'] and args['path'] is None:
        print("Error: must specify path if using filetype\n")
        parser.print_help()
        return 1
    if args['filetype'] is None and args['path']:
        print("Error: must specify filetype if using path\n")
        parser.print_help()
        return 1
    if not args['filetype'] and not args['list']:
        print("Error: must specify either list or filetype+path\n")
        parser.print_help()
        return 1
    return args
def lib2to3_unparse(node):
    """Given a lib2to3 node, return its string representation."""
    return str(node)
def v2_uniques_only(iterable):
    """Return iterable in the same order but with duplicates removed.

    Works with any iterable by tracking every item seen so far in a list
    and skipping items already present. Note that the membership test is
    linear, so this is slow for large inputs (kept from the original
    design on purpose — it supports unhashable items).
    """
    result = []
    for item in iterable:
        if item in result:
            continue
        result.append(item)
    return result
def build_dag_id(partner_id):
    """Builds the DAG ID for the given Airflow variable.
    Args:
        partner_id: Partner ID to build the dag_id for.
    Returns:
        The DAG ID.
    """
    return 'algo_readiness_reporting_{}_dag'.format(partner_id)
def sum_numbers(numbers):
    """
    Sum an array of numbers
    :param list(float) numbers: The array of numbers to sum
    :returns: The sum of the numbers
    """
    total = sum(numbers)
    return total
import json
import logging
def parse(msg):
    """ Builds a dict given another dictionary or
    a JSON UTF-8 encoded string/bytearray.

    Returns None (after logging a warning) when the string is not valid
    JSON; non-string inputs are passed through unchanged.
    """
    if isinstance(msg, (bytes, bytearray)):
        msg = msg.decode('utf-8')
    if isinstance(msg, str):
        try:
            msg = json.loads(msg.strip())
        except ValueError:
            # json.JSONDecodeError subclasses ValueError; the original bare
            # except also swallowed KeyboardInterrupt/SystemExit.
            logging.warning('Invalid JSON message: {}'.format(msg))
            return None
    return msg
def _subtype_to_dict(self):
"""Convert an unpickleable subtype instance to a dictionary so it can be
recovered at a later date."""
attributes = {"name": self.name, "subtype_id": self.subtype_id}
params = {}
for param, val in vars(self).items():
params[param] = val
attributes["params"] = params
return attributes | a291375a2c645d8dbf7d81f51f13f63ea4d51d16 | 51,493 |
def glyphInFont( glyph, font ):
    """ Given a glyph and a font, use a pixel-finding heuristic to determine
    if the glyph renders to something other than an "empty border" non-existant
    font symbol. Returns True if it renders to something.
    """
    result = False
    WHITE = ( 255, 255, 255 ) # can be any colour pair with contrast
    BLACK = ( 0, 0, 0 )
    try:
        # Render the glyph white-on-black, then look for any non-black
        # pixel in a centred box (non-renderable glyphs draw only a border).
        text_image = font.render( glyph, True, WHITE, BLACK )
        text_rect = text_image.get_rect()
        x_centre = text_rect.width // 2
        y_centre = text_rect.height // 2
        # Non-renderable glyphs have a border.
        # work out a 50% search box, centred inside the glyph
        box_top = y_centre - ( text_rect.height // 4 )
        box_bottom = y_centre + ( text_rect.height // 4 )
        box_left = x_centre - ( text_rect.width // 4 )
        box_right = x_centre + ( text_rect.width // 4 )
        # Trace a Horizontal line through the middle of the bitmap
        # looking for non-black pixels
        for x in range( box_left, box_right ):
            if ( text_image.get_at( ( x, y_centre ) ) != BLACK ):
                result = True
                break
        # If not found already, trace a line vertically
        if ( result == False ):
            for y in range( box_top, box_bottom ):
                if ( text_image.get_at( ( x_centre, y ) ) != BLACK ):
                    result = True
                    break
        # If still not found, check every pixel in the centre-box
        if ( result == False ):
            for y in range( box_top, box_bottom ):
                for x in range( box_left, box_right ):
                    if ( text_image.get_at( ( x, y ) ) != BLACK ):
                        result = True
                        break
    except UnicodeError as uce:
        # Glyph-ID not supported by the font backend; treat as "not in font".
        pass # False goes through
    return result
def _unique_periodictorsion_parameters(dihedral1, dihedral2):
"""Return true if dihedral1 contains the parameters of dihedral2.
Parameters
----------
dihedral1: ET.subelement
This is the "larger" dihedral ETelement that is collecting multiple
periodicities
dihedral2: ET.subelement
This should only contain periodicity1, phase1, k1 attributes
"""
n = 1
param_tuples = set()
while "periodicity{}".format(n) in dihedral1.attrib:
param_tuples.add(
(
dihedral1.attrib["periodicity{}".format(n)],
dihedral1.attrib["phase{}".format(n)],
dihedral1.attrib["k{}".format(n)],
)
)
n += 1
if (
dihedral2.attrib["periodicity1"],
dihedral2.attrib["phase1"],
dihedral2.attrib["k1"],
) in param_tuples:
return False
else:
return True | 35b5651581c472a6b8f2940cac74b27a735ac588 | 51,496 |
def is_ghb_bound(command, path):
    """Return whether path is destined for git-http-backend"""
    if command == "GET":
        # Smart-HTTP discovery requests.
        get_tails = (
            "/info/refs?service=git-upload-pack",
            "/info/refs?service=git-receive-pack",
        )
        return path.endswith(get_tails)
    assert command == "POST"
    # Smart-HTTP RPC requests.
    post_tails = ("git-upload-pack", "git-receive-pack")
    return path.endswith(post_tails)
def chunkify(lst, n):
    """Split *lst* into n roughly equal interleaved parts.

    http://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts
    """
    # Part k receives elements k, k+n, k+2n, ... (round-robin assignment).
    return [lst[start::n] for start in range(n)]
def get_last_log_error_title(log_path='modules.log'):
    """Return the line that caused the most recent error in the log file.

    Args:
        log_path: Path to the log file.  Defaults to ``'modules.log'`` so
            existing callers keep the old behaviour.

    Returns:
        The third line from the end of the file, which by the log format
        is always the title line of the last recorded error.

    Raises:
        IndexError: If the log contains fewer than three lines.
    """
    with open(log_path) as log_file:
        lines = log_file.readlines()
    # Third line from the bottom of the error log is always the title
    # of the last error.
    return lines[-3]
def _parse_slices(slicing_string):
"""Construct a tuple of slices from the slicing string.
The string must be a valid slicing string.
Args:
slicing_string: (str) Input slicing string to be parsed.
Returns:
tuple(slice1, slice2, ...)
Raises:
ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.
"""
parsed = []
for slice_string in slicing_string[1:-1].split(","):
indices = slice_string.split(":")
if len(indices) == 1:
parsed.append(int(indices[0].strip()))
elif 2 <= len(indices) <= 3:
parsed.append(
slice(*[
int(index.strip()) if index.strip() else None for index in indices
]))
else:
raise ValueError("Invalid tensor-slicing string.")
return tuple(parsed) | 0d3e3ce23837dfb847a5f2bf146f26082a0f2b89 | 51,500 |
def collect_df(df):
    """Materialise a pyspark DataFrame as a list of Rows.

    The pyspark Spark context is stopped on pipeline termination, so the
    DataFrame must be collected before the pipeline completes.
    """
    return df.collect()
def get_form_name(form):
    """Return the bare class name of a form instance."""
    # str(cls) looks like "<class 'pkg.module.Name'>"; take the last
    # dot-separated piece and drop the trailing "'>".
    qualified = str(form.__class__)
    return qualified.split(".")[-1][:-2]
def format_string(console):
    """
    This function accepts a string of video game console data, delimited by
    commas (e.g. an element from <vg_consoles>), and returns a formatted string.

    Parameters:
    - console (str): A string containing all of the information on the console
      (i.e. an element from <vg_consoles>).

    Returns:
    - (str): A string that has formatted the information from <console> in the
      following format:
          "<Console name> was produced in <Release year> by <Production company>"
    """
    # Split once instead of three times; the original also contained a dead
    # `pass` statement before the return, which is removed here.
    fields = console.split(',')
    return f"{fields[0]} was produced in {fields[3]} by {fields[1]}"
import os
def find_matching_dirs(root, target_name, blacklist):
    """Return directories that contain an entry with a particular name.

    Paths containing a directory found in *blacklist* are never explored.

    Args:
        root: Path to the directory to recursively search.
        target_name: Name of a file/dir that, if it exists, marks its
            parent for inclusion.
        blacklist: A set of directory names that must not appear in the
            returned paths.

    Returns:
        A list of directory paths.
    """
    matches = []
    for dirpath, dirnames, filenames in os.walk(root, topdown=True):
        if target_name in filenames or target_name in dirnames:
            matches.append(dirpath)
        # Prune blacklisted directories in place so os.walk (topdown)
        # never descends into them.
        dirnames[:] = [d for d in dirnames if d not in blacklist]
    return matches
def parse_pmid(medline):
    """Parse PMID from article.

    Parameters
    ----------
    medline: Element
        The lxml node pointing to a medline document

    Returns
    -------
    pmid: str
        String version of the PubMed ID; empty string if absent.
    """
    # Look the node up once instead of calling find() twice.
    node = medline.find('PMID')
    return node.text if node is not None else ''
import os
import re
def parse_requirements(file_name):
    """Parse a pip-style requirements file from the package ``config`` dir.

    Taken from http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy

    Args:
        file_name: Name of the requirements file inside the ``config``
            directory next to this module.

    Returns:
        A list of requirement strings; ``git+`` URLs are rewritten to
        ``package==version`` (or the bare package name when no version
        tag can be extracted).
    """
    requirements = []
    path = os.path.join(os.path.dirname(__file__), "config", file_name)
    # Use a context manager so the file handle is closed (the original
    # leaked it).
    with open(path, "r") as req_file:
        for line in req_file:
            line = line.strip()
            # Skip comments and blank lines.
            if re.match(r"(^#)|(^$)", line):
                continue
            if line.startswith("git+"):
                # URLs look like: git+ssh://git@host/repo@vX.Y#egg=package
                parts = line.split('#')
                package = parts.pop().split('=').pop()
                parts = '#'.join(parts).split('@')
                if len(parts) == 3:
                    version = parts.pop()
                    if version.find('v') > -1:
                        version = version.replace('v', '')
                    line = "%s==%s" % (package, version)
                else:
                    line = package
            requirements.append(line)
    return requirements
import numpy
def _to_categorical(y, num_classes=None, dtype='float32', compress=False):
"""Converts a class array (integers) to binary class array.
E.g. for use with categorical_crossentropy.
Args:
y (array-like): class array to be converted
(integers from 0 to num_classes).
num_classes (int, optional): total number of classes. If not given, the
number of classes is the maximum value found in `y` plus 1, or the
actual number of unique values if `compress` is True.
dtype (str, optional): The data type to be returned,
(e.g. `float32`, `float64`, `int32`...)
compress (bool, default False): Whether to compress the values in the input
array, so that only classes with at least one observation appear
in the result. This is useful if the input array is not encoded with
a sequential set of integer values.
Returns:
ndarray: A binary matrix representation of the input. The classes axis
is placed last, such that the shape of this array is the same as that
of `y` plus one dimension.
"""
y = numpy.array(y, dtype='int')
if compress:
y = numpy.unique(y, return_inverse=True)[1]
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = numpy.max(y) + 1
n = y.shape[0]
categorical = numpy.zeros((n, num_classes), dtype=dtype)
categorical[numpy.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = numpy.reshape(categorical, output_shape)
return categorical | b00ab43b104b529eaa1ca80c97d3b6549586252e | 51,508 |
def is_signif(pvalue, p=0.05):
    """Tell if condition with classifier is significant.

    Returns a boolean: True if the condition is significant at the given p.
    """
    return pvalue <= p
from typing import List
def split_strings(subtree: dict) -> List[str]:
    """
    Produce the list of strings from the dictionary with concatenated chars \
    and lengths. Opposite to :func:`merge_strings()`.

    :param subtree: The dict with "strings" and "lengths".
    :return: :class:`list` of :class:`str`-s or :class:`bytes`.
    """
    blob = subtree["strings"][0]
    # By default the concatenated blob is UTF-8 text; "str": False keeps bytes.
    if subtree.get("str", True):
        blob = blob.decode("utf-8")
    pieces = []
    offset = 0
    for length in subtree["lengths"]:
        pieces.append(blob[offset:offset + length])
        offset += length
    return pieces
def parseConfStr(confStr):
    """Parse a line of configuration file in semicolon-separated-tuple format.

    Args:
        confStr (str): String of tuples, each separated by semicolon,
            e.g., "(h1,s1);(h2,s1)".

    Returns:
        list: A list of tuples (key, value pairs).
    """
    # Empty chunks (e.g. from a trailing ';') are ignored; parentheses are
    # stripped from each comma-separated token.
    return [
        tuple(token.strip("()") for token in chunk.strip().split(','))
        for chunk in confStr.split(';')
        if chunk
    ]
def generic_fuel_switch(
        enduse,
        sector,
        curr_yr,
        base_yr,
        fuel_switch,
        fuel_y
    ):
    """Generic fuel switch in an enduse (e.g. replacing a fraction
    of a fuel with another fueltype)

    Arguments
    ---------
    enduse : str
        Enduse
    sector : str
        Sector
    curr_yr : str
        Current year of simulation
    base_yr : str
        Base year of simulation (no switching happens in the base year)
    fuel_switch : str
        Fuel switches
    fuel_y : str
        Fuel of specific enduse and sector

    Returns
    -------
    fuel_y : array
        Annual fuel demand per fueltype
    """
    # In the base year no switch is applied; return the fuel unchanged.
    if base_yr == curr_yr:
        return fuel_y
    try:
        # Test if switch is defined for sector
        fuel_switch = fuel_switch[enduse][sector]
        switch_defined = True
    except KeyError:
        switch_defined = False
        # Test whether a switch for the whole enduse (across every sector)
        # is defined instead of a sector-specific one.
        try:
            keys_of_switch = list(fuel_switch[enduse].keys())
            for key_of_switch in keys_of_switch:
                # Test whether switches for sectors are provided
                if 'param_info' in fuel_switch[enduse][key_of_switch]: # single enduse-wide switch
                    fuel_switch = fuel_switch[enduse]
                    switch_defined = True
                else:
                    pass # Switch is not defined for this sector
        except KeyError:
            pass
    if switch_defined is True:
        # NOTE(review): the loop variable `fueltype_new_int` is overwritten
        # inside the body (first with the 'fueltype_new' id, then used to
        # re-index `fuel_switch`), so the later lookups no longer use the
        # original dictionary key -- this looks like a bug; confirm intent.
        for fueltype_new_int in fuel_switch.keys():
            if fuel_switch[fueltype_new_int][curr_yr] != 0:
                # Get fueltype to switch to (new)
                fueltype_new_int = int(fuel_switch[fueltype_new_int]['param_info']['fueltype_new'])
                fueltype_replace_int = int(fuel_switch[fueltype_new_int]['param_info']['fueltype_replace'])

                # Value of current year
                fuel_share_switched_cy = fuel_switch[fueltype_new_int][curr_yr]

                # Substract fuel
                # NOTE(review): the moved amount is fuel * (1 - share), i.e.
                # a share of 1.0 switches nothing -- presumably this should
                # be fuel * share; verify against the switch definition.
                fuel_minus = fuel_y[fueltype_replace_int] * (1 - fuel_share_switched_cy)
                fuel_y[fueltype_replace_int] -= fuel_minus

                # Add fuel
                fuel_y[fueltype_new_int] += fuel_minus
    else:
        pass # no fuel switch defined
    return fuel_y
def default_rflop_plotting_colours(rows):
    """
    Defines a default colour order used in plotting Rflop components
    :returns: List of HTML colour codes as string
    """
    # Stolen from D3's category20; the palette is doubled so up to 40 rows
    # can be coloured before the list is exhausted.
    category20 = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
                  '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
                  '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
                  '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
    doubled = category20 * 2
    return doubled[:len(rows)]
import random
def ran_num(length=1):
    """
    Random string number generator.

    This function generates a string with a custom length
    that contains random digits and characters from a-f.

    Parameters:
        length: Number of places the number should have.

    Returns:
        A string with random hexadecimal digits and characters.
    """
    # The original if/elif ladder compared the *module* `random` to 14
    # (`elif random == 14`), so 'e' was never produced and a draw of 14
    # silently appended nothing, yielding strings shorter than `length`.
    # Drawing directly from the hexadecimal alphabet fixes both problems.
    hex_digits = '0123456789abcdef'
    return ''.join(random.choice(hex_digits) for _ in range(length))
def phasor_reference_correction(r, r_ref):
    """Compute the corrected phasor as the ratio of phasor to reference.

    Parameters
    ----------
    r, r_ref : array_like
        Phasor and reference phasor.

    Returns
    -------
    The (element-wise) quotient ``r / r_ref``.
    """
    return r / r_ref
def _should_allow_unhandled(class_reference, key_name):
"""Check if a property is allowed to be unhandled."""
if not hasattr(class_reference, "__deserialize_allow_unhandled_map__"):
return False
return class_reference.__deserialize_allow_unhandled_map__.get(key_name, False) | 3475c0eef50a31ad88bcfaaa2d0a6335cffbba24 | 51,517 |
from typing import Tuple
from typing import Union
from typing import Dict
from typing import List
from typing import Set
def build_reverse_graph(graph: dict) -> Tuple[Union[list, set], Dict[int, List[int]]]:
    """Take the data from a Tracks layer graph and reverse it.

    Parameters
    ----------
    graph : dict
        A dictionary encoding the graph, taken from the napari.Tracks layer.

    Returns
    -------
    roots : list
        A sorted list of integers representing the root node IDs.
    reverse_graph : dict
        A reversed graph mapping each parent node to its children.
    """
    reverse_graph: Dict[int, List[int]] = {}
    roots: Set[int] = set()
    for child, parents in graph.items():
        for parent in parents:
            # Accumulate children under each parent.
            reverse_graph.setdefault(parent, []).append(child)
            # A parent that never appears as a child is a root.
            if parent not in graph:
                roots.add(parent)
    return sorted(roots), reverse_graph
def promoted_services(services, production_gateway):
    """Promote each service's latest config version and reload the gateway."""
    for service in services:
        latest = service.proxy.list().configs.latest()['version']
        service.proxy.list().promote(version=latest)
    production_gateway.reload()
    return services
def other_alignments_with_same_score(all_alignments, cur_alignment_idx,
                                     cur_alignment_score):
    """Returns True if there are other alignments with identical scores.

    Args:
      all_alignments: PairwiseAlignment iterable object from BioPython.Align
      cur_alignment_idx: The index of the desired alignment
      cur_alignment_score: The score of the desired alignment

    Returns:
      True if any alignments other than the one specified have scores that
      are identical to the specified alignment.
    """
    if len(all_alignments) <= 1:
        return False
    for idx, alignment in enumerate(all_alignments):
        # Alignments are score-ordered; once a lower score appears (after
        # the first entry) no further matches are possible.
        if idx > 0 and alignment.score < cur_alignment_score:
            break
        if idx == cur_alignment_idx:
            continue
        if alignment.score == cur_alignment_score:
            return True
    return False
import os
def fn_check_full(fn):
    """Check for file existence and readability.

    Avoids a race condition (the file may disappear between the isfile test
    and the open), but slower than os.path.exists.

    Parameters
    ----------
    fn : str
        Input filename string.

    Returns
    -------
    status : bool
        True if the file exists and can be opened, False otherwise.
    """
    if not os.path.isfile(fn):
        return False
    try:
        # The original called open() without ever closing the handle; a
        # context manager guarantees the descriptor is released.
        with open(fn):
            pass
    except IOError:
        return False
    return True
import unicodedata
import re
def sanitize_filename(filename):
    """
    Adapted from Django's slugify functions.

    In contrast to Django, dots are allowed and case is preserved.

    :param filename: The filename (str or bytes).
    """
    # bytes input is decoded; str has no .decode(), so this is a no-op then.
    try:
        filename = filename.decode()
    except AttributeError:
        pass
    normalized = unicodedata.normalize('NFKD', filename)
    ascii_only = normalized.encode('ascii', 'ignore').decode('ascii')
    # Drop everything but word chars, dots, whitespace and hyphens, then
    # collapse hyphen/whitespace runs into single hyphens.
    cleaned = re.sub(r'[^\w\.\s-]', '', ascii_only).strip()
    return re.sub(r'[-\s]+', '-', cleaned)
def markdown_style(value, style_dict):
    """Update the banner style mapping based on the theme toggle.

    Args:
        value: Truthy selects the "dark" theme, falsy the "light" theme.
        style_dict: Mutable style mapping; mutated in place.

    Returns:
        The same ``style_dict`` with ``color`` set to white.
    """
    # NOTE(review): `theme` is computed but never read, and the colour is
    # set to white unconditionally -- presumably the colour was meant to
    # depend on the selected theme.  Confirm intent before changing.
    if value:
        theme = "dark"
    else:
        theme = "light"
    style_dict["color"] = "#FFFFFF"
    return style_dict
def strip_plus1(number: str) -> str:
    """
    Strip a leading "+1-" if present. NANP numbers on the platform seem to
    be stored as 10D only.

    Falsy inputs (empty string, None) are returned unchanged.

    :param number:
    :return:
    """
    # The original used the `cond and a or b` idiom, which mis-returns the
    # unstripped value whenever the stripped result is falsy (e.g. "+1-").
    if number and number.startswith('+1-'):
        return number[3:]
    return number
def slot_obj_dict(o):
    """
    Builds dict for o with __slots__ defined; unset slots map to None.
    :param o:
    :return:
    """
    return {name: getattr(o, name, None) for name in o.__slots__}
def int_32bit(num):
    """Return `num` as a list of bytes of its 32-bit little-endian
    two's-complement representation."""
    raw = num.to_bytes(4, "little", signed=True)
    return list(raw)
def _list_union_inter_diff(*lists):
"""Return 3 lists: intersection, union and differences of lists
"""
union = set(lists[0])
inter = set(lists[0])
for l in lists[1:]:
s = set(l)
union = union | s
inter = inter & s
diff = union - inter
return list(union), list(inter), list(diff) | 09277948cd19d15bff6918b2285fa81d8c8b3a81 | 51,533 |
def check_key(key: str, messages: list) -> bool:
    """Validate that a criterion has a metadata variable and a numeric
    separated by a comma (",").

    Parameters
    ----------
    key : str
        Current criterion.
    messages : list
        Error messages accumulator; appended to on failure.

    Returns
    -------
    boolean : bool
        True when the key is malformed (i.e. should be discarded).
    """
    # Exactly two comma-separated fields are required.
    if ',' not in key or len(key.split(',')) != 2:
        messages.append('Must have a metadata variable and a numeric separated by a comma (",")')
        return True
    return False
import os
def is_max_size(filename, max_size):
    """Return True if the file exists and is strictly larger than max_size
    bytes; a missing file is never over the limit."""
    if not os.path.isfile(filename):
        return False
    return os.stat(filename).st_size > max_size
def attr_probs(**probs):
    """Collect the given keyword arguments into a dictionary and return it."""
    return probs
import warnings
import tempfile
import sys
import os
def getDefaultShortcuts():
    """Return the dictionary of default path shortcuts."""
    # Legacy workaround for a spurious fcntl warning on win32 + Python 2.3.
    if sys.platform == "win32" and sys.version.startswith("2.3."):
        warnings.filterwarnings("ignore", module="fcntl", lineno=7)
    shortcuts = {
        '.': os.curdir,
        '..': os.pardir,
        '...': os.path.join(os.pardir, os.pardir),
        'tmp': tempfile.gettempdir(),
    }
    # '~' is only available when HOME is set in the environment.
    home = os.environ.get('HOME')
    if home is not None:
        shortcuts['~'] = home
    return shortcuts
import os
def idle_blockchain_policy(blockchain_alice, blockchain_bob):
    """
    Creates a Policy, in a manner typical of how Alice might do it, with a
    unique (random) label.
    """
    label = b'label://' + os.urandom(32)
    return blockchain_alice.create_policy(blockchain_bob, label=label, m=2, n=3)
import numpy as np
def doniach_sunjic(x, scale=1.0, E_0=0, gamma=1.0, alpha=0.0):
    """
    Doniach Sunjic asymmetric peak function; tail to higher binding energies.

    param x: list values to evaluate this function
    param scale: multiply the function with this factor
    param E_0: position of the peak
    param gamma: 'lifetime' broadening
    param alpha: 'asymmetry' parameter

    See
    Doniach S. and Sunjic M., J. Phys. 4C31, 285 (1970)
    or http://www.casaxps.com/help_manual/line_shapes.htm
    """
    shifted = (E_0 - x) / gamma
    one_minus_alpha = 1.0 - alpha
    numerator = np.cos(np.pi * alpha + one_minus_alpha * np.arctan(shifted))
    denominator = (1 + shifted ** 2) ** (one_minus_alpha / 2)
    return np.array(scale * numerator / denominator)
import os
def get_long_description():
    """Read and return the package README for use as the long description."""
    readme_path = os.path.join(os.path.dirname(__file__), 'README.md')
    with open(readme_path, 'r') as readme:
        return readme.read()
import re
def squash_non_word_characters(_s: str) -> str:
    """Collapse each run of consecutive non-word characters into a single
    character (the last one of the run)."""
    return re.sub(r"""(\W)+""", r"\1", _s)
def language_path_context(_request):
    """Remove the language prefix from the request path.

    Returns a dict mapping ``no_lang_path`` to the path with its first
    segment (the language code) stripped, or an empty dict when the path
    has no segment beyond the language code.
    """
    path: str = _request.path
    try:
        return {"no_lang_path": path.split("/", 2)[2]}
    except IndexError:
        # The original used a bare `except:` which hid all errors; only the
        # "path shorter than /<lang>/<rest>" case is expected here.
        return {}
def time_serializer(a):
    """
    Round a timestamp-like value to an integer, since SQLite stores dates
    and times as integers.
    """
    seconds = float(a)
    return round(seconds)
def _makeElementsOneBased(elements_list):
"""
Take a list of a list of element node indexes and increment the
node index by one.
"""
updated_elements = []
for el in elements_list:
updated_elements.append([n + 1 for n in el])
return updated_elements | cd5a13f6e4ff1de27c0fbcda681808fa2306dc17 | 51,547 |
def get_signature(url):
    """Return a TikTok request signature for *url*.

    Stub implementation: always returns the empty string.  For a real
    signature service, visit
    https://rapidapi.com/409232112/api/tiktok-signature1
    """
    return ""
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.