content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def has_single_uses_stmt_only(stmt):
    """Return True when *stmt* has exactly one 'uses' substatement and
    defines no data properties of its own.

    A statement qualifies only if (a) searching its substatements for
    'uses' yields a single hit and (b) none of its substatements is a
    local data definition (leaf, leaf-list, list, container, choice,
    augment).
    """
    # Exactly one 'uses' substatement is required.
    if len(stmt.search('uses')) != 1:
        return False
    # Any local data-definition substatement disqualifies the statement.
    data_keywords = ['leaf', 'leaf-list', 'list', 'container', 'choice', 'augment']
    return not any(sub.keyword in data_keywords for sub in stmt.substmts)
def display_menu_prompt():
    """Prompt the user for a menu choice and return the raw input string."""
    prompt = "Enter the ordinal of the setting to update (enter q to exit): "
    return input(prompt)
def featgen(a_tree):
    """Generate features for the given BitPar tree.
    Args:
        a_tree (dsegmenter.bparseg.constituency_tree.CTree): BitPar tree
            for which we should generate features
    Returns:
        dict: mapping from feature-name strings to feature values
            (mostly binary 1 flags, plus the numeric treeIdx/height)
    """
    assert a_tree.leaves(), "Tree does not contain leaves."
    # add unigram features
    ret = {u"tok_{:s}".format(token.lower()): 1 for token in a_tree.leaves()}
    # add very first and very last tokens of the tree
    ret[u"tokFirst_{:s}".format(a_tree.leaves()[0].lower())] = 1
    ret[u"tokLast_{:s}".format(a_tree.leaves()[-1].lower())] = 1
    # add first and last constituent labels over all subtrees
    sublabels = [st.label() for st in a_tree.subtrees()]
    if sublabels:
        ret[u"lblFirst_{:s}".format(sublabels[0].lower())] = 1
        ret[u"lblLast_{:s}".format(sublabels[-1].lower())] = 1
    # add tree label
    ret[u"lbl_{:s}".format(a_tree.label())] = 1
    # add label of the parent tree
    ret[u"prntLbl_{:s}".format(a_tree.prnt_label())] = 1
    # add first and last word of the parent tree
    if a_tree.parent():
        prnt_tree = a_tree.parent()
        t_idx = a_tree.parent_index()
        # numeric feature: position of this tree among its siblings
        ret[u"treeIdx"] = t_idx
        if t_idx > 0:
            # label and boundary tokens of the preceding sibling
            prev_tree = prnt_tree[t_idx - 1]
            ret[u"prevLbl_{:s}".format(prev_tree.label())] = 1
            ret[u"prevTokFrst_{:s}".format(prev_tree.leaves()[0].lower())] = 1
            ret[u"prevTokLst_{:s}".format(prev_tree.leaves()[-1].lower())] = 1
        if t_idx + 1 < len(prnt_tree):
            # label and boundary tokens of the following sibling.
            # NOTE(review): the "pxt" prefix below looks like a typo for
            # "nxt", but renaming would change feature names that trained
            # models may depend on -- confirm before fixing.
            nxt_tree = prnt_tree[t_idx + 1]
            ret[u"nxtLbl_{:s}".format(nxt_tree.label())] = 1
            ret[u"pxtTokFrst_{:s}".format(nxt_tree.leaves()[0].lower())] = 1
            ret[u"pxtTokLst_{:s}".format(nxt_tree.leaves()[-1].lower())] = 1
    # add tree height (numeric feature)
    ret["height"] = a_tree.height()
    return ret | 882959c96315c2606bb56c25bb9bd650b4c6e1fa | 45,175 |
def str2_bool(v):
    """Coerce a string (or bool) to a bool.

    Bools are returned unchanged. A string yields True when it equals,
    case-insensitively, one of "yes", "true", "t" or "1"; any other
    string yields False.
    """
    # isinstance is the idiomatic type check (bool cannot be subclassed,
    # so behaviour is identical to `type(v) is bool`).
    if isinstance(v, bool):
        return v
    return v.lower() in ("yes", "true", "t", "1")
def process_issue_results(data):
"""Process the data returned by the issues GraphQL request.
Args:
data: The data returned
Returns:
issues: A list of dicts; each dict is the data for some of
the results
"""
edges = data.get("data").get("repository").get("issues").get("edges")
issues = []
for e in edges:
issues.append(e["node"])
return issues | 42a9aae67df7dc824c495983e63b88163c269c36 | 45,177 |
def cal_base_aggregates(lst, input_df):
    """Apply a named basic aggregation to *input_df* and return it as float.

    Supported names: sum(), min(), max(), mean(), median(), variance(),
    any(), count().  (The original docstring omitted "variance".)

    Parameters
    ----------
    lst : str
        Name of the aggregation to apply.
    input_df : object
        Anything exposing the corresponding aggregation methods
        (typically a pandas Series/DataFrame column).

    Returns
    -------
    float
        The aggregate value, or 0.0 for an unrecognised name (preserves
        the original fall-through behaviour).
    """
    # Dispatch table replaces the original if-chain; lambdas defer the
    # call so only the requested aggregation is evaluated.
    operations = {
        "sum": lambda df: df.sum(),
        "min": lambda df: df.min(),
        "max": lambda df: df.max(),
        "mean": lambda df: df.mean(),
        "median": lambda df: df.median(),
        "variance": lambda df: df.var(),
        "any": lambda df: df.any(),
        "count": lambda df: df.count(),
    }
    operation = operations.get(lst)
    out = operation(input_df) if operation is not None else 0
    return float(out)
def SizeBenefitAccDth(t):
    """Accidental death benefit per policy.

    No accidental-death benefit is modelled: always zero, regardless of
    the period *t*.
    """
    return 0
def exp_moving_avg(cur, prev=None, alpha=.05, steps=None):
    """Exponential moving average of *cur* given the previous average
    *prev*; optionally bias-corrected by ``(1 - alpha**steps)`` when
    *steps* is provided and non-zero."""
    if prev is None:
        # No history yet: the average is simply the current value.
        return cur
    blended = alpha * cur + prev * (1 - alpha)
    if steps:
        # Early-step bias correction.
        return blended / (1 - alpha ** steps)
    return blended
def annot_type(ann):
    """
    Classify annotation *ann* by which of the known annotation kinds
    ('bbox', 'line', 'keypoints') it contains, as a sorted tuple.
    """
    known_kinds = {'bbox', 'line', 'keypoints'}
    present = set(ann) & known_kinds
    return tuple(sorted(present))
def clean_parquet_data(df):
    """
    Clean a raw tweet DataFrame: keep English-language tweets no longer
    than 330 characters, returning a freshly-indexed frame with the
    original columns (no helper columns left behind).
    """
    # Restrict to English-only tweets.
    english = df[df['lang'] == 'en'].reset_index(drop=True)
    # Measure tweet lengths in a temporary helper column.
    english['tweet_length'] = english['full_text'].apply(len)
    # Drop abnormally long tweets, re-index, then discard the helper.
    trimmed = english[english['tweet_length'] <= 330].reset_index(drop=True)
    return trimmed.drop(columns='tweet_length')
import io
def remove_multiple_newlines_in_txt(txt: str) -> str:
    """ This function will remove multiple, sequential newlines in text (str) data.

    Runs of blank lines are collapsed to a single blank line: the first
    blank line after a non-empty line is kept, and any immediately
    following blank lines are dropped.

    :param txt: a str containing the text to be cleaned.
    :return: a str containing the text with multiple, sequential newlines removed.
    """
    clean_txt = ''
    # convert the text string into a buffer so that we can read lines.
    txt_buffer = io.StringIO(txt)
    last_line = True  # initialize to True so that on our first pass through the loop we'll get the first line.
    next_line = txt_buffer.readline()
    while next_line:
        stripped_next_line = next_line.strip()
        # was our previous line also a new line?
        if last_line:  # strings in Python are "falsey" so '' will not pass.
            # no, was not a newline... add the current line to our cleaned text.
            clean_txt += next_line
        else:
            # yes, our previous line was a newline... is our current?
            if stripped_next_line:
                # must have content... write it out.
                clean_txt += next_line
        # set last_line to our current line (stripped version) and then grab the next line.
        last_line = stripped_next_line
        next_line = txt_buffer.readline()
    return clean_txt | 9b1be808c4253b0f2b58b1985b10417a65b7cdeb | 45,184 |
def name_to_shortname(name):
    """Build a short name from the first two letters of the first name and
    the first two letters of the last name (lowercased); return '' when
    *name* contains no space."""
    if ' ' not in name:
        return ''
    parts = name.lower().split(' ')
    return parts[0][:2] + parts[-1][:2]
def get_adts_hdr(data_size):
    """
    Build a 7-byte ADTS header for an AAC payload of *data_size* bytes.

    Bit-layout template (letters name header fields, second row is the
    concrete value emitted):
    AAAAAAAA AAAABCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP
    11111111 11110000 0110000H HH0000MM MMMMMMMM MMM11111 11111100

    NOTE(review): the template row above shows the second byte as
    1111 0000, but the code below emits 1111 0001 -- confirm which is
    intended before changing either.
    """
    hdr_size = 7
    # "{}" slots: H HH = channel configuration; M...M = 13-bit frame
    # length, which counts the header itself (hdr_size + data_size).
    hdr_bin = "1111 1111 1111 0001 0110 000{}0000{}1 1111 1111 1100".format(
        "010", # as 2ch or "001" as 1ch
        bin(hdr_size + data_size)[2:].rjust(13,"0")).replace(" ","")
    return int(hdr_bin,2).to_bytes(hdr_size,"big") | 906663313e24c53b5723583e6d2f6f6a474876d6 | 45,186 |
import subprocess
def download_video(vid, outfile, container_format):
    """
    Download YouTube video *vid* to *outfile* using the youtube-dl CLI.

    :param vid: YouTube video id (the ``v=`` query parameter)
    :param outfile: output filename template passed to ``youtube-dl -o``
    :param container_format: "mp4" forces the mp4 format; any other value
        selects youtube-dl's "bestvideo+bestaudio/best"
    Returns boolean indicating success or failure
    """
    url = f"https://youtube.com/watch?v={vid}"
    download_format = "bestvideo+bestaudio/best"
    if container_format == "mp4":
        download_format = "mp4"
    # youtube-dl exits with 0 on success; surface that as a bool.
    ret = subprocess.call([
        "youtube-dl",
        "-o", outfile, # Output filename
        "-f", download_format, # Output container format
        url, # Youtube URL
    ])
    return ret == 0 | 5155b239d25f14276bc6ffb3911d4e7702c8d784 | 45,188 |
def raw_remove_comments(input):
    """
    takes a raw string

    Strips '%'-delimited comments: characters from a '%' up to the next
    newline (or a closing '%') are dropped, everything else is copied
    through. The *empty* flag is set False on the first '%' seen and is
    never reset, so in practice any '%' inside a comment closes it; the
    else-branch below appears to be vestigial -- confirm before removing.
    """
    comment = False
    out = ""
    empty = True
    for elem in input:
        if comment == False:
            if elem == "%":
                # comment opens; this and following chars are dropped
                comment = True
                empty = False
            else:
                out += elem
        else:
            if elem == "%":
                if empty == False:
                    # closing '%' ends the comment
                    comment = False
                else:
                    empty = False
            if elem == "\n":
                # a newline always ends a comment (the newline itself is dropped)
                comment = False
    return out | 91c836fb2a95b013cbefc6c59bc73c2fd9004f6c | 45,190 |
import os
def _get_dir_basename(dirname):
"""Gets the base name of the directory."""
return os.path.basename(os.path.abspath(dirname)) | a98b74ddbfc5ef316d21603f2008da89b620ad0b | 45,191 |
import argparse
def get_parser():
    """
    Build the argument parser of the command-line version.

    :return: configured ``argparse.ArgumentParser``
    """
    parser = argparse.ArgumentParser(
        description='Search for a tag and value in multiple API pages')
    parser.add_argument('--command-line', dest='command_line',
                        action='store_true', help='Shows command line dialog')
    parser.add_argument('--url', help='API URL for the first page')
    parser.add_argument('--tag', help='tag to search for')
    parser.add_argument('--value', default='', help='value to search for')
    return parser
import subprocess
def julia_version(exe):
    """
    Return the version of the julia executable `exe` as a string.

    Parameters:
    exe - the path to a possible Julia executable.

    Raises:
    Exception - when the program's output does not look like
        "julia version X.Y.Z".
    """
    words = subprocess.run(
        [exe, '-O0', '--startup-file=no', '--history-file=no', '--version'], check=True, capture_output=True, encoding='utf8'
    ).stdout.strip().split()
    # BUG FIX: the original used `and`, so the check could never trigger
    # for well-formed-length output and could also index out of range.
    # Any single deviation from "julia version X.Y.Z" must raise.
    if len(words) != 3 or words[0] != "julia" or words[1] != "version":
        raise Exception(f"{exe} is not a julia executable")
    version = words[2]
    return version
def check_row(row):
    """
    :param row: str, the user's input
    :return: bool, True when *row* is exactly four letters separated by
        single whitespace characters ("a b c d" shape)
    """
    if len(row) != 7:
        return False
    # Odd positions (1, 3, 5) must be whitespace separators.
    if not all(row[pos].isspace() for pos in (1, 3, 5)):
        return False
    # Even positions (0, 2, 4, 6) must be alphabetic.
    return all(row[pos].isalpha() for pos in (0, 2, 4, 6))
def _fake_dropout_factory():
"""Returns an dropout-like mapping that leaves the input unchanged."""
return lambda x, deterministic: x | 05eb6aaa0be353f2e4a788caf7a832ef9e26c351 | 45,195 |
import importlib
def _import_class(cls: str):
"""Take a string FQP and return the imported class or identifier
clas is of the form "package.module.klass".
"""
mod, name = cls.rsplit(".", maxsplit=1)
module = importlib.import_module(mod)
return getattr(module, name) | b2b3cddf49b88f99b35c6720d12d96dfa007441c | 45,196 |
def naive_matching(target, original):
    """Naive substring search.

    Parameters:
        target: pattern string to look for, e.g. "efg".
        original: text to search within, e.g. "abcdefghijkl".

    Returns:
        Index of the first occurrence of *target* in *original*,
        or -1 when it does not occur. An empty *target* matches at 0.
    """
    len_t = len(target)
    len_o = len(original)
    i, j = 0, 0
    while i < len_t and j < len_o:
        if target[i] == original[j]:
            # Characters match: advance both cursors.
            # (Removed a leftover debug print from the original.)
            i, j = i + 1, j + 1
        else:
            # Mismatch: restart the pattern at the next text position.
            i, j = 0, j - i + 1
    if i == len_t:
        return j - i
    return -1
import os
def replace_name_part(paths, replace_this, with_this):
    """Replace the replace_this string with the with_this string in the file names from the paths list provided.

    Renames files on disk via ``os.rename``. A rename is skipped when the
    source file does not exist or the destination already exists; those
    paths are collected (prefixed "old_" / "new_" respectively) and
    returned instead.

    :param paths: iterable of file paths to rename
    :param replace_this: substring to replace in each path
    :param with_this: replacement substring
    :return: tuple ``(failed_paths, new_names)`` -- the skipped paths and
        the successfully renamed destination paths
    """
    # initialize a counter for the files
    file_counter = 0
    # initialize a list to store the failed paths
    failed_paths = []
    # initialize a list for replaced names to output
    new_names = []
    # for all the paths
    for file_path in paths:
        # check if the old path exists
        if not os.path.isfile(file_path):
            failed_paths.append('_'.join(('old', file_path)))
            continue
        # create the new path
        new_path = file_path.replace(replace_this, with_this)
        # check if the new path exists
        if os.path.isfile(new_path):
            failed_paths.append('_'.join(('new', new_path)))
            continue
        # change the file_name
        print(file_path)
        print(new_path)
        os.rename(file_path, new_path)
        # add it to the list of renamed paths
        new_names.append(new_path)
        # update the counter
        file_counter += 1
    print("_".join(("Total original files: ", str(len(paths)), "Successfully renamed files: ", str(file_counter))))
    return failed_paths, new_names | b703374c6200aaa9575f9d4ae3b2b742a8c66376 | 45,199 |
def read_files(filename):
    """ Reads a file line by line and commits it to a variable.

    Args:
        filename: the location of the file to be read.

    Returns:
        list[str]: every line of the file, in order, with trailing
        newline characters preserved. (The original docstring claimed a
        tuple; the function has always returned a list.)
    """
    with open(filename, mode="r", encoding="utf-8") as f:
        # list(f) materialises the line iterator -- same result as the
        # original identity comprehension, stated directly.
        return list(f)
def message_to_pretty_string(incoming_message):
    """
    Render *incoming_message* as a single pipe-delimited line.

    Args:
        incoming_message: dict
    Returns:
        str -- the long layout including errno/error fields, or a short
        layout when any of those keys is missing.
    """
    long_layout = ("{result} | {action} | {target} | [errno:{error_code}] | "
                   "{error_message} | {linenum} | data: {data} | {timestamp}")
    short_layout = "{result} | {action} | {target} | data: {data} | {timestamp}"
    try:
        return long_layout.format(**incoming_message)
    except KeyError:
        # Error-related keys absent: fall back to the short layout.
        return short_layout.format(**incoming_message)
def uniq(elems):
    """Generate a set of unique objects.

    Each element is assigned an id via ``elem.set_id``: equal elements
    share the id of the first one seen. Returns a dict mapping each
    fresh id to its representative (first-seen) element.
    """
    output = {}
    next_id = 0
    for elem in elems:
        # Look for an already-registered element equal to this one.
        match_id = None
        for known_id, representative in output.items():
            if elem == representative:
                match_id = known_id
                break
        if match_id is None:
            # New value: register it under a fresh id.
            elem.set_id(next_id)
            output[next_id] = elem
            next_id += 1
        else:
            # Duplicate: share the representative's id (creates the relationship).
            elem.set_id(match_id)
    return output
from urllib import error, request
def fetch(url):
    """
    This simply downloads the given url or fetch landing page from the given url
    :param url: website url
    :return: length of the landing page from the given url, formatted as
        "<url>: length <n>", or "<url>: <error>" on an HTTP error
    :rtype: str
    """
    try:
        data = request.urlopen(url).read()
        return "{}: length {}".format(url, len(data))
    except error.HTTPError as e:
        # Report HTTP failures as text instead of propagating the exception.
        return "{}: {}".format(url, e) | c80c20b91b02ce837a97e6c46c8deaa305417a1e | 45,211 |
def make_readable(seconds):
    """
    Format a non-negative integer number of *seconds* as HH:MM:SS.

    HH = hours, padded to 2 digits, range: 00 - 99
    MM = minutes, padded to 2 digits, range: 00 - 59
    SS = seconds, padded to 2 digits, range: 00 - 59
    The maximum time never exceeds 359999 (99:59:59).

    :param seconds: non-negative integer.
    :return: the time in a human-readable format (HH:MM:SS)
    """
    total_minutes, secs = divmod(seconds, 60)
    hours, mins = divmod(total_minutes, 60)
    return "{:02d}:{:02d}:{:02d}".format(hours, mins, secs)
def get_current_domain(r):
    """
    From original source, structures website domain name: returns
    "<scheme>://<host>" for request *r*, choosing https when the request
    is secure.
    """
    scheme = 'https' if r.is_secure() else 'http'
    host = r.get_host()
    return '{}://{}'.format(scheme, host)
import time
def millitime(*a, **kw):
    """The difference, measured in milliseconds, between the current time
    and midnight, January 1, 1970 UTC.

    Extra positional/keyword arguments are forwarded to ``time.time``.

    >>> e = millitime()
    >>> type(e)
    <class 'int'>
    """
    # NOTE: the doctest above previously showed Python 2's "<type 'int'>",
    # which fails under any Python 3 doctest run.
    ts = time.time(*a, **kw)
    return int(ts * 1000)
def create_schema():
    """Create schema for the given index that is queried. Useful if there
    are no results returned.

    :return: placeholder schema dict for the index
    """
    placeholder = {"schema": "to_be_created"}
    return placeholder
def cli(ctx, active=False):
    """Update the job lock status by setting ``active`` to either ``True`` or ``False``. If ``True``, all job dispatching will be blocked.

    :param ctx: framework context carrying the API client as ``ctx.gi``
    :param active: whether the job lock should be engaged
    Output:
        whatever ``ctx.gi.jobs.update_job_lock`` returns
    """
    # Delegate to the jobs client hanging off the context object.
    return ctx.gi.jobs.update_job_lock(active=active) | e5d06422907ec0ff3b90d82ca4a06769acfcac69 | 45,217 |
def port(request):
    """
    Destination port override for tests.

    Presumably a pytest fixture: reads the value of the ``--port``
    command-line option from the test session config -- confirm the
    missing ``@pytest.fixture`` decorator is applied at the call site.
    """
    return request.config.getoption('--port') | fab1a2a9b34dfb8e94ae42687c498dba358e4fdc | 45,218 |
def template_used(response, template_name):
    """
    Assert that *template_name* was rendered for the given response.

    :response: response from django test client.
    :template_name: string with path to template.
    :rtype: bool (always True; raises AssertionError otherwise)
    """
    rendered = [tpl.name for tpl in response.templates if tpl.name]
    assert template_name in rendered, rendered
    return True
def check_var_constraints(var_constraints, rule_walks):
    """
    Check variable constraints of the rule.

    Within each constraint group, all listed ``entity_<i>`` columns must
    hold equal values for a walk to be kept.

    Parameters:
        var_constraints (list): variable constraints from the rule
        rule_walks (pd.DataFrame): all walks matching the rule

    Returns:
        rule_walks (pd.DataFrame): walks also satisfying the constraints
    """
    for group in var_constraints:
        # Compare each adjacent pair of constrained variables.
        for left, right in zip(group, group[1:]):
            equal_mask = (rule_walks["entity_" + str(left)]
                          == rule_walks["entity_" + str(right)])
            rule_walks = rule_walks[equal_mask]
    return rule_walks
from typing import Iterator
from typing import Tuple
from typing import Any
def _duplicate_avoiding_dict(pairs: Iterator[Tuple[Any, Any]]):
"""
The default output_type of CollectionParser.delimited_pairwise. Returns a dict from key-value pairs while
ensuring there are no duplicate keys.
"""
ret = {}
for k, v in pairs:
if k in ret:
raise ValueError(f'duplicate key {k}')
ret[k] = v
return ret | 5be688e4e509997c4cf10403d7b7599b07cf83e5 | 45,222 |
import os
def _expvars(path):
"""Alias to os.path methods to expand vars and user in path.
Args:
path(str): Path
Returns:
str: Modified path
"""
return os.path.expanduser(os.path.expandvars(path)) | 515533263463a36b62437a7bbb1b9b23c784b858 | 45,226 |
from typing import Optional
import sys
import subprocess
def recommended_executable(program_name: str) -> Optional[str]:
    """Shows the program which would be executed based on path.

    Resolves *program_name* with the platform's locator command and
    returns the resulting path string, or None (after printing a
    message) when the locator exits non-zero.
    """
    # 'where' on Windows, 'which' everywhere else.
    if sys.platform == 'win32':
        program_locator = 'where'
    else:
        program_locator = 'which'
    try:
        return subprocess.check_output([program_locator, program_name]).decode().strip()
    except subprocess.CalledProcessError:
        print(f'Unable to find program to execute for: {program_name}')
        return None | 9bf1793c63f0813946f73636e06c7dea418555c2 | 45,227 |
def fix_alignment(src_tag_dict, standard_tag, lang):
    """Fix no alignment. Make the length of each sentence in source
    language the same as the target language, so performance can be
    evaluated easily: unaligned positions receive the 'O' tag.

    Note: *lang* is accepted for interface compatibility but is unused
    in the body.
    """
    final_tag = {}
    for sentence in src_tag_dict:
        aligned = src_tag_dict[sentence]
        # One tag per target token; fall back to 'O' when no alignment.
        final_tag[sentence] = [
            aligned.get(str(position), 'O')
            for position in range(len(standard_tag[sentence]))
        ]
    return final_tag
def both(f, g):
    """Return a commentary function that says what f says, then what g says.
    NOTE: the following game is not possible under the rules, it's just
    an example for the sake of the doctest
    >>> h0 = both(say_scores, announce_lead_changes())
    >>> h1 = h0(10, 0)
    Player 0 now has 10 and Player 1 now has 0
    Player 0 takes the lead by 10
    >>> h2 = h1(10, 8)
    Player 0 now has 10 and Player 1 now has 8
    >>> h3 = h2(10, 17)
    Player 0 now has 10 and Player 1 now has 17
    Player 1 takes the lead by 7
    """
    def say(score0, score1):
        # f and g each print their commentary (as a side effect of being
        # called) and return the next commentary function; combine those
        # two continuations into the next combined commentator.
        return both(f(score0, score1), g(score0, score1))
    return say | d355e3c2eb666702752a386825745a36a11675ef | 45,229 |
def smart_mul(x, y):
    """
    0- and 1-aware multiply, to prevent a computation graph from growing
    very large: short-circuits when either operand is a neutral element.
    """
    if x == 0 or y == 0:
        return 0
    if x == 1:
        return y
    if y == 1:
        return x
    return x * y
def remove_extra_spaces(inputstr):
    """Remove extra spaces in *inputstr* so that there are only single
    spaces, and strip leading/trailing whitespace.

    Parameters
    ----------
    inputstr : str

    Returns
    -------
    str
    """
    collapsed = inputstr
    # Repeatedly halve runs of spaces until none remain.
    while '  ' in collapsed:
        collapsed = collapsed.replace('  ', ' ')
    return collapsed.strip()
def flatten_tree(tree: dict, full: bool = False) -> dict:
    """
    Flatten an execution tree to make it easier to read.
    Task trees are often a single node nested several levels deep. These trees may be collapsed
    into a list. The execution order is the same, but it's easier for a human to read.
    Before:
    - foo
      - bar
        - xoo
    After:
    - xoo
    - bar
    - foo
    Before:
    - foo
      - xar
      - bar
        - xoo
    After:
    - foo
      - xar
      - xoo
      - bar
    :param tree: Tree to flatten
    :param full: Flatten tree into single list
    :return: flattened task list
    """
    def flatten_node(node: dict) -> list:
        """
        Flatten a single node. Always return a list for consistency, even when returning a single
        node.
        :param node: node dict (with a "dependencies" list) to flatten
        :return: flattened node
        """
        # Shallow copy so the caller's tree is not mutated.
        node = node.copy()
        num_dependencies = len(node["dependencies"])
        if num_dependencies == 0:
            # no dependencies: nothing to flatten, return as-is
            return [node]
        elif full or num_dependencies == 1:
            # flatten dependencies: flatten into single list that includes parent & child
            flattened = []
            for dependency in node["dependencies"]:
                flattened_child = flatten_node(dependency)
                flattened.extend(flattened_child)
            # clear dependencies, since they are now siblings
            # this node is added last since it runs after dependencies
            node["dependencies"] = []
            flattened.append(node)
            return flattened
        else:
            # multiple dependencies: do not flatten into parent.
            #
            # Any dependencies that are flattened need to be merged with other dependencies.
            # Dependency nodes should either be a single node, or a list of nodes
            dependencies = []
            for dependency in node["dependencies"]:
                flattened = flatten_node(dependency)
                dependencies.extend(flattened)
            node["dependencies"] = dependencies
            return [node]
    root = flatten_node(tree)
    if len(root) > 1:
        # if root's dependencies were flattened into it, then the returned list will have all of
        # those dependencies. Create a new root node to contain them all. This keeps the structure
        # consistent-ish for downstream consumers. They still have to special case this node, but
        # it should be a little simpler since all nodes are of a similar shape
        return {"name": None, "dependencies": root}
    else:
        # a single node, unpack it and return as root.
        return root[0] | 77b133b80d70256643e22d1b778c1bdecb00badf | 45,232 |
def f(z):
    """
    A complex function of the user's design (placeholder: identically
    zero for every input).
    """
    return 0
import pathlib
def mzcompose_location(mz_root: str) -> pathlib.Path:
    """Return the absolute path to mzcompose.

    MZ_ROOT is expected to be set via pyactivate.
    """
    return pathlib.Path(mz_root) / "bin" / "mzcompose"
from typing import Dict
from typing import Tuple
def apply(dfg: Dict[Tuple[str, str], int]) -> Dict[Tuple[str, str], float]:
    """
    Computes a causal graph based on a directly follows graph according to the heuristics miner

    Parameters
    ----------
    dfg: :class:`dict` directly follows relation, should be a dict of the form (activity,activity) -> num of occ.

    Returns
    -------
    :return: dictionary containing all causal relations as keys (with value inbetween -1 and 1 indicating that
    how strong it holds); the reverse pair carries the negated score
    """
    causal = {}
    for pair in dfg:
        if pair in causal:
            # Already filled in as the reverse of an earlier pair.
            continue
        source, target = pair
        reverse_count = dfg.get((target, source), 0)
        score = float((dfg[pair] - reverse_count) / (dfg[pair] + reverse_count + 1))
        causal[pair] = score
        causal[(target, source)] = -1 * score
    return causal
from datetime import datetime
def get_timestamp():
    """
    Get the time now, formatted as YYYY/MM/DD-HH:MM:SS.

    Returns:
        (str): The properly formatted date right now.
    """
    return datetime.now().strftime("%Y/%m/%d-%H:%M:%S")
def center(x, y, canvas_w, canvas_h, object_w, object_h):
    """ Returns a positional tuple that will centre a surface on another
    surface, given the canvas anchor (x, y) and both sizes. """
    centred_x = x + canvas_w // 2 - object_w // 2
    centred_y = y + canvas_h // 2 - object_h // 2
    return centred_x, centred_y
def is_valid(value, matcher, require):
    """Determine if a value is valid based on the provided matcher.

    :param str value:
        Value to validate (may be None).
    :param matcher:
        Compiled regular expression to use to validate the value.
    :param require:
        Whether or not the value is required.

    Returns a truthy result on success; note the success value may be a
    match object rather than the literal True.
    """
    present = value is not None
    if require:
        # Required: must be present AND match.
        return present and matcher.match(value)
    # Optional: absence is fine; presence must match.
    return (not present) or matcher.match(value)
def shape_to_strides(shape):
    """ Constructs row-major strides from shape (for objects with no
    special strides): the last axis has stride 1 and each earlier axis
    strides over the product of all later extents. """
    strides = []
    accumulated = 1
    for extent in reversed(shape):
        strides.insert(0, accumulated)
        accumulated *= extent
    return strides
def fahrenheit_vers_celsius(t: float) -> float:
    """Convert a temperature *t* from degrees Fahrenheit to the
    equivalent in degrees Celsius."""
    return (t - 32) * 5/9
def is_segmentable(partic_id):
    """
    Return True unless the participant's interview clip belongs to the
    manually identified set of troubled clips. Those clips were not
    segmentable due to excessive static, proximity to the virtual
    interviewer, volume levels, etc.
    """
    troubled = {
        'P300', 'P305', 'P306', 'P308', 'P315', 'P316', 'P343',
        'P354', 'P362', 'P375', 'P378', 'P381', 'P382', 'P385',
        'P387', 'P388', 'P390', 'P392', 'P393', 'P395', 'P408',
        'P413', 'P421', 'P438', 'P473', 'P476', 'P479', 'P490',
        'P492',
    }
    return partic_id not in troubled
def quote(env, args):
    """Returns its argument; stops evaluation:
    (quote (1 2 3)) = (1 2 3)

    :param env: evaluation environment (unused -- quote never evaluates)
    :param args: argument list; must hold exactly one element
    :raises ValueError: when more than one argument is supplied
    """
    if len(args) > 1:
        # Fixed typo in the user-facing message: "expectes" -> "expects".
        raise ValueError(
            "Function quote expects one argument, got: '{}'"
            .format(args)
        )
    else:
        return args.car()
def int_to_bytes(num, lens):
    """
    Convert an integer to big-endian bytes.

    :param num: the integer (or int-convertible value) to encode
    :param lens: target length of the byte string, in bytes
    :return: bytes of length *lens*
    """
    return int(num).to_bytes(lens, byteorder='big')
import select
def read_ready(*read_fds, timeout=None):
    """Returns a list of file descriptors that are ready to be read.

    Args:
        *read_fds (int): Integers that refer to the file descriptors.
        timeout (float): A timeout before returning an empty list if no file
            descriptor is ready. None waits until at least one file descriptor
            is ready.
    """
    if not read_fds:
        # Nothing to watch: short-circuit instead of calling select.
        return []
    ready, _, _ = select.select(read_fds, [], [], timeout)
    return ready
def solar_geometric_mean_longitude(julian_century):
    """Returns the solar geometric mean longitude (in degrees, wrapped to
    [0, 360)) for the given Julian Century *julian_century*."""
    unwrapped = 280.46646 + julian_century * (36000.76983 + julian_century * 0.0003032)
    return unwrapped % 360
import logging
def get_logger(name, level, fmt=None):
    """
    Get logger from logging with given name, level and format without
    setting logging basicConfig. For setting basicConfig in paddle
    will disable basicConfig setting after import paddle.

    Args:
        name (str): The logger name.
        level (logging.LEVEL): The base level of the logger
        fmt (str): Format of logger output
    Returns:
        logging.Logger: logging logger with given settings

    Examples:
        .. code-block:: python
        logger = log_helper.get_logger(__name__, logging.INFO,
                        fmt='%(asctime)s-%(levelname)s: %(message)s')
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # logging.getLogger returns the same object for the same name, so the
    # original unconditionally stacked a fresh StreamHandler on every
    # call, duplicating each log line. Only attach a handler the first
    # time this named logger is configured.
    if not logger.handlers:
        handler = logging.StreamHandler()
        if fmt:
            handler.setFormatter(logging.Formatter(fmt=fmt))
        logger.addHandler(handler)
    return logger
import argparse
def parse_args(args):
    """Parses command line arguments for the discover tool."""
    parser = argparse.ArgumentParser(
        prog="discover", description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "-m", "--model", type=str, required=True,
        help="BioGraph classifier model for qual_classifier")
    return parser.parse_args(args)
from typing import List
def ks_filter_ascending(features: List[dict]) -> List[dict]:
    """
    Filter out ascending path/rows from a STAC query: keep only features
    whose landsat WRS path and row are both below 100.
    """
    descending = []
    for feature in features:
        props = feature['properties']
        if (int(props['landsat:wrs_path']) < 100
                and int(props['landsat:wrs_row']) < 100):
            descending.append(feature)
    return descending
import json
def get_ignore_info(request):
    """Extract a dict of residues to ignore from the post request:
    parses the JSON payload stored under the 'ignore' POST key."""
    raw_payload = request.POST.get("ignore")
    return json.loads(raw_payload)
def export(varname):
    """Returns "export varname={formatted value}\\n", reading the value
    from this module's globals and escaping quotes and literal \\n
    sequences for shell consumption."""
    raw = str(globals()[varname])
    escaped = raw.strip("'\"").replace("\"", "\\\"").replace(r"\n", r"\\n")
    return f"export {varname}=\"{escaped}\"\n"
import secrets
def _make_token(num_bytes=16):
"""
Creates a cryptographically-secure, URL-safe string (for Python 3.6+)
"""
return secrets.token_urlsafe(num_bytes) | 6b23ff243380619d92a0e36da14fc1b5a64ca519 | 45,255 |
import os
import glob
def _get_conventions(return_type):
    """
    Get available SOFA conventions.

    Convention files are JSON files named ``<Name>_<Version>.json`` in
    the package's ``conventions`` directory.

    Parameters
    ----------
    return_type : string, optional
        ``'path'``
            Return a list with the full paths and filenames of the convention
            files
        ``'name'``
            Return a list of the convention names without version
        ``'name_version'``
            Return a list of tuples containing the convention name and version.
        ``'string'``
            Returns a string that lists the names and versions of all
            conventions.

    Returns
    -------
    See parameter `return_type`. ``None`` is returned when *return_type*
    is None; any other value raises ValueError.
    """
    # directory containing the SOFA conventions
    directory = os.path.join(os.path.dirname(__file__), "conventions")
    # SOFA convention files
    paths = [file for file in glob.glob(os.path.join(directory, "*.json"))]
    conventions_str = "Available SOFA conventions:\n"
    conventions = []
    versions = []
    for path in paths:
        # Split "<Name>_<Version>.json" into name and version ([: -5]
        # drops the ".json" suffix).
        fileparts = os.path.basename(path).split(sep="_")
        conventions += [fileparts[0]]
        versions += [fileparts[1][:-5]]
        conventions_str += f"{conventions[-1]} (Version {versions[-1]})\n"
    if return_type is None:
        return
    elif return_type == "path":
        return paths
    elif return_type == "name":
        return conventions
    elif return_type == "name_version":
        return [(n, v) for n, v in zip(conventions, versions)]
    elif return_type == "string":
        return conventions_str
    else:
        raise ValueError(f"return_type {return_type} is invalid") | bb7d34218e433bf9ca3bb8baa4f70f53480ac091 | 45,256 |
def properties_to_dict(filepath):
    """
    Convert Java .properties file to a dict.

    Only non-commented, non-blank lines are included. Keys and values are
    whitespace-stripped.

    :param filepath: path of the .properties file to parse
    :return: dict mapping property keys to values
    """
    out = {}
    with open(filepath) as prop_file:
        for line in prop_file:
            line = line.strip()
            if line and (not line.startswith('#')):
                # BUG FIX: split on the FIRST '=' only, so values that
                # themselves contain '=' (URLs, base64, ...) no longer
                # raise "too many values to unpack".
                k, v = line.split('=', 1)
                out[k.strip()] = v.strip()
    return out
def specific_time_range(data_frame, min_range, max_range):
    """
    Slice *data_frame* between *min_range* and *max_range*.

    :param data_frame: sliceable object (e.g. a time-indexed DataFrame)
    :param min_range: start of the slice
    :param max_range: end of the slice
    :return: the sliced view
    """
    window = data_frame[min_range:max_range]
    return window
from typing import Dict
from typing import Any
import importlib
def get_contract_definition(contract_name: str) -> Dict[str, Any]:
    """Returns the abi JSON for a contract name.

    :param contract_name: name of a module under the local ``artifacts``
        package holding the contract definition
    :raises TypeError: when no ``artifacts.<contract_name>`` module exists
    """
    try:
        # Contract definitions live as modules in the ``artifacts`` package;
        # the module's attribute dict is the returned definition.
        return importlib.import_module("artifacts." + contract_name).__dict__
    except ModuleNotFoundError:
        raise TypeError("Contract name does not exist in artifacts.") | f5870b512bc2abbee501b061ea550690354b9646 | 45,262 |
def parse_sources(sourcedef):
    """parse a source definition such as 'src1:1.0,src2' into a sequence of
    tuples (src_id, weight), where the weights are normalised to sum to 1
    and an omitted weight defaults to 1.0"""
    weighted = []
    for raw_source in sourcedef.strip().split(','):
        fields = raw_source.strip().split(':')
        weight = float(fields[1]) if len(fields) > 1 else 1.0
        weighted.append((fields[0], weight))
    total = sum(weight for _, weight in weighted)
    return [(src_id, weight / total) for src_id, weight in weighted]
def prep_key(key):
    """
    Prepare key: return *key* as-is, prompting the user for one when it
    is None.

    :param key: Key to use. 8-character hexadecimal, with or without 0x.
    :type key: str
    """
    if key is not None:
        return key
    return input("PGP KEY (0x12345678): ")
def assert_equals(source_dict, expected_dict):
    """
    Check equality with expected values in dictionary keys.

    Args:
    - source_dict (dict): the dict containing the keys/values that should
        conform to expected values
    - expected_dict (dict): the dict containing keys of the `source_dict`, with
        expected values of those keys as values

    Returns:
        list: empty list if all checks succeeded, list of error message str if
        some checks failed
    """
    errors = []
    for key, expected in expected_dict.items():
        actual = source_dict.get(key)
        if actual != expected:
            errors.append(
                "Expected value '{}' for key '{}', got '{}'".format(
                    expected, key, actual))
    return errors
def import_cmd(cmd):
    """Split a fully-qualified function call into its module path and the
    original call string.

    :param cmd: a call string such as ``"pkg.mod.func(arg)"``
    :return: tuple ``(module_path, cmd)`` where ``module_path`` is the dotted
        path before the final name (empty string for an unqualified call)
    :raises ValueError: if ``cmd`` contains no ``'('``
    """
    lp = cmd.index('(')
    ftoks = cmd[:lp].split('.')
    imp = '.'.join(ftoks[:-1])
    return imp, cmd
def is_end_of_file(empty_line_count, threshold=5):
    """
    Check whether reading of a file has reached the bottom, i.e. more than
    ``threshold`` consecutive empty lines have been seen.

    :param empty_line_count: number of consecutive empty lines read so far
    :param threshold: maximum tolerated run of empty lines (default 5,
        matching the previous hard-coded behavior)
    :return: True once ``empty_line_count`` exceeds ``threshold``
    """
    return empty_line_count > threshold
import sys
def _called_with_wrong_args(factory):
    """Check whether calling a function raised a ``TypeError`` because
    the call failed or because something in the factory raised the
    error.

    Must be called from inside an ``except TypeError:`` block, since it
    inspects the traceback of the exception currently being handled.

    :param factory: the factory function that was called
    :return: true if the call failed
    """
    tb = sys.exc_info()[2]  # pylint: disable=invalid-name
    try:
        # Walk the traceback frames; if any frame belongs to the factory's
        # code object, the call itself succeeded and the TypeError came
        # from inside the factory body.
        while tb is not None:
            if tb.tb_frame.f_code is factory.__code__:
                # in the factory, it was called successfully
                return False
            tb = tb.tb_next  # pylint: disable=invalid-name
        # didn't reach the factory
        return True
    finally:
        # Drop the local traceback reference to break the frame reference
        # cycle it would otherwise create.
        del tb | f1426b3ab101779c8e672611977ccd35c6f81901 | 45,272
from pathlib import Path
import argparse
def valid_dir(path):
    """Helper for parse_args: convert to ``Path`` and verify the directory exists.

    Args:
        path: path to directory from parse_args()
    Returns:
        The absolute path of the given directory if it exists
    Raises:
        argparse.ArgumentTypeError if the directory given does not exist or if not a directory
    """
    candidate = Path(path)
    if not (candidate.exists() and candidate.is_dir()):
        raise argparse.ArgumentTypeError(f"No such directory or the given path is not a directory: '{candidate}'")
    return candidate.absolute()
def approx_pretty_size(total_bytes) -> str:
    """
    Return a humane and pretty size approximation.
    This looks silly bellow 1KB but I'm OK with that.
    Don't call this with negative total_bytes or your pet hamster will go bald.
    >>> approx_pretty_size(50)
    '1KB'
    >>> approx_pretty_size(2000)
    '2KB'
    >>> approx_pretty_size(2048)
    '2KB'
    >>> approx_pretty_size(3000000)
    '3MB'
    >>> approx_pretty_size(4000000000)
    '4GB'
    >>> approx_pretty_size(-314)
    Traceback (most recent call last):
    ...
    ValueError: total_bytes may not be negative
    """
    if total_bytes < 0:
        raise ValueError("total_bytes may not be negative")
    scales = ((1024 ** 3, "GB"), (1024 ** 2, "MB"), (1024, "KB"))
    for scale, unit in scales:
        quotient, remainder = divmod(total_bytes, scale)
        if quotient > 0:
            if remainder > 0:
                quotient += 1  # always round up
            return f"{quotient:d}{unit}"
    # Anything below 1KB (including 0) is reported as 1KB.
    return "1KB"
def get_time_with_unit(time):
    """Convert a duration in seconds to a human-readable string in days,
    hours, or minutes.

    Exact multiples of a day or an hour use integer division so an int
    input renders as e.g. ``"2 days"`` instead of ``"2.0 days"`` (the old
    true-division behavior). Durations not divisible by an hour fall back
    to (possibly fractional) minutes.

    :param time: duration in seconds
    :return: formatted string such as ``"2 days"`` or ``"1.5 minutes"``
    """
    sec_in_min = 60
    sec_in_hour = 60 * 60
    sec_in_day = 24 * 60 * 60
    if time % sec_in_day == 0:
        value = time // sec_in_day
        unit = 'days'
    elif time % sec_in_hour == 0:
        value = time // sec_in_hour
        unit = 'hours'
    else:
        value = time / sec_in_min
        unit = 'minutes'
    return "%s %s" % (value, unit)
def is_implicit_newline(raw):
    """should we add a newline to templates starting with *, #, :, ;, {|
    see: http://meta.wikimedia.org/wiki/Help:Newlines_and_spaces#Automatic_newline_at_the_start
    """
    # str.startswith accepts a tuple of prefixes, replacing the manual loop.
    return raw.startswith(('*', '#', ':', ';', '{|'))
def literally(obj):
    """Forces Numba to interpret *obj* as an Literal value.
    *obj* must be either a literal or an argument of the caller function, where
    the argument must be bound to a literal. The literal requirement
    propagates up the call stack.
    This function is intercepted by the compiler to alter the compilation
    behavior to wrap the corresponding function parameters as ``Literal``.
    It has **no effect** outside of nopython-mode (interpreter, and objectmode).
    The current implementation detects literal arguments in two ways:
    1. Scans for uses of ``literally`` via a compiler pass.
    2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
       to signal the dispatcher to treat the corresponding parameter
       differently. This mode is to support indirect use (via a function call).
    The execution semantic of this function is equivalent to an identity
    function.
    See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
    """
    # Plain identity at runtime; all special behavior is injected by the
    # compiler pass / dispatcher described above.
    return obj | eff86e0db241abfa895c1fcc69dee7cb99ad6370 | 45,279
import json
def get_dict(body_string):
    """
    Generate a dict from a message body string.

    :param body_string: JSON-encoded message body; single-quoted
        pseudo-JSON (e.g. ``"{'a': 1}"``) is accepted as a fallback.
    :return: dict object
    :raises json.JSONDecodeError: if the body cannot be parsed either way
    """
    try:
        return json.loads(body_string)
    except json.JSONDecodeError:
        # Fallback for single-quoted pseudo-JSON. The previous
        # dumps/replace/loads round-trip corrupted any body that already
        # contained double quotes and crashed on single-quoted bodies.
        return json.loads(body_string.replace("'", '"'))
def clean_names(name):
    """Normalize a UK local-authority name so data from different sources
    can be joined: strips council-type suffixes/prefixes, then applies a
    table of known spelling fixes, and finally trims whitespace.
    """
    # Council-type designators to delete outright (order matters: longer,
    # more specific phrases come before their shorter substrings).
    designators = (
        " Metropolitan Borough Council",
        " Metropolitan District Council",
        " Royal Borough Council",
        "Royal Borough of ",
        "London Borough of ",
        " Borough Council",
        " District Council",
        " City Council",
        " County Council",
        "District",
        "Council",
        "Corporation",
        ", City of",
        ", City Of",
        "City of ",
        ", County of",
        ", County Of",
        "County UA",
        "County ",
        " (Met County)",
        "CC",
        "DC",
    )
    # Known spelling/punctuation fixes applied after stripping designators.
    fixes = {
        "&": "and",
        "Mid-": "Mid ",
        "Upon": "upon",
        "Kings Lynn": "King's Lynn",
        "King’s Lynn": "King's Lynn",
        "Basingstoke and Deane": "Basingstoke and Dean",
        "St Helens": "St. Helens",
        "Vale of Whitehorse": "Vale of White Horse",
        "Newcastle upon Tyne": "Newcastle",
        "Newcastle-Under-Lyme": "Newcastle-under-Lyme",
        "Blackburn With Darwen": "Blackburn with Darwen",
    }
    # Replace non-breaking spaces first so designator matching works.
    name = name.replace("\xa0", " ")
    for designator in designators:
        name = name.replace(designator, "")
    for wrong, right in fixes.items():
        name = name.replace(wrong, right)
    return name.strip()
import string
def prettify_permission_name(perm_name: str) -> str:
    """Turn an internal discord.py permission name (e.g. ``send_tts_messages``)
    into a user-facing label (``Send TTS Messages``)."""
    spaced = perm_name.replace('_', ' ')
    pretty = string.capwords(spaced)
    # capwords produces "Tts"; special-case the TTS acronym.
    if pretty == "Send Tts Messages":
        pretty = "Send TTS Messages"
    return pretty
def c_to_f(temp):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scaled = temp * 9 / 5
    return scaled + 32
def argspec_args(argspec, constructor, *args, **kwargs):
    """
    Filter positional and keyword arguments down to what the argspec accepts.

    :param argspec: argspec describing the target callable
    :type argspec: argspec
    :param constructor: is the target a constructor (implicit ``self``)?
    :type constructor: bool
    :param args: candidate positional arguments
    :param kwargs: candidate keyword arguments
    :return: (args, kwargs) matching the function signature
    :rtype: tuple
    """
    # Keyword args: pass everything through if **kwargs is accepted,
    # otherwise keep only the names the signature declares.
    if argspec.keywords:
        selected_kwargs = kwargs
    else:
        named = argspec.args
        selected_kwargs = dict((k, kwargs[k]) for k in kwargs if k in named)  # Python 2.6 dict comprehension
    # Positional args: pass everything through if *args is accepted,
    # otherwise truncate to the declared arity (minus one for self).
    if argspec.varargs:
        selected_args = args
    else:
        limit = len(argspec.args) - (1 if constructor else 0)
        selected_args = args[:limit]
    return selected_args, selected_kwargs
import os
def hasLsf():
    """Return True only if LSF is available, i.e. the ``bsub`` command is
    found on PATH (probed via the shell)."""
    status = os.system("which bsub > /dev/null")
    return status == 0
def prio_dscp_map(duthosts, rand_one_dut_hostname):
    """
    This fixture reads the QOS parameters from SONiC DUT, and creates
    priority Vs. DSCP priority port map

    Args:
        duthosts (pytest fixture) : list of DUTs
        rand_one_dut_hostname (pytest fixture): DUT hostname

    Returns:
        Priority vs. DSCP map (dictionary, key = priority).
        Example: {0: [0], 1: [1], 2: [2], 3: [3], 4: [4] ....}
        None if the running config has no DSCP_TO_TC_MAP, or more than
        one profile is defined.
    """
    duthost = duthosts[rand_one_dut_hostname]
    config_facts = duthost.config_facts(host=duthost.hostname,
                                        source="running")['ansible_facts']
    if "DSCP_TO_TC_MAP" not in config_facts.keys():
        return None
    dscp_to_tc_map_lists = config_facts["DSCP_TO_TC_MAP"]
    if len(dscp_to_tc_map_lists) != 1:
        return None
    # Bug fix: dict.keys() is not subscriptable on Python 3
    # (`.keys()[0]` raised TypeError); take the single profile name
    # via an iterator instead.
    profile = next(iter(dscp_to_tc_map_lists))
    dscp_to_tc_map = dscp_to_tc_map_lists[profile]
    result = {}
    for dscp in dscp_to_tc_map:
        tc = int(dscp_to_tc_map[dscp])
        # Group DSCP values under their traffic class (priority).
        result.setdefault(tc, []).append(int(dscp))
    return result
def lerp(value1, value2, amt):
    """Linearly interpolate between two numbers.

    ``amt`` is the amount to interpolate between the two values:
    0.0 yields ``value1``, 0.5 is half-way in between, 1.0 yields
    ``value2`` (values outside [0, 1] extrapolate).
    """
    delta = value2 - value1
    return value1 + amt * delta
def search(pd_db, year=0, author='', journal='', author1='', title='', doi='', byindex=False):
    """Search a pandas bibliography database by keywords.

    :param pd_db: DataFrame with columns such as 'year', 'author',
        'journal', 'title', 'doi'. An 'author1' column (first author)
        is derived from 'author' if missing (mutates ``pd_db``).
    :param year: exact publication year to match (0 disables the filter)
    :param author/journal/author1/title/doi: substring filters; empty
        string disables the corresponding filter
    :param byindex: if True return the matching index instead of the rows
    :return: filtered DataFrame, or its index if ``byindex`` is True
    """
    if ("author1" not in pd_db.columns) and ("author" in pd_db.columns):
        pd_db["author1"] = [x.split(' and ')[0] for x in pd_db['author'].values]
    if year != 0:
        pd_db.loc[:, 'year'] = pd_db.loc[:, 'year'].astype(int)
        db = pd_db.loc[pd_db['year'] == year]
    else:
        db = pd_db

    def _search_item(db, column, value):
        """Filter rows whose ``column`` contains ``value`` (no-op if disabled)."""
        if (value != '') and (column in db.columns):
            # Fill NaN on a copy of the column instead of mutating `db`
            # in place: `fillna(..., inplace=True)` on a filtered slice
            # raised SettingWithCopyWarning and could silently fail.
            filled = db[column].fillna('')
            return db.loc[filled.str.contains(value)]
        return db

    db = _search_item(db, "author", author)
    db = _search_item(db, "author1", author1)
    db = _search_item(db, "journal", journal)
    db = _search_item(db, "title", title)
    db = _search_item(db, "doi", doi)
    if byindex:
        return db.index
    return db
def split_folds(X, y, fold_series, test_fold):
    """Take a dataset whose observations have been grouped into folds,
    then perform a train-test split.

    X, y: feature and target DataFrames.
    fold_series: Series containing the fold numbers of the observations.
    test_fold: Integer, the fold number that will be used as the test fold.
    Returns: tuple of four DataFrames (X_train, X_test, y_train, y_test)."""
    if fold_series.dtype != "int64":
        raise AttributeError("The fold list does not purely contain integers.")
    is_test = fold_series == test_fold
    is_train = ~is_test
    return (
        X.loc[is_train].copy(),
        X.loc[is_test].copy(),
        y.loc[is_train].copy(),
        y.loc[is_test].copy(),
    )
import time
def str2time(sTime, timeformat="%Y-%m-%d %H:%M:%S"):
    """Convert a time string to a Unix timestamp (local time).

    Arguments:
        sTime {str} -- time string
    Keyword Arguments:
        timeformat {str} -- format of the time string (default: {"%Y-%m-%d %H:%M:%S"})
    Returns:
        int -- Unix timestamp
    """
    parsed = time.strptime(sTime, timeformat)
    return int(time.mktime(parsed))
def ffs(x: int) -> int:
    """Find first set - returns the index, counting from 0 (from the right),
    of the least significant set bit in `x` (-1 if no bit is set).
    """
    lowest_bit = x & -x  # isolates the least significant set bit
    return lowest_bit.bit_length() - 1
def radon_cyclomatic_complexity_parser(stdout, stderr, previous_reports=None):
    """Parse `radon cc` output into a per-grade summary.

    Example line: ``    F 31:0 get_datetime - A`` — the complexity grade
    is the 9th space-separated token (leading indentation yields empty
    tokens when splitting on single spaces).
    """
    summary = dict.fromkeys(("A", "B", "C", "D", "E", "F"), 0)
    # Drop the trailing empty element produced by the final newline.
    for line in stdout.split('\n')[:-1]:
        tokens = line.split(' ')
        if len(tokens) == 1:
            # Header/filename lines have no spaces; skip them.
            continue
        grade = tokens[8]
        assert grade in ['A', 'B', 'C', 'D', 'E', 'F']
        summary[grade] += 1
    summary['total'] = sum(summary.values())
    return {
        'stdout': stdout,
        'stderr': stderr,
        'summary': summary,
    }
import re
def _validhex(hexstring):
"""_validhex(hexstring)
validate a string as being a hex color value
"""
if len(hexstring) == 7 and hexstring[0] == '#' and \
re.match('[0-9A-Fa-f]', hexstring[1]) and \
re.match('[0-9A-Fa-f]', hexstring[2]) and \
re.match('[0-9A-Fa-f]', hexstring[3]) and \
re.match('[0-9A-Fa-f]', hexstring[4]) and \
re.match('[0-9A-Fa-f]', hexstring[5]) and \
re.match('[0-9A-Fa-f]', hexstring[6]):
return True
else:
return False | e1559e4996b5a0f2c6e0c1fab66ff8af5bf9698a | 45,301 |
from typing import TextIO
from typing import Tuple
def run(inp: TextIO) -> Tuple[int, int]:
    """Solution for 2021 Day 2.

    Parses "direction amount" lines and returns the part-1 and part-2
    answers as (horizontal*depth, horizontal*aim-adjusted-depth).
    """
    moves = []
    for line in inp.read().splitlines():
        word, amount = line.split()
        moves.append((word, int(amount)))

    horizontal = 0
    depth = 0            # part 1: down/up change depth directly
    aim = 0              # part 2: down/up change aim instead
    depth_with_aim = 0   # part 2 depth, driven by aim on each "forward"
    for word, amount in moves:
        if word == "forward":
            horizontal += amount
            depth_with_aim += aim * amount
        elif word == "down":
            depth += amount
            aim += amount
        elif word == "up":
            depth -= amount
            aim -= amount
    return (horizontal * depth, horizontal * depth_with_aim)
import logging
def is_logging_to_tty():
    """Return true iff all logging destinations are terminal devices.

    Side effect: if the root logger has no handlers yet, calls
    ``logging.basicConfig()`` to install the default stderr handler.
    """
    if not logging.root.handlers:
        logging.basicConfig()
    assert logging.root.handlers
    # Every root handler must be a StreamHandler whose stream is a tty;
    # any file/network handler (or a redirected stream) fails the check.
    for handler in logging.root.handlers:
        if not (isinstance(handler, logging.StreamHandler) and
                handler.stream.isatty()):
            return False
    return True | bb74626a55b64b65a532c2fb09313841614cb89b | 45,303
def all_services(request):
    """Return all test services.

    Pytest fixture: yields the current parametrized service via
    ``request.param`` (the parametrization itself is declared elsewhere).
    """
    return request.param | 165df889ce0e2729aaed431fbdc1020b6d3cf034 | 45,305
def _reformat_mspass_error(
mserr, prefix_message, suffix_message="Some requested metrics may not be computed"
):
"""
Helper for below used to reformat a message from ccore functions that
throw a MsPASSError. Needed to produce rational messages from
different error metric calculations.
:param mserr: MsPASSError object caught with a try: except: block
:param prefix_message: string that becomes the first part of the revised
message posted.
:param suffix_message: string that becomes a third line of the revised
message posted. Note this string is always preceded by a newline so do not
put a newline in this arg unless you want a blank line.
:return: expand message string
"""
log_message = "FD_snr_estimator: error in computing an snr metric"
log_message += prefix_message
log_message += mserr.message
log_message += "\n"
log_message += suffix_message
return log_message | 469755c1251e98ab1ab15708956e0d3bba048627 | 45,306 |
def lost_techs():
    """Find research that was canceled due to the building being destroyed.

    Returns a (query, params) pair. The query contains an ``{sq}``
    placeholder that the caller is expected to fill with a match-selecting
    subquery before execution; the params dict is empty.
    """
    # class_id=80: building objects; researching_technology_id > 0 filters
    # to buildings that were mid-research when destroyed.
    query = """
        select ois.match_id, ois.player_number as number, destroyed::interval(0) as timestamp, technologies.name as value
        from object_instance_states as ois join
        (
            select max(object_instance_states.id) as id, match_id, instance_id
            from object_instance_states join ({sq}) as sq on match_id=sq.id
            where class_id=80
            group by instance_id, match_id
        ) as s on ois.id=s.id and ois.match_id=s.match_id
        join object_instances as oi on ois.instance_id=oi.instance_id and oi.match_id=ois.match_id
        join technologies on ois.researching_technology_id=technologies.id and ois.dataset_id=technologies.dataset_id
        where oi.destroyed is not null and ois.researching_technology_id > 0
    """
    return query, dict() | 9fbb345205aae382a914437e2f516e422325df4d | 45,307
def get_largest_mol(mol_list):
    """
    Given a list of rdkit mol objects, returns mol object containing the
    largest num of atoms. If multiple containing largest num of atoms,
    picks the first one.

    Args:
        mol_list(list): a list of rdkit mol object.
    Returns:
        the largest mol.
    """
    # max() returns the first maximal element, matching the documented
    # "picks the first one" tie-breaking behavior.
    return max(mol_list, key=lambda mol: len(mol.GetAtoms()))
def zip_dict(*dicts: dict, default=None) -> dict:
    """
    Takes a list of dicts, and creates a union of all dicts with the values
    from each dict as elements of a tuple (missing keys get ``default``):
    zip_dict({a: 1, b: 2}, {a: 2, c: 3}) == {a: (1, 2), b: (2, None), c: (None, 3)}
    """
    key_groups = [set(d) for d in dicts]
    merged_keys = set.union(*key_groups)
    return {key: tuple(d.get(key, default) for d in dicts) for key in merged_keys}
def patternhost(pattern, user):
    """
    Given a 'something-%s-example.org' format, return it with the single
    %s placeholder substituted by the username in question.
    """
    return pattern % (user,)
def capitalize(txt: str) -> str:
    """Trim, then turn only the first character into upper case.

    ``None`` and colander's null sentinel are passed through unchanged,
    so this function can be used as a colander preparer.
    """
    if txt is None:
        return txt
    # colander.null is detected by repr to avoid importing colander here.
    if not isinstance(txt, str) and repr(txt) == "<colander.null>":
        return txt
    stripped = str(txt).strip()
    if stripped == "":
        return stripped
    return stripped[0].upper() + stripped[1:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.