content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import pytz
def localize_datetime(dt, tz_name='UTC'):
    """Provide a timezone-aware object for a given datetime and timezone name.

    :param dt: naive datetime (tzinfo must be None); interpreted as UTC
    :param tz_name: pytz timezone name to convert into (default 'UTC')
    :return: the same instant as a timezone-aware datetime in tz_name
    """
    # Reject already-aware datetimes. NOTE(review): assert is stripped
    # under `python -O`; raising ValueError would be safer — confirm intent.
    assert dt.tzinfo is None
    utc = pytz.timezone('UTC')
    # The naive input is treated as UTC wall-clock time.
    aware = utc.localize(dt)
    timezone = pytz.timezone(tz_name)
    tz_aware_dt = aware.astimezone(timezone)
    return tz_aware_dt
|
dbf08960ac51b7ce58fabf2f303310d27b39f495
| 89,803
|
from typing import Union
import re
def _get_cimrdf_version(cim_ns) -> Union[None, str]:
"""
Parse the cim namespace_name into a version number
:param cim_ns: cim namespace_name
:return: double, version number, or None if no version could be identified
"""
match = re.search(r"(?<=CIM-schema-cim)\d{0,2}?(?=#)", cim_ns)
if match:
return match.group()
else:
return None
|
1660f63c4d9f699e9852d980d8751e710afc6ee5
| 89,806
|
def triarea(x, y, dir=0):
    """Return the area of a triangle given the coordinates of its vertices.

    A = 0.5 * |u x v|, where u and v are the edge vectors from vertex 0 to
    vertices 1 and 2, and x is the cross product.  When `dir` is truthy the
    signed area is returned (the sign tells whether the triangle is flipped).
    """
    # Edge vectors from vertex 0 to the other two vertices.
    u = (x[1] - x[0], y[1] - y[0])
    v = (x[2] - x[0], y[2] - y[0])
    signed_area = 0.5 * (u[0] * v[1] - u[1] * v[0])
    return signed_area if dir else abs(signed_area)
|
b8bdbb93eed60168d5fa103c24d185b4fdaf93ca
| 89,808
|
def filter_data(lines):
    """
    Filter out lines that are too short or too long.
    Keeps only lines whose length is between 8 and 60 characters inclusive.
    """
    kept = []
    for line in lines:
        if 8 <= len(line) <= 60:
            kept.append(line)
    return kept
|
185b70ff0787c8166e25abb0d893875730715a77
| 89,811
|
def is_valid_boolean_response(response):
    """
    Returns true if a boolean response is valid.
    str -> bool
    """
    return response in ('y', 'n')
|
059161b21ed4d1e8f82bb92e47e77ff2767e9893
| 89,816
|
def ask(prompt: str) -> bool:
    """Ask a yes/no question until the user gives an understandable response.

    Re-prompts on empty input or on answers not starting with Y/N.

    :param prompt: question text; "? " is appended when displayed
    :return: True for an answer starting with Y, False for one starting with N
    """
    while True:
        # Normalize: uppercase and strip whitespace.  The original indexed
        # [0] before checking for empty input, crashing with IndexError when
        # the user just pressed Enter; startswith() handles that safely.
        answer = input(prompt + "? ").upper().strip()
        print()
        if answer.startswith("Y"):
            return True
        if answer.startswith("N"):
            return False
        print("PLEASE ANSWER 'YES' OR 'NO'.")
    # (the original had an unreachable `return False` after the while-True)
|
a479ab5096993144cdb41635fae4b6407e3ca4bd
| 89,819
|
def getEntity(entityLabel: str, ontoInstance):
    """Retrieve an ontology entity based on its label.

    Parameters
    ----------
    entityLabel : str
        Label for searching.
    ontoInstance : type
        Owlready2 loaded ontology.

    Returns the single matching entity, or False when the search does not
    yield exactly one result.
    """
    matches = ontoInstance.search(label=entityLabel)
    if len(matches) != 1:
        return False
    return matches[0]
|
16ce67a7554ecd39bfc1c574e5c3256c3344772d
| 89,824
|
def train_model(model, train_x, train_y):
    """
    Fit the given model on the training data and return it.
    """
    model.fit(train_x, train_y)
    return model
|
175e7b3e85050361c64d583fe797873df0fcce39
| 89,826
|
def rectangle_crop_img(src, x_start, x_end, y_start, y_end):
    """
    Crop a rectangular region out of an image array.

    The image origin (0, 0) is the upper-left corner; x_start/x_end select
    rows (height), y_start/y_end select columns (width).
    """
    rows = slice(x_start, x_end)
    cols = slice(y_start, y_end)
    return src[rows, cols]
|
40eb71921db0f6d7f30221c8f07309a11daa4b2a
| 89,827
|
def _parse_error_as_dict(pe):
"""
Return a dict for a ParseError.
"""
return dict(
token_type=pe.token_type,
token_string=pe.token_string,
position=pe.position,
error_code=pe.error_code,
)
|
3410a9e406d7af2f11afde55b0dda551149f4dc5
| 89,830
|
import math
def max_returns(prices):
    """
    Calculate the maximum possible return from a single buy/sell.

    Args:
        prices(array): array of prices
    Returns:
        int: the maximum profit possible (0 when no profitable trade exists)
    """
    lowest_so_far = math.inf
    best_profit = 0
    for price in prices:
        lowest_so_far = min(lowest_so_far, price)
        best_profit = max(best_profit, price - lowest_so_far)
    return best_profit
|
8150700a20af4c753bcf52f8a455e9da18f41d49
| 89,831
|
from typing import Dict
def get_joint_acc(results: Dict[str, dict]) -> float:
    """Calculate joint accuracy over all turns of all dialogues.

    Args:
        results: from Trainer.format_results method; maps dialogue id to
            {turn id: {"preds": ..., "labels": ...}}
    Returns:
        fraction of turns whose predictions exactly equal the labels, or
        0.0 when there are no turns (the original raised ZeroDivisionError)
    """
    outcomes = [
        1 if turn["preds"] == turn["labels"] else 0
        for dial in results.values()
        for turn in dial.values()
    ]
    if not outcomes:
        return 0.0
    return sum(outcomes) / len(outcomes)
|
09f5a0ddafa4ead14cae22c65f06e0da446eec0d
| 89,832
|
def lines_stripped(iterable, chars=None):
    """Yield each line of the input iterable with strip(chars) applied."""
    return (line.strip(chars) for line in iterable)
|
5fd843304db86aa2d1c66e7052a6258f87a41f46
| 89,833
|
def gen_lsp_name(func_name: str, prefix: str = 'lsp_') -> str:
    """Generate the name of an lsp-method from a function name,
    e.g. "lsp_S_cancelRequest" -> "$/cancelRequest"."""
    assert func_name[:len(prefix)] == prefix
    stripped = func_name[len(prefix):]
    return stripped.replace('_', '/').replace('S/', '$/')
|
8377b21e3da8363efaf590a5347a16e91a9d9ab8
| 89,836
|
def strip_dep_version(dependency: str) -> str:
    """Strip a possible version suffix, e.g. types-six>=0.1.4 -> types-six."""
    # Cut at the first version-operator character; keep everything if none.
    cut = next(
        (i for i, ch in enumerate(dependency) if ch in "=<>"),
        len(dependency),
    )
    return dependency[:cut]
|
31a3264c24384b69e38c2cbceeb1408e237c8626
| 89,837
|
def pop_target(unpopped_data):
    """
    Remove the target column from a data frame.

    Args:
        unpopped_data: Data frame containing the 'CONTROL_STATUS' column.
    Returns:
        data: Copy of the data frame without the target column.
        target: The 'CONTROL_STATUS' column.
    """
    frame = unpopped_data.copy()
    labels = frame.pop('CONTROL_STATUS')
    return frame, labels
|
d2f6dee0480a473c749c53b7759c909e20bbcc29
| 89,840
|
from pathlib import Path
def get_headers(fname: str) -> dict:
    """
    Parse headers directly copied from dev tools.

    Keys are the lower-cased header names; values have surrounding
    whitespace stripped.  Blank lines are ignored.
    """
    headers = {}
    for line in Path(fname).read_text().split('\n'):
        if not line:
            continue
        name, _, value = line.partition(':')
        headers[name.lower()] = value.strip()
    return headers
|
78f729b8718deae234a69ce6c248d9f8e68b97d2
| 89,842
|
import re
def verify_ok(code):
    """Verify if the http code is 200 OK"""
    # Strips every non-digit from `code`, then drops the first two digits —
    # presumably the "1.1" of an "HTTP/1.1 XXX ..." status line, leaving the
    # 3-digit status code.  TODO confirm the expected input format.
    # NOTE(review): this is a *string* comparison ('200' < '400'), so it
    # actually returns True for any status below 400 (2xx/3xx), not only
    # 200, and misbehaves when fewer than two digits precede the code.
    return re.sub('[^0-9]', '', code)[2:] < '400'
|
1473160e1c195ec62a9d7088a64849ed0cfa5ac7
| 89,843
|
import re
def get_summary_val(fname, vname):
    """Gets value of a summary variable from file 'fname' and
    variable 'vname' in line of text
    Returns [vname, unit, value] on success, or [0, ''] when the variable
    is not found.  NOTE(review): the two return shapes differ in length
    and order — callers must handle both; confirm this is intended."""
    # File is opened as UTF-16 LE; presumably a Tristar export — confirm.
    with open(fname, encoding='utf-16-le') as file:
        # Matches "<vname> ... : <number>[,] <unit>": group(1) is the numeric
        # value (decimal comma or dot allowed), group(3) the unit token.
        p = re.compile(vname + r'[ \w\.\,\n\(\)\/]*: +(\d+([.,]+[\de-]+)?)\,?\s+([\w\/]+)', re.IGNORECASE | re.UNICODE)
        m = p.search(file.read())
    # Check the desired summary value was found
    if m is None:
        print('No ' + vname + ' was found. Check the Tristar file and/or regular expression.')
        return [0, '']
    # Decimal comma is normalized to a dot before conversion to float.
    return [vname, m.group(3), float(m.group(1).replace(',', '.'))]
|
72005cb0609ecf2292a5e2b809bef0ec6febc3a8
| 89,850
|
from pathlib import Path
import shutil
def copy_fixture(src: Path, dest: Path) -> Path:
    """Wrap shutil.copy to backport use with Path objects."""
    source = src.absolute()
    target = dest.absolute()
    return shutil.copy(source, target)
|
059897b88bba6905e239c532f878de72a6854381
| 89,851
|
def serialize_json(msg: dict) -> bytes:
    """
    Create a compact, key-sorted rendering of a json object, since the
    micropython implementation of ujson.dumps does not support sorted keys.

    :param msg: the json object (dict) to serialize
    :return: the compact sorted rendering as bytes
    :raises Exception: for values of any unsupported type
    """
    def _encode(value) -> str:
        # Purpose: render one JSON value.  The exact type checks (and their
        # order) matter: bool is a subclass of int, but `type(v) is int`
        # is False for bools, so booleans fall through to their own branch.
        vtype = type(value)
        if vtype is str:
            return "\"{:s}\"".format(value)
        if vtype is int:
            return "{:d}".format(value)
        if isinstance(value, float):
            return "{:.4f}".format(value)  # modified for elevate
        if vtype is dict:
            return serialize_json(value).decode()
        if vtype is bool:
            return "true" if value else "false"
        if value is None:
            return "null"
        raise Exception("unsupported data type {} for serialization in json message".format(vtype))

    members = ",".join(
        "\"{}\":{}".format(key, _encode(msg[key])) for key in sorted(msg)
    )
    return ("{" + members + "}").encode()
|
3a8f198674b3d573abedce44ecc6f63190695aad
| 89,853
|
def Cross3(a, b):
    """Return the cross product of two 3-vectors, a x b."""
    ax, ay, az = a
    bx, by, bz = b
    cx = ay * bz - az * by
    cy = az * bx - ax * bz
    cz = ax * by - ay * bx
    return (cx, cy, cz)
|
f112dddb0312b5d95f3ded955d8c3e8905d16299
| 89,856
|
def xr_size(
        da):
    """
    Description:
        Return the size of an xarray.Dataset or xarray.DataArray object in
        gigabytes (GB, not gibibytes/GiB).
    Parameters:
        da - xarray.Dataset or xarray.DataArray
    """
    gigabytes = da.nbytes / 1000 ** 3
    return '{} gigabytes'.format(gigabytes)
|
94c20db8a37ce2ca01b93d1d598ba05f204c1d83
| 89,860
|
def function_f1b(x):
    """Function with one argument, returning its default string formatting."""
    return f'{x}'
|
d2b76e654d89660413830eec7cdbd764942b6ad3
| 89,861
|
from typing import Any
def set_common_options(
    spark_source: Any,
    url: str = 'localhost:5432',
    jdbc_table: str = 'default.default',
    user: str = 'root',
    password: str = 'root',
    driver: str = 'driver',
) -> Any:
    """
    Configure a Spark reader/writer with common JDBC connection options.

    :param spark_source: Spark source, here is Spark reader or writer
    :param url: JDBC resource url
    :param jdbc_table: JDBC resource table name
    :param user: JDBC resource user name
    :param password: JDBC resource password
    :param driver: JDBC resource driver
    """
    options = {
        'url': url,
        'dbtable': jdbc_table,
        'user': user,
        'password': password,
        'driver': driver,
    }
    configured = spark_source.format('jdbc')
    for name, value in options.items():
        configured = configured.option(name, value)
    return configured
|
499b5a6f24ecde21639e6d9f1e6085066cb931dd
| 89,873
|
def remap(indict, *args, **kwargs):
    """
    Re-map keys of indict using information from arguments.

    Non-keyword arguments are keys of the input dictionary that are passed
    unchanged to the output.  Keyword arguments must be in the form
        new="old"
    and act as a translation table for new key names.
    """
    passthrough = {key: indict[key] for key in args}
    renamed = {new_key: indict[old_key] for new_key, old_key in kwargs.items()}
    return {**passthrough, **renamed}
|
e44678373451587d74bb38a6a53b36155c42bc30
| 89,875
|
def compute_params(model):
    """Compute the total number of parameters.

    Args:
        model (nn.Module): PyTorch model.
    Returns:
        Total number of parameters - both trainable and non-trainable (int).
    """
    return sum(param.numel() for param in model.parameters())
|
d6f3e5fec5d9217c4f8be410403486c0efd58798
| 89,877
|
def is_pred(pred):
    """Check whether a tuple represents a predicate.

    Args:
        pred: column and associated value
    Returns:
        True iff the tuple represents a condition (i.e. its first element
        is a column name, not a quoted literal)
    """
    column = pred[0]
    return not column.startswith("'")
|
0bf9d3d0af9893fb665fda438ea4aa0c31cfdd76
| 89,878
|
import re
def extract_nos(input_: str) -> float:
    """Extract the numeric part of a string as a float.

    All digit runs in the string are joined with a single '.', so e.g.
    "abc 12.5 xyz" yields 12.5.

    Args:
        input_: string to scan for digits.
    Returns:
        float: the assembled number.
    """
    digit_runs = re.findall(r"\d+", input_)
    return float('.'.join(digit_runs))
|
a86b59a2a5fb4a4c887baf01baefcb8aac330999
| 89,879
|
import re
def _gridset_matches_platform(gridset, pattern, platform):
"""Returns True if the given gridset contains any files matching the given platform.
gridset: gridset with metadata; the metadata has a list of files (no nested
lists)
pattern: regex matching file names in gridset['metadata']['files'], with a
"platform" capture group
platform: the platform to match
"""
for file_ in gridset['metadata']['files']:
match = re.match(pattern, file_)
if match.group('platform') == platform:
return True
return False
|
b10021afb8b74e1d65c2640318d6eb07c7effbde
| 89,884
|
def get_fclk(parser, blockname, portname):
    """
    Find the frequency of a clock port.

    :param parser: HWH parser object (from Overlay.parser, or BusParser)
    :param blockname: the IP block of interest
    :type blockname: string
    :param portname: the port we want to trace
    :type portname: string
    :return: frequency in MHz
    :rtype: float
    """
    xmlpath = (
        "./MODULES/MODULE[@FULLNAME='/{0}']/PORTS/PORT[@NAME='{1}']"
        .format(blockname, portname)
    )
    port = parser.root.find(xmlpath)
    frequency_hz = float(port.get('CLKFREQUENCY'))
    return frequency_hz / 1e6
|
33ed80317aca5be16b06061b731a522597851d79
| 89,887
|
def params_count(params):
    """
    Count the total number of parameters.

    :param params: mapping of name -> array-like exposing a .shape tuple
    :return: sum over all values of the product of their shape dimensions
    """
    total = 0
    for value in params.values():
        size = 1
        for dim in value.shape:
            size *= dim
        total += size
    return total
|
fb1b305b144425092e6725a9b75ac520c4a3ccf9
| 89,888
|
def mk_subdict_extractor(fields):
    """Make a generator function yielding field-inclusion subdicts from a dict iterator."""
    def extract(dict_list):
        for record in dict_list:
            yield {field: record[field] for field in fields}
    return extract
|
8e7a184041cbeee330bc1abeeda326badb3e2046
| 89,891
|
import re
def strip_protocol(path: str) -> str:
    """
    Return a URL with the http/https protocol removed.

    Example:
        {% strip_protocol 'https://app.posthog.com' %}
        => "app.posthog.com"
    """
    protocol = re.compile(r"https?:\/\/")
    return protocol.sub("", path)
|
b7517bf2a43ccf5533af02ba10031d0d4f3d0ed3
| 89,893
|
def get_jobs(links, url):
    """Get all job categories and their corresponding links.

    :param links: parsed page (BeautifulSoup-like) to scan
    :param url: base url prepended to each anchor's href
    :return: (names, urls) parallel lists
    """
    category_lists = links.find_all('ul', {'id': 'jjj0'})
    anchors = category_lists[0].find_all('a')
    names = [anchor.text for anchor in anchors]
    urls = [url + anchor.get('href') for anchor in anchors]
    return names, urls
|
22b59791f2794b2ddfb197791f3336afabdfaa44
| 89,896
|
def reverse_enum(iterable):
    """Return a reversed iterable paired with its reversed index."""
    last_index = len(iterable) - 1
    return zip(range(last_index, -1, -1), reversed(iterable))
|
ceef0e97cfc166ca9b800e5f1c0e89301a1905f6
| 89,897
|
from typing import Optional
from pathlib import Path
def find_path(path: str) -> Optional[Path]:
    """Check whether the file denoted by `path` exists and return its Path.

    If the file does not exist and the path is absolute (starts with '/' or
    '\\'), retry after converting it to a relative path ('.' + path).
    Returns None when neither variant exists.
    """
    candidate = Path(path)
    if candidate.exists():
        return candidate
    if path.startswith(('/', '\\')):
        relative = Path('.' + path)
        if relative.exists():
            return relative
    return None
|
f4f957273a6f55662fd358189fc72c6ea11b5744
| 89,899
|
def hello_world(name: str):
    """
    Greets you by your name!

    :param name: Your lovely name
    :return: my greetings
    """
    greeting = f'Hello {name}! I hope you are well.'
    print(greeting)
    return greeting
|
3515721d6f23bc8ee3e00c1147dfb1362e69de1b
| 89,905
|
import re
def DateStart(s):
    """
    Check whether the text begins with a "dd/mm/yy(yy), h:mm" timestamp.

    Parameters:
        s (string): The string for which we perform a regex check
    Returns:
        bool: True if s starts with a valid date/time prefix, False otherwise
    """
    # The original listed the same pattern twice and joined them with '|',
    # then prepended '^' (redundant under re.match, which is already
    # anchored at the start).  One raw-string pattern, same behavior.
    pattern = (r'([0-2][0-9]|(3)[0-1])(\/)(((0)[0-9])|((1)[0-2]))(\/)'
               r'(\d{2}|\d{4}), ([0-9][0-9]|[0-9]):([0-9][0-9])')
    return re.match(pattern, s) is not None
|
2130b7c903c3bfad1cd0e54f22807317c53908e9
| 89,906
|
def get_name_and_entity(url):
    """
    Parse a reference string to extract its second and third '/'-segments.

    :param url: reference url to be parsed
    :return: (entity, object name) tuple
    """
    segments = url.split('/')
    entity, name = segments[1], segments[2]
    return entity, name
|
9fd98ba9af7457f798c0cf4dd252e4b1bfe8035c
| 89,907
|
def _index_spec_params(spec_params):
"""
Makes an index of the spec parameters. It dict-ifies the list of spec params
provided by the SpecManager, and also returns the set of param ids that are
used in groups.
This gets returned as a tuple (indexed params, group param ids)
"""
spec_params_dict = dict()
grouped_parents = dict()
for p in spec_params:
spec_params_dict[p["id"]] = p
# groupify the parameters - identify params that are part of groups, and don't include
# them in the list separately.
children = p.get("parameter_ids")
if children:
for child in children:
grouped_parents[child] = p["id"]
return (spec_params_dict, grouped_parents)
|
19aa93b2d34fb448476a2ebe0a2666494eebb70b
| 89,913
|
def last_revision(list_of_revisions):
    """Return the final item of a sequence, or None when there isn't one."""
    try:
        last = list_of_revisions[-1]
    except (IndexError, KeyError):
        return None
    return last
|
0969b6cfb5c71fa138d9444bcc5569137f96f668
| 89,916
|
import json
def load_dict_json(file_path: str) -> dict:
    """
    Load a JSON file as a dictionary.

    # Arguments
        file_path (string): Path to the JSON file.
    # Returns
        Dictionary with the data from the JSON file.
    """
    with open(file_path, 'rb') as handle:
        data = json.load(handle)
    return data
|
d692e55dda5818cc9aaede507165f56057c85ba5
| 89,918
|
import re
def validate_text(text: str, pattern: str, max_length: int) -> bool:
    """
    Validate text against a regex pattern and a maximum allowed length.

    :param text: Text to validate
    :param pattern: Regular expression pattern to validate text
    :param max_length: Maximum length allowed
    :return: boolean
    """
    if len(text) > max_length:
        return False
    return re.search(pattern, text) is not None
|
cd95a09b39b947e8a50467abd7d8cee39304f42f
| 89,919
|
def bitListToBinString(bitList):
    """Convert a list of 0's and 1's to a string of '0's and '1's."""
    return ''.join(('0', '1')[bit] for bit in bitList)
|
30eb7c47374852b580248353145d28fc8dd36ded
| 89,921
|
def jacard_sim(c1, c2):
    """
    Return the Jaccard similarity of two clusters, each a list of seq ids:
    |c1 & c2| / |c1 | c2|.
    """
    s1, s2 = set(c1), set(c2)
    return len(s1 & s2) / len(s1 | s2)
|
68e8b0b6449e63774d232262966e2ace31b2f70f
| 89,927
|
def uniq(a):
    """
    Remove duplicates from a list.  Elements are ordered by the original
    order of their first occurrences.

    Unlike list(set(a)) — which the old docstring wrongly claimed this was
    equivalent to — this preserves order and also works for unhashable
    elements (comparison is by ==).  Implemented iteratively: the original
    recursed once per distinct element and hit Python's recursion limit on
    lists with roughly a thousand distinct values.
    """
    result = []
    for x in a:
        # Linear membership test keeps support for unhashable elements.
        if x not in result:
            result.append(x)
    return result
|
33f4a6351e9bf40a548b6d1b83293043a75dc031
| 89,929
|
def extract_params(query):
    """
    :param query: (str) eg: q=testerhome&encoding=utf-8
    :return:
        [
            {
                "key": "q",
                "value": "testerhome"
            },
            {
                "key": "encoding",
                "value": "utf-8"
            }
        ]
    """
    if not query:
        return []
    params = []
    for pair in query.split('&'):
        pieces = pair.split('=')
        # A key without '=' gets value None; with several '=', the last
        # piece wins (matches the original's param[-1] behavior).
        params.append({
            'key': pieces[0],
            'value': pieces[-1] if len(pieces) > 1 else None,
        })
    return params
|
06379832a125db68c4fdb789d758a847342f5364
| 89,934
|
def pprint_header(formatter, returned_metrics):
    """
    Return a pretty printed string with the header of the metric streaming
    output, consisting of the name of the metrics including the parameters.
    """
    columns = [u'TIMESTAMP:']
    columns += [u'{0}:'.format(metric.str_ui(True)) for metric in returned_metrics]
    return formatter.format(*columns)
|
10acc4b79be3d92e2ac694c6887e48369cb42245
| 89,936
|
import requests
def call_mhs(mhs_command, hl7payload):
    """Call the MHS with the provided details.

    :param mhs_command: The command/interaction name to call the MHS with
        (appended to the local MHS base URL).
    :param hl7payload: The HL7 payload to send to the MHS as the POST body.
    :return: The response body text returned by the MHS.
    """
    # NOTE(review): no timeout is set, so this can block indefinitely, and
    # non-2xx responses are not checked (no raise_for_status) — confirm
    # whether callers rely on receiving error bodies as plain text.
    mhs_url = 'http://localhost/' + mhs_command
    response = requests.post(mhs_url, data=hl7payload)
    return response.text
|
2593d6a2e0708c940016ff58ef5e1ac6ce57d833
| 89,941
|
def read_file_keys(fname):
    """
    Read the given file's newline-separated file keys and return them as a set.
    """
    with open(fname, 'r') as handle:
        content = handle.read()
    return set(content.split('\n'))
|
690ae31116403eb0a588cb3cec5b58513e4af048
| 89,943
|
def flatten(list_of_lists):
    """Take a list of lists and return a list of the elements.

    Args:
        list_of_lists: List of lists (plain values are allowed too).
    Returns:
        flat_list: Flattened list.
        flat_list_idxs: Flattened list indices — one range per sub-list,
            one single-element list per plain value, parallel to the input.
    """
    flat = []
    index_groups = []
    cursor = 0
    for entry in list_of_lists:
        if isinstance(entry, list):
            flat.extend(entry)
            span = range(cursor, cursor + len(entry))
            cursor += len(entry)
        else:  # a plain value
            flat.append(entry)
            span = [cursor]
            cursor += 1
        index_groups.append(span)
    return flat, index_groups
|
8bb82d2d44a07267ef8015e35ae6ca5cd39f243c
| 89,955
|
def build_request_dict_basic(tag, text):
    """Build a dictionary matching the json structure of a Dialogflow request."""
    return {
        "fulfillmentInfo": {"tag": tag},
        "text": text,
    }
|
1a082541fcd783ffdb24494078225aab99510950
| 89,956
|
def uri_to_uuid(uri_uuid: str) -> str:
    """
    Standardize a device UUID (MAC address) from URI format (xx_xx_xx_xx_xx_xx)
    to conventional format (XX:XX:XX:XX:XX:XX).
    """
    return uri_uuid.replace('_', ':').upper()
|
4efc3791303420187bae4aecaa3852556c3a906e
| 89,964
|
def field_name(component):
    """Return the name for the FormField built for the given component."""
    prefix = 'comp-'
    return prefix + component.name
|
84ed4617b3843d80c5cf899eca02cc2692e1413c
| 89,966
|
import pickle
def read_experiment_results(file_name_path):
    """
    Read a wrapper_base.Experiment object back from a .pickle file.

    Arguments
    ---------
    file_name_path : string
        path of the .pickle file holding the wrapper_base.Experiment object
    Returns
    -------
    experiment : wrapper_base.Experiment object
        experiment that has been run or has been post-processed
    """
    with open(file_name_path, "rb") as handle:
        return pickle.load(handle)
|
38cb2c6456ea52589ed480d39018a5d5336b7afd
| 89,967
|
def merge_contexts(ctx, obj):
    """Return the given context with the attributes of another object merged
    into it.  This is useful to attach a separate application context to a
    Click context.
    """
    # Copy every non-dunder name reported by the object, dunders excluded.
    names = (name for name in obj.__dir__() if not name.startswith("__"))
    for name in names:
        setattr(ctx, name, getattr(obj, name))
    return ctx
|
0461d7614c225410df540c4517603065e1cf8ef3
| 89,970
|
def input_ask(prompt, allowed):
    """Prompt repeatedly until the user enters one of the allowed values."""
    answer = input(prompt)
    while answer not in allowed:
        answer = input(prompt)
    return answer
|
66974d33fa10fb42907060d049ddcb9108e23750
| 89,971
|
def add_decorators(p):
    """Adds ping and normalize decorators to pool.

    :param p: connection pool exposing `ping` and `normalize_connection`
        decorator-registration hooks (project type; not visible here).
    """
    @p.ping
    def ping(con):
        # Report any connection as alive; presumably the pool treats a
        # truthy return as a successful ping — TODO confirm.
        return True
    @p.normalize_connection
    def normalize(con):
        # Intentionally a no-op: no normalization is applied.
        pass
|
ba2b02564ff0b2b07ca301cf89b215b71102090b
| 89,972
|
def classes(field):
    """
    Return the CSS class string of a form field's widget, or None if unset.
    """
    attrs = field.widget.attrs
    return attrs.get("class", None)
|
8fd0407d3571338bfa6719b310f78bdbc1349e99
| 89,973
|
import re
def regex_replace(value, regex, replace):
    """Replace every substring matching the given regex (case-insensitively)."""
    compiled = re.compile(regex, re.IGNORECASE)
    return compiled.sub(replace, value)
|
b9b892041fe92399a5ff67c605451b1d406235d9
| 89,975
|
import math
def productivity(affinity_count):
    """Return a number representing productivity for the given affinity count.

    With nobody around we still automatically produce (a small baseline);
    with someone present, production grows logarithmically.
    """
    BASE_OUTPUT = 0.3
    SCALE, RATE = 0.419874, 11.2406
    if affinity_count == 0:
        return BASE_OUTPUT
    return SCALE * math.log(RATE * affinity_count)
|
07f5aeda584d328a3d5871879d8c358fee1c0171
| 89,979
|
def mark_as_changed_wrapper(parent_method):
    """Decorator that ensures _mark_as_changed method gets called."""
    def wrapper(self, *args, **kwargs):
        # super() is unavailable inside the decorator, so the captured
        # parent method is invoked directly.
        value = parent_method(self, *args, **kwargs)
        self._mark_as_changed()
        return value
    return wrapper
|
8b181532ea98885e7f7283db71a1247d975dd76a
| 89,980
|
import re
def custom_parser(string):
    """Custom parser separating a variable name from its units.

    Name = non-greedy match up to the opening parenthesis plus the
    remainder after the closing parenthesis; Units = greedy match inside
    the parentheses.  Without parentheses, returns (string, '').
    """
    m = re.match(r'(?P<name>.*?)\((?P<units>.*)\)(?P<remainder>.*)', string)
    if not m:
        return string, ''
    name, units, remainder = m.group('name', 'units', 'remainder')
    return name + remainder, units
|
e623f8b8836696a56ab75ba8fa15a8bb271ee8d7
| 89,982
|
def _merge_summary(merge_dest, merge_src):
"""Merges to 'summaries' fields in metadata format.
Two sumaries are required to have the exact same metrics, and this method
adds the 'total' and 'covered' field of each metric in the second parameter
to the corresponding field in the first parameter.
Each parameter is expected to be in the following format:
[{'name': 'line', 'total': 10, 'covered': 9},
{'name': 'region', 'total': 10, 'covered': 9},
{'name': 'function', 'total': 10, 'covered': 9}]
"""
def get_metrics(summaries):
return {s['name'] for s in summaries}
assert get_metrics(merge_dest) == get_metrics(merge_src), (
'%s and %s are expected to have the same metrics' %
(merge_dest, merge_src))
merge_src_dict = {i['name']: i for i in merge_src}
for merge_dest_item in merge_dest:
for field in ('total', 'covered'):
merge_dest_item[field] += merge_src_dict[merge_dest_item['name']][field]
|
4c731ee0755ea4851ddd69b60d6f36ac37959ef9
| 89,983
|
import time
def stamp_to_time(time_stamp):
"""将时间戳转化成普通时间的格式
:param time_stamp: 时间戳
:return: 时间戳对应的日期
"""
stamp = time.localtime(time_stamp)
local_time = time.strftime("%Y-%m-%d", stamp)
return local_time
|
f2e92b012dc7206750eed091e191060437e482c9
| 89,987
|
def SeparateFlagArgs(args):
    """Split a list of args into those for Flags and those for Fire.

    If an isolated '--' arg is not present in the arg list, then all of the
    args are for Fire.  If there is an isolated '--', then the args after
    the final '--' are flag args, and the rest of the args are fire args.

    Args:
        args: The list of arguments received by the Fire command.
    Returns:
        A tuple with the Fire args (a list), followed by the Flag args (a list).
    """
    if '--' not in args:
        return args, []
    # Index of the last '--' separator.
    last_separator = len(args) - 1 - args[::-1].index('--')
    return args[:last_separator], args[last_separator + 1:]
|
52e026953ea001be41a0c90d4c6544ef75d53c39
| 89,988
|
def count_bulls_cows(user_string, comp_string):
    """
    Compare two equal-length strings and count bulls and cows.

    :param user_string: string which has the same length as comp_string
    :param comp_string: string which has the same length as user_string
    :return: tuple (bulls, cows) — bulls are exact position matches, cows
        are characters present elsewhere in comp_string
    """
    bulls, cows = 0, 0
    for user_ch, comp_ch in zip(user_string, comp_string):
        if user_ch == comp_ch:
            bulls += 1
        elif user_ch in comp_string:
            cows += 1
    return bulls, cows
|
916073352c1a5a4b9fe4e2e95e5356f059d29af2
| 89,990
|
def map_fields(raw: str, field_map):
    """Map raw comma-separated field values to column names, applying the
    per-column type cast from field_map (column name -> cast callable)."""
    values = raw.split(',')
    return {
        name: cast(values[i])
        for i, (name, cast) in enumerate(field_map.items())
    }
|
cbeb5b5abf2e8cf4b2ed8945b5282a69f552ecde
| 89,992
|
def is_after(t1, t2):
    """Take two time objects; return True if t1 follows t2 chronologically,
    otherwise False."""
    def _seconds(t):
        # Purpose: total seconds since midnight for a time-like object.
        return (t.hour * 60 + t.minute) * 60 + t.second
    return _seconds(t1) > _seconds(t2)
|
76278acd2443264a8145981351b1c67a2b6b4876
| 90,002
|
def get_segnames(elf):
    """Find the segment-names data structure in an ELF.

    Returns None if the ".segment_names" section can't be found.
    """
    section = ".segment_names"
    return elf.find_section_named(section)
|
83d55a88eba3f74e0f15f62620f2aef36ef05ca5
| 90,007
|
def get_releases(listing: list, specie: str) -> list:
    """
    Return the list of gencode releases found on the site, highest first.

    Parameters
    ----------
    listing : list
        list of gencode FTP paths, some ending with "release_nn" or "release_Mnn"
    specie : str
        mouse or human; mouse release numbers are prefixed with "M"

    Returns
    ------
    list
        releases sorted from highest to lowest
    """
    # Ignore entries without a trailing two-digit number, and releases <= 21
    # (before a consistent numbering system).
    numeric = [int(entry[-2:]) for entry in listing if entry[-2:].isdigit()]
    releases = sorted((str(n) for n in numeric if n > 21), reverse=True)
    if specie == "mouse":
        releases = ["M" + r for r in releases]  # dear gencode: why?
    return releases
|
bd0844cce447cfdbbbe95dd68b750764b804325b
| 90,009
|
from typing import List
def yaml_str_list(o) -> List[str]:
    """
    Convert and flatten input to a list of str.

    The input may be None, a str, a list of str, or nested lists of them.
    """
    if o is None:
        return []
    if isinstance(o, str):
        return [o]
    flattened: List[str] = []
    for item in o:
        flattened += yaml_str_list(item)
    return flattened
|
c12ebea2b8971cbc6d4e2918fc661c5e1b085669
| 90,017
|
import copy
def rollelem(a, index, to_index=0):
    """Roll the specified item backwards until it lies in a given position.

    Parameters
    ----------
    a : list
        Input list.
    index : int
        The index of the item to roll backwards. The positions of the items
        do not change relative to one another.
    to_index : int, optional
        The item is rolled until it lies before this position. The default,
        0, results in a "complete" roll.

    Returns
    -------
    res : list
        Output list (the input is left unmodified).
    """
    rolled = copy.copy(a)
    item = rolled.pop(index)
    rolled.insert(to_index, item)
    return rolled
|
315f879ef3ea2f57acb65597f4c6446692934032
| 90,018
|
def copy_dict(value, impl=dict):
    """
    Perform a deep copy of a dict using the specified impl for each new dict
    constructed.  Preserves the order of items as read from the source dict.

    :param value: the value to copy; dicts, lists and tuples are copied
        recursively, anything else is returned as-is
    :param impl: the function to call to create new dicts
    :return: a deep copy of value, using impl for each dict constructed
        along the way
    """
    if isinstance(value, tuple):
        # Bug fix: the original returned a bare generator expression here
        # (not a tuple), and dropped `impl` when recursing.
        return tuple(copy_dict(e, impl=impl) for e in value)
    if isinstance(value, list):
        # Bug fix: `impl` is now propagated into list elements too.
        return [copy_dict(e, impl=impl) for e in value]
    if isinstance(value, dict):
        new_value = impl()
        for k, v in value.items():
            new_value[k] = copy_dict(v, impl=impl)
        return new_value
    return value
|
5493567ee46c39ab60912388a3f40b74017f266a
| 90,020
|
from typing import Dict
from typing import Counter
def get_split_stats(stats_all: tuple) -> Dict[str, Counter]:
    """
    Given a tuple of ml4c3.datasets.StatsWrapper objects, iterate through them
    and return a dictionary of their stats keyed by split name, keeping only
    non-empty splits.
    """
    split_stats = {}
    for split_name, wrapper in zip(("train", "valid", "test"), stats_all):
        if len(wrapper.stats) > 0:
            split_stats[split_name] = wrapper.stats
    return split_stats
|
8c2718c762bc6062d7d55190a164f339b7a93b76
| 90,021
|
from typing import Optional
from typing import Tuple
def wrap_slice(
    start: Optional[int],
    stop: Optional[int],
    step: Optional[int],
    length: Optional[int],
) -> Tuple[int, int, int]:
    """Wraps slice indices into a window.

    Arguments:
        start (int):
            :attr:`slice.start` index, or ``None``.
        stop (int):
            :attr:`slice.stop` index, or ``None``.
        step (int):
            :attr:`slice.step` value, or ``None``.
        length (int):
            Exclusive end of the virtual range to wrap, or ``None``.

    Returns:
        tuple of int: Wrapped slice parameters.

    Examples:
        >>> wrap_slice(3, 5, 1, 7)
        (3, 5, 1)
        >>> wrap_slice(-3, 5, 1, 7)
        (4, 5, 1)
        >>> wrap_slice(3, -5, 1, 7)
        (3, 2, 1)
        >>> wrap_slice(-3, -5, 1, 7)
        (4, 2, 1)
        >>> wrap_slice(None, 5, 1, 7)
        (0, 5, 1)
        >>> wrap_slice(3, None, 1, 7)
        (3, 7, 1)
        >>> wrap_slice(3, 5, None, 7)
        (3, 5, 1)
        >>> wrap_slice(3, 5, 1, None)
        (0, 0, 1)
    """
    step = 1 if step is None else step
    if not length:
        # No window at all: collapse to an empty slice.
        return 0, 0, step

    def _wrap(index: Optional[int], default: int) -> int:
        # Purpose: substitute the default for None, wrap out-of-window
        # indices with modulo arithmetic, keep in-window indices as-is.
        if index is None:
            return default
        return index if 0 <= index < length else index % length

    return _wrap(start, 0), _wrap(stop, length), step
|
78242a65dde43208e36b9aaa38a7912310747a3f
| 90,023
|
import torch
def rms(samples):
    """Root Mean Square (RMS) of a tensor's elements."""
    mean_square = torch.mean(samples ** 2)
    return torch.sqrt(mean_square)
|
989d5faae35b09f1860ab7e327dbe1a7f24b765d
| 90,025
|
def noop(*args, **kwargs):
    """
    Accept anything, do nothing, return None — a dev/null stand-in for
    handlers the user hasn't specified.
    """
    return None
|
746c0c8b81a48f35fea1aa4cc11aeee08a461427
| 90,028
|
import re
def stripmatch(page):
    """
    Get the number of pages visited and the match count for the current search.

    :page: html page, BeautifulSoup object
    :return: (page number, match count), or (None, None) when the
        "searchCountPages" element is missing from the page
    """
    counter = page.find(id="searchCountPages")
    if counter is None:
        return None, None
    text = counter.text.strip()
    numbers = re.findall(r'-?\d+\.?\d*', text)
    repage = int(numbers[0])
    if len(numbers) == 2:
        match = int(numbers[1])
    else:
        # Thousands separators split the count into several runs; re-join.
        match = int(''.join(numbers[1:]))
    return repage, match
|
6af348929e046864d9643a2cb492f84e890519b8
| 90,035
|
from pathlib import Path
def existing_git_repo(path: Path) -> bool:
    """Check whether a git repository exists at the given path.

    :param Path path: Repo path
    :return: True iff path is a directory containing a .git directory
    """
    git_dir = path / '.git'
    return path.is_dir() and git_dir.is_dir()
|
60d00eab72c23fbe7fdd4a7ad0868e081c712247
| 90,036
|
def determine_sentiment(delta):
    """Return 1 for positive sentiment (delta > 0), 0 otherwise."""
    return 1 if delta > 0 else 0
|
e9a8790a6566d21c6d01f564b377bd4cbec26865
| 90,045
|
def match_nested_lists(l1, l2):
    """ Match nested lists term for term
    :param l1: first list
    :param l2: second list
    :return: True or False
    This differs from "match_lists_as_sets" in the sense that order is important. The
    lists in question can only contain other lists or objects for which == is a valid
    comparison.
    """
    # Both arguments must be lists of equal length before comparing elements.
    if not (isinstance(l1, list) and isinstance(l2, list)):
        return False
    if len(l1) != len(l2):
        return False
    for left, right in zip(l1, l2):
        left_is_list = isinstance(left, list)
        right_is_list = isinstance(right, list)
        if left_is_list != right_is_list:
            # One side nests where the other doesn't: mismatch.
            return False
        if left_is_list:
            if not match_nested_lists(left, right):
                return False
        elif left != right:
            return False
    return True
|
344d40f31fb7882f1e8291e2bb080cb9b0b69df5
| 90,046
|
def convert_parentheses(text: str):
    """Replaces -LRB- and -RRB- tokens present in SST with ( and )"""
    for token, symbol in (("-LRB-", "("), ("-RRB-", ")")):
        text = text.replace(token, symbol)
    return text
|
a882f0298498b158fa276eac4307f4e46ecdb078
| 90,049
|
import re
def parse_config_str(config_str: str):
    """
    Parse a dash-separated config string into a dict.

    Each dash-separated token is either a bare flag (mapped to ``True``)
    or a key immediately followed by a numeric value (mapped to ``float``).

    Args:
        config_str (str): dash-separated options, e.g. 'rand-re0.25'.

    Returns:
        dict: flag names mapped to True, keyed tokens mapped to float.

    ### Examples:
    >>> input_1: 'rand-re0.25'
    >>> output_1: {'rand': True, 're': 0.25}
    >>> input_2: 'baseline'
    >>> output_2: {'baseline': True}
    """
    # Splits the token at the first digit, keeping the numeric tail
    # (single capturing group). Compiled once instead of per token.
    pattern = re.compile(r'(\d.*)')
    configs = dict()
    for kv_pair in config_str.split('-'):
        result = pattern.split(kv_pair)
        if len(result) == 1:
            # No digit present: bare boolean flag.
            configs[result[0]] = True
        else:
            # re.split with one capturing group yields [key, value, ''].
            # Reuse `result` instead of re-splitting the token a second time.
            assert len(result) == 3 and result[2] == ''
            configs[result[0]] = float(result[1])
    return configs
|
bcbf3d0a1f4cb3123f8ec9b08a11a1383650627a
| 90,061
|
def intersect_line_ray(lineSeg, raySeg):
    """ Constructs a line from the start and end points of a given line
    segment, and finds the intersection between that line and a ray
    constructed from the start and end points of a given ray segment.
    If there is no intersection (i.e. the ray goes in the opposite direction
    or the ray is parallel to the line), returns None.

    :param lineSeg: pair of (x, y) points defining the infinite line
    :param raySeg: pair of (x, y) points; the first is the ray origin and
        the second fixes its direction
    :return: (x, y) intersection point, or None

    NOTE(review): a zero-length ray with a vertical line (or vice versa)
    can make the denominator below zero without being flagged as parallel,
    raising ZeroDivisionError — confirm callers never pass degenerate
    segments.
    """
    lineStart, lineEnd = lineSeg
    rayStart, rayEnd = raySeg
    # Direction vectors of the line and the ray.
    lineVector = (lineEnd[0] - lineStart[0], lineEnd[1] - lineStart[1])
    rayVector = (rayEnd[0] - rayStart[0], rayEnd[1] - rayStart[1])
    p1x, p1y = lineStart
    p2x, p2y = rayStart
    d1x, d1y = lineVector
    d2x, d2y = rayVector
    # Check if the ray is parallel to the line.
    # Parallel means both directions are vertical, or their slopes match.
    parallel = (
        (d1x == 0 and d2x == 0)
        or ((d1x != 0 and d2x != 0) and
            (float(d1y) / d1x == float(d2y) / d2x))
    )
    intersection = None
    # Only non-parallel lines can ever intersect.
    if not parallel:
        # Parametrize the line and ray to find the intersection.
        # `parameter` is the scalar t such that rayStart + t * rayVector
        # lies on the infinite line.
        parameter = (
            float(p2y * d1x - p1y * d1x - p2x * d1y + p1x * d1y)
            / (d2x * d1y - d1x * d2y)
        )
        # Only consider intersections that occur in front of the ray.
        if parameter >= 0:
            intersection = (
                p2x + parameter * d2x,
                p2y + parameter * d2y,
            )
    return intersection
|
365836039161666eb30d0051916dceb7260f3c19
| 90,062
|
def format_template_names(name_list):
    """Create a comma-separated list of template names.
    :param name_list: Input list of names
    :return: Comma-separated string
    """
    # str.join replaces the manual index-tracking concatenation loop
    # (which was quadratic) and yields the identical result.
    return ','.join(name_list)
|
97e611fed2df7982243b38d4cff2e4786da3729a
| 90,067
|
def node_coords(G, n):
    """
    Return the x, y, z coordinates of a node.
    This is a helper function. Simplifies the code.
    """
    # Look up the node's attribute mapping once, then pull all three axes.
    attrs = G.nodes[n]
    return attrs["x_coord"], attrs["y_coord"], attrs["z_coord"]
|
2fbab855bf387c7105beafcb5fcfc8f30b0a9b01
| 90,071
|
def modulo_indexed_register(modulo, value):
    """ generate a predicate such a register <index>
        must match <index> % modulo == value """
    def predicate(index):
        return index % modulo == value
    return predicate
|
6ce215109f795da8f20f12738f9795ec33c0e230
| 90,072
|
def nint(x):
    """Round a value to the nearest integer (half-up).

    :param float x: original value
    :return: rounded integer
    :rtype: int
    """
    import math
    # The previous int(x + 0.5) truncated toward zero, which mis-rounds
    # negatives (e.g. int(-1.2 + 0.5) == 0, not -1). floor(x + 0.5) gives
    # consistent half-up rounding for both signs; positives are unchanged.
    return math.floor(x + 0.5)
|
81bc84f89431e24c2b17bbdbbea977539267ad1b
| 90,074
|
import random
def generate_number_to_guess(lower_number, higher_number):
    """Generates a random number between the given lowest and highest number
    Parameters
    ----------
    lower_number : int
        Lowest number for the generator
    higher_number : int
        highest number for the generator
    Returns
    -------
    int
        returns the generated random number that is in the given range.
    """
    # randrange's stop bound is exclusive, so +1 keeps both ends inclusive,
    # matching randint(lower_number, higher_number).
    return random.randrange(lower_number, higher_number + 1)
|
71c07a3648dd6b8b6f2fb3dab53cd4982a82ac90
| 90,075
|
def split_config_args(args):
    """Split -[-]name=value configuration command-line arguments."""
    result = []
    for arg in args:
        if arg.startswith('-'):
            # Split only on the first '=', so values may contain '='.
            result.extend(arg.split('=', 1))
        else:
            result.append(arg)
    return result
|
b08e16ac24edd694b793d1b2ea9c6db88b939083
| 90,078
|
import re
def make_single_line(text):
    """Collapse all runs of whitespace into a single space and trim the ends."""
    # \s already matches newlines and tabs, so the original character class
    # [\n\s\t]+ was redundant; \s+ is equivalent and clearer.
    return re.sub(r'\s+', ' ', text).strip()
|
89e8a8cab402a95056fe31879b207c4a462829cd
| 90,085
|
def _m(*names: str) -> str:
"""Get module names"""
return '.'.join(s for s in names if s)
|
228363e5103914cd5770e5d4d5638c0cbe54ff89
| 90,091
|
def get_n50(sequence_lengths):
    """
    Get n50 of sequence lengths
    """
    # Walk lengths from longest to shortest; the N50 is the first length
    # at which the cumulative sum covers half of all bases.
    ordered = sorted(sequence_lengths, reverse=True)
    half_total = sum(ordered) * 0.5
    running = 0
    for length in ordered:
        running += length
        if running >= half_total:
            return length
    # Empty input: no sequences at all.
    return 0
|
da8f90650f142f31788a61156400fbd3c5977e43
| 90,092
|
import socket
def get_available_port(default_port):
    """Find an available port to bind to."""
    candidate = default_port
    with socket.socket() as sock:
        while True:
            try:
                sock.bind(("127.0.0.1", candidate))
            except OSError:
                # Port taken (or otherwise unbindable): try the next one.
                candidate += 1
            else:
                # Socket is closed on return; the port is merely likely free.
                return candidate
|
1813b75270653524e64c8a6331dfa85f05d1f71c
| 90,093
|
from typing import Iterable
from pathlib import Path
def check_file_list_exists(files: Iterable[str]) -> bool:
    """Verify that every SQL file in *files* exists.

    Does nothing further when all files are present; raises on the first
    missing one.

    Raises:
        IOError: a file does not exist

    Returns:
        True: all files exist
    """
    for fname in files:
        if not Path(fname).exists():
            raise IOError(f'SQLファイル[{fname}]が存在しません。')
    return True
|
206a50a3347e675dfc6cb88c83baed9539289803
| 90,095
|
def create_request_events_bp(app):
    """Create request events blueprint."""
    requests_ext = app.extensions["invenio-requests"]
    resource = requests_ext.request_events_resource
    return resource.as_blueprint()
|
dc41fb932cb04a8e0b89dd2d884661cf66373d05
| 90,102
|
def actual360(dt1, dt2):
    """Returns the fraction of a year between `dt1` and `dt2` on an actual / 360 day count basis."""
    elapsed = dt2 - dt1
    return elapsed.days / 360
|
2771e073c19731ad99fd51ad0ba140cc118b9ad7
| 90,123
|
def getEpisodeId(path, conn):
    """
    Return the episode id for the given file path. (Database query)
    :param path: episode file path used as the lookup key
    :param conn: open database connection
    :return: id_episode (integer)
    """
    cursor = conn.cursor()
    cursor.execute("SELECT id_episode FROM episode WHERE path=?", (path,))
    row = cursor.fetchone()
    return row[0]
|
01a1a70d99d0348858babb1bace1e27a8604bbc6
| 90,124
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.