content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def __utf8_bisearch(ucs, table):
    """Binary search for code point ``ucs`` in a sorted interval table.

    ``table`` is a sequence of (first, last) inclusive ranges, sorted
    ascending and non-overlapping. Returns True iff ``ucs`` falls inside
    one of the ranges.
    """
    lo = 0
    hi = len(table) - 1
    # quick reject: outside the overall span covered by the table
    if ucs < table[lo][0] or ucs > table[hi][1]:
        return False
    while hi >= lo:
        # BUG FIX: the original used "/" (true division), which yields a
        # float in Python 3 and raises TypeError when used as an index.
        mid = (lo + hi) // 2
        if ucs > table[mid][1]:
            lo = mid + 1
        elif ucs < table[mid][0]:
            hi = mid - 1
        else:
            return True
    return False
def get_cells(worksheet, get_range: str):
    """Return the cells of ``worksheet`` covered by ``get_range``.

    params
    ------
    worksheet: openpyxl.WorkSheet
        Loaded worksheet.
    get_range: str
        Cell range to fetch, e.g. "A1:B3".

    return
    ------
    cells: Tuple[Cell]
        Tuple of the selected cells.
    """
    return worksheet[get_range]
import sys
def read_input():
    """Collect stdin lines into a buffer until an empty line is read,
    which signals that postfix finished delivering the message.
    """
    buff = []
    # iter() with a sentinel stops as soon as a stripped line is empty
    for line in iter(lambda: sys.stdin.readline().rstrip('\n'), ''):
        buff.append(line)
    return buff
import torch
def ent_loss(probs):
    """Entropy loss: mean elementwise entropy of ``probs``.

    A small epsilon (1e-8) keeps the log finite at zero probability.
    """
    log_probs = torch.log(probs + 1e-8)
    return (-probs * log_probs).mean()
def IPRange(first, last):
    """
    Generate a list of IP addresses
    Args:
        first: the first IP in the range
        last: the last IP in the range
    Returns:
        A list of IPs from first to last, inclusive (list of str)
    """
    result = []
    current = first
    # rely on the address type supporting <= and += 1 (ints work too)
    while current <= last:
        result.append(str(current))
        current += 1
    return result
import os
import glob
def list_all_files(path):
    """Clean list of all files in sub folders.

    Returns paths relative to ``path``. The current working directory is
    temporarily changed so glob yields relative paths.
    """
    previous_dir = os.getcwd()
    os.chdir(path)
    try:
        liste = [
            file
            for file in glob.glob(os.path.join("**"), recursive=True)
            if os.path.isfile(file)
        ]
    finally:
        # BUG FIX: always restore the caller's cwd, even if glob/isfile
        # raises; the original left the process in ``path`` on error.
        os.chdir(previous_dir)
    return liste
import codecs
def load_translation_dict(dict_path):
    """ Load Translation Dictionary (txt or tsv file).

    Args:
        dict_path: Path to Translation Dictionary. The extension selects
            the parser: ".tsv" (tab-separated) or ".txt" (space-separated).
    Returns:
        list: List of source word as strings.
        list: List of target word as strings.
    """
    translation_source = []
    translation_target = []
    file_type = dict_path.split(".")[-1]
    if file_type == "tsv":
        # BUG FIX: the original never closed the codecs handle; a context
        # manager guarantees it. Iterating the file avoids readlines().
        with codecs.open(dict_path, "r", encoding='utf8', errors='replace') as file_in:
            for line in file_in:
                parts = line.strip().split("\t")
                translation_source.append(parts[0].lower())
                translation_target.append(parts[1].lower())
    elif file_type == "txt":
        with open(dict_path) as file_in:
            for line in file_in:
                line = line.rstrip("\n")
                # collapse runs of whitespace so split(" ") yields 2 tokens
                line = ' '.join(line.split())
                [src, trg] = line.split(" ")
                translation_source.append(src.lower())
                translation_target.append(trg.lower())
    else:
        print("No supported dictionary file type")
    return translation_source, translation_target
from datetime import datetime
def python_type_to_sql_type(_python_type):
    """
    Convert a python data type to an SQL type.

    :param _python_type: A Python internal type
    :raises Exception: when the type has no SQL mapping
    """
    # dict dispatch replaces the if/elif chain; type objects hash/compare
    # by identity, so bool and int map to distinct entries as before
    type_map = {
        str: 'string',
        bytes: 'blob',
        float: 'float',
        int: 'integer',
        datetime: 'datetime',
        bool: 'boolean',
    }
    try:
        return type_map[_python_type]
    except KeyError:
        # BUG FIX: the original message lacked a space, producing e.g.
        # '"<class \'list\'>"not supported'
        raise Exception('python_type_to_sql_type: _type_code "' + str(_python_type) + '" not supported')
def mod1(num1, num2):
    """Return num1 modulo num2 (template helper for odd/even checks).

    num1 is the template variable supplied by the caller.
    """
    return num1 % num2
import re
def string_to_list(s):
    """Return a list of strings from s where items are separated by any of , ; |"""
    try:
        parts = re.split(r'\s*[,;\|]\s*', s)
    except TypeError:
        # a list is passed through unchanged; anything else re-raises
        if type(s) == list:
            return s
        raise
    return [piece for piece in parts if piece]
def standardise_name(name):
    """
    Standardise field names: Survey (Title) -> survey_title
    """
    cleaned = name.lower().replace(" ", "_").replace("(", "").replace(")", "")
    # drop at most one leading and one trailing "_" left by the replacements
    begin = 1 if cleaned[0] == "_" else 0
    last_underscore = cleaned.rfind("_")
    end = last_underscore if last_underscore == len(cleaned) - 1 else len(cleaned)
    return cleaned[begin:end]
from typing import Iterable
def check_all_dicts(iterable_dict: Iterable[dict]):
    """Check if Iterable contains all dictionaries.

    Args:
        iterable_dict (Iterable[dict]): Iterable of dictionaries

    Returns:
        bool: True when every element is a dict.

    Raises:
        TypeError: when any element is not a dict. (The original raised a
        bare BaseException; TypeError is more precise and is still caught
        by any ``except BaseException`` handler.)
    """
    if not all(isinstance(element, dict) for element in iterable_dict):
        raise TypeError("Iterable has mixed types, expected Iterable[dictionaries]")
    return True
def calculate_days_needed(bar_count, freq):
    """ Returns number trading days needed.

    Overshoots so that we have more than enough to sample from the current
    frequency slot plus previous ones. Returns None for non-daily
    frequencies (``freq.unit_str != 'd'``).
    """
    if freq.unit_str != 'd':
        return None
    return bar_count * freq.num
def _get_timezone_name(timezone):
    """
    Return the offset for fixed offset timezones, or the name of timezone if
    not set.
    """
    name = timezone.tzname(None)
    if name:
        return name
    return str(timezone)
import itertools
def _gen_mols(atm_numbs, mols):
    """Generates the nested molecules list

    :param atm_numbs: sequence of atomic numbers, one entry per atom
    :param mols: molecule specification, or None; entries are either ints
        (that many consecutive atoms form the next molecule) or explicit
        iterables of atom indices
    :return: list of atom-index lists, one per molecule
    :raises ValueError: when the spec does not assign each atom exactly once
    """
    if mols is None:
        # NOTE(review): this branch returns a flat list of atom indices,
        # not nested per-molecule lists like the branch below — confirm
        # callers expect that.
        return [i for i, _ in enumerate(atm_numbs)]
    else:
        ret_val = []
        # Get the molecules list.
        curr_atm = 0
        for i in mols:
            if isinstance(i, int):
                # an int consumes the next i consecutive atoms
                ret_val.append(
                    list(range(curr_atm, curr_atm + i))
                )
                curr_atm += i
            else:
                # an explicit iterable of atom indices
                ret_val.append(
                    list(i)
                )
                # NOTE(review): resuming at max(i) rather than max(i) + 1
                # looks suspicious (it would re-include the last index of
                # this molecule); the validation below catches overlaps, so
                # confirm the intended cursor position.
                curr_atm = max(i)
                continue
        # Check the correctness: the flattened, sorted indices must equal
        # 0..len(atm_numbs)-1, i.e. every atom assigned exactly once.
        for i, j in itertools.zip_longest(
            range(0, len(atm_numbs)),
            sorted(itertools.chain.from_iterable(ret_val))
        ):
            if i != j:
                raise ValueError(
                    'Incorrect molecule specification, atom {} not correctly '
                    'given!'.format(i)
                )
            continue
        return ret_val
def get_pre_stage_net():
    """Return the pre-stage network configuration dict.

    Each layer entry is name -> [in_ch, out_ch, kernel, stride, padding].
    """
    layers = [
        {'conv4_3_CPM': [512, 256, 3, 1, 1]},
        {'conv4_4_CPM': [256, 128, 3, 1, 1]},
    ]
    return {'block_pre_stage': layers}
import subprocess
import shlex
import sys
def exec(cmd):
    """
    Run ``cmd`` (a shell-style command string) and return its standard
    output decoded as UTF-8. Prints the error and exits with status 1 if
    the command fails.
    """
    args = shlex.split(cmd)
    try:
        raw = subprocess.check_output(args)
    except subprocess.CalledProcessError as err:
        print(err)
        sys.exit(1)
    return raw.decode("utf-8")
def trailing_silence_mask(f0):
    """
    Boolean mask marking the trailing run of zero (silent) frames in ``f0``.

    >>> f0 = torch.tensor([1.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0])
    >>> trailing_silence_mask(f0)
    tensor([False, False, False, False,  True,  True,  True])
    """
    assert f0.ndim == 1
    reversed_f0 = f0.flip(0)
    # frames before the first non-zero (seen from the end) have cumsum == 0
    leading_zeros = (reversed_f0 != 0.0).cumsum(0) == 0
    return leading_zeros.flip(0)
def df_if_two_one(value):
    """ Final Data Cleaning Function

    Collapses duplicated "X X" raw-CSV values (station, latitude,
    longitude, elevation) to a single value:
    - exact duplicates ('000248532 000248532') return the first token;
    - decimal pairs ('29.583 29.58333333') return the first token when both
      truncate to the same whole number;
    - anything else is returned unchanged.
    A ValueError caused by an empty string ('') is swallowed and the input
    returned; any other ValueError is re-raised (wrapped).

    Args:
        value (str): value to check and clean if needed
    Returns: str
    """
    try:
        parts = value.split(' ')
        if len(parts) > 1:
            first, second = parts[0], parts[1]
            if '.' in first:
                # truncate both decimals to ints; equal -> keep the first
                if int(float(first)) == int(float(second)):
                    return first
            elif first == second:
                return first
        return value
    except ValueError as err:
        if "could not convert string to float: ''" not in str(err):
            raise ValueError(err)
        return value
import hashlib
def hex_hash(path):
    """
    Return the first 2 hex digits of the md5 of the given path.
    Suitable for creating sub dirs to break up a large directory
    """
    digest = hashlib.md5(path).hexdigest()
    return digest[:2]
def is_signed_out_of_range(num: int, size: int) -> bool:
    """
    Check if the signed number `num` is out of range for signed numbers of
    `size` byte length.

    :param num: value to test
    :param size: width in bytes; must be 1, 2 or 4
    :return: True when num does NOT fit in a signed size-byte integer
    :raises ValueError: for any other size
    """
    if size == 1:
        in_range = -128 <= num <= 127
    elif size == 2:
        in_range = -32_768 <= num <= 32_767
    elif size == 4:
        in_range = -2_147_483_648 <= num <= 2_147_483_647
    else:
        raise ValueError(f'Invalid number size: {size} not in (1, 2, 4)')
    # BUG FIX: the original returned the *in-range* test, i.e. the exact
    # opposite of what its name and docstring promise.
    return not in_range
def get_manhattan_distance(node):
    """Function to calculate the manhattan distance for a
    particular configuration (8-puzzle, 3x3 board).

    Parameters
    ----------
    node : [list]
        [9-element board; node[i] is the tile value at slot i, goal slot of
        a tile equals its value]
    Return
    ------
    [int]
        [sum of row + column distances of every tile from its goal slot]
    """
    h_score = 0
    node = list(node)
    for i in range(9):
        tile = node[i]
        # BUG FIX: the original used true division ("/", yielding floats in
        # Python 3) and paired a row term with a column term, producing an
        # incorrect, non-integer heuristic. Row distance uses // 3, column
        # distance uses % 3, for both the tile value and its slot.
        # NOTE(review): the blank (tile 0) is included here, as in the
        # original loop — conventionally it is excluded; confirm intent.
        h_score += abs(tile // 3 - i // 3) + abs(tile % 3 - i % 3)
    return h_score
import os
def insert_suffix(path, suffix):
    """
    Insert ``suffix`` into ``path`` just before the file extension.

    Returns:
        str: The path with suffix inserted, or None if no path was provided.
    """
    if path is None:
        return None
    stem, extension = os.path.splitext(path)
    return f"{stem}{suffix}{extension}"
import argparse
def create_arg_parser():
    """ Create an argument parser

    Returns
    -------
    argparse.ArgumentParser
    """
    description = "Split quadrilateral elements that have higher skewness " \
                  "value than the given skewness into triangular elements."
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('meshinput', type=str,
                        help="Mesh input file to process in gr3 or SMS 2dm format.")
    parser.add_argument('meshoutput', type=str,
                        help="Output mesh file name in gr3 format.")
    parser.add_argument('--skewness', dest='skewness', type=float, default=None,
                        help="Maximum skewness (not normalized.) to split. "
                             "If the skewness of an element is bigger than this, "
                             "the element will be split.")
    parser.add_argument('--minangle', dest='minangle', type=float, default=None,
                        help="Minimum angle (degrees) to keep in quads. If any of "
                             "internal angles of a quad is smaller than minangle, "
                             "the quad will be split.")
    # BUG FIX: the --maxangle help was a copy-paste of --minangle's
    # ("Minimum angle"); also fixed "mingangle"/"maxgangle" typos above/here.
    parser.add_argument('--maxangle', dest='maxangle', type=float, default=None,
                        help="Maximum angle (degrees) to keep in quads. If any of "
                             "internal angles of a quad is larger than maxangle, "
                             "the quad will be split.")
    parser.add_argument('--propfile', dest='propfile', type=str, default=None,
                        help="Write a prop file that shows elements that are split")
    return parser
def within_date(date_min, date_max, current_date):
    """
    Return True when current_date lies in the half-open interval
    [date_min, date_max), i.e. >= date_min and < date_max.
    """
    return date_min <= current_date < date_max
def fontforge_skip_checks():
    """ Return a bitmask of the fontforge checks to skip, or None for none.

    E.g. to skip:
    0x2: Contours are closed?
    0x40: Glyph names referred to from glyphs present in the font
    0x200: Font doesn't have invalid glyph names
    do:
    return 0x2 + 0x40 + 0x200
    override with @condition(force=True) to customize this
    """
    return None
def build_cold_start_test_splits(samples, test, proportion=0.1):
    """Split ``test`` into the interactions of the least/most active users.

    :param samples: dict of u_id: ints (interactions per user)
    :param test: list of [(uid, rel, iid)] test set
    :param proportion: fraction of users counted as most/least active
    :return: (low_test, top_test) triplet lists for the least and most
        active users respectively
    """
    # rank users by how many interactions they have (ascending)
    ranked = sorted(samples, key=lambda uid: len(samples[uid]))
    n_to_keep = round(len(ranked) * proportion)
    low_users = set(ranked[:n_to_keep])
    top_users = set(ranked[-n_to_keep:])
    low_test = [triplet for triplet in test if triplet[0] in low_users]
    top_test = [triplet for triplet in test if triplet[0] in top_users]
    return low_test, top_test
def find_episode(episode_id, seasons):
    """
    Return metadata for a specific episode from within a nested
    metadata dict.
    Returns an empty dict if the episode could not be found.
    """
    matches = (
        episode
        for season in seasons
        for episode in season['episodes']
        if str(episode['id']) == episode_id
    )
    return next(matches, {})
def child_or_children(value):
    """ Return num followed by 'child' or 'children' as appropriate.

    Returns '' when value cannot be coerced to int.
    """
    try:
        value = int(value)
    except ValueError:
        return ''
    if value == 1:
        return '1 child'
    # BUG FIX: the original returned the literal '%d children' without
    # interpolating the count.
    return '%d children' % value
import ast
import copy
import os
def transform_json_metric_event(metrics):
    """
    Convert streaming metrics event format similar to output AWS TA metric event format
    (The conversion helps the AWS app to interpret metrics and help populating existing dashboards)

    :param metrics: iterable of stringified metric dicts; each is parsed
        with ast.literal_eval (Python-literal syntax, not strict JSON) and
        must contain "dimensions", "value", "region", "namespace" and
        "timestamp" keys
    :return: list of Splunk HEC-style event dicts
    """
    events = []
    for metric_string in metrics:
        metric = ast.literal_eval(metric_string)
        metric_event = copy.deepcopy(metric)
        # reformat metric_dimensions: {"a": 1, "b": 2} -> "a=[1],b=[2]"
        extracted_dims = {
            "dims": ",".join(["{}=[{}]".format(key, value) for key, value in metric_event["dimensions"].items()])
        }
        metric_event["metric_dimensions"] = extracted_dims["dims"]
        metric_event.pop("dimensions")
        # reformat metric values
        for k, v in metric_event["value"].items():
            if k == "count":
                metric_event["SampleCount"] = v
            if k == "sum":
                metric_event["Sum"] = v
            if k == "max":
                metric_event["Maximum"] = v
            if k == "min":
                metric_event["Minimum"] = v
            # NOTE(review): this line runs on *every* iteration of the loop
            # and raises KeyError unless "count" and "sum" precede the other
            # keys in the dict's insertion order — confirm the producer
            # always emits them first.
            metric_event["Average"] = metric_event["Sum"] / metric_event["SampleCount"]
        metric_event.pop("value")
        # sourcetype/index are resolved from the environment per event
        sourcetype = os.environ.get("SOURCETYPE", "aws:cloudwatch")
        index = os.environ.get("INDEX")
        event = {
            "event": metric_event,
            "source": "{}:{}".format(metric_event["region"], metric_event["namespace"]),
            "sourcetype": sourcetype,
            "time": metric_event["timestamp"],
        }
        if index:
            event["index"] = index
        events.append(event)
    return events
def filter_keyphrases_brat(raw_ann):
    """Receive raw content in brat format and return keyphrases.

    Each text-bound annotation line looks like
    "Tid<TAB>LABEL start end<TAB>text"; discontinuous spans separate their
    fragment offsets with ";". Returns {Tid: {keyphrase-label,
    keyphrase-span, keyphrase-text, tokens-indices}}.
    """
    # keep only the text-bound ("T...") lines, split into tab columns
    filter_keyphrases = map(lambda t: t.split("\t"),
                            filter(lambda t: t[:1] == "T",
                                   raw_ann.split("\n")))
    keyphrases = {}
    for keyphrase in filter_keyphrases:
        keyphrase_key = keyphrase[0]
        # Merge annotations with ";"
        if ";" in keyphrase[1]:
            label_span = keyphrase[1].replace(';', ' ').split()
            # NOTE(review): min/max compare *strings* here, so multi-digit
            # offsets are ordered lexicographically (e.g. "9" > "10") —
            # confirm whether numeric ordering was intended.
            span_str = [min(label_span[1:]), max(label_span[1:])]
        else:
            label_span = keyphrase[1].split()
            span_str = label_span[1:]
        label = label_span[0]
        span = (int(span_str[0]), int(span_str[1]))
        text = keyphrase[2]
        keyphrases[keyphrase_key] = {"keyphrase-label": label,
                                     "keyphrase-span": span,
                                     "keyphrase-text": text,
                                     "tokens-indices": []}
    return keyphrases
def _decoding_base_info(encoded_info):
    """
    Decode base info.

    Args:
        encoded_info(list or dict): encoded base info; a dict is returned
            unchanged, a list of {'symbol': ..., 'base': ...} records is
            flattened into a {symbol: base} mapping.
    """
    if isinstance(encoded_info, dict):
        return encoded_info
    return {record['symbol']: record['base'] for record in encoded_info}
def compare_dicts(i):
    """
    Input: {
    dict1 - dictionary 1
    dict2 - dictionary 2
    (ignore_case) - ignore case of letters
    Note that if dict1 and dict2 has lists, the results will be as follows:
    * dict1={"key":['a','b','c']}
    dict2={"key":['a','b']}
    EQUAL
    * dict1={"key":['a','b']}
    dict2={"key":['a','b','c']}
    NOT EQUAL
    }
    Output: {
    return - return code = 0, if successful
    > 0, if error
    (error) - error text if return > 0
    equal - if 'yes' dictionaries are equal
    }
    """
    # NOTE: the comparison is one-directional — every key of dict2 must be
    # present in dict1 with a matching value; extra keys in dict1 are ignored.
    d1=i.get('dict1',{})
    d2=i.get('dict2',{})
    equal='yes'
    bic=False
    # boolean form of the 'ignore_case' flag ('yes' enables it)
    ic=i.get('ignore_case','')
    if ic=='yes': bic=True
    for q2 in d2:
        v2=d2[q2]
        if type(v2)==dict:
            if q2 not in d1:
                equal='no'
                break
            v1=d1[q2]
            # recurse into nested dicts, propagating ignore_case
            rx=compare_dicts({'dict1':v1,'dict2':v2, 'ignore_case':ic})
            if rx['return']>0: return rx
            equal=rx['equal']
            if equal=='no':
                break
        elif type(v2)==list:
            # For now can check only values in list
            # (a subset test: every item of the dict2 list must appear in
            # the dict1 list, order ignored — see docstring examples)
            if q2 not in d1:
                equal='no'
                break
            v1=d1[q2]
            if type(v1)!=list:
                equal='no'
                break
            for m in v2:
                if m not in v1:
                    equal='no'
                    break
            if equal=='no':
                break
        else:
            if q2 not in d1:
                equal='no'
                break
            if equal=='no':
                break
            v1=d1[q2]
            # scalars: lowercase both sides unless numeric/bool when the
            # ignore_case flag is set
            if bic and type(v1)!=int and type(v1)!=float and type(v1)!=bool:
                v1=v1.lower()
                v2=v2.lower()
            if v2!=v1:
                equal='no'
                break
    return {'return':0, 'equal':equal}
def liste_nom_prediction(liste_nom, Y):
    """Pair each series name with its clustering label.

    liste_nom -> list of series names
    Y -> cluster label per series
    (Warning: liste_nom and Y must be in the same order)
    """
    return [(nom, Y[i]) for i, nom in enumerate(liste_nom)]
def is_table_taxa_alike(feature_table1, feature_table2):
    """This method checks if `feature_table2` instance contains same taxonomy
    as `feature_table1`.

    Parameters
    ----------
    feature_table1
        First FeatureTable
    feature_table2
        Second FeatureTable

    Returns
    -------
    bool
        True if taxonomies are same. False otherwise
    """
    def _sorted_lineage(table):
        # order-independent comparison: sort lineages, discard the index
        lineage = table.taxonomy.loc[:, "lineage"]
        return lineage.sort_values(axis=0).reset_index(drop=True)

    return _sorted_lineage(feature_table1).equals(_sorted_lineage(feature_table2))
def _gen_eval_kwargs(name):
    """
    Find the keyword arguments to pass to numexpr for the given operation.

    Parameters
    ----------
    name : str

    Returns
    -------
    eval_kwargs : dict

    Examples
    --------
    >>> _gen_eval_kwargs("__add__")
    {}
    >>> _gen_eval_kwargs("rtruediv")
    {'reversed': True, 'truediv': True}
    """
    # Series and Panel appear to only pass __add__, __radd__, ...
    # but DataFrame gets both these dunder names _and_ non-dunder names
    # add, radd, ...
    op = name.replace('__', '')
    kwargs = {}
    # reversed variants need the flag, except the commutative operations
    # where operand order does not matter
    if op.startswith('r') and op not in ('radd', 'rand', 'ror', 'rxor'):
        kwargs['reversed'] = True
    if op in ('truediv', 'rtruediv'):
        kwargs['truediv'] = True
    if op == 'ne':
        kwargs['masker'] = True
    return kwargs
def GetNiceArgs(level: int):
    """Returns the command/arguments to set the `nice` level of a new process.

    Args:
      level: The nice level to set (-20 <= `level` <= 19).

    Raises:
      ValueError: if level is outside the valid range.
    """
    if not -20 <= level <= 19:
        raise ValueError(
            f"The level must be >= -20 and <= 19. The level specified is {level}.")
    return ["nice", "-n", str(level)]
from datetime import datetime
import pytz
def utc_to_unix(t):
    """ UTC Y-M-D -> UTC unix time (ignore float second point)
    t = "2000-01-01T00:00:00.111" """
    # local import: replaces the third-party pytz dependency; attaching
    # timezone.utc yields the same aware datetime / timestamp as
    # pytz.timezone('UTC').localize(dt)
    from datetime import timezone
    t = t.split('.')[0]
    dt = datetime.strptime(t, '%Y-%m-%dT%H:%M:%S')
    dt = dt.replace(tzinfo=timezone.utc)
    return int(dt.timestamp())
import subprocess
def __get_first_interface_linux():
    """
    Return the first wireless interface in the system.
    :pre: Only invoke in Linux system. Requires iw and awk commands.
    :return: the first interface, otherwise "wlan0"
    :rtype: str
    """
    # shell pipeline prints one interface name per line; the first line is
    # returned (an empty string when iw printed nothing)
    output = subprocess.check_output("iw dev | awk '$1==\"Interface\"{print $2}'", shell=True)
    for row in output.decode('utf-8').split('\n'):
        return row
    # NOTE(review): this fallback is effectively dead code — split('\n')
    # always yields at least one (possibly empty) element, so the loop
    # always returns on its first iteration.
    return 'wlan0'
def get_involved_objects(config):
    """Given a pytest config, get the list of objects specified via the
    --involving flag (an empty list when the flag is absent/empty)."""
    involved = config.getoption("--involving")
    return involved if involved else []
def tmpdirec(tmp_path_factory):
    """Pytest fixture instantiating a new session-scope "data" folder.

    Parameters
    ----------
    tmp_path_factory :
        Pytest fixture for creating temporary directories.
    """
    data_dir = tmp_path_factory.mktemp("data")
    return data_dir
def FirstPromotion(order):  # first concrete strategy
    """10% discount for customers with 1000 or more fidelity points.

    NOTE(review): the original (Chinese) docstring said 5%, but the code
    applies ``total() * 0.1`` (10%) — confirm which is intended.
    """
    return order.total() * 0.1 if order.customer.integration >= 1000 else 0
import os
def __get_src_relative_path(path):
    """Return a path relative to ./src.

    The src directory is important because of its relationship to BUILD_DIR,
    established in the SConstruct file. For variant directories to work properly
    in SCons, paths relative to the src or BUILD_DIR must often be generated.

    :raises ValueError: when ``path`` does not live under ./src
    """
    src_dir = os.path.abspath('src')
    path = os.path.abspath(os.path.normpath(path))
    # BUG FIX: require path to be src_dir itself or strictly inside it;
    # a bare startswith(src_dir) wrongly accepted siblings like "src2/...".
    if path != src_dir and not path.startswith(src_dir + os.sep):
        raise ValueError('Path "%s" is not relative to the src directory "%s"' % (path, src_dir))
    result = path[len(src_dir) + 1:]
    return result
def pluralize(noun):
    """Pluralize `noun`: extremely domain-specific.
    """
    if noun.endswith('pus'):
        # octopus -> octopora style plural: drop "us", append "ora"
        return noun[:-2] + 'ora'
    return noun + 's'
def safe_subpath(path, altitudes, h):
    """
    Computes the maximum subpath of path along which the safety constraint is
    not violated

    Parameters
    ----------
    path: np.array
        Contains the nodes that are visited along the path
    altitudes: np.array
        1-d vector with altitudes for each node
    h: float
        Safety threshold

    Returns
    -------
    subpath: np.array
        Maximum subpath of path that fulfills the safety constraint
    """
    subpath = [path[0]]
    # walk consecutive node pairs; stop at the first unsafe descent
    for prev, succ in zip(path, path[1:]):
        if altitudes[prev] - altitudes[succ] < h:
            break
        subpath.append(succ)
    return subpath
def get_center_location(N, M):
    """
    Function: get_center_location\n
    Parameters: N -> Number of rows in the Grid, M -> Number of columns in the Grid\n
    Returns: a tuple with the (row, col) center location of the disaster
    area, computed by floor division\n
    """
    return (N // 2, M // 2)
def generation_finder(data, gen_type):
    """Finds all generation matching requested type in a list.

    Each matching label at index i has its value two slots later (i + 2).
    Sums together and returns a float.
    """
    matched_positions = [i + 2 for i, label in enumerate(data) if label == gen_type]
    total = sum(data[p] for p in matched_positions)
    return float(total)
def toggle_active_links(pathname):
    """Toggles active menu links based on url pathname

    Args:
        pathname (str): Url pathname
    Returns:
        bool: Active state for each page (summary, orders, products,
        projects, support); unknown paths default to orders active.
    """
    routes = {
        "/datyy/": (True, False, False, False, False),
        "/datyy/summary": (True, False, False, False, False),
        "/datyy/orders": (False, True, False, False, False),
        "/datyy/products": (False, False, True, False, False),
        "/datyy/projects": (False, False, False, True, False),
        "/datyy/support": (False, False, False, False, True),
    }
    return routes.get(pathname, (False, True, False, False, False))
import argparse
def parse_args(argv=None):
    """
    Parse arguments

    Args:
        argv: Optional list of argument strings; ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the original no-arg
            call while making the function testable.
    Returns:
        Parsed arguments (argparse.Namespace)
    Raises:
        None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--world", type=str, default="world", help="Say hello to")
    return parser.parse_args(argv)
import os
def annexe_to_bytes():
    """ Convert the annexe folder to a zip, load the bytes thereof into
    memory, and then delete the zip.

    Returns None when ./annexe is empty. Relies on the external ``zip``
    and ``rm`` binaries (invoked via os.system) and on ``annexe/``
    existing relative to the current working directory.
    """
    if len(os.listdir("annexe/")) == 0:
        return None
    # NOTE(review): os.system exit codes are ignored; a failed zip would
    # only surface as the subsequent open() raising.
    os.system("zip -r annexe.zip annexe/")
    with open("annexe.zip", "rb") as annexe_zip:
        result = annexe_zip.read()
    os.system("rm annexe.zip")
    return result
import os
def save_ipf(df, output_path):
    """
    Write a socet ipf file from an ipf-defined pandas dataframe

    Parameters
    ----------
    df : pd.DataFrame
        Pandas DataFrame; must contain an 'ipf_file' column plus the
        per-point columns written below
    output_path : str
        path to the output data file (directory receiving one
        "<ipf_file>.ipf" per group)

    Returns
    -------
    int : success value
        0 = success, 1 = errors
        NOTE(review): the final bare ``return`` actually yields None (which
        is falsy like 0) rather than the documented 0 — confirm callers
        only rely on truthiness.
    """
    for ipf_file, ipf_df in df.groupby('ipf_file'):
        output_file = os.path.join(output_path, ipf_file + '.ipf')
        # Check that file can be opened
        # (CRLF line endings are forced via newline='\r\n' for socet)
        try:
            outIPF = open(output_file, 'w', newline='\r\n')
        except:
            print('Unable to open output ipf file: {0}'.format(output_file))
            return 1
        #grab number of rows in pandas dataframe ipf group
        numpts = len(ipf_df)
        #Output ipf header
        outIPF.write('IMAGE POINT FILE\n')
        outIPF.write('{0}\n'.format(numpts))
        outIPF.write('pt_id,val,fid_val,no_obs,l.,s.,sig_l,sig_s,res_l,res_s,fid_x,fid_y\n')
        for index, row in ipf_df.iterrows():
            #Output coordinates to ipf file
            outIPF.write('{0} {1} {2} {3}\n'.format(row['pt_id'], row['val'], row['fid_val'], row['no_obs']))
            outIPF.write('{:0.6f} {:0.6f}\n'.format(row['l.'], row['s.']))
            outIPF.write('{:0.6f} {:0.6f}\n'.format(row['sig_l'], row['sig_s']))
            outIPF.write('{:0.6f} {:0.6f}\n'.format(row['res_l'], row['res_s']))
            outIPF.write('{:0.6f} {:0.6f}\n\n'.format(row['fid_x'], row['fid_y']))
        # NOTE(review): the handle is only closed on the success path; an
        # exception while writing would leak it.
        outIPF.close()
    return
import re
def extractQuotes(a_string):
"""Assumes a_string is a string
returns a new string, any values between quotation marks of a_string"""
pattern = """'(.*)'"""
result = re.search(pattern, a_string)
if result:
result = result.group(1)
return result | 342e93f02383170dc5135d007e51698233fa527a | 30,900 |
def path_to_moves(path):
    """ translate best path into motion sequence.

    :param path: list of tuples with [(load id, location), ... ]
    :return: list of {load id: (from_location, to_location)} dicts, one per
        position change between consecutive states
    """
    moves = []
    for prev_state, next_state in zip(path, path[1:]):
        for before, after in zip(prev_state, next_state):
            if before != after:
                moves.append({before[0]: (before[1], after[1])})
    return moves
def default_wandb_args():
    """This allows us to parameterize the wandb_init_run fixture.

    The most general arg is "env", you can call:
    @pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})
    To set env vars and have them unset when the test completes.
    """
    defaults = dict(
        error=None,
        k8s=None,
        sagemaker=False,
        tensorboard=False,
        resume=False,
        env={},
        wandb_init={},
    )
    return defaults
def run_one_day(fish: list[int], start_time: int = 6, new_time: int = 8):
    """Runs one day of lanternfish reproducing.

    A timer at 0 resets to ``start_time`` and spawns a new fish appended
    with timer ``new_time``; every other timer decreases by one.
    """
    next_day = []
    spawned = 0
    for timer in fish:
        if timer == 0:
            next_day.append(start_time)
            spawned += 1
        else:
            next_day.append(timer - 1)
    next_day.extend([new_time] * spawned)
    return next_day
def round_custom(num, threshold=0.0001):
    """
    Round the number to the nearest integer when its absolute value is less
    than the threshold; a (resulting or given) value equal to zero is
    normalised to the integer 0.
    """
    if abs(num) < threshold:
        num = round(num)
    return 0 if num == -0 else num
def get_patch(input_array, i, j, filter_width, filter_height, stride):
    """
    Slice out the convolution receptive field for output position (i, j).

    Handles both 2-D (H, W) and 3-D (C, H, W) inputs automatically; for
    3-D input all channels are kept.
    """
    row0 = i * stride
    col0 = j * stride
    rows = slice(row0, row0 + filter_height)
    cols = slice(col0, col0 + filter_width)
    if input_array.ndim == 2:
        return input_array[rows, cols]
    elif input_array.ndim == 3:
        return input_array[:, rows, cols]
def _get_vintools_version(setup_path="/home/mvinyard/software/vintools/setup.py"):
"""reports version without circular import of package"""
with open(setup_path, "r") as setup_py:
for n, line in enumerate(setup_py):
l = line.rstrip()
if l.startswith(" version"):
version = line.split('"')[-2]
return version | 46e451af288d8322a39bf036b2cea52ac87031ad | 30,906 |
def find_largest_digit_helper(n, big):
    """
    :param n: the number to be examined (may be negative)
    :param big: the current largest digit seen so far
    :return: the largest digit in the number
    """
    # the sign never affects the digits, so work on the absolute value;
    # this collapses the duplicated positive/negative branches
    n = abs(n)
    if n < 10:
        # single digit left: it is the final candidate
        return n if n > big else big
    last_digit = n % 10
    if last_digit > big:
        big = last_digit
    return find_largest_digit_helper(n // 10, big)
def _is_asn(asnstr):
""" check if asnstr is a valid AS number"""
if asnstr.upper().startswith('AS'):
return True
return False | fb4db154e8e28ea18f364ef6d552a309891ca8ef | 30,910 |
def marker_ui_label(m) -> str:
    """
    Caption for the labels used in the marker UI. See also the
    marker_ui_labels setting if you want to override it.
    """
    return f"{m!s}"
def sex2dec(rain, decin):
    """
    Converts sexagesimal coordinates to decimal. HMS and DMS separated by colons (:)

    Parameters
    ----------
    rain : str
        input Right Ascension as a sexagesimal string -- e.g., '03:45:6789'
    decin : str
        input Declination as a sexagesimal string -- e.g., '-12:34:5678998765'

    Returns
    -------
    list
        [12.345678, -10.987654]
    """
    if ':' in rain: ra=[float(val)*360./24 for val in rain.split(':')]
    else: ra=[float(val)*360./24 for val in rain.split(' ')]
    raout=ra[0]+ra[1]/60.+ra[2]/3600.
    if ':' in decin: dec=[float(val) for val in decin.split(':')]
    else: dec=[float(val) for val in decin.split(' ')]
    # BUG FIX: the sign must come from the string, not from dec[0]:
    # float("-00") == -0.0 and (-0.0 < 0) is False, so declinations between
    # -1 and 0 degrees (e.g. "-00:30:00") previously got the wrong sign.
    if decin.strip().startswith('-'): decout=dec[0]-dec[1]/60.-dec[2]/3600.
    else: decout=dec[0]+dec[1]/60.+dec[2]/3600.
    return [raout,decout]
def ttfAutohintDict( parameterValue ):
    """Returns a dict for a TTFAutohint parameter value.

    Options are separated by "--"; "key=value" entries map to their
    stripped value, bare flags map to None.
    """
    options = {}
    for token in parameterValue.split("--"):
        if "=" in token:
            [key, value] = token.split("=")
            value = value.strip()
        else:
            key, value = token, None
        if key:
            options[key.strip(" -")] = value
    return options
def _fmt_date(date_as_bytes):
"""Format mail header Date for humans."""
date_as_string = date_as_bytes.decode()
_month = {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}
date_list = date_as_string.split(',')
# week = date_list[0].replace(' ', '')
date_list = date_list[1].split(' ')
date_list = list(filter(lambda x: x != '', date_list))
day = date_list[0]
month = _month[date_list[1]]
year = date_list[2]
times = date_list[3]
time_zone = date_list[4]
return '{}-{}-{} {} {}'.format(year, month, day, times, time_zone) | e1e273eb22d60ca945ce9b065f6c4b8cf62cf82e | 30,914 |
from typing import Any
def parse_error(err: Any, raw: bool = True) -> dict:
    """
    Parse single error object (such as pydantic-based or fastapi-based) to dict

    :param err: Error object (must expose ``msg`` and ``loc``)
    :param raw: Whether this is a raw error or wrapped pydantic error
    :return: dict with name of the field (or "__all__") and actual message
    """
    message = err.msg or ""
    loc = err.loc
    if raw:
        if len(loc) == 2:
            # prefix the message with the lowercased sub-location
            name = str(loc[0])
            message = f"{str(loc[1]).lower()}: {message}"
        elif len(loc) == 1:
            name = str(loc[0])
        else:
            name = "__all__"
    else:
        if len(loc) == 2:
            # a "body" wrapper is transparent: the real field is loc[1]
            name = loc[1] if str(loc[0]) == "body" else loc[0]
        elif len(loc) == 1:
            name = "__all__" if str(loc[0]) == "body" else str(loc[0])
        else:
            name = "__all__"
    return {"name": name, "message": message.capitalize()}
from datetime import datetime
import pytz
def from_timestamp(timestamp):
    """
    Transform a UNIX UTC timestamp to a Python datetime object.

    :param timestamp: seconds since the epoch, or None
    :return: timezone-aware UTC datetime, or None when timestamp is None
    """
    if timestamp is None:
        return None
    # stdlib timezone.utc replaces third-party pytz.UTC: the resulting
    # datetimes compare equal, and pytz is deprecated in favor of the
    # standard-library timezone support.
    from datetime import timezone
    return datetime.fromtimestamp(timestamp, tz=timezone.utc)
def rollout(env, maxsteps=100):
    """Run a random policy for up to ``maxsteps`` and return the return.

    Samples actions uniformly from ``env.action_space`` and sums the
    rewards until the episode terminates or the step budget is spent.
    """
    total_reward = 0
    for _ in range(maxsteps):
        _, reward, terminal, _ = env.step(env.action_space.sample())
        total_reward += reward
        if terminal:
            break
    return total_reward
import re
def properly_cut_text(
    text: str, start_idx: int, end_idx: int, nbr_before: int = 30, nbr_after: int = 30
) -> str:
    """Properly cut a text around some interval.

    Expands the interval [start_idx, end_idx) by ``nbr_before`` /
    ``nbr_after`` characters, then snaps both ends outwards to sentence
    boundaries ('.', '?' or '!') so the excerpt reads naturally.

    :param text: full text to excerpt from
    :param start_idx: start of the interval of interest
    :param end_idx: end of the interval of interest
    :param nbr_before: context characters to include before the interval
    :param nbr_after: context characters to include after the interval
    :return: the stripped excerpt
    """
    str_length = len(text)
    start_idx = max(0, start_idx - nbr_before)
    end_idx = end_idx + nbr_after
    # Change the end depending on the value: extend to the next sentence
    # terminator after end_idx ('.' only counts when not followed by a
    # digit, so decimals like "3.14" are not treated as sentence ends).
    match = re.search(r"\.[^\d]|\?|\!", text[end_idx:], flags=re.IGNORECASE)
    if match:
        end_idx = match.end() + end_idx
    else:
        end_idx = str_length
    # Change the beginning depending on the value: move the start just past
    # the last occurrence of a sentence terminator before start_idx
    # ((?!.*\1) matches a mark with no later occurrence of the same mark).
    # NOTE(review): when start_idx is 0 the slice text[: start_idx - 1]
    # becomes text[:-1] (almost the whole text), so the start can still be
    # advanced past a sentence end even though no cut happened — confirm
    # this is intended.
    match = re.search(r"(\.|\?|\!)(?!.*\1)", text[: start_idx - 1], flags=re.IGNORECASE)
    if match:
        start_idx = match.end() + 1
    else:
        start_idx = 0
    return text[start_idx:end_idx].strip()
def get_primary_transcript(database):
    """
    Get the ID used to identify the primary transcript in the GTF file
    with the miRNA and precursor coordinates, so that BAM files with
    genomic coordinates can be parsed.

    :param database: annotation database name (e.g. "miRBase21")
    :return: the GTF feature name for the primary transcript
    :raises ValueError: when the database is not a miRBase release
    """
    # `in` is the idiomatic membership test; the original
    # `database.find("miRBase") > -1` is equivalent but clumsier.
    if "miRBase" in database:
        return "miRNA_primary_transcript"
    raise ValueError("Only miRBase is supported for this action.")
from typing import Tuple
from typing import List
def read_fastq(filename: str) -> Tuple[List[str], List[str]]:
    """
    Reads sequences and qualities from a .fastq file
    filename: relative or absolute path of the .fastq file to be read from
    Returns:
        List of sequence reads
        List of qualities corresponding to each sequence read
    """
    reads: List[str] = []
    qualities: List[str] = []
    with open(filename, "r") as handle:
        while True:
            handle.readline()  # header line (@...)
            sequence = handle.readline().rstrip()
            handle.readline()  # separator line (+)
            quality = handle.readline().rstrip()
            if not sequence:
                # An empty sequence line means we hit end-of-file.
                break
            reads.append(sequence)
            qualities.append(quality)
    return reads, qualities
def enable_runtime_call_stats() -> dict:
    """Enable run time call stats collection.
    **Experimental**
    """
    # Command takes no parameters; return the bare CDP message envelope.
    command: dict = {"method": "Profiler.enableRuntimeCallStats"}
    command["params"] = {}
    return command
def default_globcfg():
    """Create a global configuration with parameters by default."""
    return dict(
        base_address=0,
        data_width=32,
        address_width=16,
        register_reset="sync_pos",
        address_increment="none",
        address_alignment="data_width",
        force_name_case="none",
    )
from pathlib import Path
import re
def get_variable_from_py_file(file_path: Path, var_name: str):
    """Collect every quoted value assigned to ``var_name`` in a .py file.

    Returns a set of the string values found for assignments or
    annotations like ``var = "value"`` / ``var : 'value'``.
    """
    with open(file_path, encoding="utf-8") as fh:
        remaining = fh.read()
    values = set()
    pattern = rf'{var_name} *[:=] *["\'](.+)["\']'
    while True:
        match = re.search(pattern, remaining)
        if match is None:
            return values
        values.add(match.group(1))
        # Continue scanning after the current match.
        remaining = remaining[match.end():]
def create_bed_info_gp(gp):
    """Creates the block_starts, block_sizes and exon_frames fields from a GenePredTranscript object"""
    def _csv(values):
        # BED-style comma-separated field from a sequence of numbers.
        return ','.join(str(v) for v in values)
    return _csv(gp.block_starts), _csv(gp.block_sizes), _csv(gp.exon_frames)
def char_ngrams(n, word, **kwargs):
    """Extract character n-grams of size ``n`` from ``word``.

    Only full-length n-grams are returned; a word shorter than ``n``
    yields the word itself as its single (short) gram.

    Args:
        n (int): Max size of n-gram to extract
        word (str): The word to extract n-grams from
    Returns:
        list: character n-grams for the given word
    """
    del kwargs
    grams = []
    for start in range(len(word)):
        gram = word[start:start + n]
        if len(gram) < n:
            # Ran off the end: stop, but if nothing was collected yet the
            # whole word is shorter than n — return it as-is.
            return grams if grams else [gram]
        grams.append(gram)
    return grams
import re
def idFromLink(link):
    """
    Extract the job ID from the monster url
    :link: Monster job url
    :return: job ID, str
    """
    if ".aspx" in link:
        # Legacy .aspx urls carry a fixed-width 9-char ID before the suffix.
        return link[-14:-5]
    if "/monster/" in link:
        # ID sits between "monster/" and the query string.
        return re.findall(r'monster/.+?\?', link)[0][8:-1]
    # Fallback: last path segment.
    return link[link.rfind('/') + 1:]
import copy
def redact_loc(image_meta, copy_dict=True):
    """
    Create a shallow copy of image meta with 'location' removed
    for security (as it can contain credentials).
    """
    redacted = copy.copy(image_meta) if copy_dict else image_meta
    for sensitive_key in ('location', 'location_data'):
        redacted.pop(sensitive_key, None)
    return redacted
from typing import OrderedDict
def sort_request(request):
    """Return the request as an OrderedDict with keys in canonical order.

    Purely cosmetic — makes the serialized request nicer to read:
    "jsonrpc", "method", "params", "id", "session", "verbose".
    Unknown keys raise ValueError (via tuple.index).

    Args:
        request: JSON-RPC request in dict format.
    """
    precedence = ("jsonrpc", "method", "params", "id", "session", "verbose")
    ordered_items = sorted(request.items(), key=lambda item: precedence.index(item[0]))
    return OrderedDict(ordered_items)
def create_data_descriptor(collection_id: str, var_id: str, spatial_extent: dict, temporal_extent: list) -> dict:
    """Build a WEkEO 'data descriptor' dict for a data-access request."""
    bbox_values = [{"name": "bbox", "bbox": spatial_extent}]
    date_values = [{
        "name": "position",
        "start": temporal_extent[0],
        "end": temporal_extent[1],
    }]
    # Processing level is fixed at LEVEL2; the product type is the caller's
    # variable id.
    choice_values = [
        {"name": "processingLevel", "value": "LEVEL2"},
        {"name": "productType", "value": var_id},
    ]
    return {
        "datasetId": collection_id,
        "boundingBoxValues": bbox_values,
        "dateRangeSelectValues": date_values,
        "stringChoiceValues": choice_values,
    }
def get_mf6_mshape(disfile):
    """Return the shape of the MODFLOW 6 model.

    Parameters
    ----------
    disfile : str
        path to a MODFLOW 6 discretization file

    Returns
    -------
    mshape : tuple
        tuple with the shape of the MODFLOW 6 model.
    """
    with open(disfile, "r") as fh:
        content = fh.readlines()
    found = {}
    for raw in content:
        tokens = raw.strip().split()
        # Skip blank lines / lone tokens, then comment lines.
        if len(tokens) < 2:
            continue
        if raw.strip()[0] in ("#", "!"):
            continue
        # Loose substring match: the token is accepted when it is contained
        # in one of the known dimension keywords.
        for keyword in ("NODES", "NCPL", "NLAY", "NROW", "NCOL"):
            if tokens[0].upper() in keyword:
                found[keyword] = int(tokens[1])
    if "NODES" in found:
        return (found["NODES"],)
    if "NCPL" in found:
        return (found["NLAY"], found["NCPL"])
    if "NLAY" in found:
        return (found["NLAY"], found["NROW"], found["NCOL"])
    print(found)
    raise Exception("Could not determine model shape")
def maketestfile(makepyfile):
    """Fixture for making python test files with single function and docstring."""
    def _factory(*args, **kwargs):
        # Default function name unless the caller overrides it.
        kwargs.setdefault('func_name', 'test_foo')
        return makepyfile(*args, **kwargs)
    return _factory
def _iou_(test_poly, truth_poly):
"""Intersection over union"""
intersection_result = test_poly.intersection(truth_poly.geometry)
intersection_area = intersection_result.area
union_area = test_poly.union(truth_poly.geometry).area
return (intersection_area / union_area) | 1eb5f803f2d192b92503533d87f6a6b3c10a7271 | 30,941 |
def inplace(method_name):
    """
    Returns a type instance method that will call the given method
    name, used for inplace operators such as __iadd__ and __imul__.
    """
    def _delegate(self, *args):
        # Forward to the named mutator, then return self so the in-place
        # operator rebinds the same object.
        getattr(self, method_name)(*args)
        return self
    return _delegate
def para_size_greater_than_n(para_list, n=1):
    """
    Returns paragraphs whose length is greater than n
    :param para_list: a list of paragraphs
    :param n: paragraphs having length > n are selected
    :return: list of paragraphs having length > n
    """
    # BUG FIX: the filter previously ran only when n > 0, so calls with
    # n <= 0 silently returned None instead of a list.
    return [para for para in para_list if len(para) > n]
def sr(redchan, nirchan):
    """
    Simple Vegetation ratio
    sr( redchan, nirchan )
    """
    # Multiplying by 1.0 promotes integer inputs (scalars or arrays) to
    # floats before the division.
    return (1.0 * nirchan) / (1.0 * redchan)
import pathlib
def tmp_path(tmp_path):
    """Always present a pathlib's Path.

    Avoids pytest handing out pathlib2 paths on Python 3.5, which differ
    subtly from stdlib pathlib. This shadowing fixture keeps the name of
    pytest's built-in `tmp_path`, so dropping Py 3.5 support only requires
    deleting it.
    """
    as_string = str(tmp_path)
    return pathlib.Path(as_string)
def transfer(item, from_collection, to_collection, n=1):
    """Move ``n`` of ``item`` from one count-dict to another.

    Returns True on success, False when ``from_collection`` holds fewer
    than ``n`` of the item.

    For example, to have the player pick up a pointy stick::
        if transfer('pointy stick', tile.contents, player.inventory):
            print("You take the stick.")
        else:
            print("There's no stick here.")
    """
    if from_collection.get(item, 0) < n:
        return False
    from_collection[item] -= n
    to_collection[item] = to_collection.get(item, 0) + n
    return True
def to_ascii(input_str):
    """Decode a UTF-8 bytes string to str, dropping undecodable bytes.

    Despite the historical name, no accent stripping happens here:
    ``None`` and ``str`` inputs are returned unchanged; ``bytes`` are
    decoded as UTF-8 with invalid sequences ignored.
    """
    if input_str is None or isinstance(input_str, str):
        return input_str
    # decode(..., errors='ignore') handles valid UTF-8 identically to a
    # strict decode, so the previous try/except UnicodeDecodeError
    # round-trip was redundant.
    return input_str.decode('utf-8', errors='ignore')
import re
def inIpRange(host, spec):
    """Return True if host is in the ip range specification, False otherwise.

    Each spec octet may be '*' (any), an exact number, or 'a-b' (inclusive
    range with a < b). Malformed hosts or specs raise RuntimeError.
    """
    host_match = re.search(r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$', host)
    if not host_match:
        raise RuntimeError("Format error for host ip: %s" % host)
    host_octets = [int(g) for g in host_match.groups()]
    spec_octets = spec.split('.')
    if len(spec_octets) != 4:
        raise RuntimeError("Must have 4 octets in spec: %s" % spec)
    for host_octet, spec_octet in zip(host_octets, spec_octets):
        # Wildcard matches anything.
        if spec_octet == '*':
            continue
        # Exact octet value.
        if re.search(r'^\d+$', spec_octet):
            if host_octet != int(spec_octet):
                return False
            continue
        # Inclusive range "start-end".
        range_match = re.search(r'^(\d+)-(\d+)$', spec_octet)
        if not range_match:
            raise RuntimeError("Unknown format for ip range spec: %s" % spec)
        start_oct, end_oct = (int(g) for g in range_match.groups())
        if start_oct >= end_oct:
            raise RuntimeError(
                "Start octet cannot be >= end octet: %s" % spec_octet)
        if not (start_oct <= host_octet <= end_oct):
            return False
    return True
import asyncio
async def async_function(wait: bool) -> int:
    """Return 5, optionally after a one-second async pause.

    Args:
        wait: when True, sleep for one second before returning.
    """
    if not wait:
        return 5
    await asyncio.sleep(1)
    return 5
import subprocess
import logging
def rotate(record, newpassword):
    """Rotate the Windows account password for ``record``.

    Runs ``net user <login> <newpassword>`` and, on success, stores the
    new password back on the record.

    :param record: object with ``login`` and ``password`` attributes
    :param newpassword: the password to set
    :return: True when the password was changed, False otherwise
    """
    status = subprocess.call(["net", "user", record.login, newpassword], shell=True)
    if status == 0:
        logging.info('Password changed successfully')
        record.password = newpassword
        return True
    logging.error('Password change failed')
    # BUG FIX: the failure path previously returned True, so callers could
    # not distinguish a failed rotation from a successful one.
    return False
def ad(df, high, low, close, volume, ad):
    """
    The Accumulation/Distribution Line is similar to the On Balance Volume (OBV),
    which sums the volume times +1/-1 based on whether the close is higher than
    the previous close. The Accumulation/Distribution indicator, however
    multiplies the volume by the close location value (CLV). The CLV is based on
    the movement of the issue within a single bar and can be +1, -1 or zero.
    Parameters:
        df (pd.DataFrame): DataFrame which contain the asset information.
        high (string): the column name for the period highest price of the asset.
        low (string): the column name for the period lowest price of the asset.
        close (string): the column name for the closing price of the asset.
        volume (string): the column name for the volume of the asset.
        ad (string): the column name for the ad values.
    Returns:
        df (pd.DataFrame): Dataframe with ad of the asset calculated.
    """
    # Close location value (CLV) in [-1, 1]: where the close sits inside
    # the bar's high/low range.
    money_flow_multiplier = (
        (df[close] - df[low]) - (df[high] - df[close])
    ) / (df[high] - df[low])
    # Per-bar money-flow volume, kept in a temporary helper column.
    df[ad + "_money_flow_volume"] = money_flow_multiplier * df[volume]
    # NOTE(review): .loc[0, ...] assumes the default RangeIndex starting at
    # 0 — confirm callers never pass a re-indexed frame.
    prev_ad = df.loc[0, ad + "_money_flow_volume"]
    df.loc[0, ad] = prev_ad
    # Running cumulative money-flow volume; the leading 0.0 keeps row 0
    # (already set above) unchanged when the offsets are added below.
    ads = [0.0]
    for row in df.loc[1:, [ad + "_money_flow_volume"]].itertuples(index=False):
        ads.append(prev_ad + row[0])
        prev_ad = ads[-1]
    # Rows 1..n of the ad column are still NaN here; zero them so the +=
    # below writes the cumulative values directly.
    df = df.fillna(0)
    df[ad] += ads
    df.drop([ad + "_money_flow_volume"], axis=1, inplace=True)
    return df
def g_time(parameters):
    """Return the dwell time, in seconds, encoded in a g-code word string.

    Sums ``s`` words (seconds) and ``p`` words (milliseconds) from a
    whitespace-separated parameter string such as ``"g4 p500"``; other
    words are ignored.

    :param parameters: lowercase g-code words, e.g. ``"s1 p250"``
    :return: accumulated time in seconds (0.0 when no time word present)
    """
    time = 0.0
    # split() with no argument discards empty tokens, so repeated spaces
    # (which previously produced '' and raised IndexError) are tolerated.
    for param in parameters.split():
        coordinate = param[0]
        value = float(param[1:])
        if coordinate == 's':
            time = time + value
        elif coordinate == 'p':
            # p words are milliseconds.
            time = time + value * 1.0E-3
    return time
import string
import re
def _remove_punctuation_chars_from_text(text: str, exclude_end_of_line_chars: bool = True) -> str:
"""
exclude_end_of_line_chars such as ?!#\.
"""
chars_to_remove = string.punctuation
if exclude_end_of_line_chars:
chars_to_remove = re.sub(r'[?!#\.]', '', chars_to_remove)
return text.translate(str.maketrans('', '', chars_to_remove)) | 849acc5a10f4f1e5efc2c75488d6a32b549364d2 | 30,957 |
def demo_policy(request, bigip):
    """create a `demo_policy` in `Common` partition."""
    mgmt = bigip.bigip
    policy_name = "demo_policy"
    policy_partition = "Common"
    demo_rules = [{
        'name': 'demo_rule',
        'ordinal': 0,
        'actions': [],
        'conditions': [],
    }]

    def _teardown():
        policys = mgmt.tm.ltm.policys
        if policys.policy.exists(name=policy_name, partition=policy_partition):
            policys.policy.load(
                name=policy_name, partition=policy_partition).delete()

    # legacy=True keeps the create call compatible with BIG-IP versions
    # 11.5, 11.6, 12.1 and 13.
    created = mgmt.tm.ltm.policys.policy.create(
        name=policy_name,
        partition=policy_partition,
        strategy="first-match",
        rules=demo_rules,
        legacy=True)
    request.addfinalizer(_teardown)
    return created
def lockerNumDigits(lockerNum, digits=3):
    """Adds leading zeroes to ensure len (lockerNum) == digits.
    :param str lockerNum:
        The locker number to ensure has sufficient digits
    :param int digits:
        The assigned number of digits for locker numbers to include
        leading zeroes
    :except ValueError:
        lockerNum must be able to be represented as an int. Returns None
        otherwise.
    :return:
        The locker number with sufficient digits unless len (lockerNum)
        > digits, then returns None
    :rtype: str or None
    """
    try:
        int(lockerNum)
    except ValueError:
        return None
    if 1 <= len(lockerNum) <= digits:
        # zfill pads on the left with zeros up to the requested width.
        return lockerNum.zfill(digits)
    # Per the documented contract: too-long (or empty) numbers yield None.
    return None
import re
def parse_issue_tracking(subject):
    """parse and build issue tracker url from issue number"""
    tracker_pat = re.compile(r"\[#(\w+)\]")
    found = tracker_pat.findall(subject)
    if not found:
        return None, subject
    url = f"https://www.pivotaltracker.com/story/show/{found[0]}"
    # Strip every tracker reference from the subject line.
    return url, tracker_pat.sub("", subject)
def has_decreased(scores, in_last):
    """Return True iff the score in the last `in_last` descended."""
    if in_last >= len(scores):
        # Not enough history to rule out a decrease.
        return True
    window = scores[-(in_last + 1):]
    return any(b < a for a, b in zip(window, window[1:]))
def pred_lang(text, model):
    """ Predict most likely language used"""
    # fastText-style labels come back as "__label__xx"; strip the prefix.
    top_label = model.predict(text)[0][0]
    return top_label.replace('__label__', '')
def orderlex(l1, l2, n1, n2):
    """Lexicographic order on infinite periodic words.

    Compares the n1-th shift of l1^infinity with the n2-th shift of
    l2^infinity and returns:
      0 if the first word is strictly smaller,
      1 if it is strictly larger,
      2 if the two words coincide.
    Only len(l1) + len(l2) letters need to be inspected, since
    u^infinity == v^infinity iff uv == vu.

    :param l1: first word (sequence of comparable letters)
    :param l2: second word
    :param n1: shift applied to l1
    :param n2: shift applied to l2
    """
    i = 0
    # `and` replaces the original bitwise `&`, which always evaluated both
    # operands; the i <= len(l1)+len(l2) bound guarantees termination even
    # when the two words coincide.
    while l1[(i + n1) % len(l1)] == l2[(i + n2) % len(l2)] and i <= (len(l1) + len(l2)):
        i = i + 1
    a = l1[(i + n1) % len(l1)]
    b = l2[(i + n2) % len(l2)]
    if a < b:
        return 0
    if a > b:
        return 1
    return 2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.