content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _check_type(par,par_type):
"""function for checking if python parameter 'par' and DBASIC type 'par_type' are compatible"""
if par_type=='String':
if type(par)==str:
return True
else:
raise TypeError("Parameter of DBASIC type 'String' must be of python type 'str'")
elif par_type=='Uint64':
if type(par)==int:
if par>=0:
return True
else:
raise TypeError("Parameter of DBASIC type 'Uint64' can not be negative")
else:
raise TypeError("Parameter of DBASIC type 'Uint64' must be of python type 'int'")
else:
raise TypeError("Parameter DBASIC type should be 'Uint64' or 'String'") | 789b6d3a14f435ea0f6f5805f444356b8afd7d2f | 35,388 |
def add_to_bank_balance(balance):
    """
    Ask the player whether to add to the balance and, if so, by how much.

    balance: the player's current balance (int)
    Returns: the (possibly increased) balance (int)
    """
    answer = input('Do you want to add to your balance? ')
    if answer == 'yes':
        deposit = int(input('Enter how many dollars to add to your balance: '))
        balance += deposit
    # Always show the (new) balance, whether or not money was added.
    print("Balance:", balance)
    return balance
def GetUnixErrorOutput(filename, error, new_error=False):
    """Get a output line for an error in UNIX format.

    Produces "filename:line:(code) message"; the line part is empty when
    the error has no token, and the code is prefixed with "New Error "
    when new_error is set.
    """
    if error.token:
        line = '%d' % error.token.line_number
    else:
        line = ''
    error_code = '%04d' % error.code
    if new_error:
        error_code = 'New Error ' + error_code
    return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
import sys
def split_args(all_args):
    """
    Split argument list all_args into arguments specific to this script and
    arguments relating to the moses server. An isolated double dash acts as
    the separator between the two types of arguments.

    :param all_args: full command-line argument list (e.g. sys.argv[1:])
    :return: (my_args, mo_args) tuple — this script's args, moses' args
    """
    my_args = []
    mo_args = []
    # Unrecognised arguments go to mo_args by default; the "--[" / "--]"
    # markers switch which list receives them.
    arglist = mo_args
    i = 0
    # IMPORTANT: the code below must be coordinated with
    # - the evolution of moses command line arguments
    # - mert-moses.pl
    while i < len(all_args):
        # print i,"MY_ARGS", my_args
        # print i,"MO_ARGS", mo_args
        if all_args[i] == "--[":
            # Start of a bracketed run of script-specific arguments.
            arglist = my_args
        elif all_args[i] == "--]":
            # End of the bracketed run; back to moses arguments.
            arglist = mo_args
        elif all_args[i] == "-i" or all_args[i] == "-input-file":
            # The input file (flag + value) is consumed by this script.
            my_args.extend(["-i", all_args[i + 1]])
            i += 1
        elif all_args[i] == "-inputtype":
            if all_args[i + 1] != "0":
                # Not yet supported! Therefore:
                errmsg = (
                    "FATAL ERROR: "
                    "%s only supports plain text input at this point."
                    % sys.argv[0])
                raise Exception(errmsg)
            # my_args.extend(["--input-type",all_args[i+1]])
            i += 1
        elif all_args[i] == "-lattice-samples":
            # my_args.extend(["--lattice-sample",all_args[i+2]])
            # my_args.extend(["--lattice-sample-file",all_args[i+1]])
            # mo_args[i:i+3] = []
            # i += 2
            # This is not yet supported! Therefore:
            errmsg = (
                "FATAL ERROR: %s does not yet support lattice sampling."
                % sys.argv[0])
            raise Exception(errmsg)
        elif all_args[i] == "-n-best-list":
            # "-n-best-list <file> <size>" maps to --nbest-file / --nbest.
            my_args.extend(["--nbest", all_args[i + 2]])
            my_args.extend(["--nbest-file", all_args[i + 1]])
            i += 2
        elif all_args[i] == "-n-best-distinct":
            my_args.extend(["-u"])
        else:
            # Anything unrecognised goes to whichever list is active.
            arglist.append(all_args[i])
            pass
        i += 1
        pass
    return my_args, mo_args
def left_child_index(i):
    """
    :param i: int
        Index of node in array (that is organized as heap)
    :return: int
        Position in array of left child of node
    """
    # For a 0-based array heap the left child of node i lives at 2*i + 1.
    return 2 * i + 1
import os
import json
from datetime import datetime
def load_logs(log_dir):
    """Load all ``*.json`` logs in *log_dir*, sorted by their timestamp.

    Each log's ``time`` field (an ISO-format string) is converted to a
    ``datetime`` object. Files that fail to parse are skipped with a
    message.

    Args:
        log_dir: directory containing the ``.json`` log files.

    Returns:
        list of log dicts, ordered by ascending ``time``.
    """
    def parse_nan_inf(arg):
        # json.load calls this for the bare constants Infinity/-Infinity/NaN.
        constants = {"-Infinity": -float("inf"),
                     "Infinity": float("inf"),
                     "NaN": float("nan")}
        return constants[arg]
    logs = []
    for filename in os.listdir(log_dir):
        if not filename.endswith(".json"):
            continue
        # os.path.join also tolerates a log_dir without a trailing slash
        # (the original string concatenation required one), and the context
        # manager closes the handle the original leaked.
        with open(os.path.join(log_dir, filename)) as f:
            try:
                logs.append(json.load(f, parse_constant=parse_nan_inf))
            except ValueError:
                print(filename + " could not be loaded; probably nan or inf values contained!")
    # convert timestamp string to datetime object
    for log in logs:
        log['time'] = datetime.strptime(log[u'time'], '%Y-%m-%dT%H:%M:%S.%f')
    # sort by timestamp
    return sorted(logs, key=lambda k: k['time'])
def add(bowl_a, bowl_b):
    """Return bowl_a and bowl_b added together"""
    total = bowl_a + bowl_b
    return total
def process_fid_to_avg_gate_fid(F_pro: float, d: int):
    """
    Convert a process fidelity to an average gate fidelity.

    Uses F_avg = (d * F_pro + 1) / (d + 1) where ``d`` is the dimension.
    """
    numerator = d * F_pro + 1
    return numerator / (d + 1)
from typing import Tuple
from typing import List
def _get_value_name_and_type_from_line(*, line: str) -> Tuple[str, str]:
"""
Get a parameter or return value and type from
a specified line.
Parameters
----------
line : str
Target docstring line.
Returns
-------
value_name : str
Target parameter or return value name.
type_name : str
Target parameter or return value type name.
"""
if ':' not in line:
return '', ''
splitted: List[str] = line.split(':', maxsplit=1)
value_name: str = splitted[0].strip()
type_name: str = splitted[1].strip()
return value_name, type_name | d2095fa2bc34a7086f60b40373a351f7c984dc96 | 35,399 |
def exclude_from_weight_decay(p):
    """Return True when parameter *p* should be excluded from weight decay.

    Layer-norm parameters and biases (names ending in "bias", "_b" or
    ".b_0") are excluded.
    """
    name = p.name
    if "layernorm" in name:
        return True
    # endswith accepts a tuple of suffixes - one call instead of a loop.
    return name.endswith(("bias", "_b", ".b_0"))
import json
def JSONParser(data):
    """Parse the JSON document *data* with ``json.loads`` and return it."""
    parsed = json.loads(data)
    return parsed
from typing import Counter
def verify_enabled_game_projects(ctx, option_name, value):
    """Validate the comma-separated list of Game Projects to include.

    Args:
        ctx: build context providing ``game_projects()``.
        option_name: name of the option being validated (kept for the
            validator-callback signature).
        value: comma-separated project names; may be empty.

    Returns:
        (ok, warning, error) tuple; ok is True when the value is valid.
    """
    if not value:
        return True, "", ""  # its okay to have no game project
    if (len(value) == 0):
        return True, "", ""
    if (value[0] == '' and len(value) == 1):
        return True, "", ""
    project_list = ctx.game_projects()
    project_list.sort()
    project_input_list = value.strip().replace(' ', '').split(',')
    # Get number of occurrences per item in list
    num_of_occurrences = Counter(project_input_list)
    # NOTE: don't shadow the builtin ``input`` (the original did).
    for project in project_input_list:
        # Ensure the project is known
        if project not in project_list:
            # Typo fix: "Unkown" -> "Unknown".
            error = ' [ERROR] Unknown game project: "%s".' % project
            return (False, "", error)
        # Ensure each project only appears once in the list
        elif num_of_occurrences[project] != 1:
            error = ' [ERROR] Multiple occurrences of "%s" in final game project value: "%s"' % (project, value)
            return (False, "", error)
    return True, "", ""
def cut_tag_preserve(tags, tag):
    """
    Cuts a tag from a list of tags without altering the original.

    Returns the remaining tags joined with commas.
    """
    # Work on a copy so the caller's list is untouched.
    remaining = list(tags)
    remaining.remove(tag)
    return ",".join(remaining)
def _default_error_handler(error_handler):
"""
Use it to customize error handler.
In aiohttp-apispec we use 400 as default client http error code.
"""
error_handler.set_status(400)
return error_handler | b80896bc365bfdbebcd07b43131b80c938bfadff | 35,408 |
def is_related(field):
    """
    Test if a given field is a related field.

    :param DjangoField field: A reference to the given field.
    :rtype: boolean
    :returns: A boolean value that is true only if the given field is related.
    """
    # Related fields are defined in django.db.models.fields.related.
    module_path = field.__module__
    return module_path.find('django.db.models.fields.related') != -1
import torch
def attention_score(att, mel_lens, r=1):
    """
    Returns a tuple of scores (loc_score, sharp_score), where loc_score measures monotonicity and
    sharp_score measures the sharpness of attention peaks

    Args (shapes per the unpacking below; dtype assumed float — TODO confirm):
        att: attention tensor of shape (batch, t_max, c_max)
        mel_lens: per-item mel lengths, shape (batch,)
        r: reduction factor applied to the mel lengths

    Returns:
        (loc_score, sharp_score): per-item score tensors of shape (batch,)
    """
    with torch.no_grad():
        device = att.device
        mel_lens = mel_lens.to(device)
        b, t_max, c_max = att.size()
        # create mel padding mask: 1 for valid frames, 0 for padding
        mel_range = torch.arange(0, t_max, device=device)
        mel_lens = mel_lens // r
        mask = (mel_range[None, :] < mel_lens[:, None]).float()
        # score for how adjacent the attention loc is:
        # a step counts as monotonic when the argmax position moves
        # forward by at most r (and never backwards).
        max_loc = torch.argmax(att, dim=2)
        max_loc_diff = torch.abs(max_loc[:, 1:] - max_loc[:, :-1])
        loc_score = (max_loc_diff >= 0) * (max_loc_diff <= r)
        # average over valid (unpadded) transitions only
        loc_score = torch.sum(loc_score * mask[:, 1:], dim=1)
        loc_score = loc_score / (mel_lens - 1)
        # score for attention sharpness: mean of the per-frame attention maxima
        sharp_score, inds = att.max(dim=2)
        sharp_score = torch.mean(sharp_score * mask, dim=1)
        return loc_score, sharp_score
def sort_lists(reference, x):
    """
    Sorts elements of list(s) `x` by sorting `reference`.

    Returns a zip of the sorted lists (reference values first).
    """
    # Sort the zipped tuples by their first element, i.e. the reference value.
    by_reference = lambda pair: pair[0]
    if isinstance(x[0], list):
        ordered = sorted(zip(reference, *x), key=by_reference)
    else:
        ordered = sorted(zip(reference, x), key=by_reference)
    return zip(*ordered)
def mjd_to_f(in_mjd):
    """Return the reciprocal of *in_mjd*.

    :param in_mjd: value to invert; must be non-zero
        (a zero raises ZeroDivisionError)
    :return: 1 / in_mjd
    """
    return 1 / in_mjd
def update_from_db():
    """read DB and returns a dict with the data
    The returned dict must have the following structure:
    {
        ssid1: {channel11: [url11_1, ..., url11_n], ..., channel1n: {...}},
        ...
        ssidn: {...}
    }
    where each ssid is a string, each channel is an int and each url is a
    string.
    """
    # Placeholder data until a real DB read is wired in.
    ssid = "JaverianaCali"
    channel = 1
    urls = ["www.google.com"]
    return {ssid: {channel: urls}}
import subprocess
def runBrowser(profileDir=None, browserArgs=None):
    """
    Run in browser via ``cfx run``.

    profileDir: optional profile directory, passed with -p
    browserArgs: optional binary arguments, passed with --binary-args
    Returns the subprocess exit code.
    """
    args = ["cfx", "run"]
    if profileDir is not None:
        args += ["-p", profileDir]
    if browserArgs is not None:
        args += ["--binary-args", browserArgs]
    return subprocess.call(args)
def pos_of(values, method):
    """Find the position of a value that is calculated by the given method
    (e.g. ``min`` or ``max``).

    Prints a warning when the value occurs more than once; the first
    matching index is returned.
    """
    target = method(values)
    matches = [idx for idx, val in enumerate(values) if val == target]
    if len(matches) > 1:
        print('Warning: The %s of the list is not distinct.' % (method,))
    return matches[0]
def get_weeks():
    """
    List all days of the week (Chinese labels, Monday first).

    :return: list of seven weekday name strings
    """
    prefix = '周'
    return [prefix + suffix for suffix in ('一', '二', '三', '四', '五', '六', '日')]
def crop_frame(img, width, height, x, y):
    """
    Returns a crop of image (frame) based on specified parameters

    Parameters
    ----------
    img : array
        Array representing an image (frame)
    width : int
        Width of the crop
    height : int
        Height of the crop
    x : int
        X position (left edge) of the crop
    y : int
        Y position (top edge) of the crop

    Returns
    -------
    array
        Cropped image (frame)
    """
    top, bottom = y, y + height
    left, right = x, x + width
    return img[top:bottom, left:right]
import math
def sigmoid(x):
    """
    Numerically stable logistic sigmoid.

    Args:
        x: number
    Returns: sigmoid(number), i.e. 1 / (1 + exp(-x))
    """
    # math.exp(-x) overflows for large negative x (e.g. x = -1000 raised
    # OverflowError in the original); branch so the exponent is always <= 0.
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
def parse_releases(content):
    """
    Parse latest releases of a manga

    Parameters
    ----------
    content : BeautifulSoup
        BeautifulSoup object of the releases page content.

    Returns
    -------
    releases : list of dicts
        List of latest releases of a manga.
        List is ordered latest-to-oldest
        ::
            [
                {
                    'chapter': 'chapter number',
                    'vol': 'volume number' or None,
                    'series': {
                        'name': 'Manga Name',
                        'id': 'Manga ID'
                    },
                    'group': {
                        'name': 'Scanlation Group Name',
                        'id': 'Scanlation Group ID'
                    }
                }
            ]
    """
    # Drop the trailing cell — it is not release data (assumed a footer;
    # TODO confirm against the live page layout).
    releases = content.find_all("div", class_="text", recursive=False)[:-1]
    results = []
    # Each release spans 5 consecutive cells; cell i itself is unused here
    # (presumably the release date — verify), then series, volume, chapter
    # and group follow.
    for i in range(0, len(releases), 5):
        release = {}
        # Series name plus its id extracted from the series link.
        release["series"] = {
            "name": releases[i + 1].get_text(),
            "id": releases[i + 1]
            .a["href"]
            .replace("https://www.mangaupdates.com/series.html?id=", ""),
        }
        # Volume may be blank; normalise to None.
        vol = releases[i + 2].get_text()
        release["vol"] = vol if vol else None
        release["chapter"] = releases[i + 3].get_text()
        # Scanlation group name plus its id extracted from the group link.
        release["group"] = {
            "name": releases[i + 4].get_text(),
            "id": releases[i + 4]
            .a["href"]
            .replace("https://www.mangaupdates.com/groups.html?id=", ""),
        }
        results.append(release)
    return results
def percent_to_volts(percentage: float) -> float:
    """Converts a float between 0 and 1 into an equivalent 3.3v value.

    :param percentage: fraction in [0, 1]
    :return: percentage * 3.3
    :raises ValueError: when percentage is outside [0, 1]
    """
    # Bug fix: the original only rejected values > 1, silently accepting
    # negatives despite the documented [0, 1] contract.
    if not 0 <= percentage <= 1:
        raise ValueError("volts expects a percentage between 0 and 1.")
    return percentage * 3.3
def get_attitude_data(db_connection, scanno):
    """Get attitude data for one scan from the level1 database.

    NOTE: closes ``db_connection`` before returning (side effect).

    Args (assumed from usage — confirm against callers):
        db_connection: pg-style connection exposing ``query``/``close``.
        scanno: calibration stw identifying the scan.

    Returns:
        dict of parallel lists keyed by 'stw', 'latitude', 'longitude',
        'sunzd' and 'orbit', ordered by stw.
    """
    # NOTE(review): scanno is interpolated directly into the SQL — ensure
    # it is a trusted integer (no parameter binding here).
    query = db_connection.query(
        '''select stw, latitude, longitude,
        sunzd, orbit
        from ac_level1b
        join attitude_level1 using (stw)
        where calstw = {0}
        order by stw'''.format(scanno))
    result = query.dictresult()
    db_connection.close()
    # Transpose the row dicts into column lists.
    return {
        'stw': [row['stw'] for row in result],
        'latitude': [row['latitude'] for row in result],
        'longitude': [row['longitude'] for row in result],
        'sunzd': [row['sunzd'] for row in result],
        'orbit': [row['orbit'] for row in result],
    }
def get_central_longitude(lon_deg):
    """Get the central meridian for the UTM zone containing a longitude.

    This determines the zone that a longitude corresponds to and returns
    that zone's central meridian in one step.
    See also http://www.jaworski.ca/utmzones.htm.

    Args:
        lon_deg: Longitude in degrees (positive = east of the Prime
            Meridian). May be a regular `float`, a one-dimensional NumPy
            array, or a Pandas Series.

    Returns:
        The longitude(s) of the central meridian(s) for each input
        longitude, as whole numbers (array-like in, array-like out).
        Positive values indicate longitudes east of the Prime Meridian.

    Examples:
        Knoxville, TN's longitude is 83W — Zone 17, central meridian 81W:

        >>> get_central_longitude(-83.9232)
        -81.0

        Knoxville, TN; Chicago, IL; and Tokyo, JP as a NumPy array:

        >>> get_central_longitude(np.array([-83.92, -87.63, 139.69]))
        array([-81., -87., 141.])
    """
    # Floor-divide by -6 to bucket into 6-degree zones (works elementwise
    # on arrays too), then map the zone back to its central meridian.
    zone_bucket = lon_deg // -6
    return (zone_bucket * -6) - 3
def extract_instance_name(url):
    """Given instance URL returns instance name (the last path segment)."""
    # rpartition yields the whole string as the tail when no '/' is present.
    _, _, name = url.rpartition('/')
    return name
def stream_table_dataframe_to_string(stream_table, **kwargs):
    """
    Render a stream table (a DataFrame) as a string.

    Any keyword argument understood by ``DataFrame.to_string`` is accepted;
    sensible defaults are supplied for na_rep, justify and float_format.
    """
    defaults = {
        "na_rep": "-",
        "justify": "center",
        "float_format": lambda x: "{:#.5g}".format(x),
    }
    # Caller-supplied kwargs win over the defaults.
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return stream_table.to_string(**kwargs)
def cmyk_to_luminance(r, g, b, a):
    """
    takes a RGB color and returns it grayscale value (Rec. 601 weights). See
    http://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/
    for more information
    """
    # Pure black short-circuits to the alpha value.
    if (r, g, b) == (0, 0, 0):
        return a
    luminance = 0.299 * r + 0.587 * g + 0.114 * b
    return luminance * a / 255
import os
def java_executable(env_prefix):
    """Returns the name of the Java executable.

    Prefers ``$JAVA_HOME/bin/java`` when it exists and is executable,
    otherwise falls back to ``<env_prefix>/bin/java``.
    """
    java_home = os.getenv('JAVA_HOME')
    if java_home:
        candidate = os.path.join(java_home, 'bin', 'java')
        if os.access(candidate, os.X_OK):
            return candidate
    return os.path.join(env_prefix, 'bin', 'java')
def salt_run_cli(salt_master):
    """
    Override salt_run_cli fixture to provide an increased default_timeout to the calls

    :param salt_master: salt master factory fixture
    :return: a salt-run CLI wrapper whose calls default to a 120s timeout
    """
    return salt_master.salt_run_cli(timeout=120)
def load(*args, **kwargs):
    """
    Entry point for server side interface code.

    All positional and keyword arguments are accepted and ignored; an
    empty dict is returned.
    """
    return dict()
def getDaySuffix(day):
    """Return st, nd, rd, or th for supplied day."""
    # 4-20 and 24-30 all take 'th'; the rest (1-3, 21-23, 31) key off the
    # last digit, which is always 1, 2 or 3 here.
    if 4 <= day <= 20 or 24 <= day <= 30:
        return 'th'
    suffixes = {1: 'st', 2: 'nd', 3: 'rd'}
    return suffixes[day % 10]
import bisect
def find_le(array, x):
    """Find rightmost value less than or equal to x.

    Example::

        >>> find_le([0, 1, 2, 3], 2.0)
        2

    Raises ValueError when every element of *array* is greater than x.
    """
    insert_at = bisect.bisect_right(array, x)
    if insert_at == 0:
        raise ValueError
    return array[insert_at - 1]
def flatten(array: list):
    """Flatten a list of iterables into a single list."""
    flat = []
    for sub in array:
        flat.extend(sub)
    return flat
def range_not_tag(length, tag, name=None, start=1):
    """Build a bounded regex fragment that matches text NOT containing ``tag``.

    :param length: expected field length; the pattern allows up to
        length * 3 characters (room for multi-byte text)
    :param tag: regex fragment that must not occur inside the match
    :param name: optional capture-group name
    :param start: minimum number of repetitions
    :return: str, the regex fragment
    """
    body = '(((?!%s).)*){%s,%s}' % (tag, start, length * 3)
    if name:
        return '(?P<%s>%s)' % (name, body)
    # Bug fix: the original implicitly returned None when no name was
    # given; now an unnamed group with the same pattern is returned.
    return '(%s)' % body
def get_actions(state):
    """ Returns the set of legal moves in a state.

    A move is ``((quadrant, cell), (board, rotation))`` where the cell is
    numbered 1-9 inside its quadrant, board runs 1-4 and rotation is
    'R' or 'L'. Moves are generated for every empty ('.') cell.
    """
    moves = list()
    for quadrant in range(3, -1, -1):
        for row in range(2, -1, -1):
            for col in range(2, -1, -1):
                if state[quadrant][row][col] != '.':
                    continue
                placement = (quadrant + 1, col + (row * 3) + 1)
                for board in range(1, 5):
                    moves.append((placement, (board, 'R')))
                    moves.append((placement, (board, 'L')))
    return moves
def _transaction_sort_key(txn):
"""
If there are payments and invoices created on the same day
we want to make sure to process the payments first and then
the invoices. Otherwise the transaction history on an
invoice will look like a drunk cow licked it, that's bad.
:param txn:
:return: sort key
"""
txn_date = txn.transacted_on.isoformat()[:10]
type_weight = "0" if txn.transaction_type == txn.PAYMENT else "1"
txn_id = str(txn.id) # sort multiple invoices on same day by primary key id
return txn_date + type_weight + txn_id | c6e74a6c87ec7c46dba0507bf144bfddac954e3c | 35,447 |
import os
import json
def read_json(file_dir, name, num_files):
    """Read the json output of clustering and group results per meeting.

    Args:
        file_dir: directory containing the ``<name>.<i>.json`` files.
        name: base file name.
        num_files: files are named ``name.1.json`` .. ``name.<num_files>.json``.

    Returns:
        dict mapping meeting name -> list of speaker-id lists (one list
        per sub-meeting), with the trailing end token "4" stripped.
    """
    meetings = {}
    for i in range(1, num_files + 1):
        file_path = os.path.join(file_dir, '%s.%d.json' % (name, i))
        # Use a context manager so the handle is closed (the original
        # leaked one file handle per input file).
        with open(file_path, 'rb') as _json_file:
            inputjson = json.load(_json_file)['utts']
        for submeeting_name, submeeting_content in inputjson.items():
            # "meeting-sub" -> "meeting"
            meeting_name = submeeting_name.rsplit('-', 1)[0]
            speaker_ids = submeeting_content["output"][0]["rec_tokenid"].split()
            # Every sequence is expected to end with the stop token "4".
            assert speaker_ids[-1] == str(4)
            meetings.setdefault(meeting_name, []).append(speaker_ids[:-1])
    return meetings
def get_reordered_parameters(parameters):
    """Reorder parameters so that every bias parameter comes last.

    :param parameters: iterable of parameters with a ``name`` attribute
    :return: (num_non_bias, total, reordered_params_tuple)
    """
    non_bias = [p for p in parameters if "bias" not in p.name]
    biases = [p for p in parameters if "bias" in p.name]
    reordered = tuple(non_bias + biases)
    return len(non_bias), len(reordered), reordered
import os
def recursive_scandir(top_dir, dir_first=True):
    """Recursively scan a path.
    Args:
        top_dir: The path to scan.
        dir_first: If true, yield a directory before its contents.
            Otherwise, yield a directory's contents before the
            directory itself.
    Returns:
        A generator of tuples of a path relative to the top path, and an
        os.DirEntry object of the file or directory at that path. The
        top_dir itself is not included.
    """
    def f(relpath, dir_entry):
        # dir_entry is None only for the initial call on top_dir itself —
        # that is why top_dir is never yielded.
        if dir_first and dir_entry is not None:
            yield relpath, dir_entry
        path = os.path.join(top_dir, relpath)
        for entry in os.scandir(path):
            entry_relpath = os.path.join(relpath, entry.name)
            if entry.is_dir():
                # Recurse; the subdirectory entry itself is yielded inside
                # f, before or after its contents per dir_first.
                for item in f(entry_relpath, entry):
                    yield item
            else:
                yield entry_relpath, entry
        if not dir_first and dir_entry is not None:
            yield relpath, dir_entry
    return f('', None)
def cleanup_column_names(df,rename_dict={},do_inplace=True):
    """Rename the columns of a pandas dataframe.

    Column names are converted to snake case when rename_dict is empty.

    Args:
        rename_dict (dict): keys represent old column names and values
            point to newer ones
        do_inplace (bool): flag to update existing dataframe or return a
            new one

    Returns:
        pandas dataframe if do_inplace is set to False, None otherwise
    """
    if rename_dict:
        return df.rename(columns=rename_dict, inplace=do_inplace)
    # Lower-case and underscore every existing column name.
    snake_case = {col: col.lower().replace(' ', '_')
                  for col in df.columns.values.tolist()}
    return df.rename(columns=snake_case, inplace=do_inplace)
import math
def returnNewDelta(delta_old):
    """returns the side of the new polygonal approximation.

    Arguments:
    delta_old -- the side of the previous approximation
    """
    half_chord_sq = 0.25 * delta_old ** 2
    return math.sqrt(2. * (1. - math.sqrt(1. - half_chord_sq)))
import os
def maybe_java_home(s):
    """
    If JAVA_HOME is in the environ, return $JAVA_HOME/bin/s. Otherwise,
    return s unchanged.
    """
    java_home = os.environ.get("JAVA_HOME")
    if java_home is None:
        return s
    return os.path.join(java_home, "bin", s)
import os
def input_file(s_input: str) -> str:
    """
    Used for parsing some inputs to this program, namely filenames given
    as input. Whitespace is removed, but no case-changing occurs.
    Existence of the file is verified.

    :param s_input: String passed in by argparse
    :return str: The absolute path of the file
    :raises FileNotFoundError: when the file does not exist
    """
    candidate = s_input.strip()
    if os.path.exists(candidate):
        return os.path.abspath(candidate)
    raise FileNotFoundError("The input file [%s] does not exist." % candidate)
import sys
def NotecardExceptionInfo(exception):
"""Construct a formatted Exception string.
Args:
exception (Exception): An exception object.
Returns:
string: a summary of the exception with line number and details.
"""
name = exception.__class__.__name__
return sys.platform + ": " + name + ": " \
+ ' '.join(map(str, exception.args)) | 7249250ed384b02a26f96d6e26c02967d7111c32 | 35,456 |
def max_contig_sum(L):
    """ L, a list of integers, at least one positive
    Returns the maximum sum of a contiguous subsequence in L """
    # Kadane's algorithm: grow the running sum, reset it when it drops
    # below zero, and remember the best sum seen so far.
    best = 0
    running = 0
    for value in L:
        running += value
        if running < 0:
            running = 0
        elif running > best:
            best = running
    return best
def cal_recom_result(user_click, user_sim):
    """
    Recommend items with the user-cf algorithm.

    :param user_click: dict, user click sequences; key userid,
        value [itemid1, itemid2, ...]
    :param user_sim: pre-sorted user similarity matrix; key userid,
        value list [(userid_j, score1), (userid_k, score2), ...]
    :return: dict, key userid, value dict {itemid: recom_score}
    """
    recom_result = {}
    topk_user = 3
    item_num = 5
    for user, item_list in user_click.items():
        # Items the user has already clicked must not be recommended back.
        clicked = set(item_list)
        recom_result.setdefault(user, {})
        for userid_j, sim_score in user_sim[user][:topk_user]:
            # Skip similar users that have no click sequence of their own.
            if userid_j not in user_click:
                continue
            for itemid_j in user_click[userid_j][:item_num]:
                # Bug fix: the original built the clicked-item set
                # (tmp_dict) but never consulted it, so already-clicked
                # items leaked into the recommendations.
                if itemid_j in clicked:
                    continue
                recom_result[user].setdefault(itemid_j, sim_score)
    return recom_result
def test_module(unifi_session):
    """
    Test Module for Demisto

    :param unifi_session: Unifi Session from Unifi class
    :return: "ok" when the session has a base_url, otherwise False
    """
    if unifi_session.base_url:
        return "ok"
    return False
import argparse
from pydoc import locate
import yaml
def parse(arg_file, description=None):
    """
    Build an argparse parser from a YAML argument-spec file and parse argv.

    :param description: string, parser description
    :param arg_file: string, path to the YAML file describing the arguments
    :returns: dict of parsed argument values

    Example YAML file for arguments:
    ---
    string:
      help: Example string parameter.
      type: str
      default: 'default string'
    bool_false:
      help: Example boolean with a default of false.
      default: False
      action: 'store_true'
    bool_true:
      help: Example boolean with a default of true
      default: True
      action: 'store_false'
    list:
      help: List of n number of unnamed arguments.
      default: ''
      nargs: '*'
    Example usage:
    python3.8 ./script.py --string "this is a test string" --bool_false --bool_true --list a b c
    returns
    {'string': 'this is a test string', 'bool_false': True, 'bool_true': False, 'list': ['a', 'b', 'c']}
    """
    with open(arg_file, 'r') as y:
        data = yaml.load(y, Loader=yaml.FullLoader)
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
    # Each top-level YAML key becomes one "--<key>" option whose remaining
    # mapping entries are passed straight to add_argument.
    for argument, parameters in data.items():
        if 'type' in parameters.keys():
            # locate() resolves the type name string (e.g. 'str') into the
            # actual callable argparse expects.
            parameters['type'] = locate(parameters['type'])
        parser.add_argument('--' + argument, **parameters)
    return vars(parser.parse_args())
def denormalize(column, startvalue, endvalue):
    """
    Map values in [0, 1] back to the absolute range
    [startvalue, endvalue].
    """
    # Guard clauses: both bounds must be valid absolute (positive) values.
    if startvalue <= 0:
        raise ValueError("start and endval must be given as absolute times")
    if endvalue < startvalue:
        raise ValueError("start and endval must be given, endval must be larger")
    span = endvalue - startvalue
    return [(elem * span) + startvalue for elem in column]
import requests
import json
def post_request(url, json_payload, **kwargs):
    """
    post_request function is used to send a POST request in order to
    interact with remote services through rest apis.

    url: url to the remote service ("/post" is appended)
    json_payload: json object containing remote service input parameters
    kwargs: additional parameters such as 'cp_cl_api_key', forwarded as
        the 'cp_api_key' header to authenticate

    Returns the decoded JSON response dict, or {} on failure.
    """
    try:
        if 'cp_cl_api_key' in kwargs:
            # prepare url to send a post request
            url = url + "/post"
            # prepare headers
            headers = {'Content-Type': 'application/json', 'cp_api_key': kwargs['cp_cl_api_key']}
            # send request
            print(json_payload)
            response = requests.post(url=url, headers=headers, json=json_payload)
        else:
            # no service key has been specified
            print("no cp_cl_api_key has been specified")
            return {}
    except requests.exceptions.RequestException as exc:
        # Fix: catch only network/HTTP errors instead of a bare ``except``,
        # so programming errors are no longer silently swallowed.
        print("Network exception occurred with POST request: {}".format(exc))
        return {}
    status_code = response.status_code
    print("post_request: received response with status code {}".format(status_code))
    json_data = json.loads(response.text)
    return json_data
def tokenise_table_name(table_name):
    """Given a feature class or feature dataset name, returns the database
    (optional), schema (optional) and simple name as a dict."""
    parts = table_name.split(".")
    if len(parts) == 3:
        database, schema, name = parts
        return {"database": database, "schema": schema, "name": name}
    if len(parts) == 2:
        schema, name = parts
        return {"database": None, "schema": schema, "name": name}
    # No dots (or more than two): the whole string is the simple name.
    return {"database": None, "schema": None, "name": table_name}
from typing import List
def get_indices_of_extra_letters(row: List[str], split: int, key_length: int) -> List[int]:
    """
    Returns indices of all letters which don't belong in a row because they make its length bigger
    than the given split.
    """
    # NOTE(review): the original first computed a difference from the row's
    # length (ignoring '_' placeholders) and immediately overwrote it with
    # ``key_length - split``; the dead first computation has been removed.
    difference = key_length - split
    if difference == 0:
        return []
    # Extra letter indices start from the split's index and go till the end of that row
    indices_of_extra_letters = [i for i in range(split, key_length)]
    # Remove indices of '_' placeholders, and make sure the indices list is only
    # as long as the original key
    indices_of_extra_letters = [e for e in indices_of_extra_letters[:key_length] if row[e] != '_']
    return indices_of_extra_letters
import subprocess
def test_specification(module) -> str:
    """Return the pytest test specification of *module*.

    Runs ``pytest --collect-only --quiet`` against the module's file and
    joins the collected output lines with ``<br/>`` (HTML line breaks).
    """
    # NOTE(review): module.__file__ is interpolated into a shell command
    # (shell=True) — ensure the path cannot contain untrusted input.
    return "<br/>".join(
        subprocess.run(
            f"pytest --collect-only --quiet {module.__file__}",
            universal_newlines=True,
            check=True,
            shell=True,
            stdout=subprocess.PIPE,
        ).stdout.splitlines()
    )
def to_index_tuple(idx):
    """Converts a numpy array to a tuple of integer indexes.

    Args:
        idx: The numpy array containing the indexes.

    Returns:
        A tuple of indexes.
    """
    # astype(int) truncates toward zero; tolist() yields plain Python ints.
    as_ints = idx.astype(int)
    return tuple(as_ints.tolist())
def ConvertTokenToInteger(string, location, tokens):
    """Pyparsing parse action callback to convert a token into an integer value.

    Args:
        string (str): original string.
        location (int): location in the string where the token was found.
        tokens (list[str]): tokens.

    Returns:
        int: integer value or None when the token is not a base-10 integer.
    """
    try:
        return int(tokens[0], 10)
    except ValueError:
        return None
def midpoint(imin, imax):
    """Returns the middle point (integer floor of the average).

    >>> midpoint(0, 0)
    0
    >>> midpoint(0, 1)
    0
    >>> midpoint(0, 2)
    1
    >>> midpoint(1, 1)
    1
    >>> midpoint(1, 2)
    1
    >>> midpoint(1, 5)
    3
    """
    # Bug fix: "/" is true division in Python 3, so e.g. midpoint(0, 1)
    # returned 0.5 although every doctest expects an int; use floor
    # division instead.
    return (int(imin) + int(imax)) // 2
def get_num_image_channels(module_or_spec, signature=None, input_name=None):
  """Returns expected num_channels dimensions of an image input.
  This is for advanced users only who expect to handle modules with
  image inputs that might not have the 3 usual RGB channels.
  Args:
    module_or_spec: a Module or ModuleSpec that accepts image inputs.
    signature: a string with the key of the signature in question.
      If None, the default signature is used.
    input_name: a string with the input name for images. If None, the
      conventional input name `images` for the default signature is used.
  Returns:
    An integer with the number of input channels to the module.
  Raises:
    ValueError: If the channel information is missing or malformed.
  """
  if input_name is None:
    input_name = "images"
  input_info_dict = module_or_spec.get_input_info_dict(signature)
  try:
    shape = input_info_dict[input_name].get_shape()
  except KeyError:
    raise ValueError("Module is missing input '%s' in signature '%s'." %
                     (input_name, signature or "default"))
  try:
    # Expecting rank 4: [batch_size, height, width, num_channels]; a wrong
    # rank makes as_list() unpacking raise ValueError.
    _, _, _, num_channels = shape.as_list()
    if num_channels is None:
      # Deliberately funneled into the except below so both failure modes
      # share the same error message.
      raise ValueError
  except ValueError:
    raise ValueError(
        "Shape of module input is %s, "
        "expected [batch_size, height, width, num_channels] "
        "with known num_channels" % shape)
  return num_channels
def normalize_information_coefficients(a, method, clip_min=None, clip_max=None):
    """
    Normalize an array of information coefficients into [0, 1].

    :param a: array; (n_rows, n_columns)
    :param method: '0-1' (min-max scaling), 'p1d2' (map [-1, 1] onto
        [0, 1]) or 'clip' (clip to [clip_min, clip_max])
    :param clip_min: lower bound, used by method 'clip' only
    :param clip_max: upper bound, used by method 'clip' only
    :return: array; (n_rows, n_columns)
    """
    if method == '0-1':
        lo, hi = a.min(), a.max()
        return (a - lo) / (hi - lo)
    if method == 'p1d2':
        return (a + 1) / 2
    if method == 'clip':
        return a.clip(clip_min, clip_max)
    raise ValueError('Unknown method {}.'.format(method))
import warnings
def check_case(name):
    """
    Warn when the given name is not already in upper case, then return it
    converted to upper case.
    """
    upper_name = name.upper()
    if upper_name != name:
        warnings.warn("Mixed case names are not supported in database object names.", UserWarning)
    return upper_name
import requests
def get_elevation_for_location(latitude: float, longitude: float):
    """
    Function to get the elevation of a specific location given the latitude and
    the longitude

    Queries the open-elevation.com REST API (network access required).

    :param latitude: latitude in degrees (rounded to 4 decimals for the query)
    :param longitude: longitude in degrees (rounded to 4 decimals)
    :return: elevation as an int (presumably metres — confirm with the API docs)
    """
    url = (
        f"https://api.open-elevation.com/api/v1/lookup?"
        f"locations={round(latitude, 4)},{round(longitude, 4)}"
    )
    # NOTE(review): no error handling — a failed request or an unexpected
    # JSON shape will raise; confirm callers expect that.
    elevation = int(requests.get(url, timeout=30).json()["results"][0]["elevation"])
    return elevation
from typing import List
def listToCSV(lst: List) -> str:
    """
    Changes a list to csv format
    >>> listToCSV([1,2,3])
    '1,2,3'
    >>> listToCSV([1.0,2/4,.34])
    '1.0,0.5,0.34'
    """
    # str.join is linear; the original repeated "s += ..." concatenation is
    # quadratic and needed a trailing-comma trim.
    return ",".join(str(a) for a in lst)
def fibonacci(n):
    """
    Return the Nth Fibonacci number (1-indexed: F(1)=0, F(2)=1, ...).
    >>> fibonacci(3)
    1
    >>> fibonacci(10)
    34
    The input value can only be an integer, but integers lesser than or equal to 0 are invalid, since the series is not defined in these regions.
    """
    # NOTE: the original doctest claimed fibonacci(10) == 55, but with this
    # indexing (F(1)=0) the value is 34; the doctest above is corrected.
    if n <= 0:
        return "Incorrect input."
    # Iterative pairwise update replaces the original double recursion,
    # which was exponential in n.
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return prev
def _CreatePatchInstanceFilter(messages, filter_all, filter_group_labels,
filter_zones, filter_names,
filter_name_prefixes):
"""Creates a PatchInstanceFilter message from its components."""
group_labels = []
for group_label in filter_group_labels:
pairs = []
for key, value in group_label.items():
pairs.append(
messages.PatchInstanceFilterGroupLabel.LabelsValue.AdditionalProperty(
key=key, value=value))
group_labels.append(
messages.PatchInstanceFilterGroupLabel(
labels=messages.PatchInstanceFilterGroupLabel.LabelsValue(
additionalProperties=pairs)))
return messages.PatchInstanceFilter(
all=filter_all,
groupLabels=group_labels,
zones=filter_zones,
instances=filter_names,
instanceNamePrefixes=filter_name_prefixes,
) | 7692812fe66b8db42bd76550281c7751d7648b1c | 35,481 |
import re
def fix_dependent_sources(input_line):
    """ Fix table syntax for dependent sources.

    Rewrites a netlist line using `table` into a legal form:
    * `table {expr} ((x1,y1) ... (xn,yn))` gains the missing `=` sign, and
    * `table {expr} = (x1 y1) ... (xn yn)` gets commas inserted between the
      coordinates of each point.
    Lines not matching either pattern are returned unchanged.
    """
    # Case-insensitive probe: does this line use a table at all?
    table_statement = re.match(r".*table.*", input_line, flags=re.IGNORECASE)
    n_line = input_line
    # If source values come from a table
    if table_statement:
        # If there is no equal sign, the following (illegal) syntax is being used: table {expr} ((x1,y1) ... (xn,yn))
        if not "=" in table_statement[0]:
            # Insert ` = ` between the expression and the point list, dropping
            # the outermost pair of parentheses around the points.
            n_line = re.sub(r"([^\n]+table\s+)([^\n]+)(?=\s*\(\s*\()\(([^\n]+)\)",
                            repl=r"\1\2 = \3",
                            string=n_line,
                            flags=re.IGNORECASE)
        # If there is an equal sign, but no commas between the values: table {expr} = (x1 y1) ... (xn yn)
        else:
            # Group 4 captures everything after ` = `, i.e. the point list.
            table_points_match = re.match(r"([^\n]+table\s+)([^\n]+)(\s=\s)([^\n]+)", n_line, flags=re.IGNORECASE)
            if table_points_match:
                table_points = table_points_match[4]
                # Replace the whitespace/comma run inside each (x y) pair with
                # a single comma.
                new_table_points = re.sub(r"(\([^,\s]+)([,\s]+)([^,\s]+\))",
                                          repl=r"\1,\3",
                                          string=table_points,
                                          flags=re.IGNORECASE)
                # Splice the normalized point list back into the line.
                n_line = re.sub(r"([^\n]+table\s+)([^\n]+)(\s=\s)([^\n]+)",
                                repl=fr"\1\2\3{new_table_points}",
                                string=input_line,
                                flags=re.IGNORECASE)
    return n_line
import math
def row_deco_2(in_array, num_chunks, ghost_zone_size, block):
    """
    Decompose the rows of a 3-D raster into ``num_chunks`` row chunks whose
    interior heights are whole multiples of ``block``, centring the interior
    within the ghost zone.

    Parameters:
    ---------
    in_array: array-like, shape (bands, rows, cols)
        Only the shape is inspected; no data is sliced or copied here.
    num_chunks: int
        Number of row chunks to produce.
    ghost_zone_size: int
        Halo rows excluded from the interior at the top and bottom.
    block: int
        Row granularity; every chunk spans a whole number of blocks.

    Returns:
    ---------
    chunks: nested list
        [[dataid, start_idx, end_idx], [dataid, start_idx, end_idx]]
        the start_idx and end_idx that is returned accounts for the
        ghost zone and the centring offset; end_idx is exclusive, so the
        chunk's rows are in_array[:, start_idx:end_idx, :].
    """
    tot_rows = in_array.shape[1]
    tot_cols = in_array.shape[2]  # NOTE(review): unused — kept for parity with shape unpacking
    rows_in_interior = tot_rows - ghost_zone_size*2
    start_idx = ghost_zone_size
    extras = 0
    while rows_in_interior%block != 0: # this loop ensures that the data chunks are divisible by block
        rows_in_interior-=1
        extras+=1
    extras_top = math.ceil(extras/2) # so only take half of the extras
    # extras_bot = math.floor(extras/2) # this isn't used
    start_idx+=extras_top # add the half the extra rows to the top of the raster to center the interior
    tot_blocks_in_interior = rows_in_interior / block
    blocks_per_chunk = tot_blocks_in_interior // num_chunks
    remaining = tot_blocks_in_interior % num_chunks
    end = start_idx + rows_in_interior # the last row of pixels in the last datachunk
    chunk_id = 0
    chunks = []
    while start_idx < end:
        end_idx = start_idx+(blocks_per_chunk*block)
        # The first `remaining` chunks absorb one extra block each so that
        # all blocks are distributed even when they don't divide evenly.
        if remaining > 0:
            end_idx += block
            remaining -= 1
        #chunks.append([chunk_id,in_array[:,start_idx:end_idx,:]])
        chunks.append([chunk_id, int(start_idx), int(end_idx)]) # don't actually need the data, just the location for slices
        chunk_id+=1
        start_idx = end_idx
    return chunks
import re
def _normalizeText(text):
"""Normalize text before transforming."""
text = text.lower()
text = re.sub(r'<br />', r' ', text).strip()
text = re.sub(r'^https?:\/\/.*[\r\n]*', ' L ', text, flags=re.MULTILINE)
text = re.sub(r'[\~\*\+\^`_#\[\]|]', r' ', text).strip()
text = re.sub(r'[0-9]+', r' N ', text).strip()
text = re.sub(r'([/\'\-\.?!\(\)",:;])', r' \1 ', text).strip()
return text.split() | 021007a7773902de7fd167803639650ccb6e5ba8 | 35,489 |
def sublista_gestos_compostos(gesto, m):
    """
    Check whether gesture *m* belongs to a compound gesture.

    :param gesto: list of compound gestures (each a joined sequence).
    :param m: an individual gesture.
    :return: the first compound gesture containing *m*; otherwise the
        single-element list ``[m]``.
    """
    return next((g for g in gesto if m in g), [m])
import subprocess
def should_gather_results(directory, min_threshold):
    # type: (str, int) -> bool
    """Should results be gathered for `directory` based on `min_threshold`?

    :param directory: directory whose filesystem usage is checked.
    :param min_threshold: minimum used-capacity percentage that must be
        exceeded for results to be gathered.
    :return: True if the results should be gathered; False otherwise.
    """
    # Sample `df -P` output (the -P flag keeps everything on one line):
    #   Filesystem 1024-blocks Used Available Capacity Mounted on
    #   /dev/sdb1 2113645484 2006254976 0 100% /local/home
    df_output = subprocess.check_output(
        ["df", "-P", directory],
    )
    data_lines = df_output.splitlines()
    assert len(data_lines) == 2, "Output unexpected: %s" % df_output
    # Columns 1 and 2 of the stats row are total blocks and used blocks.
    total_blocks, used_blocks = (int(field) for field in data_lines[-1].split()[1:3])
    return min_threshold < 100 * used_blocks / total_blocks
def med_is_decimal(s):
    """Takes a string and returns whether all characters of the string are digits.

    An empty string yields True (vacuously), matching the original
    set-subset implementation.
    """
    digits = frozenset('0123456789')
    return all(ch in digits for ch in s)
def filter_on_cdr3_length(df, max_len):
    """
    Keep only rows whose CDR3 ('amino_acid' column) has length at most
    `max_len`.
    """
    short_enough = df['amino_acid'].apply(len) <= max_len
    return df.loc[short_enough]
def calc_array_coverage(solid_angle, number_of_bars):
    """
    Calculate the total solid-angle coverage of an entire array.

    :param solid_angle: solid angle of a single bar, in sr.
    :param number_of_bars: number of solids (bars) in the array.
    :return: combined coverage of the array, in sr.
    """
    return solid_angle * number_of_bars
import mmap
import pickle
def load(path):
    """Load a serialized object with out-of-band data from path, based on
    zero-copy shared memory.

    Counterpart of serialize(): `path` contains one `<idx>.bin` file per
    out-of-band pickle buffer plus a `meta.pkl` holding the pickle stream.

    Parameters
    ----------
    path : pathlib.Path
        Folder used to save serialized data with serialize(). Usually a
        folder under /dev/shm.
    """
    num_buffers = len(list(path.iterdir())) - 1  # every file except meta.pkl
    buffers = []
    for idx in range(num_buffers):
        # A POSIX mmap stays valid after its file object is closed, so use
        # a `with` block instead of leaking one descriptor per buffer.
        with open(path / f'{idx}.bin', 'rb') as f:
            buffers.append(mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ))
    with open(path / 'meta.pkl', 'rb') as f:
        return pickle.load(f, buffers=buffers)
import socket
def GetOpenPort():
  """Returns an open port on the host machine.

  The probe socket is closed before returning so neither the file
  descriptor nor the port is leaked for the life of the process.

  Return:
    an open port number as an int
  """
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  try:
    # Binding to port 0 asks the kernel for any free port.
    sock.bind(('', 0))
    return int(sock.getsockname()[1])
  finally:
    sock.close()
import torch
def secondary_sequence_metrics(x, ss_num=7):
    """
    Compute metrics for secondary structure labels.

    Counts the fraction of residues carrying each secondary-structure label
    (column 1 of `x`, values assumed in 1..ss_num) and the median length of
    a maximal run of identical labels (e.g. H, H, ..., H).

    Parameters
    ----------
    x : torch.Tensor
        The node features; column 1 holds the secondary-structure label.
    ss_num : int, optional
        The number of secondary structures. The default is 7.

    Returns
    -------
    scores : torch.Tensor
        [SS_1_fraction, ..., SS_ss_num_fraction, median_run_len]
    """
    n_residues = x.shape[0]
    counts = [0] * ss_num
    run_lengths = []
    run_len, prev_label = 0, -1
    for idx in range(n_residues):
        label = int(x[idx, 1])
        counts[label - 1] += 1
        if prev_label == -1:
            # First residue starts the first run.
            run_len = 1
            prev_label = label
        elif label != prev_label:
            run_lengths.append(run_len)
            run_len = 1
            prev_label = label
        else:
            run_len += 1
    run_lengths.append(run_len)  # close the final run
    fractions = [c / n_residues for c in counts]
    median_len = float(torch.median(torch.tensor(run_lengths)))
    return torch.tensor(fractions + [median_len])
import os
def get_export_arguments():
    """Convert environment values into arguments.

    Reads EXPORT_PARENT, GCS_DESTINATION, EXPORT_CONTENT_TYPES, and
    EXPORT_ASSET_TYPES; the two *_TYPES variables are comma-separated
    lists whose items are stripped of surrounding whitespace.
    """
    env = os.environ
    content_types = [item.strip() for item in env['EXPORT_CONTENT_TYPES'].split(',')]
    asset_types = [item.strip() for item in env['EXPORT_ASSET_TYPES'].split(',')]
    return env['EXPORT_PARENT'], env['GCS_DESTINATION'], content_types, asset_types
import sys
def getPythonVersion(verbose=False):
    """Return a human-readable description of the running Python version.

    With verbose=False: 'Python <major>.<minor>'.
    With verbose=True: full version string, interpreter flags, copyright,
    and the interpreter executable path.
    """
    if verbose:
        return (
            "Python: {version}\n{flags}\n{copyright}\nBackend Python Library: {backend}"
        ).format(
            version=str(sys.version),
            flags=str(sys.flags),
            copyright=str(sys.copyright),
            backend=str(sys.executable),
        )
    return "Python {major}.{minor}".format(
        major=str(sys.version_info[0]),
        minor=str(sys.version_info[1]),
    )
import re
def clean_text(text):
    """
    Remove code blocks, urls, and html tags.
    """
    # Strip <code>, <div>, and <blockquote> elements together with their
    # contents, then any remaining bare tags.
    for tag in ('code', 'div', 'blockquote'):
        pattern = r'<%s[^>]*>(.+?)</%s\s*>' % (tag, tag)
        text = re.sub(pattern, '', text, flags=re.DOTALL | re.MULTILINE)
    text = re.sub('<.*?>', '', text)
    # NOTE(review): this replace is a no-op as written; it likely meant to
    # unescape '&quot;' to '"' before the dump garbled it — confirm.
    text = text.replace('"', '"')
    # Drop URLs.
    text = re.sub(r'http\S+', '', text)
    text = re.sub(r'www.\S+', '', text)
    return text
import csv
def parse_labels_months(file_path='data/ae_pseudonyms.csv') -> dict:
    """
    Parse ages (in months) from a csv file.

    Column 1 holds a path whose last component is used as the key; column 3
    holds the age in days, which is converted to months (~31 days/month).
    """
    ages = {}
    with open(file_path, newline='') as handle:
        for record in csv.reader(handle):
            stub = record[1].split('/')[-1]
            ages[stub] = float(record[3]) / 31
    return ages
def list_string_to_dict(string):
    """Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``.

    Each element maps to its index; for duplicate elements the last
    occurrence wins, matching the original update-in-a-loop behavior.
    """
    return {c: idx for idx, c in enumerate(string)}
import os
def check_op_libSVM(input_dir='.', delete_file=True):
"""Perform terminal operation to identify possible classification failures
on the basis of number of files.
This works only for libSVM classification with stored results, as it
relies on files stored in the persistency directories.
This function navigates to input_dir (which is the result directory of the
classification) and checks the number of files starting with 'features' in
'persistency_run0/LibSVMClassifierNode/' in each subdirectory. In case the
classification was successfully performed, the number of files here should
equal the number of splits used. If not, this is a hint that something
went wrong!
The list returned by this function contains alternating
(i) name of 'root directory' for the respective condition
(ii) number of files
...
.. note:: This function only works if the feature*.pickle files are
explicitly saved in your NodeChain!
**Parameters**
:input_dir:
optional: string with the path where csv files are stored.
default='.'
:delete_file:
optional: controls if the file 'temp_check_op.txt' will be removed
default=True
:Author: Sirko Straube, Anett Seeland
:Created: 2010/11/09
"""
#navigating to operation dir
current_path=os.getcwd()
os.chdir(input_dir)
#rcode=os.system('cd ' + input_dir)
#analyzing directories and writing results in temp_check_op.txt
rcode=os.system('for f in *; do if [ -d $f ]; then echo $f; ' +
'echo find $f/persistency_run0/LibSVMClassifierNode/feature*.pickle ' +
'| wc -w; fi; done > temp_check_op.txt')
#transferring data to Python list
f=open('temp_check_op.txt')
oplist=[]
for line in f:
oplist.append(line)
f.close()
#probably deleting and navigating back
if delete_file:
rcode=os.system('rm temp_check_op.txt')
rcode=os.system('cd ' + current_path)
return oplist | 6217ad4bf384841bd4da2765d5278e3a69819d35 | 35,507 |
def _consume_rule(string):
""" Usage:
>>> _consume_rule('one {ONE} other {OTHER}')
<<< ('one', '{ONE} other {OTHER}')
>>> _consume_rule('other {OTHER}')
<<< ('other', '{OTHER}')
"""
def _get_rule_num(rule):
return {
'zero': 0, 'one': 1, 'two': 2,
'few': 3, 'many': 4, 'other': 5
}[rule]
left_bracket_pos = string.index('{')
rule = string[:left_bracket_pos].strip()
if rule[0] == "=":
rule = rule[1:]
rule = int(rule)
rule = {0: "zero",
1: "one", 2: "two", 3: "few", 4: "many", 5: "other"}[rule]
else:
if rule not in ('zero', 'one', 'two', 'few', 'many', 'other'):
raise ValueError()
return _get_rule_num(rule), string[left_bracket_pos:].strip() | 398e3f16884ac4f2fe567d9eaa0e14c0e8f7fa42 | 35,508 |
def bytes_leading(raw_bytes, needle=b'\x00'):
    """
    Count how many times the needle byte prefixes the haystack.
    Useful when you want to deal with padding.
    :param raw_bytes:
        Raw bytes.
    :param needle:
        The byte to count. Default \x00.
    :returns:
        The number of leading needle bytes.
    """
    # Indexing a bytes object yields an int on Python 3 (and a str on 2),
    # so comparing element-wise keeps both behaviors.
    target = needle[0]
    count = 0
    for current in raw_bytes:
        if current != target:
            break
        count += 1
    return count
def tags_from_context(context):
    """Helper to extract meta values from a Celery Context"""
    interesting_keys = (
        'compression', 'correlation_id', 'countdown', 'delivery_info', 'eta',
        'exchange', 'expires', 'hostname', 'id', 'priority', 'queue', 'reply_to',
        'retries', 'routing_key', 'serializer', 'timelimit', 'origin', 'state',
    )
    tags = {}
    for key in interesting_keys:
        value = context.get(key)
        if value is None or value == '':
            continue  # unset keys are not reported
        if key == 'timelimit' and value in [(None, None), [None, None]]:
            continue  # default/unset time limit
        if key == 'retries' and value == 0:
            continue  # no retries yet
        # Celery 4.0 uses `origin` instead of `hostname`; keep emitting the
        # same tag name regardless of Celery version.
        tag_key = 'hostname' if key == 'origin' else key
        tags['celery.{}'.format(tag_key)] = value
    return tags
import re
def check_email(email):
    """
    Checks if an email address has a valid shape (local@domain.tld).
    :param email: email address to check
    :return: True if the email looks valid, False otherwise
    """
    pattern = r"[^@]+@[^@]+\.[^@]+"
    return re.fullmatch(pattern, email) is not None
def jaccard(a, b):
    """Compute a Jaccard-style dissimilarity of two scalars.

    NOTE(review): this is not the classic set-based Jaccard index — the
    formula below operates on two numbers; confirm the intended semantics
    with the callers.

    Args:
        a (float): first value.
        b (float): second value.

    Returns:
        ``float``: the dissimilarity score ``jac``.
    """
    product = a * b
    denominator = 2 * abs(a) + 2 * abs(b) - product
    return 1 - product / denominator
from typing import Sequence
def partial_numerator(n: int, a0: int, block: Sequence[int]) -> int:
    """Computes the numerator of the partial quotient p_n/q_n for the continued
    fraction expansion sqrt(D) = [a0; (block)].

    Uses the standard recurrence p_k = a_k * p_{k-1} + p_{k-2} iteratively;
    the original double recursion recomputed subproblems and was exponential
    in n, this version is O(n).

    Adapted from http://mathworld.wolfram.com/ContinuedFraction.html"""
    # handle base case
    if n == 0:
        return a0
    period = len(block)
    # p_1 = a0 * a1 + 1; iterate the recurrence up to p_n
    prev, curr = a0, a0 * block[0] + 1
    for k in range(2, n + 1):
        prev, curr = curr, block[(k - 1) % period] * curr + prev
    return curr
import glob
import os
def contains_results(path, min_num=1):
    """
    Determine whether the directory at *path* holds at least *min_num*
    '*-results.json' files.
    """
    result_files = glob.glob(os.path.join(path, '*-results.json'))
    return len(result_files) >= min_num
def all_cnt_info():
    """
    Return a dict describing every constant the model needs
    (name -> human-readable description).
    """
    return {
        'area': 'Catchment size in square km',
        'timestep': 'Time step for the model relative to an hourly timestep',
    }
import sys
import getpass
def _ask_pass(prompt: str) -> str:
    """Prompt for a password on the console, hiding the typed input when
    stdin is an interactive TTY."""
    full_prompt = "%s: " % prompt
    if not sys.stdin.isatty():
        # Non-interactive input (pipe/redirect): plain read, nothing to hide.
        return input(full_prompt)
    return getpass.getpass(full_prompt)
def ir(x):
    """
    Round a floating-point number to the nearest integer (Python's
    banker's rounding) and return it as an int.

    :param x: {float} num to round
    :return: the rounded value as an int
    """
    nearest = round(x)
    return int(nearest)
def get_pos_association_dict(volumestokeep, outfiles_partititon):
    """Map each 3-D output-file position (i, j, k) to its row-major index.

    Args:
        volumestokeep: unused here; kept for interface compatibility.
        outfiles_partititon: sequence (ni, nj, nk) — number of output files
            along each axis.

    Returns:
        dict mapping (i, j, k) tuples to sequential integer indices in
        row-major (k fastest) order.
    """
    from itertools import product
    ni, nj, nk = (outfiles_partititon[0], outfiles_partititon[1],
                  outfiles_partititon[2])
    # enumerate(product(...)) replaces the triple loop + manual counter.
    return {
        pos: index
        for index, pos in enumerate(product(range(ni), range(nj), range(nk)))
    }
import os
def get_sql() -> str:
    """ Return the contents of ../../create_tables.sql (relative to this
    module) as a string. """
    sql_path = os.path.join(os.path.dirname(__file__), '../../create_tables.sql')
    with open(sql_path) as handle:
        return handle.read()
from typing import List
def longest_consecutive_subsequence(arr: List[int]) -> int:
    """
    Length of the longest run of consecutive integers present in *arr*.

    Strategy: hash all values into a set, then extend a run only from
    values that start one (i.e. value-1 is absent), so each element is
    visited a constant number of times. O(n) time and O(n) space.
    """
    values = set(arr)
    best = 0
    for start in arr:
        if start - 1 in values:
            continue  # not the beginning of a run
        probe, length = start, 0
        while probe in values:
            length += 1
            probe += 1
        best = max(best, length)
    return best
def get_longitude_positive_direction(Body, Naif_id, rotation):
    """Define the positive longitudes in ographic CRS based on the rotation sens.

    General rule:
    * Direct rotation -> longitudes positive to West
    * Retrograde rotation -> longitudes positive to East

    Exceptions (both force East):
    * Sun/Earth/Moon, for historical reasons, regardless of rotation sens
    * small bodies, comets, and dwarf planets (Naif_id >= 900)
    """
    direction_by_rotation = {'Direct': 'west', 'Retrograde': 'east', '': ''}
    assert rotation in direction_by_rotation, rotation
    direction = direction_by_rotation[rotation]
    if Body in ('Sun', 'Moon', 'Earth') or Naif_id >= 900:
        direction = 'east'
    return direction
def get_Cm_NO():
    """Correction factor for the multi-lamp distributed lighting scheme of
    lighting equipment installed in a non-habitable room's lighting zone i.

    Returns:
        float: Cm_NO, the correction factor (constant 1.0).
    """
    return 1.0
def setup_with_context_manager(testcase, cm):
    """
    Enter a context manager in a test's setUp and keep it open until teardown.

    Instead of:
        with ctxmgr(a, b, c) as v:
            # v only lives for the duration of the `with` statement
    write:
        def setUp(self):
            self.v = setup_with_context_manager(self, ctxmgr(a, b, c))
        def test_foo(self):
            # use self.v
    """
    entered = cm.__enter__()
    # Registering __exit__ as a cleanup defers it to teardown time.
    testcase.addCleanup(cm.__exit__, None, None, None)
    return entered
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.