content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def GetAutoCompList(self, command):
    """Merge the 'smart' completer results with the SimpleCompleter
    results, de-duplicated and sorted alphabetically.
    """
    combined = self.BaseGetAutoCompList(command)
    combined += self.scomp.GetAutoCompList(command)
    # set() drops duplicates, sorted() returns a fresh alphabetical list
    return sorted(set(combined))
def uppercase(f):
    """Decorator that upper-cases the string returned by *f*.

    functools.wraps keeps the wrapped function's __name__ and
    __doc__, so decorated functions stay introspectable.
    """
    import functools

    @functools.wraps(f)
    def f_wrapper():
        return f().upper()
    return f_wrapper
def interpolation_search(sample_input, lowest, highest, item):
    """Search *item* in the sorted list *sample_input* between indices
    *lowest* and *highest*.

    One interpolation step estimates the item's position; the remaining
    range is then finished with a plain binary search.

    :param sample_input: sorted list of numbers
    :param lowest: lowest index of the search range
    :param highest: highest index of the search range
    :param item: the element to search for
    :return: True if found, else False
    """
    found = False
    value_range = sample_input[highest] - sample_input[lowest]
    if value_range == 0:
        # All values in the range are equal: the interpolation ratio is
        # undefined (division by zero in the original), compare directly.
        if sample_input[lowest] == item:
            return True
    else:
        distance = item - sample_input[lowest]
        ratio = distance / value_range
        estimation = int(lowest + ratio * (highest - lowest))
        # Clamp the estimate: for items outside the value range the
        # formula can produce an index outside [lowest, highest]
        # (IndexError, or silent negative-index wrap-around).
        estimation = max(lowest, min(highest, estimation))
        if sample_input[estimation] == item:
            found = True
        elif item < sample_input[estimation]:
            highest = estimation
        else:
            lowest = estimation
    while lowest <= highest and not found:
        mid = (lowest + highest) // 2
        if sample_input[mid] == item:
            found = True
        elif sample_input[mid] < item:
            lowest = mid + 1
        else:
            highest = mid - 1
    return found
def get_last_search_position(logger_file):
    """Return the position id recorded on the last line of *logger_file*.

    Required for resuming the previous search operation.  Each log line
    is expected to look like '<prefix> - <position> - ...'.

    :param logger_file: Logger file name
    :return: Last position id, or '' when the file is empty or the last
        line has no second ' - ' separated field.
    """
    # 'r' instead of 'r+': the file is only read here, so do not require
    # write permission (the original failed on read-only files).
    with open(logger_file, 'r') as f:
        lines = f.read().splitlines()
    try:
        last_pos = lines[-1].split(' - ')[1]
    except IndexError:
        last_pos = ''
    return last_pos
def check_domain_whitelist(string, whitelist):
    """Return True when any white-listed domain occurs in the string.

    :param string: A string possibly containing a domain name.
    :param whitelist: A list of strings containing white listed domains.
    :return: Bool
    """
    return any(domain in string for domain in whitelist)
def summarize_reduced_diffs(reduced_diffs):
    """
    Render the relevant reduced diff data as a human-readable summary.
    """
    # Prefer the pre-reduced *_gibs counters; otherwise derive them from
    # the raw byte counters (2**-40 converts bytes to the reported unit).
    if 'sum_data_units_read_gibs' in reduced_diffs:
        read_gibs = reduced_diffs.get('sum_data_units_read_gibs', 0)
        write_gibs = reduced_diffs.get('sum_data_units_written_gibs', 0)
    else:
        read_gibs = reduced_diffs.get('sum_data_units_read_bytes', 0) * 2.0**(-40)
        write_gibs = reduced_diffs.get('sum_data_units_written_bytes', 0) * 2.0**(-40)
    lines = [
        "Read: %10.2f TiB, %10.2f MOps\n" % (
            read_gibs,
            reduced_diffs.get('sum_host_read_commands', 0) / 1000000.0),
        "Written: %10.2f TiB, %10.2f MOps\n" % (
            write_gibs,
            reduced_diffs.get('sum_host_write_commands', 0) / 1000000.0),
        "WAF: %+10.4f\n" % reduced_diffs.get('max_write_amplification_factor', 0),
    ]
    return "".join(lines)
import html
def unescape(str):
    """Reverse HTML escaping: convert entities such as '&amp;' back to
    their literal characters.
    """
    unescaped = html.unescape(str)
    return unescaped
import torch
def copy_model_to_gpu(model, loss=None):
    """
    Copies a model and (optional) loss to GPU and enables cudnn benchmarking.
    For multiple gpus training, the model in DistributedDataParallel for
    distributed training.

    Returns ``(model, loss)`` when *loss* is given, otherwise just *model*.
    """
    # Benchmark mode lets cudnn auto-tune convolution kernels; it is
    # skipped when deterministic mode was requested, since the two
    # settings conflict.
    if not torch.backends.cudnn.deterministic:
        torch.backends.cudnn.benchmark = True
    model = model.cuda()
    if loss is not None:
        loss = loss.cuda()
        return model, loss
    else:
        return model
def get_strings(filename):
    """
    Read strings from files generated by an IDAPython script and store
    them in a list for further processing.

    :param filename: path to the text file, one string per line
    :return: list of lines without their trailing newline
    """
    # 'rU' universal-newline mode was removed in Python 3.11; plain 'r'
    # already translates newlines.  splitlines() also avoids chopping the
    # last character of a final line that lacks a trailing newline
    # (which line[:-1] did).
    with open(filename, 'r') as f:
        return f.read().splitlines()
def _add_algorithm_defaults(algorithm):
    """Central location specifying defaults for algorithm inputs.
    Converts allowed multiple inputs into lists if specified as a single item.
    """
    algorithm.setdefault("archive", [])
    algorithm.setdefault("min_allele_fraction", 10.0)
    algorithm.setdefault("tools_off", [])
    # These keys accept multiple values; wrap a lone scalar in a list.
    for key in ("archive", "tools_off"):
        value = algorithm[key]
        if not isinstance(value, (list, tuple)):
            algorithm[key] = [value]
    return algorithm
from typing import List
from typing import Any
from typing import Dict
def locate_or_create_pip_section_target(deps: List[Any]) -> Dict[str, Any]:
    """
    :param deps: the "dependencies" section of the Conda YAML data
    :return: the (first) member of deps that is a dictionary; if there is none, one is appended.
    """
    target_dep = next((dep for dep in deps if isinstance(dep, dict)), None)
    if target_dep is None:  # pragma: no cover
        target_dep = {}
        deps.append(target_dep)
    return target_dep
import os
def path_to_yaml_helper(yaml_path):
    """
    Normalize a filesystem path by replacing the current OS path
    separator with forward slashes.
    """
    sep = os.path.sep
    return yaml_path.replace(sep, '/')
from typing import Any
import hashlib
def hex(data: Any) -> str:
    """Return the SHA-512 hex digest of *data* (str input is UTF-8 encoded)."""
    payload = data.encode("utf-8") if isinstance(data, str) else data
    return hashlib.sha512(payload).hexdigest()
import click
def extract_coord(reg_str):
    """Parse a SAM-compatible genome coordinate string.

    Returns (contig, start, end) with the start converted to zero-based,
    half-open; start and/or end are None when not specified.
    """
    cleaned = reg_str.replace(",", "")
    if ":" not in cleaned:
        # No start and end specified.
        return cleaned, None, None
    contig, interval = cleaned.rsplit(":", 1)
    if "-" not in interval:
        # Only a start position specified.
        return contig, int(interval) - 1, None
    start_str, end_str = interval.rsplit("-", 1)
    # Convert the coordinate to zero-based, half open.
    start, end = int(start_str) - 1, int(end_str)
    if start < 0:
        raise click.BadParameter("Start position must be at least 1.")
    if start > end:
        raise click.BadParameter(
            "Invalid interval: {0} - {1}.".format(start + 1, end))
    return contig, start, end
import os
def get_zip_info_from_offset(zip_file, offset):
    """Find the ZipInfo entry covering *offset* in a zip file.

    Returns the ZipInfo whose local header starts at or before 'offset'
    (and before the next entry's header), or None when 'offset' falls
    before the first entry or at/past the end of the archive file.
    """
    if offset >= os.stat(zip_file.filename).st_size:
        return None
    infos = zip_file.infolist()
    if not infos or offset < infos[0].header_offset:
        return None
    # Walk consecutive entry pairs; the owner of 'offset' is the entry
    # whose header range [prev, cur) contains it.
    for prev_info, cur_info in zip(infos, infos[1:]):
        if prev_info.header_offset <= offset < cur_info.header_offset:
            return prev_info
    # Not between two entries: it can only belong to the last entry.
    last_info = infos[-1]
    if offset < last_info.header_offset:
        return None
    return last_info
import difflib
def compare_configs(cfg1, cfg2):
    """
    This function, using the unified diff function, will compare two config files and identify the changes.
    '+' or '-' will be prepended in front of the lines with changes
    :param cfg1: old configuration file path and filename
    :param cfg2: new configuration file path and filename
    :return: text with the configuration lines that changed. The return will include the configuration for the sections
    that include the changes
    """
    # open the old and new configuration files
    f1 = open(cfg1, 'r')
    old_cfg = f1.readlines()
    f1.close()
    f2 = open(cfg2, 'r')
    new_cfg = f2.readlines()
    f2.close()
    # compare the two specified config files {cfg1} and {cfg2}
    # n=9 keeps nine lines of context so whole config sections survive
    d = difflib.unified_diff(old_cfg, new_cfg, n=9)
    # create a diff_list that will include all the lines that changed
    # create a diff_output string that will collect the generator output from the unified_diff function
    diff_list = []
    diff_output = ''
    for line in d:
        diff_output += line
        # skip diff metadata ('+++'/'---' headers, timestamp lines, and
        # '!' section separators) so diff_list keeps only real changes
        if line.find('Current configuration') == -1:
            if line.find('Last configuration change') == -1:
                if (line.find('+++') == -1) and (line.find('---') == -1):
                    if (line.find('-!') == -1) and (line.find('+!') == -1):
                        if line.startswith('+'):
                            diff_list.append('\n' + line)
                        elif line.startswith('-'):
                            diff_list.append('\n' + line)
    # process the diff_output to select only the sections between '!' characters for the sections that changed,
    # replace the empty '+' or '-' lines with space
    diff_output = diff_output.replace('+!', '!')
    diff_output = diff_output.replace('-!', '!')
    diff_output_list = diff_output.split('!')
    all_changes = []
    # keep each changed section once, in first-seen order
    for changes in diff_list:
        for config_changes in diff_output_list:
            if changes in config_changes:
                if config_changes not in all_changes:
                    all_changes.append(config_changes)
    # create a config_text string with all the sections that include changes
    config_text = ''
    for items in all_changes:
        config_text += items
    return config_text
def started_puller(system, puller, puller_start_message):
    """
    fixture that create and start a PullerActor and actor before launching the test and stop it after the test end

    :param system: actor system used to deliver the start message
    :param puller: the PullerActor under test
    :param puller_start_message: message that tells the actor to start
    :return: the (started) puller actor
    """
    # presumably ask() waits for the actor to process the start message,
    # so the puller is ready when the test body runs — confirm with the
    # actor framework in use.
    system.ask(puller, puller_start_message)
    return puller
def _limit_description_to_key(description):
    """Translate between the description of the Text widget and the
    corresponding key and value pos in the fitarg dictionary.

    Parameters
    ----------
    description : str
        Widget description of the form '<param>_min' or '<param>_max'.

    Returns
    -------
    key : string
        The key in the fitarg dictionary ('limit_<param>').
    attr : (0, 1)
        The entry position in the value of the fitarg dictionary.
    """
    # Split off the parameter name and the '_min'/'_max' suffix.
    param, bound = description.split('_')
    # Prefix with 'limit_' so it is the correct key for self.fitarg.
    key = 'limit_' + param
    bound_to_pos = {'min': 0, 'max': 1}
    if bound not in bound_to_pos:
        raise NotImplementedError(
            "Uuups there is something wrong." \
            "attr was %s but min/max was expected" % bound
        )
    return key, bound_to_pos[bound]
import warnings
import collections
import re
def read_quants_gianninas(fobj):
    """Read and parse custom file format of physical stellar parameters from
    Gianninas et al 2014, [1]_.
    Parameters
    ----------
    fobj : file object
        An opened file object to the text file with parameters.
        Example file format:
        line 0: 'Name         SpT    Teff   errT  log g errg '...
        line 1: '==========  ===== ======= ====== ===== ====='...
        line 2: 'J1600+2721  DA6.0   8353.   126. 5.244 0.118'...
    Returns
    -------
    dobj : collections.OrderedDict
        Ordered dictionary with parameter field names as keys and
        parameter field quantities as values.
    Examples
    --------
    >>> with open('path/to/file.txt', 'rb') as fobj:
    ...     dobj = read_quants_gianninas(fobj)
    References
    ----------
    .. [1] http://adsabs.harvard.edu/abs/2014ApJ...794...35G
    """
    # Read in lines of file and use second line (line number 1, 0-indexed)
    # to parse fields. Convert string values to floats. Split specific values
    # that have mixed types (e.g. '1.000 Gyr').
    # NOTE(review): the docstring example opens the file in 'rb'; the
    # slicing and regexes below operate on str — confirm callers open
    # the file in text mode.
    lines = []
    for line in fobj:
        lines.append(line.strip())
    if len(lines) != 3:
        warnings.warn(
            ("\n" +
             "File has {num_lines}. File is expected to only have 3 lines.\n" +
             "Example file format:\n" +
             "line 0: 'Name         SpT    Teff   errT  log g'...\n" +
             "line 1: '==========  ===== ======= ====== ====='...\n" +
             "line 2: 'J1600+2721  DA6.0   8353.   126. 5.244'...").format(
                num_lines=len(lines)))
    dobj = collections.OrderedDict()
    # Each '=' run in the separator line marks one column; slice the
    # header and data lines with the same span to pair key and value.
    for mobj in re.finditer('=+', lines[1]):
        key = lines[0][slice(*mobj.span())].strip()
        value = lines[2][slice(*mobj.span())].strip()
        try:
            value = float(value)
        except ValueError:
            try:
                # Mixed-type fields like '1.000 Gyr': drop the unit.
                value = float(value.rstrip('Gyr'))
            except ValueError:
                pass
        # Repair a header truncated by the column slicing.
        if key == 'og L/L':
            key = 'log L/Lo'
        dobj[key] = value
    return dobj
def addtable(table, table_name):
    """
    Return a cleaned copy of *table*: depth columns cast to float,
    collarid cast to str, rows sorted by (collarid, fromdepth), with a
    fresh index.  The *table_name* parameter is immediately overwritten
    internally and only kept for interface compatibility.
    """
    # Deep copy so the caller's frame is never mutated.
    table_name = table.copy(deep=True)
    for depth_col in ('fromdepth', 'todepth'):
        table_name[depth_col] = table_name[depth_col].astype(float)
    table_name['collarid'] = table_name['collarid'].astype(str)
    table_name.sort_values(by=['collarid', 'fromdepth'], inplace=True)
    table_name.reset_index(level=None, drop=True, inplace=True, col_level=0, col_fill='')
    return table_name
def getManifestSchemaVersion(manifest):
    """Return the schema version stored under manifest['manifest']['schemaVersion']."""
    inner = manifest["manifest"]
    return inner["schemaVersion"]
import math
def getCosineSetSim(concepts_1: set, concepts_2: set):
    """Return the Cosine Set Similarity of the two concept sets.

    cos = |A ∩ B| / sqrt(|A| * |B|)

    Returns 0.0 when either set is empty; the bare formula would divide
    by zero in that case.
    """
    if not concepts_1 or not concepts_2:
        return 0.0
    intersection = len(concepts_1.intersection(concepts_2))
    return intersection / (math.sqrt(len(concepts_2) * len(concepts_1)))
import functools
def dec_busy(func):
    """
    Decorator to set the amp/lcd controller state to busy while executing
    the function.

    The busy flag is cleared in a finally block so it cannot stay stuck
    at True when the wrapped function raises (the original left it set).
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self.busy = True
        try:
            return func(self, *args, **kwargs)
        finally:
            self.busy = False
    return wrapper
def is_run(part):
    """Return True when *part* is exactly three consecutive ascending values.

    >>> is_run([4])
    False
    >>> is_run([1, 2, 3])
    True
    >>> is_run([3, 2, 1])
    False
    """
    if len(part) == 3:
        first, second, third = part
        return first + 1 == second and second + 1 == third
    return False
def form_tree_definition(proj, formname):
    """Turn the form (instrument) definition in the project xml file into
    a python dictionary tree.

    In the XML structure items are defined in different places and there
    are various places where 'ref' items have to be looked up to find
    the matching 'def'.  Turn it into a nice neat:
    FormDef
        ItemGroup(s)
            Item(s)

    Example of the source layout::

        <FormDef OID="Form.ob_template" Name="Ob Template" ...>
            <ItemGroupRef ItemGroupOID="ob_template.timestamp_template" .../>
        </FormDef>
        <ItemGroupDef OID="ob_template.timestamp_template" ...>
            <ItemRef ItemOID="timestamp_template" .../>
        </ItemGroupDef>
        <ItemDef OID="timestamp_template" .../>
    """
    xpath = r"./Study/MetaDataVersion/FormDef/[@OID='%s']" % formname
    root = proj.find(xpath, proj.nsmap)
    tree = dict(root.attrib)
    tree['ItemGroup'] = []
    # Element.getchildren() was deprecated and removed (gone from
    # xml.etree in Python 3.9, deprecated in lxml); iterating the
    # element yields the same children.
    for igr in root:
        igr_oid = igr.attrib['ItemGroupOID']
        igd_xpath = r"./Study/MetaDataVersion/ItemGroupDef/[@OID='%s']" % igr_oid
        igd = proj.find(igd_xpath, proj.nsmap)
        # Merge the 'ref' attributes with the looked-up 'def' attributes.
        itemgroup = {**igr.attrib, **igd.attrib}
        itemgroup['Item'] = []
        for itemref in igd:
            item_oid = itemref.attrib['ItemOID']
            itmd_xpath = r"./Study/MetaDataVersion/ItemDef/[@OID='%s']" % item_oid
            itm = proj.find(itmd_xpath, proj.nsmap)
            item = {
                **itm.attrib
            }
            itemgroup['Item'].append(item)
        tree['ItemGroup'].append(itemgroup)
    return tree
def parse_one_line(line):
    """Parse a line of the form
        16:17:266:2864 3:4:194:2443
    into two integer lists:
        [16, 17, 266, 2864], [3, 4, 194, 2443]
    """
    trial_part, success_part = line.split(' ')
    one_trial = list(map(int, trial_part.split(':')))
    one_success = list(map(int, success_part.split(':')))
    return one_trial, one_success
def get_sanitized_git_link(git_repository_slug: str) -> str:
    """
    >>> assert get_sanitized_git_link(git_repository_slug='pypa/pip') == 'git+https://github.com/pypa/pip.git'
    """
    return f'git+https://github.com/{git_repository_slug}.git'
import traceback
def stop_transfer_if_fail(f):
    """Stop the transfer when an unexpected exception escapes *f*.

    functools.wraps keeps the wrapped function introspectable; the
    traceback is still printed so the failure is not silently lost.
    """
    import functools

    @functools.wraps(f)
    def wrapper(transfer, *args, **kwargs):
        try:
            return f(transfer, *args, **kwargs)
        except Exception as e:
            transfer.stop(warning='%s closed because of %s' %
                          (transfer.display_name, str(e)))
            traceback.print_exc()
    return wrapper
def get_employee_sick_leave_days(employee, month):
    """Delegate to *month* to look up the given employee's sick leave days."""
    days = month.get_employee_sick_leave_days(employee)
    return days
def unsupLossBSchedule(iteration):
    """
    Schedule for weighting loss between forward and backward loss.

    Ramps in steps of 0.1 from 0.0 (iterations <= 150000) up to a
    plateau of 0.5 (iterations > 190000).
    """
    # (threshold, weight) pairs, highest first; the original's >400k,
    # >300k and >200k branches all returned the same 0.5 plateau.
    schedule = (
        (190000, 0.5),
        (180000, 0.4),
        (170000, 0.3),
        (160000, 0.2),
        (150000, 0.1),
    )
    for threshold, weight in schedule:
        if iteration > threshold:
            return weight
    return 0.0
def sufficiency(tankstellen, scenario, capacity):
    """Calculate, per charging point, the ratio of demanded load to
    available capacity.

    Args:
        tankstellen (gdf): frame containing all charging points with load
        scenario (string): name of the scenario (load) column
        capacity (string): name of the capacity column
    Returns:
        list: load/capacity ratio for each charging point
    """
    loads = tankstellen[scenario]
    capacities = tankstellen[capacity]
    return [loads[idx] / capacities[idx] for idx in range(len(tankstellen))]
import requests
def get_tnstats(url, cookie):
    """
    Retrieve Tunneled Node's agregated statistics from the device API.

    :param url: base url
    :param cookie: Cookie value
    :return: TN Statistics JSON
    :Example:
    result = get_tnstats(base_url, sessionid)
    """
    headers = {'cookie': cookie}
    endpoint = url + "tunneled_node_server/ports/aggregate_statistics"
    # TLS verification is disabled and a short timeout applied, matching
    # the device-API conventions used elsewhere in this module.
    response = requests.get(endpoint, headers=headers, verify=False,
                            timeout=2)
    return response.json()
def check_sample_id(value: str, length: int = 15) -> bool:
    """
    Light validation of a BMH Sample ID ('BMH-NNNN-NNNNNN').

    :param value: sample_id
    :param length: expected length of string
    """
    if len(value) != length:
        return False
    parts = value.split("-")
    return (
        len(parts) == 3
        and parts[0] == "BMH"
        and parts[1].isdigit() and len(parts[1]) == 4
        and parts[2].isdigit() and len(parts[2]) == 6
    )
def getLimFromPoints(pointsLists):
    """
    Determine the largest and smallest X and Y values over all points.

    Parameter: a list of lists of points [[(x: float, y: float), ...], ...]
    Return value: ((xmin: float, xmax: float), (ymin: float, ymax: float))
    """
    # Seed the extrema from the very first point.
    seed = pointsLists[0][0]
    xmin = xmax = seed[0]
    ymin = ymax = seed[1]
    for group in pointsLists:
        for pt in group:
            xmin = min(xmin, pt[0])
            xmax = max(xmax, pt[0])
            ymin = min(ymin, pt[1])
            ymax = max(ymax, pt[1])
    return ((xmin, xmax), (ymin, ymax))
def section_string_to_dict(sectioned_report):
    """Parses a string containing the sectioned report into a dictionary containing each section and its content
    INPUT: String containing a sectioned report
    OUTPUT: Dictionary containing each section with its content"""
    # NOTE(review): iterating a *string* yields single characters, which
    # can never equal a multi-character label below — this function only
    # behaves usefully when given an iterable of lines; confirm callers.
    sectioned_report_to_dict = {}
    labels = ['###FINDINGS', '###COMPARISON', '###INDICATION', '###IMPRESSION', 'XXXX',
              'FINDINGS', 'COMPARISON', 'INDICATION', 'IMPRESSION']
    # Accept lower-case and capitalized variants of every label too.
    labels += [e.lower() for e in labels]
    labels += [e.capitalize() for e in labels]
    cur_key = ''
    cur_section = ''
    for line in sectioned_report:
        line = line.rstrip().lstrip()
        if line in labels:
            # A new section header: flush the section collected so far,
            # stripping any '###' prefix from the key.
            if cur_section != '':
                if cur_key.startswith("#"):
                    sectioned_report_to_dict[cur_key[3:]] = cur_section
                else:
                    sectioned_report_to_dict[cur_key] = cur_section
                cur_key = line
                cur_section = ''
            else:
                cur_key = line
        else:
            # Accumulate content lines under the current section.
            if cur_key and line != '\r': cur_section += line
    # Flush the final section after the loop ends.
    if cur_key.startswith("#"):
        sectioned_report_to_dict[cur_key[3:]] = cur_section
    else:
        sectioned_report_to_dict[cur_key] = cur_section
    return sectioned_report_to_dict
import re
def normalize(text: str) -> str:
    """
    Collapse every run of whitespace in *text* into a single space and
    strip the ends.  For example "  foo   bar " becomes "foo bar".
    """
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
import os
def get_dir(filename: str, mode: str = 'dir') -> str:
    """Resolve *filename* to its file path, containing directory, or
    that directory's basename.

    If the file is located at /path/to/dir/file, then the dirname is
    "/path/to/dir" and the basename is "dir".

    Args:
        filename: str. Local filename, normally it's __file__.
        mode: str. 'file' returns the file path, 'dir' the dir path,
            'basename' the dir basename; any other value falls back to
            the dir path.

    Returns:
        str: file path or dir path or dir basename based on mode.
    """
    file_path = os.path.abspath(filename)
    dir_path = os.path.dirname(file_path)
    if mode == 'file':
        return file_path
    if mode == 'basename':
        return os.path.basename(dir_path)
    # 'dir' and any unrecognized mode both yield the directory path.
    return dir_path
def qgis_vector_dissolve(
    processing, context, INPUT, FIELD, OUTPUT, USING_GDAL_FUNCTION=False
):
    """qgis: dissolve the input vector based on values in the FIELD list,
    using either the GDAL or the native QGIS algorithm.
    ----------
    Notes
    -------
    Returns:
    -------
    The result of processing.run for the chosen dissolve algorithm.
    """
    # Both branches differ only in the algorithm identifier.
    algorithm = "gdal:dissolve" if USING_GDAL_FUNCTION else "native:dissolve"
    out = processing.run(
        algorithm,
        {"INPUT": INPUT, "FIELD": FIELD, "OUTPUT": OUTPUT},
        context=context,
    )
    return out
from typing import List
from typing import Union
def numeros_al_final_comprension(lista: List[Union[float, str]]) -> List[Union[float, str]]:
    """Move the numbers to the end of the list, keeping strings first."""
    strings = []
    numbers = []
    # type() comparisons (not isinstance) deliberately exclude bool,
    # matching the original filtering.
    for item in lista:
        if type(item) == str:
            strings.append(item)
        elif type(item) in [int, float]:
            numbers.append(item)
    return strings + numbers
import time
import select
import errno
def NoIntrPoll(pollfun, timeout=-1):
    """
    This wrapper is used to handle the interrupt exceptions that might
    occur during a poll system call. The wrapped function must be defined
    as poll([timeout]) where the special timeout value 0 is used to return
    immediately and -1 is used to wait indefinitely.

    :param pollfun: callable taking a single timeout argument
    :param timeout: seconds to wait; any negative value waits indefinitely
    :return: whatever *pollfun* returns once it completes uninterrupted
    """
    # When the timeout < 0 we shouldn't compute a new timeout after an
    # interruption.
    endtime = None if timeout < 0 else time.time() + timeout
    # NOTE(review): since Python 3.5 (PEP 475) most stdlib calls retry
    # EINTR automatically, so this loop mainly matters for custom
    # pollfun implementations — confirm before simplifying.
    while True:
        try:
            return pollfun(timeout)
        except (IOError, select.error) as e:
            # Re-raise anything that is not an interrupted system call.
            if e.args[0] != errno.EINTR:
                raise
            # Shrink the remaining timeout (never below 0) and retry.
            if endtime is not None:
                timeout = max(0, endtime - time.time())
def egfr_38():
    """Create test fixture for EGFR on 38"""
    total_observations = {
        "allele_count": 7,
        "allele_number": 152192,
        "decimal": "0.000045995"
    }
    max_pop_freq = {
        "population": "Latino/Admixed American",
        "allele_count": 3,
        "allele_number": 15278,
        "decimal": "0.000196361"
    }
    return {
        "variant": "7-55142262-G-A",
        "assembly": "GRCh38",
        "total_observations": total_observations,
        "max_pop_freq": max_pop_freq,
        "gnomad_url": "https://gnomad.broadinstitute.org/variant/7-55142262-G-A?dataset=gnomad_r3"  # noqa: E501
    }
import os
def create_dirs_path_object(path):
    """
    Create *path* (including parents) when it does not yet exist.

    :param path: directory path to ensure
    :return: True when the directory was created, False when it already
        existed.
    """
    # Catching FileExistsError instead of exists()+makedirs() removes
    # the check-then-create race between two concurrent callers.
    try:
        os.makedirs(path)
        return True
    except FileExistsError:
        return False
def check_digit13(firsttwelvedigits):
    """Compute the ISBN-13 check digit for the first twelve digits.

    Returns the check digit as a one-character string, or None when the
    input is not a twelve-character numeric string.
    """
    # minimum checks
    if len(firsttwelvedigits) != 12:
        return None
    try:
        int(firsttwelvedigits)
    except Exception:  # pragma: no cover
        return None
    # Alternating 1/3 weights over the digits, per the ISBN-13 scheme.
    weighted = sum((1 if i % 2 == 0 else 3) * int(ch)
                   for i, ch in enumerate(firsttwelvedigits))
    # (10 - x) % 10 maps a zero remainder to check digit 0.
    return str((10 - weighted % 10) % 10)
def get_verbose_name(object):
    """
    Return the model's verbose name from its _meta options.
    """
    meta = object._meta
    return meta.verbose_name
def param_for_gem():
    """
    Identify the parameters that will generate such "gem found".
    Once you find these three numbers (number, num1 and num2),
    return them in a list.
    >>> output = param_for_gem()
    >>> len(output) == 3
    True
    >>> all([isinstance(num, int) for num in output])
    True
    """
    number, num1, num2 = 2, 1, 1
    return [number, num1, num2]
import torch
def batch_colout(X: torch.Tensor, p_row: float, p_col: float) -> torch.Tensor:
    """Apply ColOut augmentation to a batch of images, dropping the same
    random rows and columns from every image in the batch.

    Args:
        X: Batch of images of shape (N, C, H, W).
        p_row: Fraction of rows to drop (drop along H).
        p_col: Fraction of columns to drop (drop along W).

    Returns:
        torch.Tensor: Input batch with the selected rows and columns removed.
    """
    n_rows, n_cols = X.shape[2], X.shape[3]
    # Number of rows/columns that survive the drop.
    keep_rows = int((1 - p_row) * n_rows)
    keep_cols = int((1 - p_col) * n_cols)
    # Random choice of indices; sorted so slicing keeps spatial order.
    row_idx = sorted(torch.randperm(n_rows)[:keep_rows].numpy())
    col_idx = sorted(torch.randperm(n_cols)[:keep_cols].numpy())
    return X[:, :, row_idx, :][:, :, :, col_idx]
def filter_endswith(word, ending=None):
    """Check whether *word* (compared case-insensitively) ends with the
    given suffix.

    :param word (str): A word.
    :param ending (str, optional): The suffix to check; the None default
        is not usable and will raise a TypeError from str.endswith.
    :rtype bool: The resulting check.
    """
    lowered = word.lower()
    return lowered.endswith(ending)
def paginate(query, page, count):
    """
    Return one page of results from *query*.

    :param query: query object exposing .paginate(page, count)
    :param page: page number
    :param count: items per page
    """
    page_result = query.paginate(page, count)
    return page_result
def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):
    """
    Create a pre-signed PUT URL for S3-like backends, e.g. Minio.

    Since our production object storage backend is GCS, no Content-MD5
    value is enforced or required.

    :param: s3_client: an initialized S3 client used to create the presigned PUT url.
    :param: bucket: the bucket where the user can PUT their object.
    :param: filepath: the file path inside the bucket for the object.
    :param: md5sum: base64-encoded MD5 of the object; ignored here and
        kept solely for API compatibility with the other private
        presigned-URL helpers.
    :param: lifetime_sec: seconds until the presigned URL expires.
    """
    # S3's PUT Object parameters:
    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
    params = {
        "Bucket": bucket,
        "Key": filepath,
    }
    return s3_client.generate_presigned_url(
        ClientMethod="put_object",
        Params=params,
        ExpiresIn=lifetime_sec,
    )
import os
def _json_add_reader(json, filename):
    """Prepend a LAS reader stage to the PDAL pipeline dict and return it."""
    reader_stage = {
        'type': 'readers.las',
        'filename': os.path.abspath(filename)
    }
    json['pipeline'].insert(0, reader_stage)
    return json
import os
def normalised_path(input_path: str) -> str:
    """Return a normalised "real"/"full" filepath for the given directory,
    with a trailing separator, after checking it is a valid directory.

    :raises NotADirectoryError: when the resolved path is not a directory.
    """
    # os.path.sep instead of a hard-coded '\\' so the helper also works
    # on POSIX systems (the original only produced valid paths on Windows).
    normalised: str = os.path.realpath(os.path.expanduser(input_path)) + os.path.sep
    if os.path.isdir(normalised):
        return normalised
    raise NotADirectoryError("Not a Valid Directory")
def add(x, y):
    """Return the sum of the two arguments."""
    total = x + y
    return total
def __find_lags_in_frame(data, target_name, max_depth):
    """
    Build the candidate lag list for every non-target column of the frame.

    :param data: Dataframe
    :param target_name: Target variable (excluded from the result)
    :param max_depth: Max possible lag depth
    :return: list of [column, [0..max_depth]] pairs
    """
    # Every feature column gets the full range of candidate lags
    # (a fresh list per column, as in the original).
    return [[column, list(range(0, max_depth + 1))]
            for column in data.columns if not column == target_name]
def get_change_sign(word):
    """
    Return the list of alternative accented forms for the given
    Vietnamese vowel character, grouped by base letter:

    # sign['a'] = # a à á â ã ạ ả ấ ầ ậ ắ ặ
    # sign['e'] = # è é ê ẹ ẻ ẽ ế ề ể ễ ệ
    # sign['i'] = # ì í ỉ ị
    # sign['o'] = # ò ó ô õ ọ ỏ ố ồ ổ ộ ớ ờ ỡ ợ
    # sign['u'] = # ù ú ụ ủ ứ ừ ữ ự
    # sign['y'] = # ý ỳ ỵ ỷ

    Raises KeyError for characters not present in the table.
    """
    # NOTE(review): the table below is hand-written and irregular (some
    # entries contain duplicates or repeated keys); it is kept verbatim.
    sign_ = {}
    sign_['a'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ẫ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'a', 'ẵ', 'ẳ', 'ẩ']
    sign_['ă'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'a', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['à'] = ['a', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['á'] = ['à', 'a', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['â'] = ['à', 'á', 'a', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ã'] = ['à', 'á', 'â', 'a', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ạ'] = ['à', 'á', 'â', 'ã', 'a',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ả'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'a', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ấ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'a', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ầ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'a', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ậ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'a', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ắ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'a', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ặ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'a', 'a', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ằ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'a', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ắ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'a', 'ă', 'ẫ', 'ẵ', 'ẳ', 'ẩ']
    sign_['ẵ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'a', 'ẳ', 'ẩ']
    sign_['ẳ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'a', 'ẩ']
    sign_['ẩ'] = ['à', 'á', 'â', 'ã', 'ạ',
                  'ả', 'ấ', 'ầ', 'ậ', 'ắ', 'ặ', 'ằ', 'ắ', 'ă', 'ẫ', 'ẵ', 'ẳ', 'a']
    sign_['e'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['è'] = ['e', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['é'] = ['è', 'e', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ê'] = ['è', 'é', 'e', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ẹ'] = ['è', 'é', 'ê', 'e', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ẻ'] = ['è', 'é', 'ê', 'ẹ', 'e', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ẽ'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'e', 'ế', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ế'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'e', 'ề', 'ể', 'ễ', 'ệ']
    sign_['ề'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'e', 'ể', 'ễ', 'ệ']
    sign_['ể'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'e', 'ễ', 'ệ']
    sign_['ễ'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'e', 'ệ']
    sign_['ệ'] = ['è', 'é', 'ê', 'ẹ', 'ẻ', 'ẽ', 'ế', 'ề', 'ể', 'ễ', 'e']
    sign_['i'] = ['ì', 'í', 'ỉ', 'ị', 'ĩ']
    sign_['ì'] = ['i', 'í', 'ỉ', 'ị', 'ĩ']
    sign_['í'] = ['ì', 'i', 'ỉ', 'ị', 'ĩ']
    sign_['ỉ'] = ['ì', 'í', 'i', 'ị', 'ĩ']
    sign_['ị'] = ['ì', 'í', 'ỉ', 'i', 'ĩ']
    sign_['ĩ'] = ['ì', 'í', 'ỉ', 'ị', 'i']
    sign_['o'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ỗ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'o']
    sign_['ò'] = ['o', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ó'] = ['ò', 'o', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ô'] = ['ò', 'ó', 'o', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['õ'] = ['ò', 'ó', 'ô', 'o', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ọ'] = ['ò', 'ó', 'ô', 'õ', 'o', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ỏ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'o',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ố'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'o', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ồ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'o', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ổ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'o', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ộ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'o', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ớ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'o', 'ờ', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ờ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'o', 'ỡ', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ỡ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'o', 'ợ', 'ở', 'ơ', 'ỗ']
    sign_['ợ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'o', 'ở', 'ơ', 'ỗ']
    sign_['ở'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ',
                  'ố', 'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'o', 'ơ', 'ỗ']
    sign_['ơ'] = ['ò', 'ó', 'ô', 'õ', 'ọ', 'ỏ', 'ố',
                  'ồ', 'ổ', 'ộ', 'ớ', 'ờ', 'ỡ', 'ợ', 'ở', 'o', 'ỗ']
    sign_['u'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ù'] = ['u', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ú'] = ['ù', 'u', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ụ'] = ['ù', 'ú', 'u', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ủ'] = ['ù', 'ú', 'ụ', 'u', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ứ'] = ['ù', 'ú', 'ụ', 'ủ', 'u', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ừ'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'u', 'ữ', 'ự', 'ư', 'ử', 'ũ']
    sign_['ữ'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'u', 'ự', 'ư', 'ử', 'ũ']
    sign_['ự'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'u', 'ư', 'ử', 'ũ']
    sign_['ư'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'u', 'ử', 'ũ']
    sign_['ử'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'u', 'ũ']
    sign_['ũ'] = ['ù', 'ú', 'ụ', 'ủ', 'ứ', 'ừ', 'ữ', 'ự', 'ư', 'ử', 'u']
    sign_['y'] = ['ý', 'ỳ', 'ỵ', 'ỷ', 'ỹ']
    sign_['ý'] = ['y', 'ỳ', 'ỵ', 'ỷ', 'ỹ']
    sign_['ỳ'] = ['ý', 'y', 'ỵ', 'ỷ', 'ỹ']
    sign_['ỵ'] = ['ý', 'ỳ', 'y', 'ỷ', 'ỹ']
    sign_['ỷ'] = ['ý', 'ỳ', 'ỵ', 'y', 'ỹ']
    sign_['ỹ'] = ['ý', 'ỳ', 'ỵ', 'ỷ', 'y']
    return sign_[word]
def reverse_bits(n):
    """
    Reverse the binary digits of a non-negative integer.

    The number is rendered in its minimal binary form (no leading zeros),
    the digit string is mirrored, and the result is read back as binary,
    e.g. 6 (0b110) -> 3 (0b011).
    :param n: a non-negative integer value.
    :return: the integer whose binary digits are those of ``n`` reversed.
    """
    mirrored_digits = bin(n)[2:][::-1]
    return int(mirrored_digits, 2)
import torch
def UnNormalize_tensor(tensor: torch.Tensor, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """
    Invert a channel-wise normalization in place, then clamp to [0, 1].

    Undoes ``t = (t - mean) / std`` per channel (``tensor`` is mutated).
    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be un-normalized.
    Returns:
        Tensor: clamped copy of the un-normalized image.
    """
    for channel, channel_mean, channel_std in zip(tensor, mean, std):
        # inverse of the normalize step: t.sub_(m).div_(s)
        channel.mul_(channel_std).add_(channel_mean)
    return tensor.clamp(0.0, 1.0)
def read_positive_integer_custom(text, position):
    """Read a decimal number starting from the given position.

    Returns a ``(value, end)`` tuple where ``value`` is the parsed int and
    ``end`` is the first index after the digits.  If there is no digit at
    ``position`` (or ``position`` is past the end of ``text``), returns
    ``("", position)`` unchanged so callers can test for the empty string.
    (The previous docstring claimed None was returned — it never was.)
    """
    end = position
    # Advance past the run of consecutive digits.
    while end < len(text) and text[end].isdigit():
        end += 1
    if end == position:
        # No digit at the starting position.
        return ("", position)
    return (int(text[position:end]), end)
from typing import List
import re
def remove_obsidian_comments(content: List[str]) -> List[str]:
    """
    Strip Obsidian-style ``%% comment %%`` blocks (possibly spanning
    several lines) from the given lines.
    """
    joined = '\n'.join(content)
    stripped = re.sub(r'\s*\%\%(.|\n)*?\s*\%\%', '', joined)
    return stripped.split('\n')
from math import isnan
def clamp(x: float, lower=0., upper=1.) -> float:
    """
    Clamp ``x`` into ``[lower, upper]`` (default [0, 1]).

    Raises:
        FloatingPointError: if ``x`` is NaN, for which clamping is undefined.
    """
    # Bounds are checked first; NaN fails both comparisons and is rejected.
    if x <= lower:
        return lower
    if x >= upper:
        return upper
    if isnan(x):
        raise FloatingPointError('clamp is undefined for NaN')
    return x
import itertools
def repeat():
    """Repeat a sequence a number of times."""
    # itertools.repeat yields 'AB' exactly twice.
    return list(itertools.repeat('AB', 2))
import logging
def get_annotator_idx(jam, feature_name, annotator_name, filename):
    """Return the index of the annotation authored by ``annotator_name``.

    Logs a warning and returns -1 when no matching annotator is found in
    ``jam[feature_name]``.
    """
    for idx, annotation in enumerate(jam[feature_name]):
        if annotation.annotation_metadata.annotator.name == annotator_name:
            return idx
    logging.warning("Annotator %s not found in %s" % (annotator_name,
                                                      filename))
    return -1
import socket
def get_free_ports(n):
    """Reserve ``n`` distinct free TCP ports.

    Based on https://gist.github.com/dbrgn/3979133.  Binds ``n`` sockets to
    OS-assigned ports (port 0), records the port numbers, then closes every
    socket.  Note the usual caveat: a returned port may be re-taken by
    another process after the socket is closed.
    """
    bound_sockets = [socket.socket() for _ in range(n)]
    try:
        for sock in bound_sockets:
            sock.bind(('', 0))
        return [sock.getsockname()[1] for sock in bound_sockets]
    finally:
        for sock in bound_sockets:
            sock.close()
import random
def rand_trial(tgt_num, min_itv, max_itv):
    """
    Build an interleaved trial list of filler counts and target markers.

    Parameters
    ----------
    tgt_num: Int
        The number of target sentences.
    min_itv: Int
        The minimum number of filler sentences between 2 target sentences
        (max_itv >= 1).
    max_itv: Int
        The maximum number of filler sentences between 2 target sentences.
        Note that "tgt_num*max_itv <= the number of filler sentences".

    Returns
    ----------
    li : List
        Alternating values: each even index holds a random filler count in
        [min_itv, max_itv], each odd index holds 0 (a target marker).
        E.g. "[2, 0, 1, 0, 1, 0, 2, 0, 2, 0]" means 2 fillers, a target,
        1 filler, a target, and so on.
    """
    trial = []
    for _ in range(tgt_num):
        # One random filler-count per target, followed by the target marker.
        trial.append(random.randint(min_itv, max_itv))
        trial.append(0)
    return trial
import random
def random_int(s, m):
    """Generate a random int representable in an ``m``-bit fixed point
    format; ``s`` selects a signed (two's-complement style) range.
    """
    # m-1 magnitude bits; at least one bit even for m <= 1.
    value = random.getrandbits((m - 1) or 1)
    if s:
        # Shift into the negative half of the signed range.
        value -= 2 ** (m - 1)
    return value
def qual_class_name(cls):
    """ Returns the fully qualified class name (module + class name). """
    return "{}.{}".format(cls.__module__, cls.__name__)
def get_bytes(t, iface='eth0') -> int:
    """Read the raw ``<t>_bytes`` counter (e.g. t='rx' or 'tx') for the
    given network interface from sysfs and return it as an integer."""
    stat_path = '/sys/class/net/' + iface + '/statistics/' + t + '_bytes'
    with open(stat_path, 'r') as stat_file:
        return int(stat_file.read())
def fit_to_display(img, fit_by_height=None):
    """
    Return a proportionally resized image with the largest dimensions that
    can fit on a 128x64 display.

    Fixes the original behavior of implicitly returning ``None`` when the
    image already matched the target dimension (the return was nested
    inside the resize branch).

    :param img: image object exposing ``size`` and ``resize((w, h))``
    :param fit_by_height: force scaling to the 64px height (True) or the
        128px width (False); when ``None``, chosen from the aspect ratio.
    :return: the resized image
    """
    width, height = img.size
    ratio = float(width) / height
    if fit_by_height is None:
        # Tall-ish images (width <= 2*height) scale to the display height.
        fit_by_height = (height * 2) >= width
    if fit_by_height:
        if height != 64:
            dif = 64 - height
            height = int(round(height + dif))
            width = int(round((dif * ratio) + width))
            # Shrinking must not collapse the width to zero pixels.
            if width == 0:
                width = 1
        return img.resize((width, height))
    else:
        if width != 128:
            dif = 128 - width
            width = int(round(width + dif))
            height = int(round((dif / ratio) + height))
            # Shrinking must not collapse the height to zero pixels.
            if height == 0:
                height = 1
        return img.resize((width, height))
def WDM_suppression(m, m_c, a_wdm, b_wdm, c_wdm):
    """
    Suppression function from Lovell et al. 2020.

    :return: the factor that multiplies the CDM halo mass function to give
        the WDM halo mass function:
        dN/dm (WDM) = dN/dm (CDM) * (1 + (a_wdm * m_c / m)^b_wdm)^c_wdm
    """
    return (1 + (a_wdm * m_c / m) ** b_wdm) ** c_wdm
def remove_chars(value, char_list=''):
    """
    Remove specific chars from a string.

    :param value: Value to be formatted
    :type value: String
    :param char_list: String containing the characters you want to remove
    :type char_list: String
    :returns: String without the listed characters
    :rtype: String

    Example:
        >>> remove_chars('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse id lacus rhoncus, varius lorem vel, congue quam.', '.,:;-_/*')
        'Lorem ipsum dolor sit amet consectetur adipiscing elit Suspendisse id lacus rhoncus varius lorem vel congue quam'
    """
    kept = [ch for ch in value if ch not in char_list]
    return ''.join(kept)
def rule(index):
    """Convert decimal index to binary rule list (8 bits, MSB first)."""
    return [int(bit) for bit in format(index, '08b')]
def mania_key_fix(objs_each_key, mode=0):
    """
    Remove the 1/4 spaced adjacent notes to make the map perfectly playable.
    It's a lazy hack for the obvious loophole in the note pattern algorithm.
    Should set to inactive for low key counts.
    mode 0: inactive
    mode 1: remove latter note
    mode 2: remove former note
    mode 3: move note to next lane
    mode 4: mode note to next lane, limiting to no adjacent note in next lane (should be internal use only)
    """
    # Each obj looks like a 4-tuple where obj[0]/obj[1] are start/end times,
    # obj[2] the lane index and obj[3] a tick index — TODO confirm upstream.
    if mode == 0:
        return objs_each_key
    if mode == 1:
        # Keep a note only if it is more than one tick after the previously
        # kept note in the same lane (drops the later of an adjacent pair).
        for k, objs in enumerate(objs_each_key):
            prev_obj = (-1, -1, -1, -100)
            filtered_objs = []
            for i, obj in enumerate(objs):
                if obj[3] > prev_obj[3] + 1:
                    filtered_objs.append(obj)
                    prev_obj = obj
            objs_each_key[k] = filtered_objs
        return objs_each_key
    if mode == 2:
        # Same filter scanning backwards, so the earlier of an adjacent pair
        # is the one removed.
        for k, objs in enumerate(objs_each_key):
            prev_obj = (-1, -1, -1, 2147483647)
            filtered_objs = []
            for i, obj in reversed(list(enumerate(objs))):
                if obj[3] < prev_obj[3] - 1:
                    filtered_objs.append(obj)
                    prev_obj = obj
            objs_each_key[k] = filtered_objs
        return objs_each_key
    if mode == 3 or mode == 4:
        for k in range(len(objs_each_key)):
            objs = objs_each_key[k]
            prev_obj = (-1, -1, -1, -100)
            filtered_objs = []
            for i, obj in enumerate(objs):
                if obj[3] > prev_obj[3] + 1:
                    filtered_objs.append(obj)
                    prev_obj = obj
                else:
                    # Too close to the previous kept note: try to relocate it
                    # to the next lane (wrapping around) instead of dropping.
                    target_key = (k+1) % len(objs_each_key)
                    target_key_objs = objs_each_key[target_key]
                    # Find the last note in the target lane whose tick is at
                    # or before this note's tick.
                    j = 0
                    while target_key_objs[j][3] <= obj[3]:
                        j += 1
                        if j == len(target_key_objs):
                            break
                    j -= 1
                    if mode == 3: # check if target spot is empty
                        if j != len(target_key_objs) - 1:
                            check_next = target_key_objs[j+1]
                            if check_next[0] <= obj[1]:
                                continue
                        if target_key_objs[j][1] + 50 < obj[0]:
                            new_obj = (obj[0], obj[1], target_key, obj[3])
                            target_key_objs = target_key_objs[:j+1] + [new_obj] + target_key_objs[j+1:]
                            objs_each_key[target_key] = target_key_objs
                    if mode == 4: # check if target spot is empty and has no possible double keys
                        if j != len(target_key_objs) - 1:
                            check_next = target_key_objs[j+1]
                            if check_next[0] <= obj[1] or check_next[3] <= obj[3] + 1:
                                continue
                        if target_key_objs[j][1] + 50 < obj[0] and target_key_objs[j][3] + 1 < obj[3]:
                            new_obj = (obj[0], obj[1], target_key, obj[3])
                            target_key_objs = target_key_objs[:j+1] + [new_obj] + target_key_objs[j+1:]
                            objs_each_key[target_key] = target_key_objs
            objs_each_key[k] = filtered_objs
        if mode == 3: # if mode is 3, do another pass with mode 4
            return mania_key_fix(objs_each_key, mode=4)
        return objs_each_key
def getColor(index, colors):
    """ returns the colour at the position of an array which contains colours """
    # Wrap the index so any integer maps into the palette.
    wrapped_index = index % len(colors)
    return colors[wrapped_index]
def get_sections_url(request):
    """
    A fixture that returns URL for two different section endpoints:
    - /v1/hearing/<hearing id>/sections/
    - /v1/section/?hearing=<hearing id>
    """
    url_builders = {
        'nested': lambda hearing: '/v1/hearing/%s/sections/' % hearing.id,
        'root': lambda hearing: '/v1/section/?hearing=%s' % hearing.id,
    }
    # request.param selects which endpoint style the test is parametrized on.
    return url_builders[request.param]
def _qualify_optional_type(cpp_type):
# type: (str) -> str
"""Qualify the type as optional."""
return 'boost::optional<%s>' % (cpp_type) | a86e1135cb6e17aae6215041356d7eda7c68f747 | 34,491 |
def suggest_removal_features(df):
    """
    Find features that appear to be almost index-like in their values
    (at least half as many distinct values as rows).

    Args:
        df: pd.Dataframe
            Pandas DataFrame object.
    Returns:
        Set of feature names to remove.
    """
    half_row_count = int(df.shape[0] / 2)
    return {
        feature
        for feature in df.columns
        if len(df[feature].value_counts().index.tolist()) >= half_row_count
    }
def parse_file_tokens(parse_tokens):
    """Build a factory that reads a file and tokenizes its content with
    ``parse_tokens``."""
    def factory(filename: str):
        with open(filename, 'r', encoding='utf-8') as handle:
            raw_text = handle.read()
        return parse_tokens(raw_text)
    return factory
def camelCase(List):
    """Returns list containing camelcase words"""
    def _is_camel(word):
        # Mixed case (not all-lower, not all-upper) with no underscores.
        return word != word.lower() and word != word.upper() and "_" not in word
    camel_words = [word for word in List if _is_camel(word)]
    other_words = [word for word in List if not _is_camel(word)]
    return camel_words, other_words
import time
import functools
def measure_time(func):
    """
    A decorator to measure execution time of an arbitrary function.

    func: a function to measure

    Fixes the original bug of printing the absolute start timestamp
    instead of the elapsed duration, and preserves the wrapped function's
    metadata via functools.wraps.
    """
    @functools.wraps(func)
    def wrapper(*args, **kargs):
        start = time.time()
        result = func(*args, **kargs)
        elapsed = time.time() - start
        print(f"{func.__name__} took {elapsed} seconds")
        return result
    return wrapper
import numpy
def estDt(gr, cfl, u):
    """ estimate the timestep """
    # CFL condition with the maximum propagation speed over the grid;
    # gr.dx is the grid spacing.
    max_speed = numpy.max(numpy.abs(u))
    return cfl * gr.dx / max_speed
import subprocess
def git_add(directory):
    """Runs the git command to add all the files for the plugin"""
    command = ["git", "add", "*"]
    # Non-blocking: the caller receives the Popen handle to wait on.
    return subprocess.Popen(command, cwd=directory)
import torch
def charbonnier_loss(pred: torch.Tensor,
                     target: torch.Tensor,
                     q: float = 0.2,
                     eps: float = 0.01) -> torch.Tensor:
    """Generalized Charbonnier loss between output and ground truth.

    The loss function is

    .. math::
        loss = ((u-u_gt)^2+(v-v_gt)^2+eps)^q

    Generalized Charbonnier loss was used in LiteFlowNet when fine tuning,
    with eps=0.01 q=0.2.

    Args:
        pred (torch.Tensor): output flow map from flow_estimator,
            shape (B, 2, H, W).
        target (torch.Tensor): ground truth flow map, shape (B, 2, H, W).
        q (float): the exponent in charbonnier loss.
        eps (float): small constant for numerical stability when
            fine-tuning model. Defaults to 0.01.

    Returns:
        Tensor: loss map with the shape (B, H, W).
    """
    assert pred.shape == target.shape, \
        (f'pred shape {pred.shape} does not match target '
         f'shape {target.shape}.')
    residual = torch.add(pred, -target)
    squared_error = torch.sum(residual * residual, dim=1)  # (B, H, W)
    return (squared_error + eps) ** q
import math
def angulo_desejado(ll_alvo, ll_atual):
    """Given the target and current (latitude, longitude) pairs, compute the
    heading angle in degrees the robot must take to reach the target."""
    delta_lat = ll_alvo[0] - ll_atual[0]
    delta_lon = ll_alvo[1] - ll_atual[1]
    return math.atan2(delta_lon, delta_lat) / math.pi * 180
import subprocess
def lambda_handler(event, context):
    """
    Run the Whitespace program baudelaire.ws with the provided input and return
    a response containing the resulting output, in a format suitable for
    API Gateway integration.
    Input is taken from the query string parameter "input". It must be
    a sequence of ASCII characters terminated with a newline.
    The EsCo (Esoteric Combine) interpreter is used to run the program.
    """
    input = event["queryStringParameters"]["input"]
    # Feed the query input to the interpreter on stdin and capture stdout;
    # assumes ./esco and baudelaire.ws ship in the deployment package.
    completed_interpretation = subprocess.run(
        ["./esco", "--quiet", "--type", "ws", "baudelaire.ws"],
        text=True,
        encoding="ascii",
        input=input,
        stdout=subprocess.PIPE)
    # Discard the first two lines of the output (they contain the message
    # "Enter a word and press Enter:" and then an empty line).
    trimmed_output = completed_interpretation.stdout.split("\n", 2)[2]
    # API Gateway proxy-integration response shape; CORS open to any origin.
    return {
        "statusCode": 200,
        "headers": {"Access-Control-Allow-Origin": "*"},
        "body": trimmed_output,
    }
from typing import Optional
def format_op_params(params: Optional[list]) -> str:
    """format operation params nicely for display"""
    # None and the empty list both render as an empty string.
    if not params:
        return ""
    return "(" + ", ".join(map(str, params)) + ")"
def num_filled_bits(bloom_filter):
    """Count how many bits are set in a filter's value.

    Args:
        bloom_filter (BloomFilter): Bloom filter object to check.
    Returns:
        int: Number of filled bits in the filter.
    """
    # The '0b' prefix contains no '1', so counting on bin() is safe.
    return bin(bloom_filter.value).count('1')
import json
def build_address_map(tilegrid_file):
    """
    Loads the tilegrid and generates a map (baseaddr, offset) -> tile name(s).

    Parameters
    ----------
    tilegrid_file:
        The tilegrid.json file.

    Returns
    -------
    A dict with lists of tile names.
    """
    with open(tilegrid_file, "r") as fp:
        tilegrid = json.load(fp)
    address_map = {}
    for tile_name, tile_data in tilegrid.items():
        # Skip tiles without bits, with empty bits, or without the
        # CLB_IO_CLK bus.
        bus = tile_data.get("bits", {}).get("CLB_IO_CLK")
        if bus is None:
            continue
        # baseaddr is a hex string, offset a decimal value.
        address = (int(bus["baseaddr"], 16), int(bus["offset"]))
        address_map.setdefault(address, []).append(tile_name)
    return address_map
def log_intensity(V, H, params):
    """Evaluate the log likelihood of spiking with an exponential link function.

    V: 2D array with voltage and θV in the first two columns
    H: 2D array with θ1 and θ2 in the first two columns
    params: list of parameters (see predict() for specification)
    """
    voltage, theta_v = V[:, 0], V[:, 1]
    theta_1, theta_2 = H[:, 0], H[:, 1]
    return voltage - theta_1 - theta_2 - theta_v - params[3]
def trim_missing_at_end_data_df(df_data, neg_lim=None):
    """
    Removes rows at end of file that have empty data
    :param df_data: DataFrame whose first three columns hold the measurements
    :param neg_lim: optional threshold; rows from the first value of column 2
        below this limit onward are also discarded
    :return: the trimmed DataFrame
    """
    df_data = df_data.reset_index(drop=True)
    # Rows where any of the first three columns is NaN.
    nan_rows = df_data[df_data.iloc[:, :3].isnull().T.any().T]
    nan_indexes = list(nan_rows.index)
    if len(nan_indexes):
        i_start = None
        i_end = None
        if nan_indexes[0] == 0:
            # NaN block at the very start: assume data begins right after the
            # last NaN row — presumably the leading NaNs are contiguous;
            # TODO confirm with real input files.
            i_start = nan_indexes[-1] + 1
            for i in range(1, len(nan_indexes)):
                if nan_indexes[i] - nan_indexes[i - 1] > 1:
                    # First gap in the NaN index run brackets the valid span.
                    i_start = nan_indexes[i - 1] + 1
                    i_end = nan_indexes[i]
                    break
        else:
            # No leading NaNs: keep everything before the first NaN row.
            i_end = nan_indexes[0]
        df_data = df_data[i_start:i_end]
    if neg_lim is not None:
        # remove large neg values
        neg_rows = df_data[df_data.iloc[:, 2] < neg_lim]
        neg_indexes = list(neg_rows.index)
        if len(neg_indexes):
            df_data = df_data[:neg_indexes[0]]
    return df_data
def get_word(word_type):
    """Get a word from a user and return that word."""
    # 'adjective' takes the article 'an'; 'noun'/'verb' take 'a'.
    a_or_an = 'an' if word_type.lower() == 'adjective' else 'a'
    return input('Enter a word that is {0} {1}: '.format(a_or_an, word_type))
import threading
def isMultiThreadMode():
    """
    Checks if running in multi-thread(ing) mode
    (i.e. more than one thread is currently alive).
    """
    # active_count() is the modern spelling; activeCount() is a
    # deprecated alias (deprecated since Python 3.10).
    return threading.active_count() > 1
def def_readme():
    """ Check Readme Markdown """
    # Reads README.md from the current working directory.
    with open('README.md') as file_content:
        return file_content.read()
def validate_item_pickup_transaction_locations(loan, destination, **kwargs):
    """Validate the loan item, pickup and transaction locations."""
    pickup_pid = loan["pickup_location_pid"]
    item_pid = kwargs["item_location_pid"]
    if destination == "ITEM_AT_DESK":
        # At-desk requires the item to already be at the pickup location.
        return pickup_pid == item_pid
    if destination == "ITEM_IN_TRANSIT_FOR_PICKUP":
        # Transit only makes sense when the two locations differ.
        return pickup_pid != item_pid
    # Any other destination: no verdict (implicitly returns None).
import requests
def log_me_in_direct(base_url, user_data=None):
    """
    Log in by POSTing ``user_data`` to the ``/geo/init`` endpoint.

    :param base_url: root URL of the service
    :param user_data: form fields to POST (passed straight to requests)
    :returns cookies
    :return: cookies from the first response in the redirect history
    """
    add_url = "/geo/init"
    response = requests.post(base_url + add_url, data=user_data)
    # The endpoint redirects on success; the session cookie is set on the
    # first (pre-redirect) response, so read it from response.history.
    # NOTE(review): raises IndexError if the server did not redirect — confirm.
    return response.history[0].cookies
import io
import array
def part1(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
    """Find two numbers that sum to 2020, then multiply them."""
    seen = array.array('i')
    iterations = 0
    for line in stdin:
        iterations += 1
        number = int(line.strip())
        pair = 2020 - number
        if pair in seen:
            stderr.write(f"{number} + {pair} in {iterations} iterations\n")
            return number * pair
        seen.append(number)
    raise Exception("No matches found.")
from string import printable
def unprintable(mystring):
    """return only the unprintable characters of a string"""
    return ''.join(filter(lambda character: character not in printable,
                          mystring))
import os
def get_images(image_path):
    """
    get all images at the specified image_path (! no check for actual image files)
    :param image_path: the path that is searched
    :return: number of images, file paths
    """
    entries = os.listdir(image_path)
    file_paths = ['{}/{}'.format(image_path, entry) for entry in entries]
    return len(entries), file_paths
def sum_credit_debit(credit, debit):
    """
    Calculates the total of the transactions
    :param credit: Credits
    :param debit: Debits
    :return: total credit amounts minus total debit amounts
    """
    total_credit = sum(entry.amount for entry in credit)
    total_debit = sum(entry.amount for entry in debit)
    return total_credit - total_debit
def can_review(user):
    """Checks if a user can review a translate"""
    permissions = user.permissions
    return permissions['perm_review']
def select_frames(frames, frames_per_video):
    """
    Pick an evenly spaced subset of at most ``frames_per_video`` frames.

    :param frames: list of frames
    :param frames_per_video: number of frames to select
    :return: selection of frames
    """
    # Stride of at least 1 (when fewer frames exist than requested).
    step = max(len(frames) // frames_per_video, 1)
    return frames[::step][:frames_per_video]
def doc_freqs(docs) -> dict:
    """
    Takes in a list of spacy Doc objects and return a dictionary of
    document frequencies for each token over all the documents, i.e.
    in how many documents each token text appears.
    E.g. {"Aarhus": 20, "the": 2301, ...}
    """
    frequencies = {}
    for doc in docs:
        # Count each distinct token text at most once per document.
        unique_texts = {token.text for token in doc}
        for text in unique_texts:
            frequencies[text] = frequencies.get(text, 0) + 1
    return frequencies
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.