content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import Dict
def headers_msb_creator(base_header: Dict[str, str]):
    """
    Build the request headers needed for MSB.

    Args:
        base_header (Dict[str, str]): the base header to start from (not mutated)
    Returns:
        Dict[str, str]: a copy of ``base_header`` with caching disabled
    """
    return {**base_header, "cache-control": "no-cache"}
from typing import Dict
def read_map(path: str) -> Dict[str, str]:
    """
    Read a two-column TSV file into a dictionary.

    Each non-empty line must have the format ``col1<TAB>col2``; the value is
    stripped of surrounding whitespace and blank lines are skipped.

    WDL syntax: Map[String, String] read_map(String|File)
    """
    mapping = dict()
    with open(path, "r") as handle:
        for raw_line in handle:
            stripped = raw_line.rstrip()
            if not stripped:
                # remove extra lines
                continue
            key, value = stripped.split('\t', 1)
            mapping[key] = value.strip()
    return mapping
def normalize(self, asOf=None, multiplier=100):
    """
    Return a normalized Series or DataFrame, rescaled relative to a base row.

    Example:
        Series.normalize()

    Parameters
    -----------
    asOf : string, optional
        Label of the row used as the base, e.g. '2015-02-29'.
        When omitted, the first row is used.
    multiplier : int
        Factor by which the results will be adjusted.
    """
    base = self.loc[asOf] if asOf else self.iloc[0]
    return self / base * multiplier
def check_workbook_exists(service, spreadsheet_id, tab_name):
    """
    Return True when a sheet (tab) named ``tab_name`` exists in the spreadsheet.
    """
    try:
        metadata = service.spreadsheets().get(
            spreadsheetId=spreadsheet_id).execute()
        # A workbook exists when any sheet carries the wanted title.
        return any(sheet['properties']['title'] == tab_name
                   for sheet in metadata['sheets'])
    except Exception as e:
        print(f'Failed to check workbook {tab_name} for spreadsheet '
              f'{spreadsheet_id}')
        raise(e)
def heat_flux_to_temperature(heat_flux: float, exposed_temperature: float = 293.15):
    """Return the surface temperature of an emitter for a given heat flux.

    Based on the black-body (Stefan-Boltzmann) radiation model.

    :param heat_flux: [W/m2] heat flux of emitter.
    :param exposed_temperature: [K] ambient/receiver temperature, 20 deg.C by default.
    :return temperature: [K] calculated emitter temperature.
    """
    view_factor = 1.0  # radiation view factor (epsilon)
    stefan_boltzmann = 5.67e-8  # [W/m2/K4]
    # Invert E'' = epsilon * sigma * (T_1**4 - T_0**4) for T_1.
    radicand = heat_flux / stefan_boltzmann / view_factor + exposed_temperature ** 4
    return radicand ** 0.25
def remove_genomic_transcripts(dataframe):
    """Drop rows whose 'transcript_novelty' column equals 'Genomic'.

    Args:
        dataframe: pandas DataFrame
    Returns:
        pandas DataFrame restricted to the non-genomic rows
    """
    return dataframe[dataframe["transcript_novelty"] != "Genomic"]
import six
import struct
def bytes_to_int(byte_array, big_endian=True, signed=False):
    """
    Convert a byte array to an integer.

    :param byte_array: bytes/bytearray to decode (any length).
    :param big_endian: interpret the bytes most-significant-first when True.
    :param signed: interpret the value as two's complement when True.
    :return: the decoded integer.
    """
    # Python 3 only: int.from_bytes handles any length, byte order and sign,
    # so the former six/struct Python-2 compatibility path (which was also
    # limited to lengths 1/2/4/8) is no longer needed.
    order = 'big' if big_endian else 'little'
    return int.from_bytes(byte_array, byteorder=order, signed=signed)
def get_ref_spec(spec='LIOx'):
    """
    Store of reference species for families.

    Parameters
    ----------
    spec (str): species/tracer/variable name

    Returns
    -------
    ref_spec (str) reference species for a given family; unknown names are
    returned unchanged (with a printed warning).

    Notes
    -----
    This is for use in combination with functions that calculate relative
    values (e.g. in units of Ox, I, etc).  The mapping previously listed the
    key 'PClOxI' twice; the redundant (silently-overwriting) duplicate entry
    was removed.
    """
    d = {
        'Cly': 'Cl',
        'Cl': 'Cl',
        'LOx': 'O3',
        'POx': 'O3',
        'LOX': 'O3',
        'POX': 'O3',
        'LIOx': 'I',
        'PIOx': 'I',
        'PClOx': 'Cl',
        'LClOx': 'Cl',
        'PClOxI': 'Cl',
        'LClOxI': 'Cl',
        'PClOxII': 'Cl',
        'LClOxII': 'Cl',
        'LCI': 'Cl',
        'LCII': 'Cl',
        'PBrOx': 'Br',
        'LBrOx': 'Br',
        'Br': 'Br',
        'Bry': 'Br',
        'I': 'I',
        'Iy': 'I',
        'IxOy': 'I',
        # core species
        'SO4': 'S',
        'NIT': 'N',
        'NITs': 'N',
        'NH4': 'N',
        'ISOP': 'C',
        'NO': 'N',
        'NO2': 'N',
        'N2O5': 'N',
        'O3': 'O3',
        'SO2': 'S',
        # Other VOCs
        'ACET': 'C',
        'ALD2': 'C',
        'DMS': 'S',
        # include halogens
        'HOI': 'I',
        'I2': 'I',
        'CH3I': 'I',
        'CH3IT': 'I',  # Vestigial spec from v10 (is CH3I)...
        'CH2I2': 'I',
        'CH2IBr': 'I',
        'CH2ICl': 'I',
        'CHBr3': 'Br',
        'CH2Br2': 'Br',
        'CH3Br': 'Br',
        'CH2Cl2': 'Cl',
        'CHCl3': 'Cl',
        'CH3Cl': 'Cl',
        'HCl': 'Cl',
        'HBr': 'Br',
    }
    try:
        return d[spec]
    except KeyError:
        pstr = "WARNING: Just returning provided species ('{}') as ref_spec"
        print(pstr.format(spec))
        return spec
import keyword
def force_to_valid_python_variable_name(old_name):
    """Return a name that is safe to use as a Python identifier.

    Valid c++ names are not always valid in python, so reserved words get an
    ``opt_`` prefix.

    >>> force_to_valid_python_variable_name('lambda')
    'opt_lambda'
    >>> force_to_valid_python_variable_name('inputVolume')
    'inputVolume'
    """
    candidate = old_name.strip()
    return f"opt_{candidate}" if candidate in keyword.kwlist else candidate
def timedelta_float(td, units='days'):
    """
    Return timedelta ``td`` as a float in UNITS (either 'days' or 'seconds').

    Can be negative for things in the past, e.g.
    ``timedelta_float(now() - dt_last_week)`` is roughly 7.0.

    Fix: uses ``td.total_seconds()`` so the microseconds component is no
    longer silently dropped (``td.days + td.seconds`` alone ignores it).
    """
    SECONDS_PER_DAY = 86400.0
    total_seconds = td.total_seconds()
    if units == 'days':
        return total_seconds / SECONDS_PER_DAY
    elif units == 'seconds':
        return total_seconds
    else:
        raise Exception('Unknown units %s' % units)
def group_timing_lines(bm_iteration_lines):
    """
    Group lines (from one benchmark iteration) by gradient call,
    specifying:
    - Update time
    - Gradient work time
    - For all partial derivatives a sublist of all lines
    Finally, the terminate time for the entire bm_iteration is also
    returned (last line in the list).

    A gradient call starts at the first of a run of 'worker_id' lines and
    ends at the following 'update_state' line; each call is returned as a
    dict with keys 'gradient_total' (the update_state line) and
    'partial_derivatives' (the worker lines before it).

    Returns (gradient_calls, terminate_line); terminate_line is None for an
    empty input list.
    """
    gradient_calls = []
    start_indices = []
    end_indices = []
    for ix, line in enumerate(bm_iteration_lines[:-1]):  # -1: leave out terminate line
        if line[:9] == 'worker_id':
            # NOTE(review): at ix == 0 this reads bm_iteration_lines[-1]
            # (Python wraparound), i.e. the terminate line — presumably never
            # a 'worker_id' line, so the first run is still detected; confirm.
            if bm_iteration_lines[ix - 1][:9] != 'worker_id':  # only use the first of these
                start_indices.append(ix)
        elif line[:12] == 'update_state':
            end_indices.append(ix)
    # Pair each run of worker lines with its closing update_state line.
    for ix in range(len(start_indices)):
        gradient_calls.append({
            'gradient_total': bm_iteration_lines[end_indices[ix]],
            'partial_derivatives': bm_iteration_lines[start_indices[ix]:end_indices[ix]]
        })
    try:
        terminate_line = bm_iteration_lines[-1]
    except IndexError:
        terminate_line = None
    return gradient_calls, terminate_line | c124f2a01a4132436ddc87176163c8c338f19695 | 110,761 |
from typing import OrderedDict
def make_sorted_dict(dic):
    """Turn a dict into an OrderedDict with keys in sorted order.

    Fix: uses collections.OrderedDict — the module imports the name from
    ``typing``, and instantiating the typing alias is deprecated/unreliable
    across Python versions.
    """
    from collections import OrderedDict  # the typing alias is not a real class
    return OrderedDict(sorted(dic.items()))
def new_epoch(batch_index, batch_size, num_examples):
    """Returns true if a new epoch occurs during batch number batch_index."""
    first = batch_index * batch_size
    last = first + batch_size - 1
    starts_on_boundary = (first % num_examples) == 0
    wraps_around = (first % num_examples) > (last % num_examples)
    return starts_on_boundary or wraps_around
def lhs(var, mean):
    """LHS of the equation: relative deviation of ``var`` from ``mean``."""
    deviation = var - mean
    return deviation / mean
def _merge_params(cli, config):
"""Merge CLI params with configuration file params. Note that the
configuration params will overwrite the CLI params."""
# update CLI params with configuration; overwrites
params = dict(list(cli.items()) + list(config.items()))
return params | aa7a70909c63931758f06e178398b3cfb2a6e497 | 110,776 |
import re
def extract_citations(tree):
    """
    Extract the number of citations from a given eutils XML tree.

    Parameters
    ----------
    tree: Element
        An lxml Element parsed from eutil website

    Return
    ------
    n_citations: int
        Number of citations that an article got until the parsed date.
        If no citation count can be parsed, return 0.
    """
    citations_text = tree.xpath('//form/h2[@class="head"]/text()')[0]
    count_token = re.sub("Is Cited by the Following ", "", citations_text).split(" ")[0]
    try:
        # Fix: only a failed int() conversion should fall back to 0; the
        # previous bare `except:` also hid unrelated errors (even
        # KeyboardInterrupt).
        return int(count_token)
    except ValueError:
        return 0
def Dic_by_List_of_Tuple(inlist):
    """
    Convert a list of (key, value) tuples to a {key: value} dictionary.

    Later duplicates of a key overwrite earlier ones, as before.
    """
    return dict(inlist)
import binascii
def hex_xor(hex_string_1, hex_string_2):
    """Return the XOR of two hex bytestrings."""
    if len(hex_string_1) != len(hex_string_2):
        raise ValueError('Length of inputs must match.')
    raw_1 = binascii.a2b_hex(hex_string_1)
    raw_2 = binascii.a2b_hex(hex_string_2)
    xored = bytes(byte_a ^ byte_b for byte_a, byte_b in zip(raw_1, raw_2))
    return binascii.b2a_hex(xored)
def module_resolver(jname):
    """Job resolver which adds a module name (called foo)."""
    return f"foo.{jname}"
def remove_enclosing_brackets(text):
"""Removes square brackets if they are enclosing the text string.
Args:
text (str): Any arbitrary string.
Returns:
str: The same string but with the enclosing brackets removed if they were there.
"""
if text.startswith("[") and text.endswith("]"):
return(text[1:-1])
else:
return(text) | 860f5868a5847d187ae4c91232fad5919a0ee4ac | 110,792 |
def get_xvars(rvar):
    """ Return variables to be used in estimation """
    base_vars = ['OverallRank', 'treat']
    # Tuition regressions additionally control for the year.
    return base_vars + ['year'] if rvar == 'Tuition' else base_vars
def add_line(output, buffer, line_height):
    """Add a new line to the output array.

    :param output: The output array the line shall be appended to (mutated)
    :param buffer: An array of space we add at the beginning of each line
    :param line_height: The user defined line height
    :returns: The output array with new line
    """
    # The very first entry gets no leading blank lines.
    spacing = line_height if output else 0
    output.extend([""] * spacing)
    output.extend(buffer)
    return output
def get_direction_sign(here, there):
    """Return sign (+1 or -1) required to move from here to there."""
    if here < there:
        return 1
    return -1
def _RevisionList(rows):
"""Returns a list of revisions."""
return [r.revision for r in rows] | dcd42a875d3601172b07b18419d058fb62ab86ae | 110,805 |
import torch
def cumprod_exclusive(tensor):
    """
    Exclusive cumulative product along the last dimension.

    Mimics tf.math.cumprod(..., exclusive=True), which isn't available in
    PyTorch: entry ``i`` holds the product of all entries before ``i``
    (entry 0 is 1).
    """
    shifted = torch.roll(torch.cumprod(tensor, -1), 1, -1)
    shifted[..., 0] = 1.0
    return shifted
from typing import Dict
from typing import Any
def _pop_required(params, key, expected_type, type_name):
    """Pop a required key from ``params``, validating its type."""
    try:
        value = params.pop(key)
    except KeyError:
        raise KeyError(f"params must contain the key '{key}'")
    if not isinstance(value, expected_type):
        raise TypeError(f"'{key}' should be a {type_name}")
    return value


def create_run_command(params: Dict[str, Any]) -> str:
    """Create the run command that will start the training/prediction.

    Parameters
    ----------
    params : Dict[str, Any]
        The parameters for the job. It must contain "runner" (str, the path
        to the python script), "output_base_dir" (str), "job_index" (int)
        and "job_prefix" (str); these four keys are popped from ``params``.
        All remaining key/values are passed to the runner as "--key value"
        (bare "--key" for True booleans; False booleans are omitted).

    Returns
    -------
    job_command : str
        The command that will be executed on the command line.

    Raises
    ------
    KeyError
        If one of the four required keys is missing.
    TypeError
        If a required key has the wrong type.  Fixes the original version,
        which validated with `assert` (silently skipped under ``python -O``)
        and wrongly reported job_index as "should be a string".
    """
    runner = _pop_required(params, "runner", str, "string")
    output_base_dir = _pop_required(params, "output_base_dir", str, "string")
    job_index = _pop_required(params, "job_index", int, "integer")
    job_prefix = _pop_required(params, "job_prefix", str, "string")
    # Python scripts are launched through the interpreter.
    job_command = f"python {runner}" if ".py" in runner else f"{runner}"
    # The per-job output directory is derived from base dir, prefix and index.
    params["output_base_dir"] = f"{output_base_dir}{job_prefix}{job_index}/"
    for key, value in params.items():
        if isinstance(value, bool):
            if value is True:
                job_command += f" --{key}"
        else:
            job_command += f" --{key} {value}"
    return job_command
import hashlib
def get_md5(location):
    """ Get a file's MD5 checksum.

    Args:
        location: A string specifying the location of the file to check.
    Returns:
        A string containing the checksum with hexidecimal encoding.
    """
    CHUNK_SIZE = 64 * 1024
    digest = hashlib.md5()
    with open(location, 'rb') as source:
        # Read in fixed-size chunks so large files never load fully into memory.
        while chunk := source.read(CHUNK_SIZE):
            digest.update(chunk)
    return digest.hexdigest()
def convert_dict_to_tuple(d):
    """
    Converts an unhashable dict to a hashable tuple of items, sorted by key.
    """
    # Keys are unique, so tuple comparison never falls through to the values.
    return tuple(sorted(d.items()))
def add_index_to_tex_prm_name(tex: str, index: int) -> str:
    """
    Add a lower index to a parameter's tex-name, e.g. ('$a$', 1) -> '$a_1$'.

    This function is intended for vector-valued parameters.

    Parameters
    ----------
    tex
        The tex-string to be modified.
    index
        The index to be added as a lower index to tex.

    Returns
    -------
    tex_mod
        The tex-string with included index.
    """
    # Only rewrite in tex-fashion when the string contains exactly one
    # '$...$' math environment and no sub-/superscript yet.
    simple_math_mode = tex.count("$") == 2
    no_existing_index = ("_" not in tex) and ("^" not in tex)
    if simple_math_mode and no_existing_index:
        before, body, after = tex.split("$")
        return f"{before}${body}_{index}${after}"
    # Fallback that makes no assumptions about the given tex-string.
    return tex + f" ({index})"
def massic_flux_shen(oil_sol, area, k):
    """
    Return the dissolution mass flux [kg/s].

    Source: (Shen et al., 1993)

    Parameters
    ----------
    oil_sol : Oil solubility in water [kg/m³]
    area : Area of the slick [m²]
    k : Dissolution mass transfer coefficient [m/s]
    """
    dissolved_per_area = oil_sol * area
    return dissolved_per_area * k
import math
def rotate2D(x, y, ang):
    """Rotates a 2d vector clockwise by an angle in radians."""
    cos_a = math.cos(ang)
    sin_a = math.sin(ang)
    return (x * cos_a - y * sin_a, y * cos_a + x * sin_a)
def single_endpoint(mocker):
    """Create a single network manager endpoint mock (a bare stub)."""
    stub = mocker.stub()
    return stub
def ok_request(message: str):
    """Format an ok response (payload, HTTP 200) from a message."""
    payload = {'result': 'ok', 'message': message}
    return payload, 200
def CheckDoNotSubmitInFiles(input_api, output_api):
    """Checks that the user didn't add 'DO NOT ' + 'SUBMIT' to any files."""
    # Built by concatenation so this very file never trips the check itself.
    keyword = 'DO NOT ' + 'SUBMIT'
    # We want to check every text file, not just source files.
    for f, line_num, line in input_api.RightHandSideLines(lambda x: x):
        if keyword not in line:
            continue
        text = 'Found ' + keyword + ' in %s, line %s' % (f.LocalPath(), line_num)
        return [output_api.PresubmitError(text)]
    return []
def split_location_string(loc):
    """
    Return the parts of a location string (formerly used for a real estate unit location)
    """
    # Capitalize first, then normalize ", " to "," before splitting.
    normalized = loc.capitalize().replace(", ", ",")
    return normalized.split(",")
def get_all_rules_for_dg(device_group, device_group_hierarchy_parent, devicegroup_objects):
    """
    Collect all security rules that apply to a device group.

    Per https://docs.paloaltonetworks.com/panorama/9-1/panorama-admin/panorama-overview/centralized-firewall-configuration-and-update-management/device-groups/device-group-policies
    The order is: pre-rules top-down, local rules, then post-rules bottom up.
    (Local rules are not supported yet.)

    :param device_group: Device Group to get rules for
    :param device_group_hierarchy_parent: dict mapping device group to parent device groups
    :param devicegroup_objects: rules per device group, keyed by rule type
    :return: List of tuples: (device group, rule type, rule entry)
    """
    # Walk from the device group up to the root of the hierarchy.
    lineage = [device_group]
    parent = device_group_hierarchy_parent.get(device_group)
    while parent:
        lineage.append(parent)
        parent = device_group_hierarchy_parent.get(parent)
    all_rules = []
    # Pre-rules: outermost ancestor first (top-down).
    for dg in reversed(lineage):
        for rule in devicegroup_objects[dg]['SecurityPreRules']:
            all_rules.append((dg, 'SecurityPreRules', rule))
    # Post-rules: innermost group first (bottom-up).
    for dg in lineage:
        for rule in devicegroup_objects[dg]['SecurityPostRules']:
            all_rules.append((dg, 'SecurityPostRules', rule))
    return all_rules
from functools import wraps
def consumer(func):
    """A decorator, advances func to its first yield point when called.

    Modified from the original PEP 342 example to use functools.wraps so the
    wrapper looks like the wrapped generator function.

    Fix: Python 3 generators have no ``.next()`` method — priming must use
    the builtin ``next()``; the old ``gen.next()`` raised AttributeError.
    """
    @wraps(func)
    def wrapper(*args, **kw):
        gen = func(*args, **kw)
        next(gen)  # advance to the first yield so .send() works immediately
        return gen
    return wrapper
def merge_similarities(oldsims, newsims, clip=None):
    """
    Merge two precomputed similarity lists, truncating the result to the
    ``clip`` most similar items.

    Either argument may be None; items are (id, score) pairs and the merged
    list is sorted by descending score.
    """
    if oldsims is None:
        merged = newsims or []
    elif newsims is None:
        merged = oldsims
    else:
        merged = sorted(oldsims + newsims, key=lambda pair: pair[1], reverse=True)
    return merged if clip is None else merged[:clip]
import asyncio
def async_run(coro):
    """
    Run the specified asynchronous coroutine once in an event loop.

    :param coro: coroutine to run
    :return: the result of the coroutine

    Fix: uses :func:`asyncio.run`, which creates and properly closes a fresh
    event loop; ``asyncio.get_event_loop()`` outside a running loop is
    deprecated since Python 3.10.
    """
    return asyncio.run(coro)
def sort_points(point_array):
    """Return point_array sorted by leftmost first, then by slope, ascending."""
    point_array.sort()  # leftmost point becomes the pivot

    def slope_from_pivot(point):
        """Slope of the line through the pivot (leftmost point) and point."""
        pivot = point_array[0]
        return (pivot.get_y() - point.get_y()) / \
            (pivot.get_x() - point.get_x())

    remainder = sorted(point_array[1:], key=slope_from_pivot)
    return point_array[:1] + remainder
def get_location(patent_html):
    """Gets location of company associated with patent entry (dict).

    The location is the parenthesised text following the first "Assignee:"
    label.  # assumes a BeautifulSoup-style tree (.find/.find_next/.text) — TODO confirm
    """
    # Grab metadata table
    ass_loc = patent_html.find(text="Assignee:").find_next()
    # Split tag contents so that only first assignee location is retrieved
    ass_text = ass_loc.text.split('\n\n')[0].replace('\n', '')
    # Extract the substring between the first '(' and the last ')'.
    lind = ass_text.find("(")
    rind = ass_text.rfind(")")
    return ass_text[lind + 1:rind] | e25fe2220b7ad8fcaa3cb09af6d9f98a5b05675b | 110,852 |
import re
def admitted_faster_to_qed(s):
    """Replaces instances of "Admitted. (* faster *)" with "Qed."

    >>> admitted_faster_to_qed('asd Admitted. (* faster *)')
    'asd Qed.'
    >>> admitted_faster_to_qed('asd Admitted. (* faStEr*)')
    'asd Qed.'
    >>> admitted_faster_to_qed('asd Admitted. (* a lot faster *)')
    'asd Admitted. (* a lot faster *)'
    >>> admitted_faster_to_qed('asd Admitted.(* otheRwise proof tOo sLow*) asd2')
    'asd Qed. asd2'
    """
    # Fix: the old patterns embedded the global (?i) flag mid-pattern, which
    # is a ValueError on Python 3.11+; pass re.IGNORECASE instead (mid-pattern
    # (?i) already applied to the whole pattern, so behavior is unchanged).
    result = re.sub(
        r'Admitted[.]\s*[(][*]\s*faster\s*[*][)]',
        'Qed.',
        s,
        flags=re.IGNORECASE)
    result = re.sub(
        r'Admitted[.]\s*[(][*]\s*otherwise\s*proof\s*too\s*slow[*][)]',
        'Qed.',
        result,
        flags=re.IGNORECASE)
    return result
def is_full(board):
    """Check whether the board is full.

    Returns (True, draw_message) when no empty cell ('.') remains, else
    (False, None).
    """
    for dot_list in board:
        for dot in dot_list:
            if "." in dot:
                # An empty cell remains, so the game can continue.
                return False, None
    message = '\033[35m' + "It is a draw, no one has won!!!" + '\033[0m'
    return True, message
from typing import OrderedDict
from typing import Counter
def ReturnFrequencies(list_of_values):
    """Gives the frequency (%) of each value in a list.

    args:
        list_of_values: list of ints. A list of integers to count
    returns:
        frequency_dict: Dict. An OrderedDict where each key is a value from
        the original list (keys ascending) and each value is that value's
        frequency in percent.
    """
    # Fix: use the real collections classes — the module imports these names
    # from ``typing``, and instantiating the typing aliases is
    # deprecated/unreliable across Python versions.
    from collections import Counter, OrderedDict
    frequency_dict = OrderedDict()
    value_counts = Counter(list_of_values)
    total_counts = sum(value_counts.values())
    for key, value in sorted(value_counts.items()):
        frequency_dict[key] = float(value) / total_counts * 100
    return frequency_dict
def format_keep_alive_packages(keep_alive_packages):
    """Render the ssh ServerAliveCountMax option, or '' when unset/zero."""
    if not keep_alive_packages:
        return ''
    return '-o ServerAliveCountMax={}'.format(keep_alive_packages)
def join_tokens(tokens):
    """Join a list of tokens into a single space-separated string.

    Arguments:
        tokens (list) : a list of tokens
    Returns:
        a string with space-separated tokens
    """
    separator = " "
    return separator.join(tokens)
def get_tag_count_data(infilename, chromosome='chr01'):
    """
    Read the tag counts of a particular chromosome.

    Input format is the same as that of GeneTrack/LionDB3: whitespace
    separated columns ``chrom index fwd rev``.

    Returns three parallel lists: (indices, forward counts, reverse counts).
    """
    k_idx = []
    k_fwd = []
    k_rev = []
    # Fix: use a context manager so the file handle is always closed
    # (the original iterated over open(...) and leaked the handle).
    with open(infilename) as handle:
        for line in handle:
            fields = line.split()
            # Skip blank lines (previously an IndexError) and other chromosomes.
            if fields and fields[0] == chromosome:
                k_idx.append(int(fields[1]))
                k_fwd.append(int(fields[2]))
                k_rev.append(int(fields[3]))
    return k_idx, k_fwd, k_rev
def bucket_size(bucket):
    """
    Returns the total number of bytes in a bucket.
    """
    total_bytes = 0
    for key in bucket.get_all_keys():
        total_bytes += key.size
    return total_bytes
from typing import Any
import pickle
def read(
        path: str) \
        -> Any:
    """
    Unpickle and return the object stored at `path`.

    NOTE: pickle can execute arbitrary code; only use on trusted files.
    """
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def tuplize(what, lists=True, tuplize_none=False):
    """
    If `what` is a tuple, return it as-is, otherwise put it into a tuple.
    If `lists` is true, also consider lists to be tuples (the default).
    If `tuplize_none` is true, a lone `None` results in an empty tuple,
    otherwise it will be returned as `None` (the default).
    """
    if what is None:
        return tuple() if tuplize_none else None
    tuple_like = isinstance(what, tuple) or (lists and isinstance(what, list))
    return tuple(what) if tuple_like else (what,)
import io
import re
import math
def mem_per_core(units='kB'):
    """Return total system memory per core in kB (default) or MB ('mb').

    Reads /proc/meminfo and /proc/cpuinfo, so this is Linux-only.
    Fix: the old inline comment claimed the conversion was "MB to kB",
    but multiplying by 1e6/1e9 (== 1/1000) converts kB to MB.
    """
    with io.open('/proc/meminfo') as f:
        lines = [x for x in f.readlines()
                 if re.search('MemTotal', x) is not None]
    mem = float(lines[0].split()[1])  # total memory in kB
    with io.open('/proc/cpuinfo') as f:
        ncores = len([x for x in f.readlines()
                      if re.search('processor', x) is not None])
    mpc = mem / ncores
    if units.lower() == 'mb':
        mpc *= 1e6 / 1e9  # kB to MB
    return int(math.floor(mpc))
def filter_vec_by_graph(G, vec, nodelist):
    """
    Return the subset of vec and nodelist corresponding to the nodes defined
    in G, preserving the original ordering.
    """
    keep = [position for position, node in enumerate(nodelist) if node in G]
    nodelist_sub = [nodelist[position] for position in keep]
    # Fancy indexing selects the matching vector entries in one step.
    return vec[keep], nodelist_sub
def chunkify(lst, n):
    """Split ``lst`` into ``n`` interleaved chunks (round-robin striding).

    Chunk ``i`` contains ``lst[i], lst[i+n], lst[i+2n], ...``, so chunk
    lengths differ by at most one.  Note: despite the former docstring,
    chunks are NOT contiguous runs of length ``n``.
    """
    return [lst[i::n] for i in range(n)] | 723467fa5910577619bf8e455a0fe6020894f137 | 110,895 |
import math
def entropy(class_probabilities):
    """Shannon entropy (in bits) of a list of class probabilities; zero
    probabilities are ignored."""
    total = 0
    for p in class_probabilities:
        if p:
            total += -p * math.log(p, 2)
    return total
def convert_maya_path_to_usd(path):
    """Change an absolute Maya node path into a USD SdfPath-style path.

    Args:
        path (str): Some path like "|group1|pSphere1|pSphereShape1".
    Returns:
        str: The converted USD path. e.g. "/group1/pSphere1/pSphereShape1".
    """
    return path.replace("|", "/")
import hashlib
def file_hash(path):
    """
    Return an integer hash generated from the file's contents (MD5).
    """
    BLOCKSIZE = 1 << 20
    digest = hashlib.md5()
    with open(path, 'rb') as handle:
        # iter() with a sentinel reads until EOF in fixed-size chunks.
        for chunk in iter(lambda: handle.read(BLOCKSIZE), b''):
            digest.update(chunk)
    return int(digest.hexdigest(), 16)
def rsa_encrypt(m: int, e: int, n: int) -> int:
    """
    rsa_encrypt

    Textbook RSA encryption via modular exponentiation: c = (m**e) % n.
    Returns the cipher number c.

    :param m: the plaintext message.
    :param e: the encryption exponent.
    :param n: the modulus.
    :return: The ciphertext integer.
    """
    return pow(m, e, n)
import io
import base64
def image_to_base64(image):
    """
    Encode a matplotlib image as a base64 png string.

    Parameters
    ----------
    image : matplotlib image object
        The image to be converted.

    Returns
    -------
    image_base64 : string
        The UTF8-encoded base64 string representation of the png image.
    """
    axes = image.axes
    png_buffer = io.BytesIO()
    # The png is rendered in axes coordinates, so temporarily set the axes
    # limits to the image extent and restore them afterwards.
    saved_limits = axes.axis()
    axes.axis(image.get_extent())
    image.write_png(png_buffer)
    axes.axis(saved_limits)
    png_buffer.seek(0)
    return base64.b64encode(png_buffer.read()).decode('utf-8')
from pathlib import Path
def _check_behaviour_toml(behaviour_toml: Path) -> Path:
"""
Check the behaviour toml provided exists.
Parameters
----------
behaviour_toml: Path
Path to behaviour toml
Returns
-------
Path
Absolute Path to the toml file.
"""
if behaviour_toml.exists():
return behaviour_toml.resolve()
raise FileNotFoundError(behaviour_toml) | 767f4747d63384e5ebfded22c6d8516f509d0613 | 110,912 |
def by_hex(fg, bg="#000000", attribute=0):
    """
    Return string with ANSI escape code for set text colors.

    fg: html hex code for text color
    bg: html hex code for background color
    attribute: use Attribute class variables
    """
    def channels(code):
        """Parse '#rrggbb' (or 'rrggbb') into an 'r;g;b' decimal string."""
        code = code.replace("#", "")
        return ";".join(str(int(code[i:i + 2], 16)) for i in (0, 2, 4))

    return f"\033[{attribute};38;2;{channels(fg)};48;2;{channels(bg)}m"
def supervised_depth_eigen2014a(outputs, labels):
    """
    Scale-invariant loss for supervised depth estimation.

    Described in D. Eigen et al. (2014a), "Depth Map Prediction from a
    Single Image using a Multi-Scale Deep Network"
    (https://arxiv.org/abs/1406.2283).

    IMPLEMENTATION:
    ---------------
    :implementer: Alix Leroy
    """
    batch_size, channels, height, width = labels.shape
    # Total number of elements across the batch.
    n_elements = batch_size * channels * height * width
    # Per-pixel log-depth difference.
    log_diff = outputs.log() - labels.log()
    # L2-like term (a pure L2 would take the square root via torch.dist()).
    mse_term = log_diff.pow(2).sum().div(n_elements)
    # Scale-invariant term: credits mistakes in the same direction and
    # penalizes opposing ones (cf. paper).
    scale_invariant_term = log_diff.sum().pow(2).div(2 * (n_elements ** 2))
    return mse_term - scale_invariant_term
def checksum(line):
    """Verify the TLE checksum: sum of digits (a minus sign counts as 1),
    mod 10, must equal the final character."""
    total = 0
    for ch in line[0:-1]:
        if ch.isdigit():
            total += int(ch)
        elif ch == '-':
            total += 1
    return (total % 10) == int(line[-1])
def find_index(lst, key, value):
    """
    Given a list of dictionaries, [{key:value,},] return the index of the
    first dict whose ``key`` equals ``value``, or -1 when none matches.

    Parameters:
        lst (list): list to be searched for index
        key (immutable key): key to match
        value: value to match
    """
    return next(
        (position for position, entry in enumerate(lst) if entry[key] == value),
        -1,
    )
import re
def strip_color(inp: str) -> str:
    """
    Remove ANSI color/style sequences from a string. May not catch obscure codes used outside this module.

    :param inp: the string to strip
    :return: ``inp`` with ansi codes removed
    """
    ansi_sequence = re.compile('\x1b\\[(K|.*?m)')
    return ansi_sequence.sub('', inp)
def to_hsl(color: tuple) -> tuple:
    """Transforms a color from rgb space to hsl space.

    Color must be given as a 3D tuple representing a point in rgb space.
    Returns a 3D tuple representing a point in the hsl space.
    Saturation and luminance are given as floats representing percentages
    with a precision of 2. Hue is given as an angle in degrees between
    0 and 360 degrees with a precision of 0.
    """
    red = color[0] / 255
    green = color[1] / 255
    blue = color[2] / 255
    high = max(red, green, blue)
    low = min(red, green, blue)
    chroma = high - low
    luminance = (high + low) / 2
    if chroma == 0:
        saturation = 0
        hue = 0
    else:
        saturation = chroma / (1 - abs(2 * luminance - 1))
        if high == red:
            hue = 60 * (((green - blue) / chroma) % 6)
        elif high == green:
            hue = 60 * ((blue - red) / chroma + 2)
        else:  # high is blue
            hue = 60 * ((red - green) / chroma + 4)
    return (round(hue), round(saturation, 2), round(luminance, 2))
async def hide_pids_by_ids(db, ids):
    """Return the subset of `ids` present on the hide list (list_type '1').

    :param db: async database adapter exposing `query_col`  # assumes hive-style adapter — TODO confirm
    :param ids: iterable of post ids; falsy input short-circuits to []
    :return: list of matching post_id values
    """
    if not ids:
        return []
    sql = """SELECT post_id FROM hive_posts_status
             WHERE list_type = '1'
             AND post_id IN :ids"""
    # query_col returns a single column as a list; ids must be a tuple for
    # the IN-clause parameter expansion.
    return await db.query_col(sql, ids=tuple(ids)) | 5ebed1ab60d9158c0cea59ee3ac5a3c0307b4bb9 | 110,924 |
from typing import List
from typing import Dict
from typing import Any
def _create_action_list(
trial_list: List[List[Dict[str, Any]]]
) -> List[List[str]]:
"""Create and return the MCS scene's action list using the given trial
list from the JSON file data."""
action_list = []
for index in range(0, len(trial_list)):
# Add 1 for the EndHabituation action step at the end of the trial.
total_steps = len(trial_list[index]) + 1
print(f'Trial={index+1} Frames={len(trial_list[index])} Steps='
f'{total_steps}')
action_list.extend([['Pass']] * (total_steps - 1))
action_list.append(['EndHabituation'])
# Remove the EndHabituation action from the last test trial.
return action_list[:-1] | c5a0481deb9fd8c5694231c7a88538197cf1269b | 110,929 |
def strip_dashes(raw_arg: str) -> str:
    """Remove dashes prepended to string for arg splitting."""
    # lstrip removes every leading '-', matching the old while-loop.
    return raw_arg.lstrip('-')
def repack(data):
    """
    Recreate a nested object structure from a flat dot-keyed dictionary.

    Example:
        repack({"p.x": 1}) -> {"p": {"x": 1}}
    """
    nested = dict()
    for dotted_key, value in data.items():
        *parents, leaf = dotted_key.split(".")
        cursor = nested
        for part in parents:
            cursor = cursor.setdefault(part, dict())
        cursor[leaf] = value
    return nested
def is_additional_rdr_table(table_id):
    """
    Return True if the table is an additional table submitted by RDR.
    Currently the only such table is pid_rid_mapping.
    :param table_id: identifies the table
    :return: True if specified table is an additional table submitted by RDR
    """
    additional_rdr_tables = ('pid_rid_mapping',)
    return table_id in additional_rdr_tables
def get_area_average(arr, start_row, start_col, width, height):
    """
    Return the average value of a rectangular area of RGB cells.

    Every channel of every cell in the area is summed and the total is
    divided by the number of cells (``width * height``), so the result is
    three times the per-channel mean.

    :param arr: 2-D grid of RGB triples
    :param start_row: int, first row of the area
    :param start_col: int, first column of the area
    :param width: int, number of columns in the area
    :param height: int, number of rows in the area
    :return: int
    >>> get_area_average([[[100, 100, 100], [200, 200, 200]], [[200, 200, 200], [100, 100, 100]]], 0, 0, 2, 2)
    450
    """
    total = 0
    for row in range(start_row, start_row + height):
        # Bug fix: the column span previously used `height`, which broke
        # non-square areas.
        for col in range(start_col, start_col + width):
            # Bug fix: previously channel 0 was added three times instead
            # of summing each of the three channels.
            for channel in range(3):
                total += int(arr[row][col][channel])
    return int(total // (width * height))
def is_closing_char(char, state):
    """Return truthy when *char* is a '}' that closes a pending '{' on the stack."""
    stack = state['stack']
    return char == '}' and stack and stack[-1] == '{'
def intersperse(lst, item):
    """
    Return a new list with *item* inserted between consecutive elements.
    :param lst: source list
    :param item: separator to insert
    :return: interleaved list
    """
    out = []
    for position, element in enumerate(lst):
        # Prepend the separator before every element except the first.
        if position:
            out.append(item)
        out.append(element)
    return out
def merge_dicts(*dicts, deleted_keys=()):
    """Return a new dict from a list of dicts by repeated calls to update().

    Keys in later dicts override those in earlier ones. *deleted_keys* is an
    iterable of keys removed from the resulting dict; keys absent from the
    merge are ignored.
    """
    merged = {}
    for d in dicts:
        merged.update(d)
    for key in deleted_keys:
        # pop() with a default tolerates keys that no input dict supplied,
        # where the previous `del` raised KeyError.  The default is also an
        # immutable tuple now, avoiding the mutable-default-argument trap.
        merged.pop(key, None)
    return merged
def _sentence_is_in_split(sentence_index: int, split: str) -> bool:
"""Checks if sentence with given positional index is in the split.
In order to deterministically sample train/dev/test sentences, this function
assumes every 9th sentence of a treebank file belongs to the development
set, and every 10th sentence of a treebank file belongs to the test set. All
other sentences belong to the training set.
Args:
sentence_index: sequential index of the sentence in the source CoNLL-U
format treebank file (assuming first sentence has index 0).
split: treebank split (could be 'train', 'test', 'dev').
Returns:
True if sentence with given positional index belongs to the specified split.
Otherwise, returns False.
"""
if split == "train":
return sentence_index % 10 < 8
elif split == "dev":
return sentence_index % 10 == 8
elif split == "test":
return sentence_index % 10 == 9
else:
return True | 65332a87ba6fa0384816e1acd412e9ecd56ef6ad | 110,946 |
def kernel_sigma(n_kernels):
    """
    Return the sigma for each Gaussian kernel.

    The first kernel (the exact match) gets a tiny sigma of 0.001; every
    remaining kernel gets 0.1.

    :param n_kernels: number of kernels (including the exact match)
    :return: sigmas, a list of sigma
    """
    if n_kernels == 1:
        return [0.001]
    return [0.001] + [0.1] * (n_kernels - 1)
import functools
def wraps(wrapped,
          assigned=functools.WRAPPER_ASSIGNMENTS,
          updated=functools.WRAPPER_UPDATES):
    """
    A light wrapper around functools.wraps to facilitate compatibility with
    Python 2, Python 3, and numpy ufuncs.

    Differences from the stock functools.wraps:
    - only attributes that actually exist on *wrapped* are copied, so
      objects lacking some of them (e.g. numpy ufuncs) are handled
    - when __qualname__ cannot be copied but __name__ can, __qualname__
      is synthesized from __name__

    References:
        functools source:
        https://github.com/python/cpython/blob/master/Lib/functools.py
    """
    copyable = tuple(name for name in assigned if hasattr(wrapped, name))
    apply_wraps = functools.wraps(wrapped, copyable, updated)
    def decorator(func):
        decorated = apply_wraps(func)
        # numpy ufuncs have a __name__ but no __qualname__; fall back to
        # __name__ so the decorated function still has a qualified name.
        if '__qualname__' not in copyable and '__name__' in copyable:
            decorated.__qualname__ = decorated.__name__
        return decorated
    return decorator
def find_one_row(substr, df, col_name):
    """Return the rows of `df` whose `col_name` value contains `substr`
    (case-insensitive); the first matching value determines the rows.
    Raise KeyError if no row is found.
    """
    needle = substr.lower()
    for candidate in df[col_name]:
        if needle in candidate.lower():
            return df[df[col_name] == candidate]
    raise KeyError("Could not find {} in the " "pandas dataframe.".format(substr))
def get_raster_properties(dataset):
    """
    Collect basic properties of a GDAL raster dataset into a dictionary.

    *This function can be expanded to return more properties if needed*

    Args:
        dataset: a GDAL raster dataset to get the properties from

    Returns:
        dataset_dict (dictionary): 'width' (w-e pixel resolution), 'height'
        (n-s pixel resolution), 'x_size' and 'y_size' of band 1.
    """
    geo_transform = dataset.GetGeoTransform()
    band = dataset.GetRasterBand(1)
    return {
        'width': float(geo_transform[1]),
        'height': float(geo_transform[5]),
        'x_size': band.XSize,
        'y_size': band.YSize,
    }
def is_consistent(x, e):
    """
    Checks for consistency
    (e.g. if e[n] is v, then x[n] must also be v)

    Returns False as soon as a conflicting variable is found instead of
    scanning the entire assignment like the original loop did.
    """
    return all(x[n] == e[n] for n in x if n in e)
def bezier(start, end, control, steps):
    """Generate points in a quadratic bezier curve, one per value in *steps*.
    x and y coordinates should be calculated respectively."""
    points = []
    for s in steps:
        remainder = 1 - s
        points.append(remainder ** 2 * start + 2 * remainder * s * control + s ** 2 * end)
    return points
def _parse_order_by(model, order_by):
"""
This function figures out the list of orderings for the given model and
argument.
Args:
model (nautilus.BaseModel): The model to compute ordering against
order_by (list of str): the list of fields to order_by. If the field
starts with a `+` then the order is acending, if `-` descending,
if no character proceeds the field, the ordering is assumed to be
ascending.
Returns:
(list of filters): the model filters to apply to the query
"""
# the list of filters for the models
out = []
# for each attribute we have to order by
for key in order_by:
# remove any whitespace
key = key.strip()
# if the key starts with a plus
if key.startswith("+"):
# add the ascending filter to the list
out.append(getattr(model, key[1:]))
# otherwise if the key starts with a minus
elif key.startswith("-"):
# add the descending filter to the list
out.append(getattr(model, key[1:]).desc())
# otherwise the key needs the default filter
else:
# add the default filter to the list
out.append(getattr(model, key))
# returnt the list of filters
return out | 251dceb8dcf18741f008da9ee08c82459543004f | 110,970 |
def seq_join(seq, val):
    """
    Flatten input sequence of lists into a single list. Insert separator
    value between sub-sequences if it is not None.
    @param[in] seq sequence of lists (must be non-empty)
    @param[in] val separator value, skipped when None
    @returns resulting flattened list
    """
    flattened = []
    for chunk in seq[:-1]:
        flattened += chunk
        if val is not None:
            flattened.append(val)
    # The last chunk never gets a trailing separator.
    flattened += seq[-1]
    return flattened
def padding(seq: list, max_length: int, pad_tok=None):
    """
    Pad or truncate *seq* to exactly *max_length* items.
    :param seq: list to pad
    :param max_length: length of padded list
    :param pad_tok: token used to pad
    :return: padded list
    """
    if len(seq) >= max_length:
        return seq[:max_length]
    return seq + [pad_tok] * (max_length - len(seq))
def change_pair_coeff(input_lines, coefficient_list):
    """Replace the pair_coeff block of a LAMMPS input.

    Coefficient list format:
        - [[id1, id2, eps, sig], ...]
    """
    # Positions of every existing pair_coeff line.
    coeff_line_numbers = [idx for idx, text in enumerate(input_lines)
                          if 'pair_coeff' in text]
    replacement = ['pair_coeff %i %i %.3f %.3f\n' % (c[0], c[1], c[2], c[3])
                   for c in coefficient_list]
    first, last = coeff_line_numbers[0], coeff_line_numbers[-1]
    # Splice the new coefficient lines over the old block.
    return input_lines[:first] + replacement + input_lines[last + 1:]
import re
def remove_markdown(text):
    """
    Removes markdown formatting from the input text.
    :param text: the text to cleanup
    :return: the cleaned up text.
    """
    # Strip URLs
    text = re.sub(r"\[([^]]+)\][ ]?\(([^)]+)\)", r"\g<1>", text)
    # Star-marked and underscore-marked bold/italic: applying each pattern
    # three times unwraps *italic*, **bold** and ***bold italic*** (and the
    # underscore equivalents).
    for pattern in (r"\*([^]]+)\*", r"_([^]]+)_"):
        for _ in range(3):
            text = re.sub(pattern, r"\g<1>", text)
    # Inline code, strikethrough and spoiler markers.
    for pattern in (r"`([^]]+)`", r"~~([^]]+)~~", r">!([^]]+)!<"):
        text = re.sub(pattern, r"\g<1>", text)
    return text
def _first_occurance_binary_method(array: list, min_idx: int, match_idx: int) -> int:
"""Returns the lowest index of a given value in a list
This function is only called once the target value has been found at match_idx.
The function then uses binary search to find the first occurance of
the target value in the list"""
if (delta_idx := match_idx - min_idx) <= 0:
return match_idx
halfway_idx = delta_idx // 2 + min_idx
if array[halfway_idx] == array[match_idx]:
return _first_occurance_binary_method(array, min_idx, halfway_idx)
else:
return _first_occurance_binary_method(array, halfway_idx + 1, match_idx) | b5227d85c82e5aa1013b2d6d15e8a846d4d3ebec | 110,994 |
def build_scope(scopes: list) -> str:
    """Builds a valid space-separated scope string from list

    :param scopes: list of :class:`~twitchAPI.types.AuthScope`
    :rtype: str
    """
    return ' '.join(scope.value for scope in scopes)
def bytes_to_str(input_bytes):
    """Decode *input_bytes* into a string using UTF-8 (the default codec)."""
    return str(input_bytes, 'utf-8')
from typing import Any
def to_len(value: Any) -> int:
    """Get the length of a value. The value is converted to string and the
    number of characters in the resulting string is returned.

    Parameters
    ----------
    value: any
        Value whose length is returned.

    Returns
    -------
    int
    """
    text = str(value)
    return len(text)
import json
import zlib
def open_file(dataset_path):
    """
    Open a JSON dataset in the dataset path, transparently handling
    zlib-compressed files.

    Args :
        - dataset_path (str) : the dataset path
    Returns :
        - dict : the loaded dataset
    """
    try:
        with open(dataset_path, 'r') as file:
            return json.load(file)
    except UnicodeDecodeError:
        # Not valid text: assume a zlib-compressed JSON payload.
        with open(dataset_path, 'rb') as file:
            compressed = file.read()
        # Bug fix: json.loads() dropped its `encoding` keyword in
        # Python 3.9, so decode the decompressed bytes explicitly.
        return json.loads(zlib.decompress(compressed).decode('utf-8'))
import pathlib
def create_missing_dir(path):
    """Creates specified directory (including missing parents) if one
    doesn't exist

    :param path: Directory path
    :type path: str
    :return: Path to directory
    :rtype: pathlib.Path
    """
    path = pathlib.Path(path)
    # parents=True also creates missing intermediate directories, and
    # exist_ok=True removes the check-then-create race of the previous
    # is_dir()/mkdir() pair; a pre-existing *file* still raises.
    path.mkdir(parents=True, exist_ok=True)
    return path
def version_check(checked_version, min_version):
    """
    Checks whether checked version is higher or equal to given min version
    :param checked_version: version being checked
    :param min_version: minimum allowed version
    :return: True when checked version is high enough
    """
    def version_transform(ver):
        # Normalize a version string into a lexicographically comparable
        # form: each dot-separated component becomes zero-padded digits
        # followed by space-padded alpha characters, joined by dots.
        version = ''
        # vn collects the numeric part of the current component,
        # va the alphabetic part.
        vn = va = ''
        # stage: 0 = reading digits, 1 = reading letters, 2 = other chars;
        # stage only moves forward within a component.
        stage = 0
        # Leading '.' makes the loop see a boundary first; also maps a
        # None/empty version onto the empty string.
        ver = '.' + (ver or '')
        for c in ver:
            if c == '.':
                # Component boundary: flush the fields collected so far.
                if vn or va:
                    version += '{0:0>3}{1: <2}.'.format(vn, va)
                    vn = va = ''
                stage = 0
                continue
            if c.isdigit():
                pass
            elif c.isalpha():
                stage = max(1, stage)
            else:
                # Any other character stops digit/alpha collection for
                # the rest of this component.
                stage = max(2, stage)
            if stage == 0:
                vn += c
            elif stage == 1:
                va += c
        # Flush the trailing component.
        if vn or va:
            version += '{0:0>3}{1: <2}.'.format(vn, va)
        # Drop the trailing '.' separator.
        return version[:-1]
    if not checked_version:
        return False
    # String comparison of the normalized forms is equivalent to a
    # field-by-field version comparison.
    return version_transform(checked_version) >= version_transform(min_version)
def compute_window_spec_none(_, obj):
    """Helper method only used for row-based windows.

    An ibis window spec is an inclusive window bound where 0 means the
    current row, whereas a pandas window spec is a window *size* — hence
    the +1 adjustment on the ibis bound.
    """
    window_size = obj + 1
    return window_size
def occurs_count(lst, obj):
    """Return the number of times *obj* occurs in *lst*.

    The manual counter loop is replaced by a generator expression, which
    also works for arbitrary iterables.
    """
    return sum(1 for x in lst if x == obj)
import random
def generate_number_seeds(number_nodes):
    """
    Parameters
    ----------
    number_nodes : int
        The number of nodes in graph

    Returns
    ---------
    number_seeds: int
        A seed count drawn uniformly between 10% and 50% of the node
        count (bounds rounded, both inclusive)
    """
    low = round(number_nodes * 0.1)
    high = round(number_nodes * 0.5)
    return random.randint(low, high)
import requests
def pip_package(dep):
    """Query PyPI package information.

    Sends a HTTP GET request to the PyPI remote API.

    Args:
        dep (str): A PyPI package name, optionally followed by
            '=<version>' (the version part is ignored for the lookup).

    Returns:
        dict: the decoded JSON payload returned by PyPI.

    Raises:
        LookupError: if the connection fails or times out.
        ValueError: if the package name can not be found.
    """
    # Only the name part matters for the API lookup. Unlike the previous
    # two-value tuple unpacking (which crashed on a bare name and computed
    # an unused version string), split()[0] accepts deps with or without
    # an '=<version>' suffix.
    pip_depname = dep.split("=", 1)[0]
    pip_api_url = "https://pypi.python.org/pypi/{}/json".format(pip_depname)
    try:
        response = requests.get(pip_api_url, timeout=10)
    except requests.exceptions.Timeout:
        raise LookupError("PyPI API timed out: {}".format(pip_api_url))
    except requests.exceptions.ConnectionError:
        raise LookupError("PyPI API Connection error: {}".format(pip_api_url))
    if response.status_code == 200:
        return response.json()
    raise ValueError("Could not find pip dependency using the PyPI API: `{}`".format(dep))
def autocomplete(corpus, prefix):
    """
    Given a text corpus (list of words) and a prefix, return a list of all
    words that start with that prefix, in corpus order.

    The previous implementation built a throwaway trie over the entire
    corpus on every call — O(total characters) time and memory for a
    single query.  A linear startswith scan returns the identical result
    with far less code and no extra allocation.
    """
    # An empty prefix matches everything; return the corpus itself,
    # matching the original behavior.
    if not prefix:
        return corpus
    return [word for word in corpus if word.startswith(prefix)]
import typing
def find_subtour(sol: typing.Dict[typing.Tuple[int, int], float], n: int) -> typing.List[int]:
    """
    This function takes a solution to the TSP formulation, and returns a subtour.
    :param sol: a dict, each key is a two-tuple (i,j) with i < j and sol[i,j] gives the value of the variable
        corresponding to the edge between i and j
    :param n: the number of nodes.
    :return: a subtour in the TSP formulation, starting with node 0. This returns a list of the nodes in the subtour.
    """
    start_node = 0
    # Node visited just before current_node; prevents immediately walking
    # back along the edge we arrived on.
    prev_node = None
    current_node = start_node
    returned_to_start = False
    cycle = [start_node]
    while not returned_to_start:
        next_node = None
        # Edge keys are stored with i < j, so neighbors above current_node
        # are looked up as (current_node, j).  NOTE(review): there is no
        # break, so if several edges are selected the highest-numbered
        # neighbor wins — presumably each node has at most two selected
        # edges in a valid tour; confirm against the model constraints.
        for j in range(current_node + 1, n):
            if sol[current_node, j] > 0.5 and j != prev_node:
                next_node = j
        if next_node is None:
            # Neighbors below current_node are keyed as (j, current_node).
            for j in range(0, current_node):
                if sol[j, current_node] > 0.5 and j != prev_node:
                    next_node = j
        if next_node is None:
            # No selected edge leaves this node: the solution does not
            # form a valid collection of cycles.
            raise RuntimeError("Failed to find subtour")
        elif next_node == start_node:
            # Cycle closed back at node 0; stop without re-adding it.
            returned_to_start = True
        else:
            cycle.append(next_node)
        prev_node = current_node
        current_node = next_node
    return cycle
import typing
import ast
def getattribute(
    entity_name: str, attributes: typing.List[str]
) -> typing.Union[ast.Name, ast.Attribute]:
    """Build an AST expression for an attribute-access chain.

    Generates following code:
    getattribute(entity_name, [attribute]) -> `entity_name.attribute`
    getattribute(entity_name, [attribute, get]) -> `entity_name.attribute.get`

    :param entity_name: name of the variable at the root of the chain
    :param attributes: attribute names applied left to right
    """
    node: typing.Union[ast.Name, ast.Attribute] = ast.Name(id=entity_name, ctx=ast.Load())
    # Wrap the current node in one ast.Attribute per attribute name.
    for attr_name in attributes:
        node = ast.Attribute(value=node, attr=attr_name, ctx=ast.Load())
    return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.