content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import json
def process_row(row, type):
    """Serialize one CSV row into a JSON string for the queue.

    Four file types are supported, each with its own column layout.

    :param row: list, csvreader row data
    :param type: string, one of "deletes", "modifications",
        "alternateNamesDeletes", "alternateNamesModifications"
    :return: string, JSON object to be written to the queue
    :raises IndexError: for an unknown ``type`` (kept for compatibility),
        or when ``row`` has fewer columns than the layout requires
    """
    # Field names per file type, in source-column order.
    layouts = {
        "deletes": ['geonameid'],
        "modifications": [
            'geonameid', 'name', 'asciiname', 'alternatenames', 'latitude',
            'longitude', 'fclass', 'fcode', 'country', 'cc2', 'admin1',
            'admin2', 'admin3', 'admin4', 'population', 'elevation',
            'gtopo30', 'timezone', 'moddate',
        ],
        "alternateNamesDeletes": ['alternatenameId'],
        "alternateNamesModifications": [
            'alternatenameId', 'geonameid', 'isoLanguage', 'alternateName',
            'isPreferredName', 'isShortName', 'isColloquial', 'isHistoric',
        ],
    }
    if type not in layouts:
        raise IndexError('Invalid process type')
    # Indexing (rather than zip) keeps the original IndexError on short rows.
    return json.dumps({name: row[i] for i, name in enumerate(layouts[type])})
    # | 90baf2b434288e69626bed173efbf984eabd9501 | 33,170 |
import gc
def list_with_attribute(classname, attributename, attributevalue):
    """Collect ``fname`` from every live instance of ``classname`` whose
    given attribute equals ``attributevalue``.

    Scans every object currently tracked by the garbage collector.
    """
    return [
        candidate.fname
        for candidate in gc.get_objects()
        if isinstance(candidate, classname)
        and getattr(candidate, attributename) == attributevalue
    ]
    # | 91ca3c2104843d78d4b993a702d2db238b027c8b | 33,171 |
import sys
def vtkGetTempDir():
    """Return the VTK testing temp dir.

    Uses the argument that follows the last ``-T`` flag in ``sys.argv``;
    falls back to the current directory when no flag is present.
    """
    temp_dir = '.'
    # Slice off the last element: a trailing -T has no value after it.
    for pos, flag in enumerate(sys.argv[:-1]):
        if flag == '-T':
            temp_dir = sys.argv[pos + 1]  # last occurrence wins
    return temp_dir
    # | e84a7bc85012b6aea151a56f8c24565c88497285 | 33,172 |
def recursive_merge_config_dicts(config, default_config):
    """Fill missing keys in ``config`` from ``default_config``, recursing
    into nested dictionaries.

    ``config`` is modified in place and also returned.
    """
    assert isinstance(config, dict)
    assert isinstance(default_config, dict)
    for key, default_value in default_config.items():
        if key in config:
            # Key already present: only descend when the default is a dict.
            if isinstance(default_value, dict):
                recursive_merge_config_dicts(config[key], default_value)
        else:
            config[key] = default_value
    return config
    # | 482f0b544a6974a43794ddc0c497f7fc6dea75cb | 33,173 |
import unicodedata
def is_fullwidth(char):
    """Return True when any code point of the grapheme sequence is
    East-Asian Wide ('W') or Fullwidth ('F')."""
    for code_point in char:
        if unicodedata.east_asian_width(code_point) in ('W', 'F'):
            return True
    return False
    # | 2c264f09c5873fc24f31de842a0191b6829997cc | 33,174 |
import torch
def stacker(parameters, selector=lambda u: u.values):
    """
    Concatenate the selected tensor of every parameter along the last
    dimension and record where each parameter lands in the result.

    :param parameters: The parameters
    :type parameters: tuple[Parameter]|list[Parameter]
    :param selector: callable extracting the tensor to stack from a parameter
    :rtype: torch.Tensor, tuple[slice]
    """
    pieces = []
    layout = []
    offset = 0
    for param in parameters:
        length = param.c_numel()
        if length < 2:
            # Scalar-like parameter: add a trailing dim, index with an int.
            pieces.append(selector(param).unsqueeze(-1))
            layout.append(offset)
        else:
            pieces.append(selector(param).flatten(1))
            layout.append(slice(offset, offset + length))
        offset += length
    return torch.cat(pieces, dim=-1), tuple(layout)
    # | 07b1d770dbb9c0856a8bdcc9787c85d54a074a6e | 33,175 |
import math
def rise_pres(mach: float, beta:float, gamma=1.4) -> float:
    """
    Calculate rise in pressure after oblique shock
    Parameters
    ----------
    mach:float
        Mach number upstream of the shock
    beta: float
        oblique shock angle, in radians (passed to math.sin)
    gamma: float
        isentropic exponent
    Returns
    -------
    p_ratio: float
        Pressure ratio p2/p1 across the oblique shock
        (2*gamma*M^2*sin^2(beta) - (gamma-1)) / (gamma+1)
    """
    # Square of the shock-normal Mach number: (M * sin(beta))^2.
    m_sin_2 = mach * mach * math.sin(beta) * math.sin(beta)
    return (2.0 * gamma / (gamma + 1.0) * m_sin_2) - ((gamma-1.0) / (gamma + 1.0)) | 0845f9e047b0a372ca5b4fffd96b181d5367c82f | 33,176 |
def insert_into_queue(new_boards, boards_dict=None):
    """
    Insert boards into a cost-keyed dictionary and return the sorted key order.

    :param new_boards: list of Board objects (each exposing a ``cost``)
    :param boards_dict: optional existing dict to extend; a fresh dict is
        created per call when omitted (the previous mutable default ``{}``
        silently accumulated boards across calls)
    :return boards_dict: dict mapping cost -> {'boards': [...], 'count': int}
    :return queue: sorted list of the costs present in boards_dict
    """
    if boards_dict is None:
        boards_dict = {}
    for new_board in new_boards:
        cost = new_board.cost
        # setdefault creates the bucket on first sight of this cost.
        entry = boards_dict.setdefault(cost, {'boards': [], 'count': 0})
        entry['boards'].append(new_board)
        entry['count'] += 1
    queue = sorted(boards_dict)
    return boards_dict, queue
    # | ac857f3eb35ca191489249d08846da52af7e9fcf | 33,177 |
def get_name(soup):
    """Return the text content of the first <h2> element in the tree."""
    heading = soup.find('h2')
    return heading.getText()
    # | 46773bb657bffb7624bb82fd43a398f2a72ce24f | 33,178 |
def version_code_table() -> dict[int, tuple[str, str, str]]:
    """A dictionary that contains the code for a specific version.
    Returns:
        dict[int, tuple[str, str, str]]: A dictionary that contains data in
        the form (version number: 3-line code tuple). Each entry maps a
        version number (7-40) to three 6-bit strings, i.e. an 18-bit
        version-information pattern split into three rows.
    """
    # Static lookup table; values are fixed bit patterns per version.
    table = {
        7: ('000010', '011110', '100110'),
        8: ('010001', '011100', '111000'),
        9: ('110111', '011000', '000100'),
        10: ('101001', '111110', '000000'),
        11: ('001111', '111010', '111100'),
        12: ('001101', '100100', '011010'),
        13: ('101011', '100000', '100110'),
        14: ('110101', '000110', '100010'),
        15: ('010011', '000010', '011110'),
        16: ('011100', '010001', '011100'),
        17: ('111010', '010101', '100000'),
        18: ('100100', '110011', '100100'),
        19: ('000010', '110111', '011000'),
        20: ('000000', '101001', '111110'),
        21: ('100110', '101101', '000010'),
        22: ('111000', '001011', '000110'),
        23: ('011110', '001111', '111010'),
        24: ('001101', '001101', '100100'),
        25: ('101011', '001001', '011000'),
        26: ('110101', '101111', '011100'),
        27: ('010011', '101011', '100000'),
        28: ('010001', '110101', '000110'),
        29: ('110111', '110001', '111010'),
        30: ('101001', '010111', '111110'),
        31: ('001111', '010011', '000010'),
        32: ('101000', '011000', '101101'),
        33: ('001110', '011100', '010001'),
        34: ('010000', '111010', '010101'),
        35: ('110110', '111110', '101001'),
        36: ('110100', '100000', '001111'),
        37: ('010010', '100100', '110011'),
        38: ('001100', '000010', '110111'),
        39: ('101010', '000110', '001011'),
        40: ('111001', '000100', '010101')
    }
    return table | 091685145bb8d7a44ae76c3a2208f29c01581b03 | 33,179 |
import os
import tempfile
def get_transcode_temp_directory():
    """Create and return a temporary folder for transcoding.

    The folder is local (on a farm it is local to that machine), which
    should be much faster; the caller must clean it up afterwards.
    """
    transcode_dir = tempfile.mkdtemp(prefix="op_transcoding_")
    return os.path.normpath(transcode_dir)
    # | f6afa621b932e3687205b92bb368a21644bd1f20 | 33,180 |
from typing import OrderedDict
def process_differential_coordinates(reader):
    """
    Collect differential coordinate rows keyed by their concatenated fields.

    Each row supplies, at indices 1-6: chromosome name, start, end, strand,
    padj and log2FoldChange. Exact duplicate rows collapse onto one key.

    :return: (OrderedDict of key -> six-field list, total rows read)
    """
    data = OrderedDict()
    total_rows = 0
    for row in reader:
        total_rows += 1
        fields = [row[1], row[2], row[3], row[4], row[5], row[6]]
        # Key is the plain concatenation of all six fields.
        data["".join(str(field) for field in fields)] = fields
    return data, total_rows
    # | 4ce2744cfb544dbed614c8674581bb4b56ec6e3b | 33,182 |
import ast
def build_tree(script):
    """Parse *script* and return its abstract syntax tree."""
    tree = ast.parse(script)
    return tree
    # | a37d24ce3808bfa0af3a5e8e4c1fb8a2d281625e | 33,184 |
import requests
def get_url_pages():
    """
    Walk the paginated SWAPI "people" endpoint and collect every page URL.

    Follows each response's "next" link until it is None. The unreachable
    ``break`` and trailing ``print`` that followed the original ``return``
    have been removed.

    :return: list of all page URLs visited
    :raises AssertionError: if any request does not return HTTP 200
        (NOTE(review): assert is stripped under ``python -O``; consider
        raising an explicit exception instead)
    """
    url = "https://swapi.co/api/people/"
    pages_url = []
    while True:
        pages_url.append(url)
        r = requests.get(url)
        assert r.status_code == 200, "There was a problem connecting with SWAPI."
        # "next" holds the following page URL, or None on the last page.
        url = r.json()["next"]
        if url is None:
            print("\n")
            print("- - - All URLs were successfully retrieved. - - -")
            return pages_url
    # | 75ee1d49dfbc8117c7e2e761569094a88e13d108 | 33,185 |
def get_maxima(spectrum):
    """
    Crude local-maximum finder.

    :param spectrum: tuple of (frequency array, intensity array)
    :return: list of (frequency, intensity) tuples, one per local maximum
    """
    frequencies, intensities = spectrum[0], spectrum[1]
    res = []
    # Scan indices 1 .. len-3 inclusive, mirroring the original [1:-2]
    # window (the last two points are never candidates).
    for index in range(1, len(intensities) - 2):
        value = intensities[index]
        if intensities[index - 1] < value and intensities[index + 1] < value:
            print('MAXIMUM FOUND AT: ')
            print((frequencies[index], value))
            res.append((frequencies[index], value))
    return res
    # | d8fec410b4a959c27ab03d7540790b85a5a1766b | 33,186 |
def indent(text, indent):
    """Indent every line of *text* with the given characters.

    Args:
        text(str): body of text
        indent(str): characters with which to indent
    Returns:
        text with each line (including the first) indented
    """
    if '\n' not in text:
        return "%s%s" % (indent, text)
    # splitlines(True) keeps the line endings, so prefixing each line
    # reproduces the layout. The previous indent.join(...) left the first
    # line unindented, inconsistent with the single-line branch above.
    return "".join("%s%s" % (indent, line) for line in text.splitlines(True))
    # | 225575457fe308e3462c0e87181eb88a6e4da939 | 33,187 |
def distance_from_root_v1(root, key, distance=0):
    """
    Return the distance (level) at which ``key`` sits below ``root``.

    :param root: binary-tree node with ``key``/``left``/``right`` attributes
    :param key: key to locate
    :param distance: depth of ``root`` itself (recursion accumulator)
    :return: edges from root to the node holding ``key``, or -1 if absent
    """
    if root is None:  # was `== None`; identity test is the correct idiom
        return -1
    if root.key == key:
        return distance
    left_distance = distance_from_root_v1(root.left, key, distance + 1)
    if left_distance > 0:
        return left_distance
    # Not in the left subtree (children are always at distance >= 1),
    # so the answer, if any, is on the right.
    return distance_from_root_v1(root.right, key, distance + 1)
    # | ca68e4d56379d57171ca7a7e3abe073f28666d02 | 33,188 |
def create_view(klass):
    """
    Generator function for a view: pass the class of your view
    implementation (ideally a BaseView subclass, or anything duck-type
    compatible) and get back a function suitable for a urlconf.
    """
    def _func(request, *args, **kwargs):
        """Instantiate and execute the view class for a single request."""
        view_instance = klass(request, *args, **kwargs)
        response = view_instance(request, *args, **kwargs)
        # Let the view post-process its own response via __after__, if any.
        after = getattr(view_instance, '__after__', None)
        if after is not None:
            return after(response)
        return response
    # Expose the wrapped class for introspection.
    _func._class = klass
    return _func
    # | aebdc98483f19e64fd71952b93f945d277a6c16b | 33,189 |
def ukr_E(X, B):
    """UKR reconstruction error: squared Frobenius norm of X - B.T @ X,
    normalized by the number of rows of B."""
    residual = X - B.T.dot(X)
    return (residual ** 2).sum() / B.shape[0]
    # | bd807018e162bdbb99cfe3d297daa0b5272aefa2 | 33,190 |
import sys
def connectBodyWithJoint(model, parentFrame, childFrame, jointName, jointType):
    """Connect childFrame to parentFrame in the model with a new Joint.

    Arguments:
    model: model to be modified.
    parentFrame: Body (or affixed offset) used as parent frame; any
        PhysicalFrame already in the model works.
    childFrame: Body (or affixed offset) used as child frame; any
        PhysicalFrame other than the parent frame.
    jointName: name for the newly-created Joint.
    jointType: one of 'PinJoint', 'FreeJoint', 'WeldJoint', 'PlanarJoint',
        'SliderJoint', 'UniversalJoint'.
    Returns the Joint added to connect the Body to the model.
    """
    validJointTypes = (
        'PinJoint', 'FreeJoint', 'WeldJoint',
        'PlanarJoint', 'SliderJoint', 'UniversalJoint',
    )
    if jointType not in validJointTypes:
        raise Exception('Provided jointType %s is not valid.' %
                        jointType)
    # Resolve the requested Joint class on the loaded OpenSim bindings.
    opensim = sys.modules['org.opensim.modeling']
    JointClass = getattr(opensim, jointType)
    joint = JointClass(jointName, parentFrame, childFrame)
    model.addJoint(joint)
    return joint
    # | 6c1316e32e1f37d721aa2680b8fee8725f729ea4 | 33,192 |
def min_dist(q, dist):
    """
    Return the node in ``q`` with the smallest ``dist`` value.

    Kept separate to keep the main (Dijkstra-style) loop clean.
    Returns None when ``q`` is empty.
    """
    min_node = None
    for node in q:
        # First node seen, or a strictly smaller distance, becomes the
        # minimum (was `min_node == None`; identity test is the idiom).
        if min_node is None or dist[node] < dist[min_node]:
            min_node = node
    return min_node
    # | 99c4e868748598a44f79ee3cb876d7ebc3abae08 | 33,193 |
def _should_profile_development_default():
    """Default policy: profiling is enabled in development unless this
    function is overridden in appengine_config.py."""
    return True
    # | 01b72de25feff42c0cfd6c474296ca339745727d | 33,194 |
def filter_localization_probability(df, threshold=0.75):
    """
    Drop rows whose 'Localization prob' falls below ``threshold``.

    Filters out poorly localized peptides (non Class-I by default).
    The input frame is copied, not modified.

    :param df: Pandas ``DataFrame``
    :param threshold: cut-off below which rows are discarded (default 0.75)
    :return: filtered Pandas ``DataFrame``
    """
    filtered = df.copy()
    keep = filtered['Localization prob'].values >= threshold
    return filtered.iloc[keep, :]
    # | 7036c730db4c0a649aa25bcf59a962f89ce2710c | 33,195 |
def mean_riders_for_max_station(ridership):
    """
    Mean daily ridership of the station that was busiest on the first day,
    plus the overall mean ridership for comparison.

    :param ridership: 2-D NumPy array, days x stations
    :return: (overall mean, mean for the day-one max station)
    """
    busiest = ridership[0, :].argmax()  # station with most day-one riders
    return ridership.mean(), ridership[:, busiest].mean()
    # | 936f6a8f7ac2c1cebc1c53516963253c9c51ee29 | 33,196 |
def _read_first_line(file_path):
    """
    Return the first line of the file, stripped of surrounding whitespace.
    """
    with open(file_path, 'r') as handle:
        return handle.readline().strip()
    # | 035f22e1e35aa2f945d6ee6d8435d44fee17cc01 | 33,197 |
def _month_year_to_tuple(month_year: str) -> tuple[int, int]:  # Month, Year
    """
    Parse a space-separated "Month Year" string into a tuple.

    :param month_year: month/year combination, space separated (e.g. "10 2021")
    :return: tuple[int, int] (month, year)
    """
    month, year = month_year.split(" ")
    return int(month), int(year)
    # | 202d7863767374fc043204be4588443f6296d3e9 | 33,198 |
from typing import Optional
from typing import Dict
def _create_param_name_prefix(enclosing_param_name: Optional[str],
                              prefix_dictionary: Optional[Dict]) -> str:
    """
    Build the name prefix used to de-conflict nested parameters.

    With no prefix dictionary, the enclosing parameter name plus an
    underscore is used (e.g. nested classes A and B both exposing ``x``
    yield ``a_...`` and ``b_...`` command-line parameters). A
    ``__nested_prefixes__`` dictionary can override the prefix for a
    given enclosing name, and an explicit ``None`` entry suppresses the
    prefix entirely.

    :param enclosing_param_name: the name of the enclosing class's parameter
    :param prefix_dictionary: optional mapping of enclosing names to
        replacement prefixes (or None to disable prefixing)
    :return: the prefix string, possibly empty
    """
    if enclosing_param_name is None:
        return ''
    if prefix_dictionary is None:
        # No overrides configured: default underscore-suffixed prefix.
        return enclosing_param_name + '_'
    override = prefix_dictionary.get(enclosing_param_name, enclosing_param_name)
    return '' if override is None else override
    # | 68b179bbea40dc044d949eee5067f64250005392 | 33,199 |
import os
def fetch_genomes(target_genus_species, db_base=None):
    """
    Download GenBank genomes for a genus/species, using rsync so that
    periodic updates only transfer changes.
    Examples:
    >>> fetch_genomes("Escherichia coli")
    >>>
    >>> fetch_genomes("Klebsiella pneumoniae", "/home/me/dbs/")
    :param target_genus_species: the genus species as a string
        (space delimited)
    :param db_base: optional base directory to store the database under;
        defaults to the current working directory
    :returns: the database location
    """
    # Remember where we started so we can restore it before returning.
    working_dir = os.getcwd()
    if db_base is not None:
        os.chdir(db_base)
    target_genus_species = target_genus_species.replace(" ", "_")
    if not os.path.exists(target_genus_species):
        os.mkdir(target_genus_species)
    os.chdir(target_genus_species)
    # NOTE(review): command built by string interpolation and run through
    # os.system; fine for trusted input, but subprocess.run would be safer.
    cmd = ("rsync -av ftp.ncbi.nlm.nih.gov::genomes/Bacteria/"
    "%s_*/*.gbk .") % (target_genus_species)
    db_loc = os.getcwd()
    os.system(cmd)
    os.chdir(working_dir)
    return db_loc | 1bf00a478856d706126e58fccc70a7e7533d216c | 33,200 |
import os
import logging
def get_logger(name):
    """
    Special get_logger. ``name`` is typically the application name (e.g.
    the app using Balsa), but a python file name or path such as
    ``__file__`` is also accepted; its module name is extracted first.

    :param name: logger name, or a .py file name/path
    :return: the logger for the derived name
    """
    if name.endswith(".py"):
        # Strip the extension, then any leading directory components.
        name = name[:-3].split(os.sep)[-1]
    return logging.getLogger(name)
    # | 387e8ad50beb329f43b78121a197b8a600580a09 | 33,203 |
def _parse_cubehelix_args(argstr):
"""Turn stringified cubehelix params into args/kwargs."""
if argstr.startswith("ch:"):
argstr = argstr[3:]
if argstr.endswith("_r"):
reverse = True
argstr = argstr[:-2]
else:
reverse = False
if not argstr:
return [], {"reverse": reverse}
all_args = argstr.split(",")
args = [float(a.strip(" ")) for a in all_args if "=" not in a]
kwargs = [a.split("=") for a in all_args if "=" in a]
kwargs = {k.strip(" "): float(v.strip(" ")) for k, v in kwargs}
kwarg_map = dict(
s="start", r="rot", g="gamma",
h="hue", l="light", d="dark", # noqa: E741
)
kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}
if reverse:
kwargs["reverse"] = True
return args, kwargs | a7c7405a3a668e6b7925aa02a1fafc9e5e774f41 | 33,204 |
import re
def rm_custom_emoji(text):
    """
    Strip custom-emoji ID tags so they are not read aloud.

    :param text: original text
    :return: text with ``<:name:>`` tags removed
    """
    # Matches tags of the form <:identifier:> (letters, digits, underscore).
    return re.sub(r'<:[a-zA-Z0-9_]+?:>', '', text)
    # | ac0a1cfb59868c6bcd96541f5bcc75de966d8a93 | 33,206 |
def open_views_for_file(window, file_name):
    """Return every view in ``window`` that shares the file's buffer."""
    primary = window.find_open_file(file_name)
    if primary is None:
        return []
    target = primary.buffer_id()
    return [view for view in window.views() if view.buffer_id() == target]
    # | d265e42b99b38606646e0f87b456c8b44d6fcb8c | 33,207 |
def get_bond_type_counts(bond_types):
    """Count the number of bonds of each type.

    :param bond_types: iterable of hashable bond-type identifiers
    :return: dict mapping bond type -> occurrence count
    """
    count = {}
    for bond_type in bond_types:
        # dict.get replaces the original `in count.keys()` membership test
        # plus separate branch (one lookup instead of two).
        count[bond_type] = count.get(bond_type, 0) + 1
    return count
    # | 80835f800de7e7cb59f8c0dd05779ef7fab6eba6 | 33,211 |
def encoded_capacity(wmb, data, num_bits):
    """
    Total capacity in bits of the bitmask plus the encoded data.

    :param wmb: weight mask bits -- bit vector flagging the non-zero
        entries of the original weight matrix M
    :param data: data for the non-zero elements of matrix M
    :param num_bits: number of bits used per data value
    """
    data_bits = data.size()[0] * num_bits
    mask_bits = wmb.size()[0]
    return data_bits + mask_bits
    # | 08442eb84c303962ea538995c6920842aa4ff1b3 | 33,213 |
def raise_power(num, pow):
    """Raise ``num`` to the integer power ``pow``.

    Negative powers return 1 over the positive power (a float), pow 0
    returns the int 1 and pow 1 returns ``num`` itself, exactly as before.
    The per-step recursion is replaced by a loop so large exponents no
    longer hit the interpreter's recursion limit.
    """
    if pow < 0:
        return 1 / raise_power(num, -pow)
    if pow == 0:
        return 1
    result = num
    for _ in range(pow - 1):
        result *= num
    return result
    # | 038efd037e19fa22149d866beccc0b4bdd27d448 | 33,214 |
def partition_train_validation_test(data):
    """
    Split a dataset into training, validation and testing sets by the last
    digit of each item's 'hash': 0-5 -> train (60%), 6-7 -> validation
    (20%), 8-9 -> test (20%).

    :param data: input dataset (items are mappings with a 'hash' key)
    :return: (training, validation, testing) lists
    """
    train, valid, test = [], [], []
    for item in data:
        digit = item['hash'] % 10
        if digit <= 5:
            train.append(item)
        elif digit <= 7:
            valid.append(item)
        else:
            test.append(item)
    return train, valid, test
    # | 4defd18e0dc5ddeb7309b274b707676516901acb | 33,218 |
import os
import requests
def weather():
    """Return a one-hour weather summary string for Zagazig, Egypt.

    Queries the AccuWeather hourly-forecast API, using the ACCUWEATHER
    environment variable as the API key.
    """
    location_key = 127335 # Zagazig location key
    url = f"http://dataservice.accuweather.com/forecasts/v1/hourly/1hour/{location_key}"
    # "metric": True requests Celsius temperatures.
    parameters = {"apikey": os.environ.get("ACCUWEATHER"), "metric": True}
    # The endpoint returns a list; [0] is the single hourly forecast entry.
    data = requests.get(url, params=parameters).json()[0]
    temperature = data["Temperature"]["Value"]
    atm_status = data["IconPhrase"]
    location = "Zagazig, Egypt"
    return f"Weather is {atm_status} in {location}.\nAnd it currently feels like {temperature} °C" | 042fa1b613b19f4273721c8f6c9ade7f28c7a401 | 33,219 |
def remove_key_values(dictionary, keys=('self', '__class__')):
    """
    Return a copy of ``dictionary`` without the given keys.

    The previous version aliased (and therefore mutated) the input dict
    despite naming the result ``new_dict``, and used a mutable default
    argument; this version copies first and uses an immutable default.

    :param dictionary: source mapping (left unmodified)
    :param keys: keys to remove; all must be present
    :return: new dict without ``keys``
    :raises KeyError: if any key in ``keys`` is missing (as before)
    """
    new_dict = dict(dictionary)
    for key in keys:
        del new_dict[key]
    return new_dict
    # | 3e0ed376bb4b00623ffabd2c0fc107d5409b6ef1 | 33,220 |
def solution(n: int = 600851475143) -> int:
    """
    Return the largest prime factor of ``n``.

    >>> solution(13195)
    29
    >>> solution(17)
    17

    :raises TypeError: if ``n`` is not an int and cannot be cast to one
    :raises ValueError: if ``n`` is less than one
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    largest = 1
    factor = 2
    # Trial division: each factor is stripped out completely before moving
    # on, so every recorded factor is necessarily prime.
    while factor * factor <= n:
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1
    # Anything left above 1 is itself a prime factor larger than sqrt(n).
    if n > 1:
        largest = n
    return int(largest)
    # | 2c081a09422ef35c81a6173df6e5cf032cf425e1 | 33,221 |
def interpolate_line(x1, y1, x2, y2):
    """
    Return the function of the straight line through two points.

    Args:
        x1 (float): x-value of point 1
        y1 (float): y-value of point 1
        x2 (float): x-value of point 2
        y2 (float): y-value of point 2
    Returns:
        callable: f(x) giving the y-value on the line at x
    Raises:
        ValueError: when x1 == x2 (a vertical line is not a function)
    """
    if x1 == x2:
        raise ValueError("x1 and x2 must be different values")
    def line(x):
        gradient = (y2 - y1) / (x2 - x1)
        return y1 + gradient * (x - x1)
    return line
    # | 71b26e50fb21f22333df7b20ddacf7bc376789cc | 33,222 |
def dir_names(dirrepo, panel_id):
    """Return the calibration-repository subdirectory paths for a panel.

    :return: (panel, offset, pedestals, plots, work, gain, rms, status) dirs
    """
    dir_panel = f'{dirrepo}/{panel_id}'
    subdirs = ('offset', 'pedestals', 'plots', 'work', 'gain', 'rms', 'status')
    return (dir_panel,) + tuple(f'{dir_panel}/{name}' for name in subdirs)
    # | fee6d54795593af4e84497310aae5549d2579c68 | 33,223 |
import argparse
def arg_parsing():
    """
    Parse command-line options customizing the training process.

    OUTPUT:
        argparse.Namespace with arch, save_dir, learning_rate,
        hidden_units, epochs and gpu attributes
    """
    parser = argparse.ArgumentParser(description="Neural Network Settings")
    # Architecture selection.
    parser.add_argument('--arch', type=str,
                        help='Choose architecture from torchvision.models as str')
    # Checkpoint directory.
    parser.add_argument('--save_dir', type=str,
                        help='Define save directory for checkpoints as str. If not specified then model will be lost.')
    # Hyperparameter tuning.
    parser.add_argument('--learning_rate', type=float,
                        help='Define gradient descent learning rate as float')
    parser.add_argument('--hidden_units', type=int,
                        help='Hidden units for DNN classifier as int')
    parser.add_argument('--epochs', type=int,
                        help='Number of epochs for training as int')
    # GPU toggle.
    parser.add_argument('--gpu', action="store_true",
                        help='Use GPU + Cuda for calculations')
    return parser.parse_args()
    # | f2bccafa9bbc5c451d546c6e49ce3eaca2a025e4 | 33,224 |
import os
def get_fileset(path):
    """Build the set of files under ``path`` (recursively, skipping
    symlinked directories), mapped to their last-modification time as an
    int (epoch seconds)."""
    fileset = dict()
    for root, _, files in os.walk(path):
        if os.path.islink(root):
            continue
        rel_root = os.path.relpath(root, path)
        for fname in files:
            rel_path = os.path.join(rel_root, fname)
            fileset[rel_path] = int(os.path.getmtime(os.path.join(path, rel_path)))
    return fileset
    # | 9f979743476e7d880788b59da3464a365729d9c9 | 33,225 |
def nth_even(n):
    """Return the n-th even number, 1-based: nth_even(1) == 0.

    Computed in closed form; the original built an n-element list just to
    read its final entry. Behavior is unchanged for n >= 1 (for n < 1 the
    original raised IndexError or looped forever).
    """
    return 2 * (n - 1)
    # | 0d06509e8ac6696b745bf2fd48bd21c92498a124 | 33,226 |
def is_sequence(item):
    """True when *item* is a list or a tuple (not any iterable)."""
    if isinstance(item, (list, tuple)):
        return True
    return False
    # | f00d191f68994043dbab0c483a8af58d102a2a11 | 33,227 |
def gamma_update(tau, delta=1):
    """VBEM update of the variational Dirichlet parameter:
    delta plus the column sums of tau."""
    return tau.sum(axis=0) + delta
    # | 4e430bce68f3aa08fc1d7074e2a59c53378410ed | 33,228 |
from typing import Any
from typing import Mapping
def value_in_context(value: Any, context: Mapping) -> Any:
    """Evaluate something in the context of an object being constructed.

    If ``value`` exposes ``evaluate_in``, return ``value.evaluate_in(context)``;
    otherwise return ``value`` unchanged.

    NOTE(review): the except clause also swallows AttributeErrors raised
    *inside* evaluate_in itself, not only the missing-attribute case --
    confirm that is intentional before refactoring to hasattr/getattr.
    """
    try:
        return value.evaluate_in(context)
    except AttributeError:
        return value | e8037a1d23b266ac02125f2b25fc4dbbf059b3b5 | 33,229 |
def remove_at(bar, pos):
    """Drop every NoteContainer after position ``pos`` in the Bar and
    return the Bar."""
    removals = len(bar) - pos
    for _ in range(removals):
        bar.remove_last_entry()
    return bar
    # | 3814c2d825d5aca8815e62e398a940149f287835 | 33,230 |
def calc_gof(resdf, simvar, datavar):
    """Goodness-of-fit measures between the sim and data rows of ``resdf``.

    Returns (MAE over data mean, MAPE, R^2). Note: adds an 'error' row to
    ``resdf`` in place, exactly as before.
    """
    sim_row = resdf.loc[simvar]
    data_row = resdf.loc[datavar]
    resdf.loc['error'] = (sim_row - data_row).abs()
    maeom = resdf.loc['error'].mean() / data_row.mean()
    mape = (resdf.loc['error'] / data_row).mean()
    r2 = sim_row.corr(data_row) ** 2
    return maeom, mape, r2
    # | bf7f290fb6012b09df56c12d137db8914c75a253 | 33,232 |
import argparse
def get_input_args_predict():
    """
    Parse the command-line arguments for the predict script.

    OUTPUT:
        argparse.Namespace with path, checkpoint, top_k, category_names
        and gpu attributes
    """
    parser = argparse.ArgumentParser()
    # Required positionals: the image to classify and the checkpoint to load.
    parser.add_argument('path', type=str,
                        help='path to the image we want to submit to our image classifier')
    parser.add_argument('checkpoint', action='store', help='Checkpoint we want to load')
    # Optional knobs: number of classes, index->name mapping, GPU toggle.
    parser.add_argument('--top_k', action='store', default=5, type=int,
                        help='Choose how many predicted classes we want to return')
    parser.add_argument('--category_names', action='store', default='',
                        help='The mapping from index to category names')
    parser.add_argument('--gpu', action='store_true', default=False, dest='gpu',
                        help='By default, the model will not be trained on a GPU ; if this argument is present it will')
    return parser.parse_args()
    # | 95a5ffad24f909ad5ce3ea66ea77436b70cca9b8 | 33,233 |
def fc_params(in_features: int, out_features: int, bias: bool = True):
    """
    Return the number of parameters in a linear (fully-connected) layer.

    A Linear(in, out) layer has ``in * out`` weights plus one bias per
    *output* unit. (The previous version added one bias per input,
    computing ``in * (out + 1)`` instead of ``in * out + out``.)

    Args:
        in_features: Size of input vector.
        out_features: Size of output vector.
        bias: If true count bias too.
    Returns:
        The number of parameters.
    """
    params = in_features * out_features
    if bias:
        params += out_features
    return params
    # | 0138ae52f101975aba2fd6f38452471a09a2c8e1 | 33,234 |
def schedule_extraction(informations):
    """
    Given a list of this month's schedule cells (bs4.element.Tag), return
    a mapping keyed by day-of-month, e.g.::

        {2: {'date': 2,
             'yobi': '<weekday name>',
             'yotei': [{'text': '...', 'url_link': 'https://...'}, ...]},
         ...}
    """
    calendar_yotei = {}
    for info in informations:
        # Date and weekday: the cell text ends with a three-character
        # weekday name (e.g. "土曜日"); everything before it is the day number.
        day = info.find(class_="calendarCellDate").get_text()
        date = int(day[:-3])
        yobi = day[-3:]
        # Scheduled items: one dict per <a> link (label text + href).
        yotei = []
        for link in info.find(class_="calendarCellContent").find_all("a"):
            text,url_link = link.get_text(),link.get("href")
            yotei.append({"text":text, "url_link":url_link})
        calendar_yotei[date] = {"date":date, "yobi":yobi, "yotei":yotei}
    return calendar_yotei | d14d62168295d47f3eaca02ad431e74fcec91d52 | 33,236 |
def _to_float(maybe_float):
    """Cast to float, returning the input unchanged when it cannot be cast.

    Also catches TypeError so genuinely safe for non-string inputs such as
    None or lists (the original only caught ValueError and let those raise).
    """
    try:
        return float(maybe_float)
    except (TypeError, ValueError):
        return maybe_float
    # | fa46e030d83dd879b09949d310092cca3c75abca | 33,237 |
def get_fuel_cost_part2(x1, x2):
    """Fuel cost to move from x1 to x2 when each step costs one more unit
    than the previous one: the triangular number of the distance."""
    distance = abs(x1 - x2)
    return distance * (distance + 1) / 2
    # | 16a975c6ba9bcc9af9a85c857d86e7436b80863b | 33,238 |
def get_sim_num():
    """
    Prompt (repeatedly) for the number of simulations to run, validating
    the input until a positive integer is entered.

    :return: the validated number of simulations (int)
    """
    while True:
        simulations = input('Type the number of simulations you want '
                            'to run.\nThe higher the number, the more '
                            'accurate the numbers will be: ')
        try:
            simulations = int(simulations)
            if simulations > 0:
                return simulations
            else:
                print('Please type a positive number.\n')
        except ValueError:
            # Non-numeric input: warn and re-prompt.
            print('Please type a number.\n') | 2ce3addfdd44a765131474275848b63732a2916c | 33,239 |
def azimu_half(degrees: float) -> float:
    """
    Fold an azimuth from the 180-360 range into 0-180.

    :param degrees: Degrees in range 0 - 360
    :return: Degrees in range 0 - 180
    """
    return degrees - 180 if degrees >= 180 else degrees
    # | 068f1e059a64f0070d927cee05777a15bc22b01a | 33,240 |
import threading
import os
def _temp_file_name():
    """Build a per-thread temporary file path under /tmp."""
    per_thread = 'local-{}.temp'.format(threading.get_ident())
    return os.path.join(os.path.sep, 'tmp', per_thread)
    # | 6ce8b62ad90702eadd95501d49f36c385827842e | 33,241 |
def join_data(data1, data2, f):
    """Stack arrays with numpy's vstack/hstack even when data2 is not a tuple.

    Args:
        data1 (arr): array or None to be placed in front
        data2 (arr): tuple of arrays (or single array) to join to data1
        f: numpy's vstack or hstack
    Returns:
        The data joined with the provided method.
    """
    stacked = f(data2) if isinstance(data2, tuple) else data2
    if data1 is None:
        return stacked
    return f((data1, stacked))
    # | d823e4751ded4f2c5ad3c174034d3260503fa841 | 33,242 |
def get_stack_name(environment, service_name):
    """
    Build the stack name from the short environment name (what ef-open
    templates call "ENV": everything before the first dot) and the service.
    """
    short_env = environment.split('.', 1)[0]
    return f'{short_env}-{service_name}'
    # | 4704aae7b9f4ae7fb69b6dac161c3bf8d7f44723 | 33,243 |
def crit(evals, atol=1e-2):
    """Indices of the eigenvalues greater than ``atol``.

    Assumes eigenvalues sorted in ascending order.

    Args:
        evals (torch.Tensor): Eigenvalues.
        atol (float): Cutoff; smaller eigenvalues are dropped.
    Returns:
        [int]: Indices of the retained (non-zero) eigenvalues.
    """
    significant = []
    for index, value in enumerate(evals):
        if value > atol:
            significant.append(index)
    return significant
    # | cb30ad8b3bdbfa83aae9c8445cf11f357e96ef2e | 33,244 |
def askNbPlayer():
    """Ask for the number of players, re-prompting (recursively) until the
    answer is 1 (vs AI) or 2.

    Returns:
        number: the number of players, 1 (AI) or 2
    """
    nb = 0  # NOTE(review): unused; kept for byte-compatibility
    try:
        userResponse = input("Combien de joueur 1 ou 2: ")
        nbPlayer = int(userResponse)
        if not (0 < nbPlayer <= 2):
            # Treat out-of-range numbers exactly like non-numeric input.
            raise(ValueError('wrong number'))
        return nbPlayer
    except ValueError:
        print('\nErreur - Vous devez entrer un nombre entre 1 et 2\n')
        return askNbPlayer() | 7a09e85c58404d86c59a651b66cff98b62a4a00a | 33,245 |
import torch
def update_affine_param( cur_af, last_af): # A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2
    """
    Compose the current affine parameter A2 with the previous one A1.

    A2(A1*x+b1) + b2 = A2A1*x + A2*b1+b2, giving the composed affine
    parameter A3 = (A2A1, A2*b1 + b2).

    :param cur_af: current affine parameter, flat shape (batch, 12)
    :param last_af: last affine parameter, flat shape (batch, 12)
    :return: composed affine parameter A3, flat shape (batch, 12)
    """
    # Reshape flat parameters to (batch, 4, 3): rows 0-2 hold the 3x3
    # linear part, row 3 holds the translation vector.
    cur_af = cur_af.view(cur_af.shape[0], 4, 3)
    last_af = last_af.view(last_af.shape[0],4,3)
    updated_af = torch.zeros_like(cur_af.data).to(cur_af.device)
    dim =3  # NOTE(review): unused; kept for byte-compatibility
    # Linear part: A2 @ A1.
    updated_af[:,:3,:] = torch.matmul(cur_af[:,:3,:],last_af[:,:3,:])
    # Translation: b2 + A2 @ b1 (b1 transposed to a column for the matmul).
    updated_af[:,3,:] = cur_af[:,3,:] + torch.squeeze(torch.matmul(cur_af[:,:3,:], torch.transpose(last_af[:,3:,:],1,2)),2)
    # Flatten back to the (batch, 12) layout of the inputs.
    updated_af = updated_af.contiguous().view(cur_af.shape[0],-1)
    return updated_af | e9a5127ebec4cb404b419446a17b8e949a4d8c04 | 33,246 |
def is_empty(list):
    """Return True when the given value is falsy (e.g. an empty collection)."""
    # Parameter name shadows the builtin but is kept for interface
    # compatibility with keyword callers.
    if list:
        return False
    return True
import math
def perfect_square(N):
    """
    Helper function to check whether a number is a perfect square.

    Uses the integer square root so the test stays exact even for large
    integers, where ``int(math.sqrt(N))`` loses precision past 2**53.

    :return: True if number is a perfect square, False otherwise
    :rtype: bool
    """
    # int(N) tolerates float inputs like the original; math.isqrt raises
    # ValueError for negative values, matching math.sqrt's behaviour.
    return math.isqrt(int(N)) ** 2 == N
import itertools
def generate_combo(samples, sample_spaces):
    """
    Build all alternative sample combinations, excluding the original.

    First, generate combinations of ``len(sample)`` elements from each
    corresponding sample space. Next, generate the cross product between
    those per-position combination lists. Finally, filter out the original
    ``samples`` combination from the result.

    Returns:
        list: all combinations (each a list of lists) except ``samples``.
        (The original returned a lazy, single-use ``filter`` object; a list
        is returned instead so the result can be iterated more than once.)
    """
    per_sample = []
    for sample, sample_space in zip(samples, sample_spaces):
        per_sample.append(
            [list(tup) for tup in itertools.combinations(sample_space, len(sample))])
    # cross product of the per-sample combination lists:
    combos = [list(tup) for tup in itertools.product(*per_sample)]
    # don't add the original samples
    return [combo for combo in combos if combo != samples]
from typing import Dict
from typing import Any
def _make_serving_version(service: str, version: str) -> Dict[str, Any]:
"""Creates description of one serving version in API response."""
return {
'split': {
'allocations': {
version: 1,
}
},
'id': service
} | a75fa6bdc03ee67d6f4bb6714d94c489072cfa66 | 33,250 |
def signum(x):
    """Sign of `x`: ``-1 if x<0, 0 if x=0, +1 if x>0``."""
    # Boolean subtraction yields exactly -1, 0 or +1 as an int.
    return (x > 0) - (x < 0)
import fnmatch
import os
def find_file(path, pattern):
    """Return full paths of regular files in ``path`` matching ``pattern``.

    Thin wrapper over :func:`fnmatch.filter` that additionally drops
    directory entries.
    """
    matching_names = fnmatch.filter(os.listdir(path), pattern)
    full_paths = (os.path.join(path, name) for name in matching_names)
    return [fp for fp in full_paths if os.path.isfile(fp)]
def detxy2kxy(xdet, ydet, xstart, ystart, x0, y0, fx, fy, xstep, ystep):
    """
    Convert detector coordinates (xdet, ydet) into momentum coordinates
    (kx, ky).

    **Parameters**\n
    xdet, ydet: numeric, numeric
        Pixel coordinates in the detector coordinate system.
    xstart, ystart: numeric, numeric
        The starting pixel number in the detector coordinate system
        along the x and y axes used in the binning.
    x0, y0: numeric, numeric
        The center pixel position in binned image used in calibration.
    fx, fy: numeric, numeric
        Scaling factor along the x and y axes (in binned image).
    xstep, ystep: numeric, numeric
        Binning step size along x and y directions.
    """
    # Detector-frame position of the calibration center pixel.
    center_x = xstart + xstep * x0
    center_y = ystart + ystep * y0
    # Offset from center in binning steps, scaled to momentum units.
    kx = fx * ((xdet - center_x) / xstep)
    ky = fy * ((ydet - center_y) / ystep)
    return (kx, ky)
def get_item_from_dict_by_key(dict_name,
                              search_term,
                              search_in,
                              return_content_of="item"):
    """
    Collect one field from every entry whose ``search_in`` field matches.

    It will normally return the content of the field 'item' which is
    expected to contain a Q-item, but the field whose contents should be
    returned can be overridden.

    @param dict_name: the iterable of dicts to look in
    @param search_term: the value to match
    @param search_in: the field in which to look for the matching value
    @param return_content_of: the field whose content to return
    @return: list of field values (empty when nothing matches)
    """
    return [entry[return_content_of]
            for entry in dict_name
            if entry[search_in] == search_term]
def bounding_box2D(pts):
    """
    Axis-aligned bounding box of the 2-D points ``pts``.

    Returns (xmin, ymin, width, height).
    """
    coords = list(zip(*pts))
    xs, ys = coords[0], coords[1]
    xmin, ymin = min(xs), min(ys)
    return xmin, ymin, max(xs) - xmin, max(ys) - ymin
from typing import Union
from typing import List
from pathlib import Path
from typing import Dict
from typing import Counter
def recursive_open_and_count_search_terms(
    terms: Union[str, List[str]], folder: Union[str, Path]
) -> Dict[str, int]:
    """Recursively open files in a folder, and count given search terms.

    Files that cannot be decoded as UTF-8 are skipped.

    Args:
        terms: A string or list of strings denoting the search terms.
        folder: A folder containing files to recursively open and count for
            search terms.

    Returns:
        A dictionary where each key-value pair contains each search term, and
        its associated count (0 when the term never appears).
    """
    # Set terms to a list, and get all files in `folder` recursively
    terms = terms if isinstance(terms, List) else [terms]
    all_files = [f for f in Path(folder).rglob("*") if f.is_file()]
    # Initialise a counter, then iterate through the files counting all terms.
    # (A leftover debug print for "cookiecutter.using_R" hits was removed.)
    word_count = Counter()
    for file in all_files:
        try:
            with open(file, encoding="utf-8") as f:
                word_count.update(f.read().split())
        except UnicodeDecodeError:
            continue
    return {t: word_count.get(t, 0) for t in terms}
def has_been_replied_by_bot(comment, me):
    """Check whether the bot already participated in this comment thread.

    Returns True when ``comment`` itself was authored by the bot account
    ``me``, or when any of its (refreshed) replies was; False otherwise.
    """
    # The bot authored the parent comment itself.
    if comment.author == me:
        return True
    # Re-fetch the comment so its reply tree is populated, then scan it.
    comment.refresh()
    return any(reply.author == me for reply in comment.replies.list())
def functionexample(INDIR, path, fname, OUTDIR, ARGS, intranet):
    """
    Description
    -----------
    Example of the function to perform in each file.
    Opens each file and returns ``intranet`` unchanged.
    """
    # `with` guarantees the handle is closed even if an error occurs after
    # opening (the original open()/close() pair leaked on exceptions).
    with open(INDIR + "/" + path + fname):
        pass
    return intranet
from typing import List
import itertools
def permutations(raw: str) -> List[str]:
    """Return a list of all unique permutations of a given input string.

    In case of an empty string (`''`) a list with an empty string is
    returned (`['']`). Result order is unspecified (it comes from a set).

    Parameters
    ----------
    raw: str
        Input string from which the permutations are being generated

    Returns
    -------
    permutations: List[str]
        A list of permutation strings based on the input string
    """
    unique = {''.join(chars) for chars in itertools.permutations(raw)}
    return list(unique)
def get_colour(temperature: float) -> tuple[float, float, float, float]:
    """Map a temperature to an RGBA colour.

    Negative temperatures map to blues, non-negative ones to reds; both
    colour intensity and alpha scale with |temperature|.

    Args:
        temperature (float): Temperature in range [-1, 1]

    Return
        tuple: RGBA tuple
    """
    # Blending constants keep a minimum visible intensity at temperature 0.
    colour_blend = 0.85
    alpha_blend = 0.9
    magnitude = -temperature if temperature < 0 else temperature
    channel = magnitude * colour_blend + (1 - colour_blend)
    alpha = magnitude * alpha_blend + (1 - alpha_blend)
    if temperature < 0:
        return (0, 0, channel, alpha)
    return (channel, 0, 0, alpha)
def pep440_from_semver(semver):
    """Convert a semantic version to a PEP 440 compliant version string.

    Pre-release components become a ``.dev`` segment; build metadata becomes
    a PEP 440 local version, attached only for pre-releases (a true release
    carries no local version).
    """
    dev_segment = ''
    if semver.prerelease:
        dev_segment = '.dev{}'.format('.'.join(semver.prerelease))
    local_version = '.'.join(semver.build).replace('-', '.')
    version_str = '{}.{}.{}{}'.format(
        semver.major, semver.minor, semver.patch, dev_segment)
    # Include the local version only when this is not a true release.
    if local_version and semver.prerelease:
        version_str = '{}+{}'.format(version_str, local_version)
    return version_str
def has_feature(td, feature):
    """
    Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.

    Note: this is a substring test against the feature strings, so a
    feature name that is a substring of another will also match.
    """
    distro = td.get('DISTRO_FEATURES', '')
    image = td.get('IMAGE_FEATURES', '')
    return feature in distro or feature in image
def is_xml_file(f):
    """Tries to guess if the file is a BNC xml file.

    Rewinds the handle, skips leading blank lines, and checks whether the
    first non-blank line contains a ``<bncDoc `` opening tag.
    """
    f.seek(0)
    raw = f.readline()
    # Skip blank lines but stop at EOF: readline() returns '' there, and the
    # original stripped-value loop spun forever on blank-only/empty files.
    while raw != '' and raw.strip() == '':
        raw = f.readline()
    return '<bncDoc ' in raw.strip()
import os
def which(executable_name, env_var='PATH'):
    """Equivalent to ``which executable_name`` in a *nix environment.

    Will return ``None`` if ``executable_name`` cannot be found in
    ``env_var`` or if ``env_var`` is not set. Otherwise returns the first
    match in ``env_var``.

    Note: this function will likely not work on Windows.
    """
    if env_var not in os.environ:
        return None
    for directory in os.environ[env_var].split(os.pathsep):
        candidate = os.path.join(directory, executable_name)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
def possible_negation_suffix(text: str) -> bool:
    """
    Checks if the text ends with a possible negation suffix.

    :param text: string containing a token
    :return: True if the text ends with a possible negation suffix, False if not
    """
    negation_suffixes = ("less",)
    # Minimum length 5 so the bare suffix "less" itself does not qualify.
    if len(text) < 5:
        return False
    return text.endswith(negation_suffixes)
import logging
from pathlib import Path
def get_logging(filename):
    """
    Configure root logging to append to ``<filename>.log`` in the current
    directory and return the configured stdlib logging module.

    :param filename: base name (without extension) of the log file
    :return: the stdlib logging module configured with niceties
    """
    logging.basicConfig(
        level=logging.INFO,
        # Use the caller-supplied name: the parameter was previously dead and
        # the placeholder-free f-string always produced "(unknown).log".
        filename=Path(".") / f"{filename}.log",
        filemode="a",
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    return logging
def count_object_methods(obj: object):
    """
    Count the callable entries in an object's ``__dict__``.

    Args:
        obj (object): any object (for a class, this counts the functions
            defined directly on it)

    Returns:
        int: the number of callable attributes found directly on ``obj``
    """
    return sum(1 for value in obj.__dict__.values() if callable(value))
def t(s):
    """Normalize Windows (CRLF) line endings in ``s`` to Unix (LF)."""
    return "\n".join(s.split("\r\n"))
def xml_prompt():
    """Prompt the user for a PubMed XML file name.

    Returns an open file handle on success; on IOError prints a hint and
    implicitly returns None.
    """
    try:
        filename = input("Please enter the name of the PubMed XML file: ")
        return open(filename)
    except IOError:
        print("Please ensure that you spelled the file name correctly and that you're in the right directory.")
from typing import OrderedDict
def linear_set_generator(random, args):
    """
    Generate a random sparse candidate mapping indices to continuous values.

    This function requires that a bounder is defined on the
    EvolutionaryAlgorithm (available as ``args["_ec"].bounder``).

    See Also
    --------
    inspyred.ec

    Parameters
    ----------
    random : Random
        Random source. NOTE(review): ``uniform`` is called with a ``size``
        argument and ``sample`` with a range, which suggests a numpy-style
        random object rather than stdlib ``random`` — confirm with callers.
    args : dict
        representation: set containing the possible values
        max_size: int, default: 9 — maximum candidate size
        variable_size: bool, default: True — draw the size at random

    Returns
    -------
    OrderedDict
        Mapping of sampled representation indices to values drawn between
        the bounder's lower and upper bounds. If variable_size is True the
        size is up to max_size, otherwise it equals max_size.
    """
    # The evolutionary-computation context supplies the value bounds.
    bounder = args.get("_ec").bounder
    representation = args.get('representation')
    max_size = args.get('max_size', 9)
    variable_size = args.get('variable_size', True)
    if variable_size:
        size = random.randint(1, max_size)
    else:
        size = max_size
    # Choose which positions of the representation participate.
    indices = random.sample(range(len(representation)), size)
    # NOTE(review): next() on the bounds implies the bounder exposes
    # iterator-style lower/upper bounds — verify against the bounder class.
    values = random.uniform(next(bounder.lower_bound), next(bounder.upper_bound), len(indices))
    return OrderedDict({i: v for i, v in zip(indices, values)})
import textwrap
def reindent(content, indent):
    """
    Reindent a string to the given indentation prefix.

    The common leading whitespace is stripped first, then ``indent`` is
    prepended to every line (including empty ones).
    """
    dedented = textwrap.dedent(content)
    return '\n'.join(indent + line for line in dedented.split('\n'))
def factorial(n: int) -> int:
    """Calculate the factorial of n recursively.

    Args:
        n (int): n >= 0

    Returns:
        int: factorial of n
    """
    # Base case covers 0 and 1: the original recursed forever for n == 0.
    # The leftover debug print(n) on every call has also been removed.
    if n <= 1:
        return 1
    return n * factorial(n - 1)
import random
def mergeUnfairRanking(_px, _sensitive_idx, _fprob):  # input is the ranking
    """
    Generate a fair ranking by probabilistically interleaving two groups.

    Attributes:
        _px: input ranking (sorted), list of ids
        _sensitive_idx: ids belonging to the protected group
        _fprob: probability to choose the protected group at each step
    Return: generated fair ranking, list of ids
    """
    # Partition the ranking while preserving the original order.
    protected = [x for x in _px if x in _sensitive_idx]
    others = [x for x in _px if x not in _sensitive_idx]
    merged = []
    p_pos, o_pos = 0, 0
    # One coin flip per step while both groups still have candidates.
    while p_pos < len(protected) and o_pos < len(others):
        if random.random() < _fprob:
            merged.append(protected[p_pos])  # insert protected group first
            p_pos += 1
        else:
            merged.append(others[o_pos])
            o_pos += 1
    # Append whichever group still has leftovers, keeping their order.
    merged.extend(protected[p_pos:])
    merged.extend(others[o_pos:])
    if len(merged) < len(_px):
        print("Error!")
    return merged
import six
def construct_getatt(node):
    """
    Reconstruct a !GetAtt node's value into a [resource, attribute] list.

    Accepts either a dotted string ("Resource.Attr.path", split only on the
    first dot) or a list of scalar nodes; anything else is rejected.
    """
    value = node.value
    if isinstance(value, six.text_type):
        return value.split(".", 1)
    if isinstance(value, list):
        return [item.value for item in value]
    raise ValueError("Unexpected node type: {}".format(type(value)))
def distanceSquared(a, b):
    """Squared L2 (Euclidean) distance between equal-length vectors."""
    if len(a) != len(b):
        raise RuntimeError('Vector dimensions not equal')
    return sum((ai - bi) * (ai - bi) for ai, bi in zip(a, b))
def flatten_nested_covariates(covariate_definitions):
    """
    Some covariates (e.g `categorised_as`) can define their own internal
    covariates which are used for calculating the column value but don't appear
    in the final output. Here we pull all these internal covariates out (which
    may be recursively nested) and assemble a flat list of covariates, adding a
    `hidden` flag to their arguments to indicate whether or not they belong in
    the final output
    We also check for any name clashes among covariates. (In future we could
    rewrite the names of internal covariates to avoid this but for now we just
    throw an error.)
    """
    flattened = {}
    hidden = set()
    # Work queue of (name, (query_type, query_args)) pairs still to process;
    # processed front-to-back so insertion order of the input is respected.
    items = list(covariate_definitions.items())
    while items:
        name, (query_type, query_args) = items.pop(0)
        if "extra_columns" in query_args:
            # Copy before mutating so the caller's definitions are untouched.
            query_args = query_args.copy()
            # Pull out the extra columns
            extra_columns = query_args.pop("extra_columns")
            # Stick the query back on the stack (now without extra_columns,
            # so it takes the plain branch on its next pass)
            items.insert(0, (name, (query_type, query_args)))
            # Mark the extra columns as hidden
            hidden.update(extra_columns.keys())
            # Add them to the start of the list of items to be processed,
            # ahead of their parent; nested extra_columns unwind recursively
            items[:0] = extra_columns.items()
        else:
            if name in flattened:
                raise ValueError(f"Duplicate columns named '{name}'")
            flattened[name] = (query_type, query_args)
    # Tag every covariate with whether it belongs in the final output.
    for name, (query_type, query_args) in flattened.items():
        query_args["hidden"] = name in hidden
    return flattened
def numeric_to_record(n_field):
    """
    Map a numeric field to its record value, treating zero as absent.

    :param n_field: numeric value to check
    :return: None when the value equals zero, otherwise the value itself
    """
    return None if n_field == 0 else n_field
def get_field_name(data, original_key, alternative_value):
    """
    Check which column name is used in the BioSamples record.

    :param data: one BioSamples record in JSON format retrieved from API
        (always contains a 'characteristics' section)
    :param original_key: field name to be checked first
    :param alternative_value: alternative field name to be checked
    :return: whichever of the two names is present (original wins), or ''
        when neither is found
    """
    characteristics = data['characteristics']
    if original_key in characteristics:
        return original_key
    if alternative_value in characteristics:
        return alternative_value
    return ''
def fib(n):
    """Use the slow, recursive formula to calculate Fibonacci numbers.

    Arguments:
        n {int} -- Calculate the nth Fibonacci number.

    Returns:
        int -- The resulting Fibonacci number.
    """
    # Deliberately naive O(phi^n) recursion (used elsewhere as a benchmark).
    if n in (0, 1):
        return n
    return fib(n - 2) + fib(n - 1)
def generate_tikz_foot(tikzpic=True):
    """
    Generate the closing lines of a TikZ/LaTeX file.

    :param tikzpic: if True, include ``\\end{tikzpicture}`` before
        ``\\end{document}``; otherwise only the document is closed
    :return: tex-foot string (note: the triple-quoted literals keep their
        leading newline and indentation verbatim)
    """
    if tikzpic:
        tikzCode = '''
        \\end{tikzpicture}
        \\end{document}'''
    else:
        tikzCode = '''
        \\end{document}
        '''
    return tikzCode
def remove_quotes(val):
    """Strip one pair of matching surrounding quotes from ``val``.

    Empty and single-character strings are returned unchanged (the original
    indexed ``val[0]`` unconditionally, raising IndexError on '', and
    collapsed a lone quote character to '').
    """
    if len(val) >= 2 and val[0] in ('"', "'") and val[0] == val[-1]:
        val = val[1:-1]
    return val
def characters_count(sorted_string):
    """Count occurrences of each distinct character in the string.

    Returns a list of single-entry dicts ``{char: count}`` ordered by
    character; an empty input yields an empty list. Note that every
    character is counted, digits included.
    """
    if not sorted_string:
        return []
    return [{ch: sorted_string.count(ch)}
            for ch in sorted(set(sorted_string))]
def basic_name_formatter(name):
    """Basic formatter: turn '_' into ' ' and capitalise the result
    (first character upper-cased, the rest lower-cased)."""
    spaced = name.replace('_', ' ')
    return spaced.capitalize()
def no_inference(csp, var, assignment, removals):
    """No-op inference step: always report the assignment as consistent.

    Used as the default when no inference algorithm is implemented.
    """
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.