content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def oucru_gender_pregnant_correction(tidy, verbose=10):
    """Ensure that all pregnant patients have appropriate gender.

    Any row flagged as pregnant is forced to gender 'Female'. If the
    'gender' column is absent it is created (filled with None) first.

    Parameters
    ----------
    tidy: pd.DataFrame
        DataFrame with patients data
    verbose: int
        Verbosity level

    Returns
    -------
    pd.DataFrame
        The corrected DataFrame (modified in place and returned)
    """
    if verbose > 5:
        print("Applying... gender_pregnant_correction.")
    if 'pregnant' in tidy:
        # Use idiomatic membership test; create the column if missing.
        if 'gender' not in tidy:
            tidy['gender'] = None
        tidy.loc[tidy['pregnant'], 'gender'] = 'Female'
    return tidy
def operation(operator):
    """
    Map an operator symbol, given as a string, to the corresponding
    two-argument function. Returns None for an unrecognized symbol.
    """
    dispatch = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    return dispatch.get(operator)
def add_leading_gaps(t_aligned, s_aligned, i, j):
    """Add the leading gaps to alignments.

    Prepends ``i`` gap characters ('-') to ``t_aligned`` and ``j`` gaps to
    ``s_aligned``. Note the return order is (s, t), mirroring the original.

    The original built the prefix one character at a time (quadratic in the
    gap count); string repetition does the same in one allocation. Negative
    counts behave like zero, matching the old ``range`` loops.
    """
    t_aligned = '-' * i + t_aligned
    s_aligned = '-' * j + s_aligned  # pragma: no cover
    return s_aligned, t_aligned
def sign(integer: int, /) -> int:
    """
    Get the sign of `integer`
    E.g:
    >>> sign(1234)
    1
    >>> sign(-1234)
    -1
    >>> sign(0)
    0
    """
    # Explicit three-way comparison instead of floor-division arithmetic.
    if integer > 0:
        return 1
    if integer < 0:
        return -1
    return integer
def find_last_digit(k):
    """Determines the last digit in a base 10 integer.

    Uses Python modulo semantics: the result is always in 0..9,
    even for negative k (same as the original ``k % 10``).
    """
    _, last = divmod(k, 10)
    return last
def _get_build_name(package):
"""Generate the name passed to Fortify."""
return "statick-fortify-{}".format(package.name) | f8e565732a9ba720e02db0353ba7a12ca9d754aa | 120,910 |
def get_average_fluxden(input_spec, dispersion, width=10, redshift=0):
    """
    Calculate the average flux density of a spectrum in a window centered at
    the specified dispersion and with a given width.
    The central dispersion and width can be specified in the rest-frame and
    then are redshifted to the observed frame using the redsh keyword argument.
    Warning: this function currently only works for spectra in wavelength
    units. For spectra in frequency units the conversion to rest-frame will
    be incorrect.
    :param input_spec: Input spectrum
    :type input_spec: sculptor.speconed.SpecOneD
    :param dispersion: Central dispersion
    :type dispersion: float
    :param width: Width of the dispersion window
    :type width: float
    :param redshift: Redshift argument to redshift the dispersion window into
        the observed frame.
    :type redshift: float
    :return: Average flux density
    :rtype: astropy.units.Quantity
    """
    # Window [center - width/2, center + width/2], scaled by (1+z) to move
    # from rest frame to observed frame (only valid for wavelength units).
    disp_range = [(dispersion - width / 2.)*(1.+redshift),
                  (dispersion + width / 2.)*(1.+redshift)]
    # Re-attach the spectrum's flux-density unit: average_fluxden apparently
    # returns a bare number here — TODO confirm against SpecOneD's API.
    unit = input_spec.fluxden_unit
    return input_spec.average_fluxden(dispersion_range=disp_range) * unit
def ntc_dic_to_ratio_dic(ntc_dic,
                         perc=False):
    """
    Given a dictionary of nucleotide counts, return dictionary of nucleotide
    ratios (count / total nucleotide number).
    perc:
        If True, make percentages out of ratios (*100).
    >>> ntc_dic = {'A': 5, 'C': 2, 'G': 2, 'T': 1}
    >>> ntc_dic_to_ratio_dic(ntc_dic)
    {'A': 0.5, 'C': 0.2, 'G': 0.2, 'T': 0.1}
    """
    assert ntc_dic, "given dictionary ntc_dic empty"
    total_n = sum(ntc_dic.values())
    scale = 100 if perc else 1
    return {nt: count / total_n * scale for nt, count in ntc_dic.items()}
import re
def _clean_sort_regex(string):
"""Clean non-alphanumeric characteres using regular expression and sort into a list."""
return sorted(re.sub(r"\W", "", string).casefold()) | 8bc7a1cb58f5962b7f550dc49fa6303be12d1b39 | 120,914 |
from typing import List
def bit_list_to_int(bitList: List[bool]) -> int:
    """Converts a binary list to an integer.

    The first element is treated as the least-significant bit
    (same convention as the original shift-loop implementation).

    Args:
        bitList: the binary list to convert
    Returns:
        The integer corresponding to the input bit list
    """
    return sum(int(bit) << position for position, bit in enumerate(bitList))
import random
def random_password(length, printable):
    """
    Build a random string of the given length.

    :param printable: Pool of characters to draw from.
    :param int length: Number of characters to generate.
    """
    picks = (random.choice(printable) for _ in range(int(length)))
    return "".join(picks)
def guess_header_format(handle):
    """Guess the header format.
    :arg stream handle: Open readable handle to an NGS data file.
    :return str: Either 'normal', 'x' or 'unknown'.
    """
    if handle.seekable():
        # Seekable stream: read the first line, then rewind so the caller
        # can re-read the file from the start.
        line = handle.readline().strip('\n')
        handle.seek(0)
    else:
        # Non-seekable stream: peek at the buffer without consuming it.
        # NOTE(review): str() on peek's bytes yields "b'...'" — the leading
        # "b'" ends up in `line`; presumably harmless for the '#'/':' counts
        # below, but verify with a real non-seekable handle.
        line = str(handle.buffer.peek(1024)).split('\n')[0]
    # Classic Illumina header: one '#' and a '/'-separated read number.
    if line.count('#') == 1 and line.split('#')[1].count('/') == 1:
        return 'normal'
    # Casava 1.8+ ("X") header: space-separated, colon-delimited second field.
    if line.count(' ') == 1 and line.split(' ')[1].count(':') == 3:
        return 'x'
    return 'unknown'
def read_file(file_path):
    """
    Read data from file_path.

    Return the file contents (str) on success, None on any error.
    Uses a context manager so the handle is always closed; the broad
    exception handling preserves the original best-effort contract
    (any failure — open, read, or decode — yields None).
    """
    try:
        with open(file_path, "r") as fd:
            return fd.read()
    except Exception:
        return None
from typing import Union
import torch
from typing import Sequence
from typing import Iterator
def count_tensor_list_size(tensor_list: Union[torch.Tensor, Sequence, Iterator], format="byte"):
    """Approximately count the size of a list of tensors in terms of bytes.

    Assumes 4 bytes per element (float32/int32); a single tensor is
    accepted and treated as a one-element list. `format` selects the
    unit: "byte", "kb", "mb" or "gb".
    """
    if torch.is_tensor(tensor_list):
        tensor_list = [tensor_list]
    total_bytes = 4 * sum(t.numel() for t in tensor_list)
    divisors = {"kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3}
    if format == "byte":
        return total_bytes
    if format in divisors:
        return total_bytes / divisors[format]
    raise ValueError(f"Unknown format: {format}")
def _find_subclass_with_field_name(schema, subclass_sets, parent_class_name, field_name):
    """Find a subclass that has a field with a given name, or return None if none exists.

    Args:
        schema: schema object exposing get_type(name) -> type with a `.fields` mapping
        subclass_sets: dict mapping class name -> collection of its subclass names
            (each class is expected to be a member of its own subclass set)
        parent_class_name: name of the class whose subclasses are searched
        field_name: the field to look for

    Returns:
        The first subclass name (in iteration order of the subclass set)
        whose type defines `field_name`, or None.
    """
    subclasses = subclass_sets[parent_class_name]
    # Sanity check: every class must appear in its own subclass set.
    if parent_class_name not in subclasses:
        raise AssertionError(
            "Found a class that is not a subclass of itself, this means that the "
            "subclass_sets value is incorrectly constructed: {} {} {}".format(
                parent_class_name, subclasses, subclass_sets
            )
        )
    for subclass_name in subclasses:
        class_object = schema.get_type(subclass_name)
        if field_name in class_object.fields:
            # Found a match!
            return subclass_name
    # No match.
    return None
def is_callable(x):
    """Return `True` if :attr:`x` is callable.
    """
    try:
        return callable(x)
    except:  # pylint: disable=bare-except
        # Fallback for exotic objects where callable() itself fails.
        return hasattr(x, '__call__')
def lex_less_Nd(a, b):
    """Determine if array 'a' lexicographically less than 'b'.

    Only the common prefix is compared (zip truncates); if all compared
    elements are equal the result is False, regardless of lengths.
    """
    for x, y in zip(a, b):
        if x != y:
            return x < y
    return False
import math
def ang2slp(ang):
    """Convert an angle in degrees to a slope (tangent of the angle)."""
    radians = math.radians(ang)
    return math.tan(radians)
def getFeatureNames(lines):
    """
    Get feature names from the first line of the examples.

    The first whitespace-separated token (the label) is skipped; each
    remaining token is split on ':' and its name part kept.
    """
    tokens = lines[0].strip().split()[1:]
    return [token.split(':')[0] for token in tokens]
def get_fullurl_from_abbrev(name):
    """Return the full url address from an abbreviated name.

    :param name: the abbreviated page name inserted into the URL path
    :return: the full aitaotu.com URL string
    """
    # The unreachable `pass` after the return has been removed.
    return "http://www.aitaotu.com/guonei/%s.html" % name
import collections
def count_packages(budgets, all_manifests):
    """Returns packages that are missing, or present in multiple budgets."""
    package_count = collections.Counter()
    for budget in budgets:
        package_count.update(budget["packages"])
    more_than_once = [pkg for pkg, n in package_count.most_common() if n > 1]
    zero = [pkg for pkg in all_manifests if package_count[pkg] == 0]
    return more_than_once, zero
def calculateInfectiousOverTime(ts, infectiousStates):
    """
    Create a list of the number of infectious people over time
    :param ts: pandas dataframe with the entire outbreak timeseries
        (expects at least 'state', 'date' and 'total' columns)
    :param infectiousStates: compartments considered infectious
    :return: list of summed 'total' values, one entry per date
        (dates ordered by the groupby, i.e. sorted)
    """
    # Filter rows in infectious compartments, sum totals per date.
    return ts[ts.state.isin(infectiousStates)].groupby("date").sum().total.to_list()
import re
def archive_groups(inp):
    """Create a list of related Let's Encrypt archive-like certificate files.
    For a given input that is a list of archive-like certificate files
    (e.g. 'cert1.pem', 'cert2.pem' etc.), return a list of lists, where the
    inner lists are those files grouped by the number in the file name.
    For example, for an input list:
    [ cert1.pem, chain1.pem, cert2.pem, chain2.pem, cert3.pem ],
    return:
    [ [cert1.pem, chain1.pem], [cert2.pem, chain2.pem], [cert3.pem] ].
    If the input is a list of live-like files (e.g. 'cert.pem', 'chain.pem'
    etc.), where there is no number in the file name, then return an empty
    list.
    Args:
        inp (list(pathlib.Path)): list of absolute paths of files in a
            common directory (usually a Let's Encrypt live or archive folder,
            but can be any folder).
    Returns:
        list(list(pathlib.Path)): list of lists if the input, grouped by the
            number in the file name.
    """
    # Collect the distinct numeric suffixes, preserving first-seen order.
    # NOTE(review): greedy \w+ means group(1) captures only the final digit
    # run's tail (e.g. 'cert12.pem' -> '2'), and the per-number match below
    # ('\w+2\.pem') would also match 'cert2.pem' — presumably fine for
    # Let's Encrypt's small sequential numbering; verify for >9 archives.
    nums = []
    for c in inp:
        m = re.match(r'\w+(\d+)\.pem$', c.name)
        if m:
            if m.group(1) not in nums:
                nums += [ m.group(1) ]
    # One inner list per number, in the order numbers were first seen.
    return [ [ c for c in inp if re.match(r'\w+{}\.pem$'.format(n), c.name) ]
             for n in nums ]
import functools
def has_default_decorator(decorator_factory):
    """A meta-decorator for decorator factories.
    This decorator of decorators allows a decorator factory to be used
    as a normal decorator (without calling syntax).
    A single non-keyword argument (with no keyword arguments) will be
    considered the object to be decorated. If more than one argument
    is passed, or any keyword arguments are passed, all arguments will
    be passed to the decorator factory.
    To treat a single argument as an argument to the decorator factory,
    pass the keyword-only argument "lonely_argument=True". It will
    default to False.
    """
    @functools.wraps(decorator_factory)
    def wrapper(*args, **kwargs):
        # Pop the control flag so it is never forwarded to the factory.
        single_argument = kwargs.pop("lonely_argument", False)
        if not single_argument and len(args) == 1:
            # Bare-decorator usage: build the decorator with defaults and
            # apply it directly to the single decorated object.
            return decorator_factory()(*args)
        # Factory usage: forward everything to the decorator factory.
        return decorator_factory(*args, **kwargs)
    return wrapper
from torch import randperm
from torch._utils import _accumulate
def split_datasource(datasource, lengths):
    """
    Split a datasource into non-overlapping new datasources of given lengths.

    :param datasource: indexable sequence to split
    :param lengths: sequence of split sizes; must sum to len(datasource)
    :return: list of lists, one per requested length, each holding the
        items at a disjoint set of randomly permuted indices
    :raises ValueError: if sum(lengths) != len(datasource)
    """
    if sum(lengths) != len(datasource):
        raise ValueError("Sum of input lengths does not equal the length of the input datasource")
    # Random permutation of all indices; each split takes a contiguous
    # slice [offset - length, offset) of the permuted index list.
    indices = randperm(sum(lengths)).tolist()
    return [[datasource[i] for i in indices[offset - length: offset]]
            for offset, length in zip(_accumulate(lengths), lengths)]
def get_source_pos(snirf):
    """Returns a list of the position of each source in the SNIRF file."""
    pos_arr = snirf["nirs"]["probe"]["sourcePos3D"][:]
    # Iterating the array yields one row (position) per source.
    return list(pos_arr)
def intstr_to_intlist(string):
    """Given a string e.g. '311 9 1334 635 6192 56 639', returns as a list of integers"""
    return list(map(int, string.split()))
def _Backward3b_P_hs(h, s):
    """Backward equation for region 3b, P=f(h,s)
    Parameters
    ----------
    h : float
        Specific enthalpy [kJ/kg]
    s : float
        Specific entropy [kJ/kgK]
    Returns
    -------
    P : float
        Pressure [MPa]
    References
    ----------
    IAPWS, Revised Supplementary Release on Backward Equations p(h,s) for
    Region 3, Equations as a Function of h and s for the Region Boundaries, and
    an Equation Tsat(h,s) for Region 4 of the IAPWS Industrial Formulation 1997
    for the Thermodynamic Properties of Water and Steam,
    http://www.iapws.org/relguide/Supp-phs3-2014.pdf. Eq 1
    Examples
    --------
    >>> _Backward3b_P_hs(2400,4.7)
    63.63924887
    >>> _Backward3b_P_hs(2600,5.1)
    34.34999263
    >>> _Backward3b_P_hs(2700,5.0)
    88.39043281
    """
    # Exponent and coefficient tables from Table 3 of the IAPWS supplementary
    # release referenced above; do not modify.
    I = [-12, -12, -12, -12, -12, -10, -10, -10, -10, -8, -8, -6, -6, -6, -6,
         -5, -4, -4, -4, -3, -3, -3, -3, -2, -2, -1, 0, 2, 2, 5, 6, 8, 10, 14,
         14]
    J = [2, 10, 12, 14, 20, 2, 10, 14, 18, 2, 8, 2, 6, 7, 8, 10, 4, 5, 8, 1, 3,
         5, 6, 0, 1, 0, 3, 0, 1, 0, 1, 1, 1, 3, 7]
    n = [0.125244360717979e-12, -0.126599322553713e-1, 0.506878030140626e1,
         0.317847171154202e2, -0.391041161399932e6, -0.975733406392044e-10,
         -0.186312419488279e2, 0.510973543414101e3, 0.373847005822362e6,
         0.299804024666572e-7, 0.200544393820342e2, -0.498030487662829e-5,
         -0.102301806360030e2, 0.552819126990325e2, -0.206211367510878e3,
         -0.794012232324823e4, 0.782248472028153e1, -0.586544326902468e2,
         0.355073647696481e4, -0.115303107290162e-3, -0.175092403171802e1,
         0.257981687748160e3, -0.727048374179467e3, 0.121644822609198e-3,
         0.393137871762692e-1, 0.704181005909296e-2, -0.829108200698110e2,
         -0.265178818131250, 0.137531682453991e2, -0.522394090753046e2,
         0.240556298941048e4, -0.227361631268929e5, 0.890746343932567e5,
         -0.239234565822486e8, 0.568795808129714e10]
    # Reduced (dimensionless) enthalpy and entropy; reducing values h*=2800
    # kJ/kg and s*=5.3 kJ/(kg K) per the release.
    nu = h/2800
    sigma = s/5.3
    # Polynomial sum of Eq. 1; the result reduces the pressure p*=16.6 MPa.
    suma = 0
    for i, j, ni in zip(I, J, n):
        suma += ni * (nu-0.681)**i * (sigma-0.792)**j
    return 16.6/suma
def get_line_mode(day_range):
    """
    Returns "line" if day range is less than a
    certain threshold, otherwise "spline". This
    prevents the charts from doing spline interpolation
    when heavily zoomed-in, which looks really bad.
    """
    start, end = day_range[0], day_range[1]
    return "spline" if end - start > 90 else "line"
import string
import random
def generate_filename(size=10, chars=string.ascii_uppercase + string.digits, extension='png'):
    """Creates random filename
    Args:
        size: length of the filename part, without dot and extention
        chars: character range to draw random characters from
        extension: extension to be added to the returned filenam
    Returns:
        random filame with extension
    """
    stem = ''.join(random.choice(chars) for _ in range(size))
    return f"{stem}.{extension}"
def scm_url_schemes(terse=False):
    """
    Definition of URL schemes supported by both frontend and scheduler.
    NOTE: only git URLs in the following formats are supported atm:
    git://
    git+http://
    git+https://
    git+rsync://
    http://
    https://
    file://
    :param terse=False: Whether to return terse list of unique URL schemes
        even without the "://".
    """
    scm_types = {
        "git": (
            "git://",
            "git+http://",
            "git+https://",
            "git+rsync://",
            "http://",
            "https://",
            "file://",
        )
    }
    if not terse:
        return scm_types
    # Terse form: strip the trailing "://" and deduplicate across SCM types.
    # Iterate .values() directly — the keys were never used here.
    scheme_list = []
    for scm_schemes in scm_types.values():
        scheme_list.extend(scheme[:-3] for scheme in scm_schemes)
    return list(set(scheme_list))
import math
def simpFDI(temp, humid, wind, df):
    """
    Description
    -----------
    Calculates the Macarthur FDI via a simple formula.
    Parameters
    ----------
    temp : float
        Air temperature (degrees C — presumably; verify against reference).
    humid : float
        Relative humidity (%).
    wind : float
        Wind speed (km/h — presumably; verify against reference).
    df : integer
        Drought factor; +0.001 below guards log(0) when df == 0.
    Returns
    -------
    FDI : float
    """
    # Coefficients match the McArthur Mk5 meter approximation — NOTE(review):
    # confirm the leading factor of 2 against the cited formulation.
    return 2*(math.exp((0.987*math.log(df+0.001))-.45-(.0345*humid)+(.0338*temp)+(.0234*wind)))
import requests
def getVideoStats(self, id):
    """Returns stats of a video.

    :param self: object exposing a ``baseUrl`` attribute (trailing slash
        expected, since the path is appended directly)
    :param id: video identifier passed as the ``id`` query parameter
    :return: decoded JSON response from the stats endpoint
    """
    # Network call; raises requests exceptions on connection failure and
    # ValueError/JSONDecodeError if the response body is not JSON.
    res = requests.get(self.baseUrl + "api/v1/stats", params={"id": id})
    return res.json()
def group_with_decoys(peptides, proteins):
    """Retrieve the protein group in the case where the FASTA has decoys.
    Parameters
    ----------
    peptides : pandas.DataFrame
        The peptide dataframe (must contain a "stripped sequence" column).
    proteins : a Proteins object
        Must expose a ``peptide_map`` dict-like mapping stripped sequences
        to protein groups.
    Returns
    -------
    pandas.Series
        The protein group for each peptide; NaN/None where the sequence is
        not present in the peptide map (dict.get default).
    """
    return peptides["stripped sequence"].map(proteins.peptide_map.get)
def format_detections(num_images, pre_format_dict):
    """Transpose Dict{List} into List[Dict]: one dict per image index."""
    # Every per-key list must have one entry per image.
    for values in pre_format_dict.values():
        assert len(values) == num_images
    return [
        {key: pre_format_dict[key][i] for key in pre_format_dict}
        for i in range(num_images)
    ]
def prep_data(data):
    """
    Method to drop the following columns,
    - is_reliable, since that's the y-values we aim to predict
    - lat, lng and vic coordinates since we're already getting the env data from those
    :param data: dataset to be prepped for balancing
    :return: dataset and the reliability values
    """
    label_and_coords = ["is_reliable", "longitude", "latitude", "vic_x", "vic_y"]
    features = data.drop(columns=label_and_coords)
    return features, data.is_reliable
from typing import OrderedDict
def _unflatten(dico):
    """Unflatten a flattened dictionary into a nested dictionary.

    Keys are dot-separated paths (e.g. "a.b.c"); each intermediate segment
    becomes a nested OrderedDict. A segment written as "[N]" is converted
    to the integer N and used as an (integer) dict key — it still indexes
    an OrderedDict, not a list.
    """
    new_dico = OrderedDict()
    for full_k, v in dico.items():
        full_k = full_k.split('.')
        node = new_dico
        # Walk/create intermediate nodes for all but the last segment.
        for k in full_k[:-1]:
            if k.startswith('[') and k.endswith(']'):
                # "[3]" -> integer key 3.
                k = int(k[1:-1])
            if k not in node:
                node[k] = OrderedDict()
            node = node[k]
        # NOTE(review): the final segment is NOT bracket-converted — a
        # trailing "[N]" stays a string key; confirm this is intended.
        node[full_k[-1]] = v
    return new_dico
def get_mode(size, numbers):
    """
    Gets the mode for N elements in an array.

    Bug fix: the original only counted *consecutive* runs (it never updated
    `prev` when the element changed), so it returned wrong results for
    unsorted input, and it crashed on an empty list. This version counts
    all occurrences; ties go to the element that first reaches the maximum
    count (i.e. earliest first occurrence among tied elements).

    Input:
        - size: kept for interface compatibility (unused)
        - numbers (array[float]): list of elements to calculate the mode
    Returns:
        - res (float): mode of the elements, or -1 for an empty list
    """
    counts = {}
    for element in numbers:
        counts[element] = counts.get(element, 0) + 1
    # dict preserves insertion order, so max() breaks ties by first occurrence.
    return max(counts, key=counts.get, default=-1)
def HasProperty(entity, property_name):
    """Returns whether `entity` has a property by the provided name.

    Checks the entity's private ``_properties`` mapping directly (hence
    the pylint suppression); presumably an ndb/datastore entity — verify.
    """
    return property_name in entity._properties  # pylint: disable=protected-access
from collections import ChainMap
def sorted_union_of_datas(*dicts):
    """Returns the union of the dicts, sorted (reversed) by keys.

    Earlier dicts take priority for duplicate keys (ChainMap semantics).
    """
    merged = ChainMap(*dicts)
    keys_descending = sorted(merged, reverse=True)
    return {key: merged[key] for key in keys_descending}
def env_vars_from_env_config(env_config):
    """Generate a dict suitable for updating os.environ to reflect app config.
    This function only returns a dict and does not update os.environ directly.
    Args:
        env_config: The app configuration as generated by
            vmconfig.BuildVmAppengineEnvConfig()
    Returns:
        A dict of strings suitable for e.g. `os.environ.update(values)`.
    """
    app_id = f"{env_config.partition}~{env_config.appid}"
    version_id = f"{env_config.major_version}.{env_config.minor_version}"
    return {
        'SERVER_SOFTWARE': env_config.server_software,
        'APPENGINE_RUNTIME': 'python27',
        'APPLICATION_ID': app_id,
        'INSTANCE_ID': env_config.instance,
        'BACKEND_ID': env_config.major_version,
        'CURRENT_MODULE_ID': env_config.module,
        'CURRENT_VERSION_ID': version_id,
        'DEFAULT_TICKET': env_config.default_ticket,
    }
import typing
import shutil
def executable_is_available(
    executable_name: typing.Union[str, typing.List[str], typing.Tuple[str]]
) -> bool:
    """Check if an executable is available in execution environment.

    :param executable_name: List or Tuple of command names, or a single
        command name; for a collection, any one match suffices.
    :return: `True` if the exec if found, `False` otherwize
    """
    # The stray debug print() in the loop has been removed.
    if isinstance(executable_name, (list, tuple)):
        return any(shutil.which(name) is not None for name in executable_name)
    return shutil.which(executable_name) is not None
def personal_top_three(scores):
    """
    Get the top 3 scores.

    :param scores list - List of scores
    :return list - Best 3 scores, descending. Fewer are returned when the
        input has fewer than 3 elements.

    Fix: uses sorted() instead of list.sort(), so the caller's list is no
    longer mutated as a side effect.
    """
    return sorted(scores, reverse=True)[:3]
import re
def format_msg(msg):
    """
    Format a message from verbose to delete extra spaces and carriage return.
    Args:
        msg (str): Message to format.
    Returns:
        str: Formatted message.
    """
    # Raw string fixes the invalid "\s" escape in the old non-raw pattern
    # (a DeprecationWarning today, a SyntaxError in future Python).
    return re.sub(r"\n\s+", " ", msg)
def check_multistage_dockerfile(dfobj):
    """Given a dockerfile object, return the index(es) of FROM line(s)
    in the dfobj structure."""
    return [
        index
        for index, entry in enumerate(dfobj.structure)
        if entry['instruction'] == 'FROM'
    ]
def _resample_params(N, samples):
"""Decide whether to do permutations or random resampling
Parameters
----------
N : int
Number of observations.
samples : int
``samples`` parameter (number of resampling iterations, or < 0 to
sample all permutations).
Returns
-------
actual_n_samples : int
Adapted number of resamplings that will be done.
samples_param : int
Samples parameter for the resample function (-1 to do all permutations,
otherwise same as n_samples).
"""
n_perm = 2 ** N
if n_perm - 1 <= samples:
samples = -1
if samples < 0:
n_samples = n_perm - 1
else:
n_samples = samples
return n_samples, samples | f1493aa3409cef023d05c9e74951d59f481b11bc | 121,021 |
def dsname(exp='xpptut15', run='0001'):
    """Returns (str) control file name, e.g. 'exp=xpptut15:run=1' for (str) exp and (str) of (int) run
    """
    if isinstance(run, str):
        # String runs get their leading zeros stripped.
        return 'exp=%s:run=%s' % (exp, run.lstrip('0'))
    if isinstance(run, int):
        return 'exp=%s:run=%d' % (exp, run)
    return None
def get_Q_max_hs(q_GU_rtd):
    """Maximum hourly heating output of the heat source unit (Eq. 22).

    Args:
        q_GU_rtd (float): Rated heating capacity of the gas unit (W)

    Returns:
        float: Maximum heating output of the heat source unit per hour (MJ/h)
    """
    # W * 3600 s/h = J/h; * 10^-6 converts J to MJ.
    Q_max_hs = q_GU_rtd * 3600 * 10 ** (-6)
    return Q_max_hs
import torch
def get_mesh_grid(image_height, image_width, image_channels=3):
    """Generates 2D and 3D mesh grid of image indices.
    Works with non square image shapes based on image height and width.
    For image channels set to 1 it generates values from -1 to 1 based on image (height, width).
    For image channels set to 3 it generates values from -1 to 1 based on image (height, width, channels).
    Args:
        image_height: Input image height
        image_width: Input image width
        image_channels: Input image channels. Only RGB or Greyscale allowed.
    Returns:
        2D or 3D mesh grid based on number of image_channels.
        Shape is (H*W*C, 3) for 3 channels, (H*W, 2) for 1 channel.
    Raises:
        Exception unsupported image channels.
    """
    if image_channels == 3:
        # One evenly spaced [-1, 1] axis per dimension (H, W, C).
        t1 = tuple([torch.linspace(-1, 1, steps=image_height)])
        t2 = tuple([torch.linspace(-1, 1, steps=image_width)])
        t3 = tuple([torch.linspace(-1, 1, steps=image_channels)])
        # meshgrid + stack yields coordinates along the last dim; then
        # flatten to one (h, w, c) coordinate triple per row.
        mesh_grid = torch.stack(torch.meshgrid(*t1, *t2, *t3), dim=-1)
        mesh_grid = mesh_grid.reshape(-1, image_channels)
        return mesh_grid
    elif image_channels == 1:
        t1 = tuple([torch.linspace(-1, 1, steps=image_height)])
        t2 = tuple([torch.linspace(-1, 1, steps=image_width)])
        mesh_grid = torch.stack(torch.meshgrid(*t1, *t2), dim=-1)
        # One (h, w) coordinate pair per row.
        mesh_grid = mesh_grid.reshape(-1, 2)
        return mesh_grid
    else:
        raise Exception(F'{image_channels} not allowed try 1 or 3.')
def cobb_douglas_mpk(k, alpha, **params):
    """Marginal product of capital with Cobb-Douglas production.

    MPK = alpha * k^(alpha - 1); extra keyword params are accepted and ignored.
    """
    marginal_product = alpha * k ** (alpha - 1)
    return marginal_product
def first_instance(objs, klass):
    """Return the first object in the list that is an instance of a class,
    or None when no such object exists."""
    return next((obj for obj in objs if isinstance(obj, klass)), None)
from secrets import randbits
from sympy import isprime
def get_prime(prime_length:int) -> int:
    """
    Returns a prime number. Doing it this way to require less modules.
    :param prime_length: The length in bits that the prime has to be.
    :return: A prime integer.
    """
    # Rejection sampling: draw random candidates until one is prime.
    # NOTE(review): randbits(n) may return values with fewer than n
    # significant bits (top bit not forced to 1) — confirm if an exact
    # bit length is required.
    num = randbits(prime_length)
    while not isprime(num):
        num = randbits(prime_length)
    return num
def length(tree):
    """
    Count the total number of the tree nodes.
    :param tree: a tree node (or None/falsy for an empty subtree)
    :return: the total of nodes in the subtree
    """
    if not tree:
        return 0
    return 1 + length(tree.left) + length(tree.right)
def truncate_text(text: str, width: int, end: str = "…") -> str:
    """Truncate a text not based on words/whitespaces
    Otherwise, we could use textwrap.shorten.
    Args:
        text: The text to be truncated
        width: The max width of the the truncated text
        end: The end string of the truncated text
    Returns:
        The truncated text with end appended.
    """
    if len(text) > width:
        keep = width - len(end)
        return text[:keep] + end
    return text
import yaml
def get_config(config_file):
    """Get configuration.

    :param config_file: path to a YAML configuration file
    :return: the parsed configuration (typically a dict)
    """
    # SafeLoader avoids arbitrary object construction from untrusted YAML.
    with open(config_file, 'r') as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
    return cfg
def getFileSet(source, logErrors):
    """
    Get set of files to extract errors, containing original source file and
    any files mentioned in the errors
    :param source: source file that was compiled
    :parma logErrors: list of error tuples from the log
    """
    return {source} | {err.file for err in logErrors}
def _make_parnames(parameters):
"""Create list with unambigious parameter names"""
return [
"par_{:03d}_{}".format(idx, par.name)
for idx, par in enumerate(parameters.parameters)
] | b192f2e13fd4407cba86362156adde5e2b2182ef | 121,046 |
import io
import errno
def value_read(filename, default=0):
    """Read an integer value from a file.
    :param ``str`` filename:
        File to read from.
    :param ``int`` default:
        Value to return in case `filename` doesn't exist.
    :returns ``int``:
        Value read or default value.
    """
    try:
        with io.open(filename, 'r') as f:
            value = f.readline()
    except IOError as err:
        # Fix: compare errno with `==`, not `is` — identity comparison on
        # ints only worked by accident of CPython small-int caching.
        if err.errno == errno.ENOENT:
            value = default
        else:
            raise
    return int(value)
def _get_returner(returner_types):
"""
Helper to iterate over retuerner_types and pick the first one
"""
for returner in returner_types:
if returner:
return returner | efc7e4cb7ca7812ddc0755ec20182db544472b54 | 121,049 |
def scale(val, src, dst):
    """
    :param val: Given input value for scaling
    :param src: Initial input value's Min Max Range pass in as tuple of two (Min, Max)
    :param dst: Target output value's Min Max Range pass in as tuple of two (Min, Max)
    :return: Return mapped scaling from target's Min Max range
    """
    src_lo, src_hi = src
    dst_lo, dst_hi = dst
    ratio = float(val - src_lo) / float(src_hi - src_lo)
    return ratio * (dst_hi - dst_lo) + dst_lo
import ast
def isidentifier(ident):
    """Determines, if string is valid Python identifier."""
    # Smoke test — if it's not string, then it's not identifier, but we don't
    # want to just silence exception. It's better to fail fast.
    if not isinstance(ident, str):
        raise TypeError("expected str, but got {!r}".format(type(ident)))
    # Resulting AST of simple identifier is <Module [<Expr <Name "foo">>]>
    try:
        root = ast.parse(ident)
    except SyntaxError:
        return False
    return (
        isinstance(root, ast.Module)
        and len(root.body) == 1
        and isinstance(root.body[0], ast.Expr)
        and isinstance(root.body[0].value, ast.Name)
        and root.body[0].value.id == ident
    )
def prompt_chars(prompt: str, chars: str) -> str:
    """Prompt the user with a multiple-choice question.

    Re-prompts until the (lowercased) response is one of the characters
    in ``chars``; blocks on interactive input.
    """
    said = ""
    while said not in list(chars):
        said = input(f"{prompt} [{chars}]> ").lower()
    return said
def normalise_card_fields(cards):
    """
    Adds XSOAR-like variations of card fields.

    Each card dict gains a capitalized alias for every known field it
    carries; cards are modified in place and the list is returned.
    """
    field_aliases = {
        "id": "ID",
        "name": "Name",
        "url": "URL",
        "due": "Due",
        "labels": "Labels"
    }
    for card in cards:
        card.update({alias: card[field]
                     for field, alias in field_aliases.items()
                     if field in card})
    return cards
import hashlib
def check_password(hashed_password, user_password):
    """This function checks a password against a SHA256:salt entry"""
    expected_digest, salt = hashed_password.split(':')
    actual_digest = hashlib.sha256((salt + user_password).encode()).hexdigest()
    return expected_digest == actual_digest
def snake_to_camel(string: str) -> str:
    """Convert from snake_case to camelCase."""
    first, *rest = string.split("_")
    return first + "".join(word.capitalize() for word in rest)
def qtile(a, ir):
    """
    This routine returns the ith entry of vector after sorting in
    descending order.
    Parameters
    ----------
    a : list
        Input data distribution.
    ir : int
        Desired percentile.
    Returns
    -------
    qtile : float
        Percentile value.
    """
    descending = sorted(a, reverse=True)
    return descending[ir]
def dict_product(a, b):
    """Pointwise-multiply the values in two dicts with identical sets of
    keys.
    """
    assert set(a.keys()) == set(b.keys())
    return {key: a[key] * b[key] for key in a}
import math
def CRRA(cons, gamma):
    """
    CRRA utility function.
    :params: cons: consumption level.
    :params: gamma: relative risk aversion.
    :return: util: utility level.
    """
    # gamma == 1 is the log-utility limit of the CRRA family.
    if gamma == 1:
        return math.log(cons)
    return cons ** (1 - gamma) / (1 - gamma)
def dot(kernelA, kernelB):
    """Dot product between kernels: sum of the elementwise product.

    Args:
        kernelA: (N, N) tensor
        kernelB: (N, N) tensor
    """
    elementwise = kernelA * kernelB
    return elementwise.sum()
def grab_data(array, index):
    """
    This is a helper function for parsing position output.
    :param array: any array that's items are lists
    :param index: desired index of the sublists
    :return: subset: a list of all of the values (as floats) at the given
        index in each sublist
    """
    return [float(row[index]) for row in array]
def _CommonChecks(input_api, output_api):
    """Checks common to both upload and commit.

    Runs the canned pan-project presubmit checks with a relaxed (match
    anything) license header and an 800-char line-length limit.
    """
    # We should figure out what license checks we actually want to use.
    license_header = r'.*'
    results = []
    results.extend(input_api.canned_checks.PanProjectChecks(
        input_api, output_api, maxlen=800, license_header=license_header))
    return results
import requests
def get_user_name(graph_bearer_token):
    """ Get the username of the user who owns the bearer token.

    Calls the (legacy) Azure AD Graph `/me` endpoint and returns the
    `userPrincipalName` field; raises KeyError if the response lacks it
    (e.g. on an error payload) and requests exceptions on network failure.
    """
    user_info = requests.get(
        'https://graph.windows.net/me?api-version=1.6',
        headers={"Authorization": "Bearer {}".format(graph_bearer_token)},
        data={}
    ).json()
    return user_info['userPrincipalName']
from typing import List
from typing import Any
def unique(lst: List[Any]) -> List[Any]:
    """Uniquify a list while preserving first-occurrence order."""
    seen = set()
    result = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def sync_angle(*, snr, preamble_duration=9.3e-6, bandwidth=1.2e6, **kwargs):
    """
    Computes the angle of de-synchronisation.
    :param snr: an SNR of the received signal
    :param preamble_duration: the duration of PHY-preamble in seconds
    :param bandwidth: the bandwidth of the signal in herzs
    :param kwargs: ignored extra keyword arguments
    :return: the angle of de-synchronisation
    """
    product = snr * preamble_duration * bandwidth
    return product ** -0.5
import torch
def remove_double_edges(edge_index):
    """In undirected graphs you have both (i, j) and (j, i) for every edge.
    This method removes (j, i)."""
    src, dst = edge_index
    # Keep only the canonical orientation where source index <= target index.
    keep = src <= dst
    return torch.stack([src[keep], dst[keep]], dim=0).long()
import pathlib
def changelog_d(temp_dir) -> pathlib.Path:
    """Make a changelog.d directory, and return a Path() to it."""
    directory = temp_dir / "changelog.d"
    directory.mkdir()
    return directory
def getBin(value: int):
    """Converts the given integer into string bit representation."""
    # format(x, "b") is equivalent to "{0:b}".format(x).
    return format(value, "b")
def datetime_to_str(time):
    """Format a date/datetime object as a YYYY-MM-DD string.

    :param time: object supporting strftime (datetime.date / datetime.datetime)
    :return: the formatted date string
    """
    date_format = "%Y-%m-%d"
    return time.strftime(date_format)
def to_kelvin(temp, units):
    """Simple utility to convert temperature to units of Kelvin.
    :param float temp: Initial temperature in `units`
    :param str units: Units of `temp`
    :return: Converted temperature in Kelvin
    :rtype: float
    """
    # Convert according to the unit system; Kelvin passes through untouched.
    if units == 'K':
        converted = temp
    elif units == 'C':
        converted = temp + 273.15
    elif units == 'F':
        converted = (temp + 459.67) * (5.0 / 9.0)
    else:
        raise KeyError('Temperature units not recognized: ' + units)
    # A negative Kelvin value is physically impossible and signals bad input.
    if converted < 0:
        raise ValueError('Temperature in Kelvin < zero: ' + str(converted))
    return converted
def _format_time_wtlma(time):
"""
Formats a time string for WTLMA functions
Parameters
----------
time : str
Format: HHMM
Returns
-------
str
Format: HH:MM
"""
hr = time[:2]
mn = time[2:]
return '{}:{}'.format(hr, mn) | c00d56421f05474d0ff94317548e88289a2e0aed | 121,097 |
def coco_annfile(dir, subset, year=2014):
    """Construct coco annotation file."""
    # Path layout: <dir>/annotations/instances_<subset><year>.json
    annfile = f'{dir}/annotations/instances_{subset}{year}.json'
    print(annfile)
    return annfile
from typing import List
import itertools
def changed_token_count(original_tokens: List[List[str]],
                        adversarial_tokens: List[List[str]]) -> List[int]:
    """Given two batches of lists of tokens, this finds per example changes.

    Arguments:
      original_tokens: A batch of lists containing the original sentences' tokens.
      adversarial_tokens: A batch of lists containing the adversarial sentences'
        tokens.

    Returns:
      A list containing how many tokens per example pair are unequal.
    """
    counts = []
    for orig_sent, adv_sent in zip(original_tokens, adversarial_tokens):
        # zip_longest pads the shorter sentence with '' so length differences
        # count as changed tokens too.
        pairs = itertools.zip_longest(orig_sent, adv_sent, fillvalue='')
        counts.append(sum(1 for a, b in pairs if a != b))
    return counts
import logging
import yaml
def ProcessInputConfig(omnicache, input_config):
    """Adds a list of remotes from a YAML config file to the omnicache"""
    logging.info("Adding remotes from {0}".format(input_config))
    with open(input_config) as config_file:
        parsed = yaml.safe_load(config_file)
    if "remotes" in parsed:
        for remote in parsed["remotes"]:
            # dict.get() used here to set name=None if no name specified in input cfg file.
            omnicache.AddRemote(remote["url"], name=remote.get("name"))
    return 0
from typing import Union
import torch
from typing import Tuple
from typing import List
def reset_backward_rnn_state(
    states: Union[torch.Tensor, Tuple, List]
) -> Union[torch.Tensor, Tuple, List]:
    """Set backward BRNN states to zeroes.

    Args:
        states: RNN states (a tensor, or a list/tuple of tensors)

    Returns:
        states: RNN states with backward set to zeroes (modified in place)
    """
    # Normalize to a sequence so both the single-tensor and the
    # list/tuple cases share one code path.
    targets = states if isinstance(states, (list, tuple)) else [states]
    for tensor in targets:
        # Odd-numbered slots along dim 0 hold the backward-direction states.
        tensor[1::2] = 0.0
    return states
def create_is_missing(df, cols):
    """
    Creates a new column with 1 in the places where the 'cols' have missing
    values.

    Args:
        df (pandas.DataFrame): Any dataframe with missing data.
        cols (list(str)): The names of the columns to use.

    Returns:
        pandas.DataFrame: The same as df but with the added columns.
    """
    renamed = {name: name + '_missing' for name in cols}
    # 0/1 indicator columns: 1 where the original value is null.
    indicators = df[cols].isnull().astype(int).rename(columns=renamed)
    return df.join(indicators)
def _ogroups_to_odict(ogroups, swap_order=False):
"""
From a list of orthogroups, return a dict from sp1 prots to a set of sp2
prots. We want a dictionary from the first species in the file to the second,
unless swap_order is True.
"""
sp1col = 1 if swap_order else 0
sp2col = 0 if swap_order else 1
orthdict = dict([(p1,set([p2 for p2 in og[sp2col]])) for og in ogroups for
p1 in og[sp1col]])
return orthdict | 2cf79d4aee3fa665f82cca7d747f738db4923116 | 121,113 |
import struct
def fdt32_to_cpu(val):
    """Convert a device tree cell to an integer

    Args:
        Value to convert (4-character string representing the cell value)

    Return:
        A native-endian integer value
    """
    # Device tree cells are stored big-endian ('>I' = big-endian uint32).
    (cell,) = struct.unpack('>I', val)
    return cell
def remove_stop_words(tokens: list, stop_words: list) -> list:
    """
    Removes stop words
    :param tokens: a list of tokens
    :param stop_words: a list of stop words
    :return: a list of tokens without stop words
    e.g. tokens = ['the', 'weather', 'is', 'sunny', 'the', 'man', 'is', 'happy']
    stop_words = ['the', 'is']
    --> ['weather', 'sunny', 'man', 'happy']
    """
    if not isinstance(tokens, list):
        return []
    # Set membership is O(1) per token vs O(len(stop_words)) for a list scan.
    banned = set(stop_words)
    return [word for word in tokens if word not in banned]
def decode_bytes(data: bytes) -> str:
    """
    Decodes provided bytes using `utf-8` decoding, ignoring potential decoding errors.
    """
    return data.decode(encoding='utf-8', errors='ignore')
def get_words_by_start_time(transcript):
    """Merges punctuation with standard words since they don't have a start time,
    returns them in a handy map of start_time to word and confidence

    Args:
        transcript: Amazon Transcript JSON

    Returns:
        (dict): a map of start_time to word and confidence
    """
    words_by_time = {}
    items = transcript["results"]["items"]
    last = len(items) - 1
    for idx, entry in enumerate(items):
        # Only keep pronunciations; punctuation entries are merged below.
        if entry["type"] != "pronunciation":
            continue
        best = entry["alternatives"][0]
        text = best["content"]
        # Punctuation items carry no start_time; glue them onto the word before.
        if idx < last and items[idx + 1]["type"] == "punctuation":
            text += items[idx + 1]["alternatives"][0]["content"]
        words_by_time[entry["start_time"]] = {
            "content": text,
            "confidence": best["confidence"],
        }
    return words_by_time
from typing import Union
import json
def ask_json() -> Union[dict, list]:
    """
    Ask user a path to JSON file to navigate it.
    Return a JSON object from file entered by user.
    """
    print("Hello! This is JSON navigator. Please provide us with the path to .json file.")
    path = input("Please, enter a path to the file: ")
    # `with` guarantees the file handle is closed once parsing is done.
    with open(path, 'r') as source:
        return json.load(source)
def is_unique(obj, unique_names):
    """
    Checks is a unique name (producer + product) doesn't already exists.
    """
    producer = obj['producer'].lower()
    # '/' is normalized to a space so slashes don't affect the composite key.
    product = obj['product'].lower().replace('/', ' ')
    return producer + "_" + product not in unique_names
import textwrap
def box(text, decor='*', decor_x=None, decor_y=None,
        boxwidth=60, borderwidth=2):
    """Create a formatted textbox for highlighting important information"""
    # Fall back to the generic decoration for whichever axis wasn't given.
    decor_x = decor_x or decor
    decor_y = decor_y or decor
    # Guard against empty decorations when computing repeat counts.
    x_unit = len(decor_x) or 1
    y_unit = len(decor_y) or 1
    x_repeat = int(boxwidth / x_unit)
    side_total = y_unit * 2
    textwidth = boxwidth - side_total - (borderwidth * 2)
    wrapped = textwrap.wrap(text, textwidth)
    body = "\n".join(
        "{decor_y}{text}{decor_y}".format(
            decor_y=decor_y,
            text=line.center(boxwidth - side_total))
        for line in wrapped)
    top_bottom = decor_x * x_repeat
    blank_row = "{decor_y}{spacer}{decor_y}".format(
        decor_y=decor_y, spacer=(" " * (boxwidth - side_total)))
    margin = ' ' * boxwidth
    result = "\n".join([
        margin, top_bottom, blank_row, body,
        blank_row, top_bottom, margin])
    print(result)
    return result
def nice_time(time):
    """ Format a time in seconds to a string like "5 minutes".
    """
    if time < 15:
        return 'moments'
    if time < 90:
        return '%d seconds' % time
    # (upper limit in seconds, divisor, unit label), smallest unit first.
    # Each limit is 1.5x the unit so e.g. "89 minutes" never shows as hours.
    scales = [
        (60 * 60 * 1.5, 60., 'minutes'),
        (24 * 60 * 60 * 1.5, 3600., 'hours'),
        (7 * 24 * 60 * 60 * 1.5, 86400., 'days'),
        (30 * 24 * 60 * 60 * 1.5, 604800., 'weeks'),
    ]
    for limit, divisor, label in scales:
        if time < limit:
            return '%d %s' % (time / divisor, label)
    return '%d months' % (time / 2592000.)
def bfldele(self, line="", lab="", **kwargs):
    """Deletes body force loads on a line.

    APDL Command: BFLDELE

    Parameters
    ----------
    line
        Line at which body load is to be deleted. If ALL, delete for all
        selected lines [LSEL]. A component name may also be substituted
        for LINE.

    lab
        Valid body load label. If ALL, use all appropriate labels. Load
        labels are listed under "Body Loads" in the input table for each
        element type in the Element Reference. See the BFL command for
        labels.

    Notes
    -----
    Deletes body force loads (and all corresponding finite element loads)
    for a specified line and label. Body loads may be defined on a line
    with the BFL command.

    Graphical picking is available only via the listed menu paths.

    This command is also valid in PREP7.
    """
    # Assemble the comma-separated APDL command and dispatch it.
    return self.run(f"BFLDELE,{line},{lab}", **kwargs)
def cast_nested_dict(d, original_type, new_type):
    """Cast items in a nested dict to a new type.

    Converts all leaves of type `original_type` to type `new_type`

    Args:
        d (dict): nested dict to cast
        original_type (type): old type to convert
        new_type (type): new type to apply to leaf nodes

    Returns:
        dict

    Examples:
        >>> cast_nested_dict({"hello": 0.70, "hey": {"sup": 1}}, float, int)
        {'hello': 0, 'hey': {'sup': 1}}
    """
    def _convert(value):
        # Dicts recurse; matching leaves are cast; everything else passes through.
        if isinstance(value, dict):
            return {key: _convert(val) for key, val in value.items()}
        if isinstance(value, original_type):
            return new_type(value)
        return value

    return {key: _convert(val) for key, val in d.items()}
def check_blacklist(wiki_title):
    """Check blacklist

    Returns True if the given title exists in the blacklist

    :param wiki_title: title string to look up
    :return: True if the title is listed in config/blacklist.cfg
    """
    # `with` ensures the file handle is closed (the original leaked it),
    # and a set gives O(1) membership lookup instead of a list scan.
    with open('config/blacklist.cfg') as blacklist_file:
        blacklist = {line.strip() for line in blacklist_file}
    return wiki_title in blacklist
def manhattan_heuristic(pos, problem):
    """ The Manhattan distance heuristic for a PositionSearchProblem.
    ((int, int), PositionSearchProblem) -> int
    """
    dx = pos[0] - problem.goal_pos[0]
    dy = pos[1] - problem.goal_pos[1]
    return abs(dx) + abs(dy)
def join_all(domain, *parts):
    """
    Join all url components.

    Example::

        >>> join_all("https://www.apple.com", "iphone")
        https://www.apple.com/iphone

    :param domain: Domain parts, example: https://www.python.org
    :param parts: Other parts, example: "/doc", "/py27"
    :return: url
    """
    # Drop a single trailing slash from the domain, matching the original.
    base = domain[:-1] if domain.endswith("/") else domain
    segments = [base]
    for part in parts:
        # Split each part on '/' and keep only non-blank pieces.
        segments.extend(piece for piece in part.split("/") if piece.strip())
    return "/".join(segments)
def compute_recall(confusionMatrix):
    """
    Compute recall based on a Confusion Matrix
    with prediction on rows
    and truth on columns.

    recall = true positive / (true positive + false negative)
    """
    n_classes = confusionMatrix.shape[0]
    recall = []
    for cls in range(n_classes):
        # Truth lives on the columns, so the column total is TP + FN for `cls`.
        column_total = sum(confusionMatrix[row, cls] for row in range(n_classes))
        true_positive = confusionMatrix[cls, cls]
        recall.append(true_positive / column_total)
    return recall
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.