content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
from datetime import datetime
def check_friday_rush(time: datetime, delivery_fee: float) -> float:
    """
    Apply the Friday-rush surcharge (15:00-18:59 on Fridays) to a delivery fee.
    ---
    Args:
        time (datetime): moment of the delivery
        delivery_fee (float): delivery fee before any rush surcharge
    Returns:
        delivery_fee (float): fee with the 10% Friday-rush surcharge when due
    """
    FRIDAY = 4  # datetime.weekday(): Monday == 0, so Friday == 4
    in_rush_window = 15 <= time.hour < 19
    if time.weekday() == FRIDAY and in_rush_window:
        return delivery_fee * 1.1
    return delivery_fee
|
d9c5f05fd2081fc9ddbc611c34e273ef7f6fe4f6
| 27,628
|
def build_characters_encoding(names):
    """Build integer codes for every character appearing in *names*.

    :param names: list of strings
    :return: (encoding, decoding, count) — encoding maps char -> int code,
             decoding maps code -> char (code 1 is reserved for 'START'),
             and count is the next unused code.
    """
    encoding = {}
    decoding = {1: 'START'}
    unique_chars = {ch for name in names for ch in name}
    next_code = 2  # codes 0 and 1 are reserved
    for ch in unique_chars:
        encoding[ch] = next_code
        decoding[next_code] = ch
        next_code += 1
    return encoding, decoding, next_code
|
067781fd1c2b5c89f6e33277eeb69f86decb1f9e
| 27,630
|
def cm22ft2(cmsq):
    """Convert an area from square centimetres to square feet (cm^2 -> ft^2)."""
    CM2_PER_FT2 = 929.0304  # (30.48 cm per foot) squared
    return cmsq / CM2_PER_FT2
|
539df64c2b19720b282f8e72e75e34dd41a4a782
| 27,632
|
def OR(logical_expression, *logical_expressions):
    """
    Returns True if any of the arguments is logically true, and false if all of the
    arguments are false.
    Same as `any([value1, value2, ...])`.
    >>> OR(1)
    True
    >>> OR(0)
    False
    >>> OR(1, 1)
    True
    >>> OR(0, 1)
    True
    >>> OR(0, 0)
    False
    >>> OR(0,False,0.0,"",None)
    False
    >>> OR(0,None,3,0)
    True
    """
    for value in (logical_expression,) + logical_expressions:
        if value:
            return True
    return False
|
4033822021dd336283b6edeebb00fd4b19e42f82
| 27,633
|
import warnings
def slab_filter(config, dask_dict):
    """
    Decide whether a slab passes the filters declared in the config yml.
    Args:
        config: dictionary of the config yaml
        dask_dict: a dictionary containing slab info
    Returns:
        boolean value (True -> retain slab, False -> reject slab)
    """
    keep = True
    for filter_name, threshold in config["slab_filters"].items():
        if threshold == "None":
            continue  # explicitly disabled filter
        if filter_name == "filter_by_object_size":
            keep = keep and dask_dict["slab_natoms"] <= threshold
        elif filter_name == "filter_by_max_miller_index":
            keep = keep and dask_dict["slab_max_miller_index"] <= threshold
        else:
            warnings.warn("Slab filter is not implemented: " + filter_name)
    return keep
|
1c0298da792cb691964626dd6987c16be9dc2255
| 27,635
|
def rescaleImage(image0, scaledFac):
    """Return *image0* scaled by *scaledFac* in both dimensions.

    ``image0`` is expected to provide wx.Image-like ``GetSize()`` and
    ``Scale(width, height)`` methods.
    """
    width, height = image0.GetSize()
    return image0.Scale(width * scaledFac, height * scaledFac)
|
735926d0b265fc8ebb57ed9898db88e31159b827
| 27,636
|
def get_checksum_type(checksum):
    """
    Return the checksum type (ad32 or md5).
    The given checksum can be either be a standard ad32 or md5 value, or a dictionary with the format
    { checksum_type: value } as defined in the `FileSpec` class.
    In case the checksum type cannot be identified, the function returns 'unknown'.
    :param checksum: checksum string or dictionary.
    :return: checksum type (string).
    """
    # isinstance() instead of type(x) == T: also accepts dict/str subclasses.
    if isinstance(checksum, dict):
        # the dictionary is assumed to only contain one key-value pair;
        # an empty dict yields the 'unknown' default.
        return next(iter(checksum), 'unknown')
    if isinstance(checksum, str):
        if len(checksum) == 8:
            return 'ad32'
        if len(checksum) == 32:
            return 'md5'
    return 'unknown'
|
fd907bf7020449505e989ce8cd1f40567227ae96
| 27,637
|
def get_modes(name):
    """Extract userpatch modes."""
    marker = name.find(': !')
    flags = ''
    if marker != -1:
        flags = name[marker + 3:]
        name = name[:marker]
    return name, {
        'direct_placement': 'P' in flags,
        'effect_quantity': 'C' in flags,
        'guard_state': 'G' in flags,
        'fixed_positions': 'F' in flags,
    }
|
1c6ddcf2e3205b9517962b1be79a42d0705db670
| 27,640
|
def _validate_translation_table_relations_dict(relations):
    """
    Parameters
    ----------
    relations : `dict` of (`str`, (`None`, `str`)) items
        Relations to validate.
    Returns
    -------
    validated_relations : `None`, `dict` of (`str`, `str`) items
        `None` when no non-empty key/value pair survives validation.
    Raises
    ------
    TypeError
        - If an item's structure is incorrect.
    """
    validated = None
    for key, value in relations.items():
        if not isinstance(key, str):
            raise TypeError(
                f'`relation` keys can be `str`, got {key.__class__.__name__}; {key!r}.'
            )
        if value is None:
            continue
        if not isinstance(value, str):
            raise TypeError(
                f'`relation` values can be `str`, got {value.__class__.__name__}; {value!r}.'
            )
        # empty keys / values are silently dropped
        if key and value:
            if validated is None:
                validated = {}
            validated[key] = value
    return validated
|
450aae70a221e9da611dd236f5440107a70639a2
| 27,642
|
import torch
def roty_cuda(t):
    """Build batched rotation matrices about the y-axis for the angles in `t`.

    NOTE(review): the indexing assumes `t` is 2-D (e.g. batch x num_angles) —
    confirm against callers.
    """
    cos_t = torch.cos(t)
    sin_t = torch.sin(t)
    rot = torch.zeros(*t.size(), 3, 3).to(t.device).float()
    rot[:, :, 0, 0] = cos_t
    rot[:, :, 0, 2] = sin_t
    rot[:, :, 1, 1] = 1.
    rot[:, :, 2, 0] = -sin_t
    rot[:, :, 2, 2] = cos_t
    return rot
|
528a5e0b4911b5e4a3b128439c46a0232eac8b10
| 27,643
|
def string_from_array(arr):
    """Encode an array as a string code.
    Parameters
    ----------
    arr : (N, k) array_like
        *Numpy* array.
    Returns
    -------
    str
        String code of an array.
    Examples
    --------
    >>> string_from_array(np.array([1, 0, 0]))
    '100'
    >>> string_from_array(np.array([[1,0], [3,4]]))
    '1034'
    """
    return ''.join(str(element) for element in arr.flat)
|
3c938656b8e078e9fb1e5f6eef00790f155cd801
| 27,644
|
def fibtest():
    """Return the sum of the even Fibonacci numbers not exceeding 4,000,000.

    The original loop tested the limit one step late (it generated the next
    term while checking the previous one) and only produced the right answer
    because the first term past 4,000,000 happens to be odd. Here the bound
    is checked on the term itself before it is accumulated.
    """
    limit = 4000000
    first, second = 1, 2
    total = 0
    while first <= limit:
        if first % 2 == 0:
            total += first
        first, second = second, first + second
    return total
|
700b5204c7e0114dcc17699e798c8ed6c645c401
| 27,649
|
def is_empty(dict_obj):
    """
    Query whether a dict is (effectively) empty.

    A dict counts as empty when it has no items or when every value is None.
    Any non-dict argument is never considered empty.

    :param dict_obj: the to-be-tested dictionary
    :return: True if it is empty, False if not empty
    """
    if not isinstance(dict_obj, dict):
        return False
    # all() is True for an empty dict, which matches the original's
    # explicit len(...) == 0 branch; otherwise every value must be None.
    return all(value is None for value in dict_obj.values())
|
4b20723f16bbcf49b7b437eb5221303eec1632b7
| 27,651
|
def get_unit_info(units):
    """Map a split file name onto its data variable type (runoff, AET, etc.).

    :param units: name-of-file parts; the first element selects the variable
    :type units: list
    :return: str; name of the variable, or "unknown"
    """
    variable_by_prefix = {
        "q": "Runoff",
        "avgchflow": "Streamflow",
        "aet": "Actual ET",
        "pet": "Potential ET",
    }
    return variable_by_prefix.get(units[0], "unknown")
|
35679ba9e0f8518c32572ee7cc46b54f07693c5c
| 27,653
|
def scanlist(testlist):
    """Process a testlist file: skip comment lines, strip whitespace, drop blanks."""
    selected = []
    for line in testlist:
        if line.startswith('#'):
            continue
        stripped = line.strip()
        if stripped:
            selected.append(stripped)
    return selected
|
4d5162e1dc46ead4e0d8beec9773e012dea3ef1f
| 27,654
|
import zipfile
def uncompress_mxl_to_xml(mxl_file):
    """An .mxl is a zip archive containing a manifest and the actual xml file;
    return the decoded contents of the single root-level .xml member."""
    with zipfile.ZipFile(mxl_file) as archive:
        root_xml_names = [
            member for member in archive.namelist()
            if "/" not in member and member.lower().endswith(".xml")
        ]
        if not root_xml_names:
            raise ValueError("Cannot find any xml file")
        if len(root_xml_names) > 1:
            raise ValueError("Found more than one xml in the root?")
        return archive.read(root_xml_names[0]).decode("utf-8")
|
8081b34ebfeed0ae0735a4710d8ac7ff1a653ef0
| 27,655
|
def remove_krm_group(apitools_collection_guess, krm_group):
    """Remove krm_group prefix from krm_kind.

    The original used a substring test (`in`) but always sliced from the
    front, so a group matching mid-string chopped off unrelated leading
    characters. Only strip when the group is actually a case-insensitive
    prefix, as the docstring promises.
    """
    if apitools_collection_guess.lower().startswith(krm_group.lower()):
        apitools_collection_guess = apitools_collection_guess[len(krm_group):]
    return apitools_collection_guess
|
6f69449f06d3c7c148cf9cbf909bae25b2602707
| 27,656
|
def color(fg=None, bg=None, style=None):
    """
    Returns an ANSI color code. If no arguments are specified,
    the reset code is returned.
    """
    parts = [str(code) for code in (style, fg, bg) if code is not None]
    fmt = ";".join(parts) if parts else "0"
    return "\x1b[{}m".format(fmt)
|
2ed7374042055ff3f1fd5e883ea0467a77ccc571
| 27,657
|
def get_inverted_graph(graph_edges):
    """
    >>> get_inverted_graph([(2, 47646), (3, 4)])
    [(47646, 2), (4, 3)]
    :param graph_edges: iterable of (src, dst) edges
    :return: list of edges with each direction reversed
    """
    return [(dst, src) for src, dst in graph_edges]
|
9ae75207760f985deace953640505dd6565da80e
| 27,660
|
def list_get(l, index, default):
    """
    >>> list_get([], 0, None)
    >>> list_get([], -1, 7)
    7
    >>> list_get(None, 0, None)
    Traceback (most recent call last):
    ...
    TypeError: 'NoneType' object is not subscriptable
    >>> list_get([1], 1, 9)
    9
    >>> list_get([1, 2, 3, 4], 2, 8)
    3
    """
    # Only IndexError falls back to the default; a non-subscriptable `l`
    # still raises TypeError (see the doctest above).
    try:
        value = l[index]
    except IndexError:
        return default
    return value
|
4f114d4b4fdd0268028adbe68edb07ebeda29fc7
| 27,661
|
def add_boilerplate(text: str) -> str:
    """Format text with boilerplate so readers know it's a bot
    Args:
        text (str): Text to add boilerplate to
    Return:
        str: Text with boilerplate
    Raises:
        TypeError: if *text* is not a str
    """
    if not isinstance(text, str):
        raise TypeError("text must be a str")
    source_code_on_github = (
        "[source code on GitHub](https://github.com/vogt4nick/datascience-bot)"
    )
    header = "_Bleep Bloop_. "
    footer = (
        "\n\n---\n\n"
        "I am a bot created by the r/datascience moderators. "
        f"I'm open source! You can review my {source_code_on_github}."
    )
    return header + text + footer
|
c1f53650d9a22e6976d269ec458a130bbf71eea8
| 27,662
|
def format_time(t: float) -> str:
    """
    Format a time duration as a readable H:MM:SS string.
    :param t: The duration in seconds.
    :return: A human readable string.
    """
    minutes, seconds = divmod(t, 60)
    hours, minutes = divmod(minutes, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
|
e31b595b37172360ce6c5ea00ecca029ddb37651
| 27,664
|
import numbers
def is_valid_med(med):
    """Returns True if value of *med* is valid as per RFC.
    According to RFC MED is a four octet non-negative integer.
    """
    if not isinstance(med, numbers.Integral):
        return False
    return 0 <= med <= (2 ** 32) - 1
|
6e80c1b4d48e257876ede826d98ceeea12d059d0
| 27,665
|
def check_if_toxin(ingredients):
    """Check whether any known toxic ingredient appears in *ingredients*."""
    toxic_ingredients = ('sodium nitrate', 'sodium benzoate', 'sodium oxide')
    for toxin in toxic_ingredients:
        if toxin in ingredients:
            return True
    return False
|
68996807ace863edb6466e333f6f03b3e04fce12
| 27,666
|
def sample_dqn_params(trial):
    """
    Sampler for DQN hyperparams.
    :param trial: (optuna.trial)
    :return: (dict)
    """
    # NOTE: suggest_* calls are kept in the original order so sampled
    # values stay reproducible for a given trial.
    hyperparams = {
        'gamma': trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999]),
        'learning_rate': trial.suggest_loguniform('lr', 1e-5, 1),
        'batch_size': trial.suggest_categorical('batch_size', [16, 32, 64, 100, 128, 256, 512]),
        'buffer_size': trial.suggest_categorical('buffer_size', [int(1e4), int(5e4), int(1e5), int(1e6)]),
        'exploration_final_eps': trial.suggest_uniform('exploration_final_eps', 0, 0.2),
        'exploration_fraction': trial.suggest_uniform('exploration_fraction', 0, 0.5),
        'target_update_interval': trial.suggest_categorical('target_update_interval', [1, 1000, 5000, 10000, 15000, 20000]),
        'learning_starts': trial.suggest_categorical('learning_starts', [0, 1000, 5000, 10000, 20000]),
    }
    train_freq = trial.suggest_categorical('train_freq', [1, 4, 8, 16, 128, 256, 1000])
    subsample_steps = trial.suggest_categorical('subsample_steps', [1, 2, 4, 8])
    arch_name = trial.suggest_categorical('net_arch', ["tiny", "small", "medium"])
    layer_sizes = {
        'tiny': [64],
        'small': [64, 64],
        'medium': [256, 256],
    }[arch_name]
    hyperparams.update({
        'train_freq': train_freq,
        # several gradient steps per rollout step, derived from train_freq
        'gradient_steps': max(train_freq // subsample_steps, 1),
        'n_episodes_rollout': -1,
        'policy_kwargs': dict(net_arch=layer_sizes),
    })
    return hyperparams
|
6462c42aa044aa88eb905b4f11a3bc87fc82cc85
| 27,668
|
def strip_namespace(tag_name):
"""
Strip all namespaces or namespace prefixes if present in an XML tag name .
For example:
>>> tag_name = '{http://maven.apache.org/POM/4.0.0}geronimo.osgi.export.pkg'
>>> expected = 'geronimo.osgi.export.pkg'
>>> assert expected == strip_namespace(tag_name)
"""
head, brace, tail = tag_name.rpartition('}')
return tail if brace else head
|
425b41db7a75a17122e50208b64242d1aeeea5ce
| 27,670
|
def trivial_ineq(c):
    """
    Assumes c is a ZeroComparison.
    Determines whether c is the trivial inequality 0 >= 0
    """
    has_no_args = len(c.term.args) == 0
    return has_no_args and not c.strong
|
65020f1ddc5686b43e21258e34fd51cbfdd0377f
| 27,672
|
def is_container(obj):
    """ Checks whether the object is container or not.
    Container is considered an object, which includes other objects,
    thus string is not qualified, even it implments iterator protocol.
    >>> is_container("text")
    False
    >>> is_container(tuple())
    True
    """
    return (not isinstance(obj, str)) and hasattr(obj, '__iter__')
|
a6772793a24fc95f159df100c5e3ba19dce33281
| 27,673
|
import torch
def compute_dloss_by_dx_using_scale_offset(x, grad, scaling, offset, n, p):
    """
    Compute the derivative of the loss w.r.t. the quantizer input: the
    incoming gradient is passed through wherever the rounded/offset value
    lies inside [n, p] and blocked elsewhere (straight-through estimate).
    :param grad: gradient
    :param scaling: scaling factor computed for given encoding min/max
    :param offset: offset computed
    :param n: lower bound
    :param p: upper bound
    :return: gradient w.r.t input
    """
    # R(x/s) + R(o)
    quantized = torch.round(x / scaling) + offset
    below_upper = torch.le(quantized.data, p.data)   # per-value check vs upper bound
    above_lower = torch.le(n.data, quantized.data)   # per-value check vs lower bound
    ones = torch.ones_like(quantized)
    zeros = torch.zeros_like(quantized)
    inside_range = torch.where(above_lower, torch.where(below_upper, ones, zeros), zeros)
    return inside_range * grad
|
965059441bf72067cc4b92859dffaefdc3335e56
| 27,674
|
def get_tokens_per_topic(topic, terms, n_tokens):
    """Return the top tokens for a topic from a simple LSA, best first.

    :param topic: array of per-term weights, shape [n_terms]
    :param terms: sequence mapping term index -> token
    :param n_tokens: number of tokens to return
    """
    ranked_indices = topic.argsort()[::-1][:n_tokens]
    return [terms[index] for index in ranked_indices]
|
30a39ee6c5b10376100dacc8bf8e65ffca3ad536
| 27,676
|
def has_table_def(table_def):
    """ Check if table is defined in the database.

    Returns the number of definition rows; callers rely on the count being
    truthy exactly when the table exists.
    """
    row_count = len(table_def)
    return row_count
|
44b47fa3b61a76b2b498f81adb7c51b3c3af21ba
| 27,677
|
def distance1(cell1, cell2):
    """Return Manhattan distance between cells."""
    dx = cell1[0] - cell2[0]
    dy = cell1[1] - cell2[1]
    return abs(dx) + abs(dy)
|
abbe94a7304854d3f1f90d06987e20b625f834ba
| 27,679
|
def flip_layers(nparray):
    """
    Flip RGB to BGR image data (numpy ndarray).
    Also accepts rgbA/bgrA and single channel images without crashing.
    """
    if nparray is None:
        return None
    if nparray.ndim != 3:
        return nparray
    if nparray.shape[2] == 4:
        # xyzA -> zyxA: swap the colour channels, keep alpha in place
        return nparray[..., [2, 1, 0, 3]]
    return nparray[:, :, ::-1]
|
d9d1951b968f655093c57f1827e9be27845731bd
| 27,680
|
import time
def _make_tstamp(val):
    """
    Convert a ``datetime`` object into a unix timestamp in milliseconds.

    Returns None when *val* is falsy (e.g. None).
    """
    if not val:
        return None
    return int(time.mktime(val.timetuple()) * 1000)
|
ab59736ad811b8b1d8b40119f59ccc84f16503f2
| 27,681
|
def normalize(lst, maxval=1.):
    """
    Normalizes a list of values in place so its maximum equals *maxval*.
    **Parameters**
        lst: *list*
            List of values to be normalized (modified in place)
        maxval: *float*, optional
            The maximum value that the list will have after normalization.
    **Returns**
        normalized list: *list*
            The same list, rescaled to the specified value.
    """
    peak = float(max(lst))
    for index in range(len(lst)):
        lst[index] = float(lst[index]) / peak * maxval
    return lst
|
9ae34b5b7a81d55de88c942806f0440040873165
| 27,682
|
import _pickle
def pck(path):
    """
    Reads a python/pickle format data file
    :param path: the path to the input pickle file
    :return: the object stored in the pickle file
    """
    # Pickle data is binary: the file must be opened in 'rb' mode.
    # Text mode ('r') raises UnicodeDecodeError/TypeError on Python 3.
    with open(path, "rb") as f:
        return _pickle.load(f)
|
43c3c92e44745ce34b56c8f51b0af1ae5841f8fe
| 27,683
|
import yaml
def read_yaml(path):
    """Read a YAML file.

    Input: path to the yaml file.
    Output: the parsed data as a dict.
    """
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
|
d7e9dd6d525fe42985de6862ce350106c2201d1c
| 27,684
|
def str_to_byn(str_):
    """ str_to_bin([string]) UTF-8
    Takes string. Return string of binary bites splited with " "
    Example
        input: bynary_to_string.str_to_byn('ex')
        output: '1100101 1111000'
    """
    # format(byte, 'b') replaces bin(byte).lstrip('0b'): lstrip strips
    # *characters* ('0' and 'b'), which collapsed a NUL byte ('0b0') to ''.
    return ' '.join(format(byte, 'b') for item in str_ for byte in item.encode())
|
b800f0e37807040c1f9ae216fe272fcf4bce3cf4
| 27,685
|
def isPrime(M):
    """Return True if integer M is prime, and False otherwise.
    This is used for testing certain functions; see e.g. factor.py. A
    simple, inefficient trial-division algorithm is employed.
    Values below 2 (0, 1 and negatives) are not prime.
    """
    if M < 2:
        # the original accepted 0 and 1 via an empty loop — they are not prime
        return False
    for x in range(2, M):
        if M % x == 0:
            return False
    return True
|
c6e2ea3dbd7db54c64ba0440813131075160e834
| 27,686
|
def get_power_plants_by_string_in_object_name(object_name_part, db_emlab_powerplants):
    """
    Return the power plants whose object name contains the given string.
    :param object_name_part: string to look for in object names
    :param db_emlab_powerplants: PowerPlants as queried from EMLAB SpineDB
    :return: dict mapping object_name -> parameter_value for matching
        'PowerPlants' rows with parameter_name 'ON-STREAMNL'
    """
    matches = {}
    for row in db_emlab_powerplants:
        if (row['object_class_name'] == 'PowerPlants'
                and object_name_part in row['object_name']
                and row['parameter_name'] == 'ON-STREAMNL'):
            matches[row['object_name']] = row['parameter_value']
    return matches
|
cbdc87ffa7d1e70edde20509987939eeb6f7ad85
| 27,687
|
import subprocess
def launch(cmd):
    """
    Run the shell command `cmd` and return a file object wrapping its stdout.

    NOTE(review): shell=True executes via the shell — callers must never pass
    untrusted input here.
    """
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    return process.stdout
|
bd9b7118662869da49e45511ae3cc960bb866cf8
| 27,688
|
import os
def write_readdb(list_of_names_and_fast5s, out_path):
    """Write a readdb file given a list of (read name, fast5 path) pairs."""
    with open(out_path, "w") as out_file:
        out_file.writelines(
            name + "\t" + os.path.basename(fast5_path) + "\n"
            for name, fast5_path in list_of_names_and_fast5s
        )
    return 0
|
258db8bb3b6d68daf45952d49697dd9eb16ce91b
| 27,689
|
def compute_optimal_action_with_classification_environment(
        observation, environment):
    """Helper function for gin configurable SuboptimalArms metric.

    The observation is unused; the environment itself computes the answer.
    """
    del observation  # unused, kept for the expected callback signature
    return environment.compute_optimal_action()
|
d2d25fa30fbbf9a896d589c2135364bc4f9837aa
| 27,691
|
import inspect
import importlib
def load_class(cls):
    """ Loads the given class from a dotted-path string (unless already a class). """
    if not inspect.isclass(cls):
        module_name, class_name = cls.rsplit('.', 1)
        loaded_module = importlib.import_module(module_name)
        cls = getattr(loaded_module, class_name, None)
    return cls
|
15992df96984f0aae130d4e5abd1563aca1852e0
| 27,694
|
def mock_get_provider(*args, **kwargs):
    """A mock for the Qiskit get_provider function that simply returns the
    positional and keyword arguments it was called with."""
    recorded_call = (args, kwargs)
    return recorded_call
|
38989084d13bfd3dd51eb17f7aa1e760fcf7de34
| 27,695
|
def while_false():
    """
    >>> while_false()
    """
    # The loop body never runs, so the function always returns None.
    while False:
        return False
|
d581acc3a977ad95dfac1ef3579938123f52f429
| 27,696
|
import torch
def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:
    """
    For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
    for `num_classes` N number of classes.
    Example:
        For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
        Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
    """
    if labels.dim() <= 0:
        raise AssertionError("labels should have dim of 1 or more.")
    # pad with trailing singleton dims until `dim` indexes a valid channel axis
    if labels.ndim < dim + 1:
        padded_shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape))
        labels = torch.reshape(labels, padded_shape)
    target_shape = list(labels.shape)
    if target_shape[dim] != 1:
        raise AssertionError("labels should have a channel with length equal to one.")
    target_shape[dim] = num_classes
    result = torch.zeros(size=target_shape, dtype=dtype, device=labels.device)
    # scatter_ returns the mutated zeros tensor with ones at the label indices
    result = result.scatter_(dim=dim, index=labels.long(), value=1)
    return result
|
5eb414dcd03b46252348150cc6d2516d6564ed56
| 27,697
|
import re
def HTMLDeCode(strtmp):
"""
python2 version
strtmp = string.replace(strtmp,">", ">")
strtmp = string.replace(strtmp,"<", "<")
strtmp = string.replace(strtmp," "," ")
strtmp = string.replace(strtmp,""", chr(34))
strtmp = string.replace(strtmp,"'", chr(39))
strtmp = string.replace(strtmp,"</P><P> ",chr(10) + chr(10))
strtmp = string.replace(strtmp,"<BR> ", chr(10))
"""
#python3 version
strtmp = re.sub(">", ">", strtmp)
strtmp = re.sub("<", "<", strtmp)
strtmp = re.sub(" "," ", strtmp)
strtmp = re.sub(""", chr(34), strtmp)
strtmp = re.sub("'", chr(39), strtmp)
strtmp = re.sub("</P><P> ",chr(10) + chr(10), strtmp)
strtmp = re.sub("<BR> ", chr(10), strtmp)
return strtmp
|
4aeb625309097c4abd4cd3d0865e81f10e0d2b4a
| 27,698
|
from math import gcd
from random import randrange
def fermat_primality_test(n: int, k: int = 3) -> bool:
    """
    https://en.wikipedia.org/wiki/Fermat_primality_test
    >>> assert all(fermat_primality_test(i) for i in [2, 3, 5, 7, 11])
    >>> assert not all(fermat_primality_test(i) for i in [4, 6, 8, 9, 10])
    """
    for _ in range(k):
        witness = randrange(1, n)
        if gcd(n, witness) != 1:
            return False  # shares a factor with n -> definitely composite
        if pow(witness, n - 1, n) != 1:
            return False  # fails Fermat's little theorem
    return True
|
75011a3003c4f32d6e1b7bf78c80536ae30e30f0
| 27,699
|
def shi(i, base="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"):
    """ Returns a short string hash (base-N digits) for a given int.

    Non-positive inputs produce the empty string.
    """
    radix = len(base)
    digits = []
    while i > 0:
        i, remainder = divmod(i, radix)
        digits.append(base[remainder])
    digits.reverse()
    return "".join(digits)
|
93b1ac50a3fab321d7667521a6236f0fc5ab1b51
| 27,700
|
import base64
def img_to_base64(img_filepath):
    """ Helper that reads the image at *img_filepath* and returns it encoded
    as a base64 string. """
    with open(img_filepath, "rb") as img_file:
        raw_bytes = img_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
|
63f342bdfd2fa9dfef381935840189d4f343a3ce
| 27,701
|
def _getnames(self, yname=None, xname=None):
    """Extract endog/exog names from the model, or construct defaults.

    Missing endog names fall back to 'y'; missing exog names fall back to
    'var_0'..'var_{k-1}' sized by self.params.
    """
    if yname is None:
        yname = getattr(self.model, 'endog_names', None)
        if yname is None:
            yname = 'y'
    if xname is None:
        xname = getattr(self.model, 'exog_names', None)
        if xname is None:
            xname = ['var_%d' % i for i in range(len(self.params))]
    return yname, xname
|
9f25393eae389c4f0f7d4de4051a00db0100ca73
| 27,702
|
def _load_entry_point(entry_point, verify_requirements):
    """Based on the version of setuptools load an entry-point correctly.
    setuptools 11.3 deprecated `require=False` in the call to EntryPoint.load.
    To load entry points correctly after that without requiring all
    dependencies be present, the proper way is to call EntryPoint.resolve.
    This function will provide backwards compatibility for older versions of
    setuptools while also ensuring we do the right thing for the future.
    """
    modern_api = hasattr(entry_point, 'resolve') and hasattr(entry_point, 'require')
    if not modern_api:
        # pre-11.3 setuptools: load() still accepts require=
        return entry_point.load(require=verify_requirements)
    if verify_requirements:
        entry_point.require()
    return entry_point.resolve()
|
f83e00563561a80b469d397b70b278907438318e
| 27,703
|
import sys
def __get_attrs(h5_obj, attr_key, default=None):
    """Fetch an h5py attribute value, decoding bytes to str under Python 3."""
    value = h5_obj.attrs.get(attr_key, default)
    if isinstance(value, bytes) and sys.version_info[0] >= 3:
        value = value.decode()
    return value
|
74ec9890410670972a79b84285b2e93c62915050
| 27,704
|
def get_min_value(solution, field=0):
    """
    Get the minimum value in a field across all states of a solution.
    """
    minimum = 1e38  # sentinel larger than any expected field value
    for state in solution.states:
        minimum = min(minimum, state.q[field, :, :].min())
    return minimum
|
719d426022b3f60520cb81f69918d113501fb21a
| 27,705
|
def bubble_sort(arr):
    """
    Exchange sort (bubble sort), in place.

    Each pass compares adjacent elements and swaps them when out of order,
    bubbling the largest remaining element to the end of the unsorted
    prefix; after n-1 passes the whole list is sorted, i.e. arr[n-i:n]
    is already in final position after pass i.

    Average Time Complexity: O(n**2)
    Space Complexity: O(1)
    stability: Yes
    Best Time Complexity: O(n)
    Worst Time Complexity: O(n**2)
    :param arr: list to sort (modified in place)
    :return: the same list, sorted ascending
    """
    n = len(arr)
    for finished in range(n - 1):
        for k in range(n - 1 - finished):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
    return arr
|
404439098a1549f5d2f0553320dd21a210f28ab7
| 27,706
|
def percent_change(old, new):
    r"""Compute the fractional change from old to new:

    .. math::
        change = \frac{new - old}{abs(old)}

    Note: despite the name, the result is a fraction (0.5 means +50%), not
    multiplied by 100 — the original docstring advertised a x100 factor the
    code never applied, and callers depend on the fractional value. The
    docstring is also now a raw string so ``\frac`` is not an escape.
    """
    return float(new - old) / abs(old)
|
cff7fc4e75f082d1d3665e1feb167c6ad601fa47
| 27,707
|
def _get_bytes_from_pem_file(fpath: str) -> bytes:
    """Read the PEM file at *fpath* and return its raw bytes."""
    with open(fpath, "rb") as pem_file:
        contents = pem_file.read()
    return contents
|
aff762b165f536b6c1830313bd28f73594c7ff9a
| 27,708
|
def greetings():
    """
    Return the canned greeting message.
    """
    message = 'Made it to AWS?'
    return message
|
35a4c08f626e555c5e2dfc587ada95e1a11ee339
| 27,709
|
def create_vocabulary_list(sms_words):
    """
    Build the vocabulary (list of distinct words) from tokenized messages.
    :param sms_words: iterable of word lists, one per message
    :return: list of the unique words (order unspecified)
    """
    vocabulary = set()
    for words in sms_words:
        vocabulary.update(words)
    return list(vocabulary)
|
7218e328f2c72e3c8c7d73faa548f863d4560354
| 27,710
|
import os
def read_test_data():
    """
    Read 8-bit autoencoder data from the file ./auto_encoder_input.txt and
    return it as a list of (input, target) string pairs — for an autoencoder
    the target equals the input.

    (The original also computed os.getcwd() and an empty `sequence` list,
    neither of which was used; both are removed.)
    """
    test_data = []
    with open('./auto_encoder_input.txt', "r") as f:
        for line in f:
            pattern = line.replace('\n', '')
            test_data.append((pattern, pattern))
    return test_data
|
ea9d1c9b29a2430ed6b7c63895aaff538fa63f50
| 27,711
|
def get_tl_num_size(val: int) -> int:
    """
    Calculate the encoded length of a TLV Type or Length variable.
    :param val: an integer standing for Type or Length.
    :return: The length of var.
    """
    # TLV variable-size encoding thresholds: 1, 3, 5 or 9 bytes.
    for upper_bound, size in ((0xFC, 1), (0xFFFF, 3), (0xFFFFFFFF, 5)):
        if val <= upper_bound:
            return size
    return 9
|
4a11da075f57d98b2956e7932adf0cc9a14645a7
| 27,712
|
def tablestart():
    """
    Return the opening boilerplate for a LaTeX table environment.

    The literal intentionally starts its lines at column 0 so the emitted
    LaTeX is not indented; the trailing '%%' produces a literal comment
    line when the string is later %-formatted or templated.
    """
    start = '''
\\begin{table}
\\centering
\\small
%%'''
    return start
|
f152b4251802d5f1e9fda535de89fa7de5d75b60
| 27,714
|
def dict2yaml(yaml_dict: dict) -> str:
    """
    Convert the YAML dict into the YAML front matter string.
    Parameters
    ----------
    yaml_dict : dict
        Dict made from the YAML front matter.
    Returns
    -------
    str
        YAML front matter as a string, fenced by '---' lines.
    """
    body = "".join(
        f"{key}: {yaml_dict[key]}".strip() + "\n" for key in yaml_dict
    )
    return "---\n" + body + "---\n"
|
b2bfd40ad5f45f2725384a9de12fbfcc0d7fe9d1
| 27,715
|
def remove_rectangle(rlist, u_low, u):
    """
    Remove a non-optimal rectangle from the list of rectangles.
    Parameters
    ----------
    rlist : list
        List of rectangles.
    u_low : list
        Lower bound of the rectangle to remove.
    u : list
        Upper bound of the rectangle to remove.
    Returns
    -------
    list
        New list without any rectangle equal to [u_low, u].
    """
    target = [u_low, u]
    return [rectangle for rectangle in rlist if rectangle != target]
|
4353fc9bb1187b565abcb5932465940ada6ad678
| 27,717
|
def lcp(s1, s2):
    """Return the length of the longest common prefix
    between strings `s1` and `s2`."""
    length = 0
    for a, b in zip(s1, s2):
        if a != b:
            break
        length += 1
    return length
|
dae5fbe70e9684f7e9a336adbd4e74d874dd52f5
| 27,718
|
import torch
def pr(x, y):
    """ Metrics calculation from: https://en.wikipedia.org/wiki/Confusion_matrix
    Returns precision, recall, specificity and f1 (in that order)
    """
    agree = (x == y)
    disagree = (x != y)
    tp = (agree * (x == 1)).sum().to(torch.float32)
    tn = (agree * (x == 0)).sum().to(torch.float32)
    fp = (disagree * (x == 1)).sum().to(torch.float32)
    fn = (disagree * (x == 0)).sum().to(torch.float32)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    specificity = tn / (tn + fp)
    f1 = (2 * tp) / (2 * tp + fp + fn)
    return precision, recall, specificity, f1
|
b2095585e3283b8c301c992ea241158c612b4d3b
| 27,719
|
def cost_of_preferred_stock(preferred_dividends, market_price_of_preferred):
    """
    Summary: Calculate the cost of preferred stock used in a WACC calculation.
    PARA preferred_dividends: The preferred dividend paid in that period.
    PARA type: float
    PARA market_price_of_preferred: The price of a share of preferred stock during the period.
    PARA type: float
    """
    dividend_yield = preferred_dividends / market_price_of_preferred
    return dividend_yield
|
84e55ae1f18bb9b7b40631e08e427d5f9a8ddcad
| 27,720
|
import re
def _strip_ken_suffix(name):
    """
    >>> _strip_ken_suffix('Akita ken')
    'Akita'
    >>> _strip_ken_suffix('Akita-ken')
    'Akita'
    >>> _strip_ken_suffix('Akita Prefecture')
    'Akita'
    """
    suffix_pattern = r'[- ](ken|prefecture)'
    return re.sub(suffix_pattern, '', name, flags=re.IGNORECASE)
|
736dae9f335d5a22e3faace4e947ec02798d3fa2
| 27,721
|
from typing import OrderedDict
import six
def BuildFullMapUpdate(clear, remove_keys, set_entries, initial_entries,
                       entry_cls, env_builder):
  """Builds the patch environment for an environment update.
  To be used when BuildPartialUpdate cannot be used due to lack of support for
  field masks containing map keys.
  Follows the environments update semantic which applies operations
  in an effective order of clear -> remove -> set.
  Leading and trailing whitespace is stripped from elements in remove_keys
  and the keys of set_entries.
  Args:
    clear: bool, If true, the patch removes existing keys.
    remove_keys: iterable(string), Iterable of keys to remove.
    set_entries: {string: string}, Dict containing entries to set.
    initial_entries: [AdditionalProperty], list of AdditionalProperty class with
      key and value fields, representing starting dict to update from.
    entry_cls: AdditionalProperty, The AdditionalProperty class for the type
      of entry being updated.
    env_builder: [AdditionalProperty] -> Environment, A function which produces
      a patch Environment with the given list of entry_cls properties.
  Returns:
    Environment, a patch environment produced by env_builder.
  """
  # clear: start from nothing; otherwise start from the initial entries
  # (a dict keyed by entry key for easy processing).
  if clear:
    current = OrderedDict()
  else:
    current = OrderedDict((entry.key, entry.value) for entry in initial_entries)
  # remove: drop the requested (whitespace-stripped) keys if present
  for key in set(k.strip() for k in remove_keys or []):
    current.pop(key, None)
  # set: sorted by key to make it easier for tests to set the expected
  # patch object; keys are stripped after sorting, as before.
  for raw_key, value in sorted(six.iteritems(set_entries or {})):
    current[raw_key.strip()] = value
  # Transform the dictionary back into a list of entry_cls
  return env_builder([
      entry_cls(key=key, value=value)
      for key, value in six.iteritems(current)
  ])
|
dd43cfe6db69076c61a211d9534f0a5624f76246
| 27,722
|
import requests
import sys
def get_url(url: str, auth: tuple):
    """Get a GitHub URL and check for rate limits and errors.

    Returns the decoded JSON body on success; prints a diagnostic and exits
    the process with status 1 on any HTTP error.
    """
    response = requests.get(url, auth=auth) if auth else requests.get(url)
    if response.ok:
        return response.json()
    if response.headers["x-ratelimit-remaining"] == "0":
        print("Rate limit exceeded")
    else:
        print("Unknown error:")
        print(response.status_code)
    sys.exit(1)
|
ffda0597d806d1230629815b0c7e61025fe3d549
| 27,724
|
import os
def stripExtension(file_path):
    """ Return the basename of `file_path` with its file extension removed. """
    root, _extension = os.path.splitext(os.path.basename(file_path))
    return root
|
c1479279737c3cb4ce53ec0020437dfe50148c5b
| 27,725
|
import os
def _normalize_path(path):
    """
    Return an absolute version of *path*, with a leading `~` expanded.
    :param path: path to normalize
    :type path: str
    :rtype: str
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
|
fcd58d0250c1164c41435cec0b2d55c5f0527c57
| 27,726
|
def exception_match(x, y):
    """Check the relation between two given exception classes `x`, `y`:
    - `x` equals to `y`
    - `x` is a subclass of `y`
    Note that `BaseException` should be considered.
    e.g. `GeneratorExit` is a subclass of `BaseException` but which is not a
    subclass of `Exception`, and it is technically not an error.
    """
    # issubclass(x, Exception) implies issubclass(x, BaseException), so the
    # original `issubclass(x, Exception) or issubclass(x, BaseException)`
    # reduces to the single BaseException check.
    return issubclass(x, BaseException) and issubclass(x, y)
|
6ea965c70c9980834a4b31baac802b5bad295e2a
| 27,728
|
import argparse
import sys
def parse_args():
    """Parse arguments."""
    parser = argparse.ArgumentParser()
    # Required positionals, declared data-first for readability.
    positionals = [
        ("target", "Target: single sequence file or .lst"),
        ("query", "Query: single sequence file or .lst"),
        ("DEF_file", "DEF configuration file"),
        ("output", "Output file location"),
    ]
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument("--outFormat", choices=["psl", "axt"], help="Output format axt|psl")
    parser.add_argument("--gz", help="Compress output with gzip")
    parser.add_argument("--temp_dir",
                        help="Temp directory to save intermediate fasta files (if needed)\n"
                             "/tmp/ is default, however, DEF_file key TMPDIR can provide a value"
                             "the command line argument has a higher priority than DEF file"
                        )
    parser.add_argument("--verbose",
                        "-v",
                        action="store_true",
                        dest="verbose",
                        help="Show verbosity messages")
    # All four positionals are mandatory; with fewer args show usage and stop.
    if len(sys.argv) < 5:
        parser.print_help()
        sys.exit(0)
    return parser.parse_args()
|
e9dd8ecc0335d5dfc6ef00a5649eb0e28d01c364
| 27,729
|
import math
def trunc(value, decimals=0):
    """Truncates values after a number of decimal points
    :param value: number to truncate
    :type value: float
    :param decimals: number of decimal points to keep
    :type decimals: int
    :return: truncated float
    :rtype: float
    """
    scale = 10 ** decimals
    shifted = math.trunc(value * scale)
    return shifted / scale
|
fda9289eae3274b7c8cb1bd172032fc3c0e7f8f0
| 27,730
|
def _is_irreducible_and_reduced(F):
"""
Check if the polynomial F is irreducible and reduced.
TESTS::
sage: R.<x,y> = QQ[]
sage: F = x^2 + y^2
sage: from sage.schemes.curves.constructor import _is_irreducible_and_reduced
sage: _is_irreducible_and_reduced(F)
True
"""
factors = F.factor()
return len(factors) == 1 and factors[0][1] == 1
|
212e99128ff0df538a907125cd94567432fd3c6e
| 27,732
|
def tsvRowToDict(row):
    """Convert a TSV row to a dict keyed by the row's column names."""
    result = {}
    for column in row._columns_:
        result[column] = getattr(row, column)
    return result
|
5cc7860861ab73aaaa9857684b415db63b4aaf3d
| 27,733
|
def identifier_to_label(identifier):
    """Tries to convert an identifier to a more human readable text label.

    Replaces underscores by spaces, upper-cases "id"/"url" fragments and
    capitalizes the first character.
    """
    label = identifier.replace("_", " ").replace(" id", "ID")
    if label == "url":
        label = "URL"
    return label[0].upper() + label[1:]
|
8dbbac38e4e0408354128bf8da0dabbf72d785ae
| 27,734
|
def get_contrast(img, w, h, seed):
    """
    Estimate the contrast between the region around a seed point and the
    bottom fourth of the image (treated as background noise).

    :param img: image object indexable as img[x, y, 2] for pixel intensity
    :param w: pixel width of the image
    :param h: pixel height of the image
    :param seed: (x, y) coordinates of one seed point
    :return: rough contrast estimate between seed region and background
    """
    seed_height = seed[1]
    # Mean intensity in a small window centred on the image's middle column
    # at the seed's height.
    window_vals = [
        img[i, j, 2]
        for i in range(int(w / 2) - 3, int(w / 2) + 3)
        for j in range(seed_height - 3, seed_height + 3)
    ]
    # Mean intensity over the bottom fourth of the image.
    noise_vals = [
        img[k, m, 2]
        for k in range(w)
        for m in range(int(.75 * h), h)
    ]
    avg = sum(window_vals) / len(window_vals)
    avg_noise = sum(noise_vals) / len(noise_vals)
    return (avg - avg_noise) / avg_noise
|
a327aec00e8b5c485cf0943ebe8ab6144e16a8b5
| 27,735
|
def max_subsequent_sum(the_list):
    """
    Return the maximum sum of a non-empty run of consecutive numbers.

    @param the_list: a non-empty list of integers
    @return: maximum sum of consecutive numbers in the list
    """
    # Kadane's algorithm: track the best sum of a run ending at the current
    # element. O(n) time, O(1) space, and no leftover debug printing (the
    # original printed its whole memo table on every call).
    best_ending_here = the_list[0]
    best = the_list[0]
    for value in the_list[1:]:
        best_ending_here = max(value, value + best_ending_here)
        best = max(best, best_ending_here)
    return best
|
ca5bd9845d7e6995c739f806642edd4e35fd0223
| 27,736
|
def standings_to_str(standings):
    """Format standings list as a string.

    Each line is "<place>. <team name> (<points>)"; teams with the same
    points as the previous team share that team's place value.

    Parameters:
        standings (list): team standings
    Returns:
        str: formatted string representation of the list
    """
    lines = ['\n']
    previous_points = 0
    tied_place = 0
    for position, team in enumerate(standings, 1):
        points = team['points']
        # A new point total starts a new place; equal totals keep the old one.
        if points != previous_points:
            tied_place = position
        lines.append(f"{tied_place}. {team['name']} ({points})\n")
        previous_points = points
    return ''.join(lines)
|
be19aff13a20c0c421bf4b58a8f309d36ab9d973
| 27,737
|
def getGameFromCharacter(cname: str) -> str:
    """Return a query to get games with equal Ryu Number to a character.

    The query will retrieve the title and Ryu Number of a game whose Ryu
    Number is exactly equal to the Ryu Number of the character whose name is
    passed. This is used primarily for path-finding towards Ryu.

    The resulting query takes the following form for game as G:
    `(G.title: str, G.ryu_number: int)`
    """
    # The contract is to return a literal SQL string, so we cannot use bound
    # parameters; at minimum escape single quotes so a quote in the character
    # name cannot break out of the string literal (SQL injection).
    safe_cname = cname.replace("'", "''")
    return (f"SELECT DISTINCT G.title, G.ryu_number "
            f"FROM appears_in "
            f"INNER JOIN game_character AS C ON cname=C.name "
            f"INNER JOIN game AS G ON gtitle=G.title "
            f"WHERE cname LIKE '{safe_cname}' AND G.ryu_number=C.ryu_number;"
            )
|
22e6a5702f69d3d7ca391add6144c8e19734c5a9
| 27,738
|
def is_valid_cidr_netmask(cidr_netmask: str) -> bool:
    """
    Check that the value given is a valid IPv4 netmask expressed as a
    CIDR prefix length (e.g. "24" for /24).
    :param cidr_netmask: Netmask to check
    :return bool: True if the netmask is valid
    """
    text = str(cidr_netmask)
    if not text.isdigit():
        return False
    return 0 <= int(text) <= 32
|
3e63d4cf2e9d748977230f7b9c26bf6e1440d313
| 27,739
|
from typing import List
def clean_authors(authors_list: List[str], sign: str) -> List[str]:
    """
    Split combined author entries on `sign` and strip whitespace.

    Args:
        authors_list (:obj:`List[str]`):
            A list of author names (may be empty or None).
        sign (:obj:`str`):
            Separator that may join several names within one entry.

    Returns:
        :obj:`List[str]`:
            A new list with combined entries split into individual names;
            falsy input is returned unchanged.
    """
    if not authors_list:
        return authors_list
    cleaned = []
    for entry in authors_list:
        if sign in entry:
            cleaned.extend(part.strip() for part in entry.split(sign))
        else:
            cleaned.append(entry)
    return cleaned
|
65d7e625d0e7e98f95c3e2ae4877d5d744518686
| 27,741
|
def _get_crash_key(crash_result):
"""Return a unique identifier for a crash."""
return f'{crash_result.crash_type}:{crash_result.crash_state}'
|
0f2472bf984440bb27cd94e1e3fb132529b4fad1
| 27,742
|
def midpoint(A, B):
    """Calculate the midpoint between two 3-D points."""
    x = (A[0] + B[0]) / 2
    y = (A[1] + B[1]) / 2
    z = (A[2] + B[2]) / 2
    return x, y, z
|
d9895cfed02a86b3a0b9117ff6e697d69c173a96
| 27,743
|
from typing import List
def filter_stacktrace(stacktrace: List[str]) -> List[str]:
    """Removes those frames from a formatted stacktrace that are located
    within the DHParser-code."""
    first_foreign = 0
    for first_foreign, frame in enumerate(stacktrace):
        quote_start = frame.find('"')
        quote_end = frame.find('"', quote_start + 1)
        # Stop at the first frame whose quoted filename is not DHParser's.
        # If every frame is DHParser's, the last one is kept (as before).
        if frame.find("DHParser", quote_start, quote_end) < 0:
            break
    return stacktrace[first_foreign:]
|
ac92251eadc4f53c3a2f14e7386c38ee2aa70e17
| 27,744
|
def conv_file(file):
    """Convert a CSV file into a list of row-value lists.

    args: filename as .csv
    """
    rows = []
    with open(file, 'r') as handle:
        for raw_line in handle:
            # Drop the trailing newline and split into cell values.
            rows.append(raw_line.strip().split(","))
    return rows
|
dc0ea139d2006744d848aff75d85d74f984e2fad
| 27,745
|
def predict_by_epoch(Classifier, Features):
    """Run a trained classifier on every epoch of features.

    Parameters:
    -----------
    Classifier : object with a .predict(epoch) method
    Features : iterable of per-epoch feature sets

    Returns:
    --------
    list of per-epoch predictions, in input order
    """
    return [Classifier.predict(epoch) for epoch in Features]
|
0a48c06ee8de1ac477540e23325369fad4ec20db
| 27,746
|
def colorize(color: str, text: str) -> str:
    """
    Wrap `text` in rich-style color markers.

    :param color: Color tag to use
    :param text: Text to color
    :return: Text wrapped as `[color]text[/]`
    """
    return '[' + color + ']' + text + '[/]'
|
db81c32c84e35e1f546ead8abef5585ba2afbea9
| 27,747
|
def avg(iter, key=None, percent=0.0):
    """
    Custom average with optional trimming of extremes.

    :param iter: values to average (name kept for API compatibility,
        although it shadows the builtin)
    :param key: optional callable extracting the numeric value from each item
    :param percent: fraction of items to drop overall (half from each end of
        the sorted list) before averaging
    :return: the (trimmed) mean, or 0 when nothing is left to average
    """
    total = len(iter)
    if total == 0:
        return 0
    # Number of items to drop from EACH end.
    del_num = int(total * percent / 2)
    if del_num >= total:
        return 0
    values = iter
    if percent > 0:
        # Sort first so the extremes are the ones trimmed.
        values = sorted(iter, key=key)
        if del_num > 0:
            values = values[del_num:-del_num]
    remaining = len(values)
    if remaining == 0:
        # Trimming removed everything (e.g. percent close to 1 on a short
        # list); the original code divided by zero here.
        return 0
    if key is None:
        return sum(values) / remaining
    return sum(key(item) for item in values) / remaining
|
6305c908d4726412843b5fa936daf04e313096de
| 27,748
|
def remove_duplicates(df):
    """
    Return a new pandas DataFrame with duplicate rows dropped;
    the input frame is left unmodified.
    """
    return df.drop_duplicates(inplace=False)
|
ea5b5a8a1024650c4684d46d8a0319b0a7a26d51
| 27,749
|
def dividers_list(a):
    """
    :param a: an integer from 1 to 1000
    :return: list of the divisors of the number, in ascending order
    """
    return [d for d in range(1, a + 1) if a % d == 0]
|
1ca5a211a1841f53a49f53f1fb1e4836534cd792
| 27,750
|
from pathlib import Path
def write_slim(L_sites: int, generations: int, out: str, haploid: bool = False) -> None:
    """
    Write SLiM script for an L-site epistasis model, calling fitness values from a genotype-phenotype map.
    The basic SLiM code blocks are:
    - initialize() callback
    - initialize parameters for simulation parameters
    - initialize chromosome with L sites, each with a unique mutation allowed
    - establish mutation stack groups (only one mutation is allowed at each site, back mutation allowed)
    - first generation callback
    - initialize SLiM subpopulation
    - add 'ancestral' mutation to all sites in all individuals
    - define genotype-phenotype dictionary
    - fitness(NULL) callback
    - defines an individual's fitness based on gpmap values
    - mutation(NULL) callback
    - writes new mutations to a text file as they arise
    - run the simulation
    - gtcount() function writes the census count of populated nodes at each generation to _gtcount.txt
    - background_check() function writes observed edge transitions at each generation to _gttransitions.txt
    - simulation runs until 'generations' is reached or until all mutations fix
    - once finished running, saves tree sequence information
    PARAMETERS:
    -----------
    L_sites (int) : number of interacting sites
    generations (int) : number of generations to run the simulation for
    out (str) : outpath for SLiM script (appended with .slim)
    haploid (bool) : run the simulation on a haploid population (default = False,
                     population is diploid and fitness is averaged between both chromosomes)
    RETURNS:
    --------
    None (SLiM script is written to 'out' parameter)
    """
    # find gt_to_pt.eidos filepath
    # NOTE(review): resolved relative to this module's location, so the eidos
    # helper must ship alongside this file.
    eidospath = Path(__file__).parent / "gt_to_pt.eidos"
    out=out+'.slim'
    with open(out, 'w') as f:
        # initialize callback
        f.writelines('\n'.join(['// '+str(L_sites)+'-site epistasis model',
                    'initialize() {',
                    '\tinitializeSLiMOptions(keepPedigrees=T);',
                    '\tsource("'+str(eidospath)+'");',
                    #'\tsource("gt_to_pt.eidos");',
                    '\tinitializeTreeSeq();',
                    '\tinitializeRecombinationRate(0);',
                    '\tinitializeMutationRate(MUTATIONRATE);',
                    '\n']))
        # One mutation type + genomic element per interacting site; stack
        # group -1 with policy "l" keeps only the latest mutation per site
        # (allows back-mutation).
        for site in range(L_sites):
            f.writelines('\n'.join(['\t initializeMutationType("m'+str(site)+'", 1.0, "f", 1.0);',
                        '\t initializeGenomicElementType("g'+str(site)+'", m'+str(site)+', 1.0);',
                        '\t initializeGenomicElement(g'+str(site)+', '+str(site)+', '+str(site)+');',
                        '\t m'+str(site)+'.convertToSubstitution = F;',
                        '\t m'+str(site)+'.mutationStackPolicy = "l";',
                        '\t m'+str(site)+'.mutationStackGroup = -1;',
                        '\n']))
        # m1000 is the 'ancestral' marker mutation placed at every site below.
        f.write('\t initializeMutationType("m1000", 1.0, "f", 1.0);\n')
        f.write('\t m1000.mutationStackPolicy = "l";\n')
        f.write('\t m1000.mutationStackGroup = -1;\n')
        f.write('}\n')
        # create population and establish parameters
        f.write('1 {\n')
        f.write('\tsim.addSubpop("p1", POPULATIONSIZE);\n')
        f.write('\tfor (genome in p1.genomes) {\n')
        f.write('\t\tfor (pos in 0:('+str(L_sites)+'-1)) {\n')
        f.write('\t\t\tgenome.addNewMutation(m1000,1.0,pos);\n')
        f.write('\t\t}\n')
        f.write('\t\tfor (mutation in genome.mutations) {\n')
        f.write('\t\t\tmutation.tag=0;\n')
        f.write('\t\t}\n')
        f.write('\t}\n')
        f.write('\tdefineGlobal("gpmap",gpmap_load(GPMAP));\n')
        f.write('\tdefineGlobal("gtdict",gt_list(GPMAP));\n')
        f.write('}\n')
        # epistasis fitness callback
        f.write('// epistasis callback\n')
        f.write('fitness(NULL) {\n')
        f.write('\tmuts1=c(individual.genome1.mutations.mutationType.id);\n')
        f.write('\tmuts2=c(individual.genome2.mutations.mutationType.id);\n')
        f.write('\tgt1=slim_to_mut(muts1);\n')
        f.write('\tgt2=slim_to_mut(muts2);\n')
        f.write('\tph1=gpmap.getValue(gt1);\n')
        f.write('\tph2=gpmap.getValue(gt2);\n')
        # Haploid model uses only genome1's phenotype; diploid averages both.
        if haploid:
            f.write('\treturn (ph1);\n')
        else:
            f.write('\treturn ((ph1+ph2)/2);\n')
        f.write('}\n')
        # track background of new mutations
        f.write('mutation(NULL) {\n')
        f.write('// no same mutations at a site\n')
        f.write('\tfor (m in genome.mutations) {\n')
        f.write('\t\tif (m.mutationType == mut.mutationType) {\n')
        f.write('\t\t\treturn F;\n')
        f.write('\t\t}\n')
        f.write('\t}\n')
        f.write('\tmuts=c(genome.mutations.mutationType.id);\n')
        f.write('\tgt=slim_to_mut(muts);\n')
        f.write('\tgtv=gtdict.getValue(gt);\n')
        f.write('\tmut.tag=(asInteger(gtv));\n')
        f.write('\treturn T;\n')
        f.write('}\n')
        # run until max generation or all mutations fix
        f.write('1:'+str(generations)+' late() {\n')
        f.write('\tgt_count(sim.generation, p1, gtdict); // write genotype count\n')
        f.write('\tif (any(sim.mutations.originGeneration==sim.generation)) { // save mutation transitions\n')
        f.write('\t\tidx=which(sim.mutations.originGeneration==sim.generation); {\n')
        f.write('\t\tfor (i in idx) {\n')
        f.write('\t\t\tmutation=sim.mutations[i];\n')
        f.write('\t\t\tbackground_check(sim.generation, p1, mutation);\n')
        f.write('\t\t\t}\n')
        f.write('\t\t}\n')
        f.write('\t}\n')
        f.write('\tif (check_fix(p1)) {\n')
        f.write('\t\ttree_deets(p1, gtdict);\n')
        f.write('\t\tsim.treeSeqOutput(OUTPATH+".trees");\n')
        f.write('\t\tsim.simulationFinished();\n')
        f.write('\t}\n')
        f.write('}\n')
        # Final-generation fallback: save trees even if nothing fixed.
        f.write(str(generations)+' late() {\n')
        f.write('\ttree_deets(p1, gtdict);\n')
        f.write('\tsim.treeSeqOutput(OUTPATH+".trees");\n')
        f.write('\tsim.simulationFinished();\n')
        f.write('}')
    return None
|
c385f13d886e9991b8e6464139849fd95262fdd8
| 27,751
|
from typing import List
def is_course_section_row(row_cols: List, time_index: int) -> bool:
    """Determines if a row in a course table contains data for a course section.
    :param row_cols: A row in a course table.
    :param time_index: The column index where the time data is possibly stored.
    :return: True if the row contains course section data, and False otherwise.
    """
    if time_index >= len(row_cols):
        return False
    time_text = row_cols[time_index].getText().strip()
    # A real section row has a time like "10:00" — colon at position 2.
    return len(time_text) > 2 and time_text[2] == ':'
|
640c92bf289d0b9734912140d6624ce639c05e39
| 27,752
|
def readline(string):
    """
    Read a single line from `string`, including its trailing newline if
    present; return the whole string when it contains no newline.
    """
    newline_at = string.find("\n")
    if newline_at == -1:
        return string
    return string[:newline_at + 1]
|
67a081e2cba9e791ebcf3d60e42222a89d7c5429
| 27,753
|
import argparse
import socket
def get_parser():
    """Parse the user arguments.

    Builds an argparse parser with two subcommands, ``run`` and ``check``,
    that share an identical set of BFD session options.
    """
    parser = argparse.ArgumentParser(
        description="Maintain a BFD session with a remote system"
    )
    subparser = parser.add_subparsers(dest="command")
    run = subparser.add_parser("run")
    check = subparser.add_parser("check")
    # Register the same option set on both subcommands.
    for prs in [run, check]:
        # -4/-6 are mutually exclusive; default AF_UNSPEC lets resolution pick.
        family_group = prs.add_mutually_exclusive_group()
        family_group.add_argument(
            "-4",
            "--ipv4",
            action="store_const",
            dest="family",
            default=socket.AF_UNSPEC,
            const=socket.AF_INET,
            help="Force IPv4 connectivity",
        )
        family_group.add_argument(
            "-6",
            "--ipv6",
            action="store_const",
            dest="family",
            default=socket.AF_UNSPEC,
            const=socket.AF_INET6,
            help="Force IPv6 connectivity",
        )
        # append action: --remote may be given multiple times.
        prs.add_argument(
            "--remote",
            action="append",
            help="<Required> remote IP address or hostname",
            required=True,
        )
        prs.add_argument(
            "--local", type=str, help="Local IP address or hostname"
        )
        # 4784 = RFC 5883 multihop BFD port; 3784 is the singlehop port.
        prs.add_argument(
            "--control-port",
            type=int,
            default=4784,
            help="Default control port, use 3784 for singlehop",
        )
        prs.add_argument(
            "-r",
            "--rx-interval",
            default=1000,
            type=int,
            help="Required minimum Rx interval (ms)",
        )
        prs.add_argument(
            "-t",
            "--tx-interval",
            default=1000,
            type=int,
            help="Desired minimum Tx interval (ms)",
        )
        prs.add_argument(
            "-m",
            "--detect-mult",
            default=1,
            type=int,
            help="Detection multiplier",
        )
        prs.add_argument(
            "-p",
            "--passive",
            action="store_true",
            help="Take a passive role in session initialization",
        )
        prs.add_argument(
            "--state-dir",
            default="/tmp",
            help="Directory where to persist state on filesystem.",
        )
    return parser
|
f56ffb2a3fe0667d8e91d5dc3d7f1fa22e88513e
| 27,754
|
def updatelist(lTrajVeh,lDict):
    """
    Update trajectory data with the parameters given in lDict.

    If lTrajVeh is a single dict it is updated with the first dict of
    lDict; if it is a list of dicts, elements are updated pairwise.
    Returns the (mutated) lTrajVeh.
    """
    try:
        lTrajVeh.update(lDict[0])
    except AttributeError:
        # A list has no .update(): merge element-wise instead.
        for target, source in zip(lTrajVeh, lDict):
            target.update(source)
    return lTrajVeh
|
3bf8914de5d85e0d263bc4b9045028ec7b6bf832
| 27,755
|
import hashlib
def _hashdigest(message, salt):
""" Compute the hexadecimal digest of a message using the SHA256 algorithm."""
processor = hashlib.sha256()
processor.update(salt.encode("utf8"))
processor.update(message.encode("utf8"))
return processor.hexdigest()
|
2c9c5886d72700826da11a62f4cc9c82c2078090
| 27,757
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.