content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_params_to_update(model):
    """Return the list of model parameters that have requires_grad=True.

    Args:
        model: An object exposing ``named_parameters()`` yielding
            (name, parameter) pairs (e.g. a torch.nn.Module).

    Returns:
        list: The parameter objects whose ``requires_grad`` is truthy.
    """
    # `param.requires_grad == True` replaced by a plain truthiness test;
    # a comprehension replaces the manual append loop.
    return [param for _, param in model.named_parameters() if param.requires_grad]
|
bfe643206a42f10d7542a4345932b2625e52bf4e
| 76,509
|
def validate_and_normalize_unique_id(unique_id: str) -> str:
    """
    Normalize the many accepted unique_id formats into a bare username.
    For example, "@tiktoklive" -> "tiktoklive".
    :return: Normalized version of the unique_id
    """
    if not isinstance(unique_id, str):
        raise Exception("Missing or invalid value for 'uniqueId'. Please provide the username from TikTok URL.")
    cleaned = unique_id
    # Strip URL scaffolding and the @ handle prefix wherever they occur.
    for fragment in ("https://www.tiktok.com/", "/live", "@"):
        cleaned = cleaned.replace(fragment, "")
    return cleaned.strip()
|
11b235eecbca605476ec72ceebec6e576e7bfcdc
| 76,515
|
def span_pred_key(node):
    """
    Node key for SortDictDmrs: orders nodes by cfrom (ascending),
    then cto (descending), then predicate string (ascending).
    """
    start, end = node.cfrom, node.cto
    # Negating `end` turns the ascending tuple sort into a descending one.
    return (start, -end, str(node.pred))
|
4f322402dbbf154e85682a1b4224f37c884aeb0c
| 76,518
|
def get_pr_reviews(pr):
    """Gets a list of all submitted reviews on a PR. Does not list requested
    reviews.

    ``pr`` is expected to expose ``session`` (an authenticated requests-like
    session object), ``repository`` (an (owner, name) pair) and ``number``.
    Performs a network call to the GitHub API and returns the decoded JSON
    response — presumably a list of review dicts on success; error responses
    are returned as-is without status checking (TODO confirm callers handle
    that).
    """
    # Required to access the PR review API.
    # The custom Accept header opts in to the (preview-era) review endpoint.
    headers = {'Accept': 'application/vnd.github.black-cat-preview+json'}
    reviews = pr.session.get(
        'https://api.github.com/repos/{}/{}/pulls/{}/reviews'.format(
            pr.repository[0], pr.repository[1], pr.number),
        headers=headers).json()
    return reviews
|
a62b0ca5059f318df1d1e355682cee31f4af4513
| 76,519
|
from typing import List
from typing import Any
def list_get(array: List[Any], index: int, default: Any = None) -> Any:
    """
    Safely index into *array*.

    Returns the element at *index*; when the index is out of bounds the
    given *default* is returned instead.  Negative indices count from the
    end, exactly as with normal list indexing.
    """
    try:
        return array[index]
    except IndexError:
        return default
|
7b38b06fda2ccecf477d9e79457c8eead8534dac
| 76,520
|
def join(words, sep="", template="{word}"):
    """
    Concatenate *words* with *sep*, first rendering each word through
    *template* (which may reference the word as ``{word}``).
    """
    rendered = (template.format(word=w) for w in words)
    return sep.join(rendered)
|
149bb81af634da8ca98efbce6479cc03454a8756
| 76,522
|
def compute_closest_coordinate(value, range_min, range_max):
    """
    Compute the closest coordinate on the neighboring hyperplane.

    Clamps *value* into the closed interval [range_min, range_max]:
    the value itself when inside, otherwise the nearer boundary.

    Parameters
    ----------
    value : float
        Coordinate value (x or y) of the target point.
    range_min : float
        Minimal coordinate (x or y) of the neighboring hyperplane.
    range_max : float
        Maximal coordinate (x or y) of the neighboring hyperplane.

    Returns
    -------
    float
        The clamped x or y coordinate.
    """
    return min(max(value, range_min), range_max)
|
8f1b5ed89f48656a3de6d2cabfe20f86df131852
| 76,523
|
from typing import Tuple
def grid_to_local(point: Tuple[int, int],
                  altitude: float,
                  north_min: float,
                  east_min: float,
                  cell_size: float
                  ) -> Tuple[float, float, float]:
    """Convert a grid-frame cell index to local (north, east, altitude).

    The grid cell (row, col) is scaled by *cell_size* and offset by the
    grid origin (north_min, east_min); *altitude* passes through unchanged.
    """
    row, col = point[0], point[1]
    north = north_min + row * cell_size
    east = east_min + col * cell_size
    return (north, east, altitude)
|
35d1eaef9485ddc86b93fcd0e19e7c9bfe3b2068
| 76,525
|
def flatten_tensor(inputs):
    """
    Collapse a [b, t, c] tensor into [b*t, c].

    Returns the reshaped tensor together with the original shape so the
    caller can undo the flattening later.
    """
    original_shape = inputs.shape
    channels = original_shape[-1]
    return inputs.reshape(-1, channels), original_shape
|
b475c11996bf6ff96f38d53887770be639e1bcd5
| 76,529
|
def lmPointToParam(xp, yp):
    """ Return the line parameters of a landmark from its coordinates.

    Wall landmarks are characterized by the point (xp, yp) where the wall
    line meets its perpendicular through the origin.  The wall line
    y = ax + c is returned as [a, c].

    NOTE: yp must be non-zero (a horizontal position on the x-axis would
    divide by zero) — assumption carried over from the original.
    """
    slope = -xp / yp
    # c = yp - slope*xp simplifies to yp * (1 + slope^2) for this geometry.
    intercept = yp * (1 + slope ** 2)
    return [slope, intercept]
|
b9496470a099be4de08d1502a6eec2a6626ff0e8
| 76,531
|
def text_formatter(team: list) -> str:
    """
    Render a list of [name, value] pairs as "name - value" lines.

    from:
    [['player1', 'Sorlag'], ['player2', 'Nyx']]
    to:
    player1 - Sorlag
    player2 - Nyx
    (each line, including the last, is newline-terminated)

    :param team: list of lists of strings
    :return: str
    """
    return "".join(" - ".join(member) + "\n" for member in team)
|
761ae94f7aa0822922ba3c3658ee819a918144b8
| 76,535
|
def content_lines(lines):
    """Return the lines that are non-empty and begin with '#'."""
    # NOTE(review): despite the original summary ("lines with content"),
    # this keeps ONLY '#'-prefixed lines; callers rely on that behavior.
    return [entry for entry in lines if entry and entry[0] == "#"]
|
9f8e3a484608627da4ada27e13924e0fac697d2b
| 76,537
|
import hashlib
def hash_secret(credential: str):
    """Hash a secret string (so it can be logged safely.)

    Returns the SHA-256 hex digest of *credential*, or the literal string
    'None' when the credential is None.
    """
    if credential is None:
        return 'None'
    return hashlib.sha256(credential.encode('utf-8')).hexdigest()
|
fed696e07259a7ef6a5a04124fc9b53c7f8d01f6
| 76,541
|
import torch
def one_hot(sequence, n_states):
    """
    One-hot encode a list of integers.

    Given a sequence of integers in [0, n_states) returns a tensor of
    shape (len(sequence), n_states); moved to the GPU when CUDA is
    available, otherwise kept on CPU.
    """
    encoded = torch.eye(n_states)[sequence, :]
    return encoded.cuda() if torch.cuda.is_available() else encoded
|
4ab01987f5edd11ca3036887770baa75100d00fe
| 76,543
|
def get_new_dim(imgw, imgh, max_dim=400):
    """Get new image dimensions with the larger side scaled to max_dim.

    Aspect ratio is preserved.  Note that images whose larger side is
    smaller than max_dim are scaled *up* as well (ratio < 1) — this
    matches the original behavior.

    Args:
        imgw: image width.
        imgh: image height.
        max_dim: target size of the larger dimension.
    Returns:
        (new_imgw, new_imgh) tuple of ints.
    """
    # Dead commented-out alternative implementation removed.
    ratio = max(imgw, imgh) / max_dim
    return int(imgw / ratio), int(imgh / ratio)
|
dd25018f993b688c4e6767f8f196c3bcbf77fd7e
| 76,545
|
import re
def get_logout_regex(logout_url):
    """Generates the regular expression to avoid the logout page.

    The URL is regex-escaped, then every backslash is doubled.
    NOTE(review): the doubling suggests the result is embedded somewhere
    that strips one level of backslash escaping (e.g. a config string or
    another regex); used directly as a pattern it would NOT match the
    escaped characters — confirm against the consumer before changing.
    """
    return re.escape(logout_url).replace("\\", "\\\\")
|
0fb8dac365deddd6eba7b1f0227f0551bd9d0d67
| 76,547
|
import random
def random_positive_external_id() -> int:
    """
    Generate a random 15-digit positive integer.

    Uses SystemRandom (OS entropy) rather than the default PRNG.
    """
    rng = random.SystemRandom()
    lower, upper = 10 ** 14, 10 ** 15 - 1
    return rng.randint(lower, upper)
|
c8b5e2d43643729b10af3919a3db261261ba2aad
| 76,550
|
def _Strikethrough(text: str) -> str:
"""Returns given text with strikethrough codes after each character.
Args:
text: The text to strikethrough.
Returns:
str: The given text with strikethrough codes.
"""
return ''.join('\u0336{0:s}'.format(char) for char in text)
|
adb356c2ada62c1dd374a753a28a75a01a1a4b7c
| 76,555
|
def async_set_float(hass, ihc_controller, ihc_id: int, value: float):
    """Set a float value on an IHC controller resource.

    Delegates the blocking ``set_runtime_value_float`` call to the Home
    Assistant executor pool and returns the awaitable produced by
    ``hass.async_add_executor_job`` (so callers can await completion).
    """
    return hass.async_add_executor_job(
        ihc_controller.set_runtime_value_float, ihc_id, value
    )
|
00eadbb9ab2f94d76261dd993c498410cfb12e95
| 76,558
|
def is_letter(char_code):
    """Return True if char_code is an ASCII letter character code.

    Accepts an int code point, or a single-character str/bytes which is
    first converted with ord().
    """
    if isinstance(char_code, (str, bytes)):
        char_code = ord(char_code)
    # 65-90 are 'A'-'Z', 97-122 are 'a'-'z'.
    return 65 <= char_code <= 90 or 97 <= char_code <= 122
|
26ddd52a7904346a0c487b776a77ac26eb8b50f8
| 76,561
|
def design_annotation(name="", description=None, base_model=None, parent=None, conditions=None):
    """
    Annotate programmable designs with metadata (name, parent design,
    conditions) to be loaded prior to function execution.

    Usage:
        from gsmodutils.utils import design_annotation

        @design_annotation(name="Design short name", description="Override doc string", parent="some_parent")
        def gsmdesign_name(model, project):
            ...
            return model

    :param name: Name of the design
    :param description: Long description of what the design is (overrides the function's doc string)
    :param base_model: base model from project to apply design to. Must be valid in the project config
    :param parent: Parent design to be loaded first - must exist in the project
    :param conditions: conditions to be loaded - must exist within the project
    :return: decorator that attaches the metadata as function attributes
    """
    def decorate(func):
        func.name = name
        func.parent = parent
        func.conditions = conditions
        func.base_model = base_model
        # Only override the doc-string-derived description when one is given.
        if description is not None:
            func.description = description
        return func
    return decorate
|
015b109bf7152012a43bd3ea24a9aaab0a4ba69b
| 76,568
|
from typing import Optional
def maybe_int(value) -> Optional[int]:
    """Cast *value* to int, returning None when it cannot be cast.

    Catches TypeError in addition to ValueError: ``int(None)`` (and other
    non-castable types) raises TypeError, which the original let escape
    despite the "or None" contract.
    """
    try:
        return int(value)
    except (ValueError, TypeError):
        return None
|
036aa7df8d50b1a8769b09fa7417bf7fcea72f63
| 76,573
|
def group(*choices: str) -> str:
    """
    Build a grouped regex alternative: group("a", "b") -> "(a|b)".
    """
    alternatives = "|".join(choices)
    return "(" + alternatives + ")"
|
ee51a7d6902ae04875df5817177102e89efd58cb
| 76,574
|
import random
def mutShuffleIndexes(individual, indpb):
    """Shuffle the attributes of *individual* in place and return the mutant.

    The *individual* is expected to be a :term:`sequence`.  Each position is
    independently selected with probability *indpb* and, when selected,
    swapped with a uniformly chosen *other* position.  Usually applied to
    vectors of indices.

    :param individual: Individual to be mutated.
    :param indpb: Independent probability for each attribute to be exchanged
                  to another position.
    :returns: A tuple of one individual.

    Uses :func:`random.random` and :func:`random.randint` (same call order
    as the canonical DEAP implementation, so seeded runs are reproducible).
    """
    size = len(individual)
    for pos in range(size):
        if random.random() >= indpb:
            continue
        # Draw from size-1 slots, then shift past `pos` so a position
        # never swaps with itself.
        other = random.randint(0, size - 2)
        if other >= pos:
            other += 1
        individual[pos], individual[other] = individual[other], individual[pos]
    return individual,
|
fb7b482c554c025a8d2a476a1b1948da5f76cb2a
| 76,575
|
def lookup_dict_path(d, path, extra=None):
    """Look up a nested dictionary value by a path of keys.

    For instance, ``lookup_dict_path(d, [a, b, c])`` returns ``d[a][b][c]``.
    *extra* components, when given, are appended after *path*.  Returns
    None when the combined path is empty.

    The original used a mutable default (``extra=list()``); a None default
    avoids sharing a single list object across calls.
    """
    components = list(path) + (list(extra) if extra is not None else [])
    element = None
    for component in components:
        d = d[component]
        element = d
    return element
|
84380773c9c4a446d22587d41113136588458160
| 76,579
|
def _SubPaths(paths, first_part):
"""Returns paths of sub-tests that start with some name."""
assert first_part
return ['/'.join(p.split('/')[1:]) for p in paths
if '/' in p and p.split('/')[0] == first_part]
|
4b601d14bddefa77d94bdbb17fa5ef81c3ad6da1
| 76,580
|
from typing import Union
from typing import Callable
import inspect
import re
def _name_is_used_in_source(name: str, source: Union[Callable, str]) -> bool:
""" Check if name is used within the source.
This is a naive implementation for now. Should be refactored later.
Parameters
----------
name: an identifier which will be searched for in the source
source: either a function of a source code of a function.
Returns
----------
search_result: whether the source contains a direct reference to the name
"""
assert isinstance(name, str)
assert name.replace("_", "").isalnum() and not name[0].isdigit(), (
"name='" + name + "', must be a valid identifier.")
if callable(source) and hasattr(source, "__wrapped__"):
source = inspect.unwrap(source)
if callable(source):
source = inspect.getsource(source)
searh_pattern = "[^a-zA-Z0-9_]" + name + "[^a-zA-Z0-9_]"
search_outcome = re.search(searh_pattern, source)
search_result = bool(search_outcome)
return search_result
|
c194e60982bb696ddde0b0c33c3a31e7766d3708
| 76,581
|
def result_get(args, result_fds, name):
    """ Get a valid descriptor to the bound result file, or None if the given
    name is not bound.

    Args:
        args       Parsed arguments object carrying `result_directory`
        result_fds Mapping of name -> open file descriptor
        name       Given name
    Returns:
        Valid file descriptor, or None
    """
    # No result directory configured means results are not written at all.
    if args.result_directory is None:
        return None
    return result_fds.get(name)
|
7814946802c7a833393a64866fe3ee12f9e36c05
| 76,584
|
from typing import Any
from typing import Mapping
from typing import Type
import dataclasses
def types_by_field(anything: Any) -> Mapping[str, Type[Any]]:
    """Map an object's fields/attributes to their types.

    Parameters
    ----------
    anything : Any
        Any object.  Dataclasses use their declared field types; other
        objects use the runtime type of each instance attribute.

    Returns
    -------
    Mapping[str, Type[Any]]
        Attribute name -> type.
    """
    if dataclasses.is_dataclass(anything):
        return {field.name: field.type for field in dataclasses.fields(anything)}
    return {attr: type(value) for attr, value in vars(anything).items()}
|
4414516eb405a4936aebcf1dfe5c19d757f8a2f9
| 76,598
|
import math
def Cos(num):
    """Return the cosine of *num* (coerced to float; radians)."""
    value = float(num)
    return math.cos(value)
|
289c633242a43f96daf0076742e9c5e869afa428
| 76,605
|
import re
def filter_list_by_file_extensions(s3_key_names, file_extensions):
    """
    Given a list of s3_key_names and a list of file_extensions, keep only
    the names ending in one of the allowed extensions.
    Each file extension should start with a . dot.

    Uses ``str.endswith`` with a tuple instead of building a regex per
    (name, extension) pair: the original escaped only the extension's
    first character, so any other regex metacharacter in an extension
    would misbehave, and it recompiled a pattern for every comparison.
    """
    allowed = tuple(file_extensions)
    # endswith(()) is always False, so an empty extension list keeps nothing,
    # matching the original behavior.
    return [name for name in s3_key_names if name.endswith(allowed)]
|
a360e98b9832c5c58575184bc7336ad5bc0d7b1a
| 76,610
|
def degseq_to_data(degree_sequence):
    """
    Convert a degree sequence list to a sorted (max-min) integer data type.

    The input degree sequence list (of Integers) is sorted in place and
    folded into one integer: digit i (base 10, least significant first)
    holds the i-th smallest degree, as used for faster access in the
    underlying database.

    INPUT:
    - ``degree_sequence`` -- list of integers; input degree sequence list

    EXAMPLES::
        sage: from sage.graphs.graph_database import degseq_to_data
        sage: degseq_to_data([2,2,3,1])
        3221
    """
    degree_sequence.sort()
    total = 0
    for position, degree in enumerate(degree_sequence):
        total += degree * 10 ** position
    return total
|
d8ee239a447db3f2314ff8e4c8c7602606b85c9b
| 76,611
|
def cp_name(cp):
    """return uniXXXX or uXXXXX(X) as a name for the glyph mapped to this cp."""
    # BMP code points use the 'uni' prefix; supplementary planes use 'u'.
    prefix = 'u' if cp > 0xffff else 'uni'
    return '{}{:04X}'.format(prefix, cp)
|
f43f62fb6d5cfe161d866cec1182c6e4f355b653
| 76,612
|
def CONCAT(src_column, dict_value_column = None):
    """
    Builtin aggregator that combines values from one or two columns in one group
    into either a dictionary value or list value.

    If only one column is given, the values of this column are aggregated
    into a list (order not preserved). For example:
    >>> sf.groupby(["user"],
    ...            {"friends": tc.aggregate.CONCAT("friend")})
    forms a new column "friends" containing values of column "friend"
    aggregated into a list.

    If `dict_value_column` is given, the aggregation forms a dictionary with
    keys taken from src_column and values from `dict_value_column`:
    >>> sf.groupby(["document"],
    ...            {"word_count": tc.aggregate.CONCAT("word", "count")})

    Fix: ``is None`` replaces the non-idiomatic ``== None`` comparison
    (which could misfire for objects overriding ``__eq__``).
    """
    if dict_value_column is None:
        return ("__builtin__concat__list__", [src_column])
    return ("__builtin__concat__dict__", [src_column, dict_value_column])
|
6f28768871c7fd1f9c970fb99511ecbd604124be
| 76,613
|
def matrix_zero(matrix):
    """If an element in an MxN matrix is 0, set its entire row and column to 0.

    The matrix is modified in place and also returned (the original
    returned it only for the empty case).

    Bug fixed: the original ``break`` stopped scanning a row at its FIRST
    zero, so additional zero columns later in the same row were never
    recorded and their columns stayed unzeroed.
    """
    # O(m*n) time, O(m+n) extra space
    m = len(matrix)
    if m == 0:
        return matrix
    n = len(matrix[0])
    zero_rows = set()
    zero_cols = set()
    for i in range(m):
        for j in range(n):
            if matrix[i][j] == 0:
                zero_rows.add(i)
                zero_cols.add(j)
    for i in range(m):
        for j in range(n):
            if i in zero_rows or j in zero_cols:
                matrix[i][j] = 0
    return matrix
|
94ad1b64b760bf67f5a9ee9518721e791e9c3ce6
| 76,619
|
def _retry_default_callback(attempt, exc):
"""Retry an attempt five times, then give up."""
return attempt < 5
|
0aa219630bfe8aa688d1d5b631bc3d68c0f322a7
| 76,621
|
def _CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'model_dir': benchmark_spec.model_dir,
'model': benchmark_spec.benchmark,
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
|
2002cfe09511a0c5b8da09c383cea1eeeae859d1
| 76,626
|
def _exactly_one_specified(*values):
"""Make sure exactly one of ``values`` is truthy.
Args:
values (Tuple[Any, ...]): Some values to be checked.
Returns:
bool: Indicating if exactly one of ``values`` was truthy.
"""
count = sum(1 for value in values if value)
return count == 1
|
ed7ebeeb9828fb84794becca5aaf3afc353b79b2
| 76,631
|
def create_rule_spec(client_factory, rule, operation='add'):
    """Create a ClusterRuleSpec object wrapping *rule* with *operation*."""
    spec = client_factory.create('ns0:ClusterRuleSpec')
    spec.operation = operation
    spec.info = rule
    # Removal must identify the existing rule by its key.
    if operation == 'remove':
        spec.removeKey = rule.key
    return spec
|
5eedf587d5f3c78a4596bd035a74280fcb87f910
| 76,633
|
def _get_local_helper_file_name(_production=False):
"""This function defines the file name of the local helper file to be used with unit testing.
.. versionadded:: 4.1.0
:param _production: Defines whether or not the helper file is associated with a Production environment
:type _production: bool, None
:returns: The file name for the local helper file
"""
if _production is None:
_file_name = 'helper.yml'
else:
_file_name = 'prod_helper.yml' if _production else 'stage_helper.yml'
return _file_name
|
f20ca5cf68aa0845b09caa84e365d3aa589bfced
| 76,636
|
def get_config_value(config_dict, key, default=None):
    """Get configuration value or default value from dictionary.

    Usable for dictionaries which can also contain attribute 'default'.
    Order of preference:
    * attribute value (when present and not None)
    * value of 'default' attribute
    * value of provided default param
    """
    value = config_dict.get(key)
    if value is not None:
        return value
    return config_dict.get("default", default)
|
943f6d5ef51f8d2d5a64f587e83212403defa2ec
| 76,638
|
def _get_day_count_before_year(year):
# type: (int) -> int
"""Retrieves the number of days elapsed before a given year."""
year -= 1
return (year * 365) + (year // 4) - (year // 100) + (year // 400)
|
610889cb56d7d60d9611b949ed64d9946a294cb5
| 76,639
|
def parameter_tuple_maker(parameter_code, code_list, i):
    """
    Pair *parameter_code* with the value at position *i* of *code_list*,
    producing the (mode, value) tuple used by opcode operations.
    """
    value = code_list[i]
    return (parameter_code, value)
|
12d979330d1f2272fbc60796a648d679a01298cb
| 76,640
|
def get_exp(experiment_name, experiments):
    """
    Return the experiment tuple matching *experiment_name*.

    Arguments:
        experiment_name: str
        experiments: list of experiment tuples (name first)
    Returns: tuple

    Raises IndexError when no experiment has that name (unchanged from
    the original behavior).
    """
    matches = [exp for exp in experiments if exp[0] == experiment_name]
    return matches[0]
|
1019f7187e53c79f4e616a38e03a5a30e8976e91
| 76,642
|
def base41_decode(input):
    """Decode a Base41 string.

    input is the string to decode; every 3 characters encode one 16-bit
    word (little-endian bytes in the result).

    The decoded data is returned as a bytearray. A TypeError is raised if
    input is not valid: length not a multiple of 3, a character outside
    the 41-character alphabet (ordinals 41..81), or a 3-char group whose
    value exceeds 0xFFFF.  (The original docstring promised these checks
    but only implemented the length check.)
    """
    rslt = bytearray()
    i = 0
    while i + 2 < len(input):
        c0 = ord(input[i]) - 41
        c1 = ord(input[i + 1]) - 41
        c2 = ord(input[i + 2]) - 41
        if not (0 <= c0 < 41 and 0 <= c1 < 41 and 0 <= c2 < 41):
            raise TypeError("Invalid Base41 string: non-alphabet character")
        x = c0 + 41 * c1 + 1681 * c2
        if x > 0xFFFF:
            raise TypeError("Invalid Base41 string: invalid word")
        rslt.extend([x % 256, x // 256])
        i += 3
    if i != len(input):
        raise TypeError("Invalid Base41 string")
    return rslt
|
af4238cd545aec6b17aac042db9c55227692e135
| 76,645
|
def val_test_func(item):
    """Validation helper for check_valid: everything except 'BAD' passes."""
    return not (item == 'BAD')
|
e2ca28da5727e77566d8ec4ec1b84c60b5a013cb
| 76,646
|
import socket
def is_retryable_exception(exception):
    """Return True if we consider the exception retryable."""
    # socket.error (an OSError alias) usually signals a transient problem
    # connecting to GCP, so it is worth retrying.
    retryable_types = (socket.error,)
    return isinstance(exception, retryable_types)
|
f7c7640feb3d232a8d1779cf1b8253cd7071adb0
| 76,654
|
def flatten(chain):
    """
    Take input from get_option_chain() and flatten it so each contract
    stands on its own — handy for sorting with comprehensions.

    Walks both the call and put expiration maps, every date, every strike.
    """
    return [
        contract
        for side in ("callExpDateMap", "putExpDateMap")
        for date in chain[side]
        for strike in chain[side][date]
        for contract in chain[side][date][strike]
    ]
|
67ddd935cd5ebd4cd4d5d903fc99e40c53141bba
| 76,656
|
def color_of_square(facet, colors=['lpurple', 'yellow', 'red', 'green', 'orange', 'blue']):
    """
    Return the color the facet has in the solved state.

    EXAMPLES::
        sage: from sage.groups.perm_gps.cubegroup import *
        sage: color_of_square(41)
        'blue'
    """
    # Facets are 1-based and come in groups of 8 per face.
    face_index = (facet - 1) // 8
    return colors[face_index]
|
4f99d076c3f733feb6a44d9536033e248065b9ac
| 76,658
|
import hashlib
def xmls_to_sha1s(iml_path_to_content):
    """Map each path to the sha1 hex digest of its (stringified) xml content."""
    return {
        path: hashlib.sha1(str(content).encode('utf-8')).hexdigest()
        for path, content in iml_path_to_content.items()
    }
|
d91117415cd994d66523f2b814499e0ec5af769a
| 76,659
|
def R_minus(x):
    """
    Negative restriction of a variable: x when x < 0, else 0.

    Written as (x - abs(x)) / 2 so the result is always a float for
    numeric input, exactly as the original 0.5 * (x - abs(x)) form.
    """
    return (x - abs(x)) / 2
|
0d2ffb763fd7bf01d52a6a805242c0b1f05b5782
| 76,662
|
def merge(low, high):
    """
    Merge two configuration sources.

    Arguments:
        low -- the source with lower precedence
        high -- the source with higher precedence
    Returns: a merged configuration

    Only keys present in *low* appear in the result; *high* merely
    overrides their values (keys exclusive to *high* are dropped, as in
    the original).  An empty *high* short-circuits to *low* itself.
    """
    if high == {}:
        return low
    return {key: high.get(key, value) for key, value in low.items()}
|
85481c7105ad5a2bd0518c07d9c5c965d96ee3bd
| 76,663
|
def parse_args_into_dict(input_arguments):
    """
    Turn a tuple like (u'input_b=mystr', u'input_c=18') into a dict mapping
    input name to the original string value.  Only the FIRST '=' splits, so
    values may themselves contain '='.

    :param Tuple[Text] input_arguments:
    :rtype: dict[Text, Text]
    """
    pairs = (argument.split("=", 1) for argument in input_arguments)
    return {pair[0]: pair[1] for pair in pairs}
|
56600ef8536823b827d2dae16ec9ca1181c537f8
| 76,666
|
def get_duration(func, min_, max_):
    """
    Find a sub-range of [min_, max_] whose endpoints give *func* values of
    opposite (or zero) sign.

    input:
        func: any single-argument equation
        min_, max_: the search range (max_ must exceed min_)
    return: a (left, right) range; falls back to the last sub-interval when
        no sign change is detected.
    """
    assert max_ > min_
    div = 2
    step = (max_ - min_) / div
    samples = [func(min_ + step * k) for k in range(div)]
    for k in range(div - 1):
        # Product <= 0 means a sign change (or a root) between the samples.
        if samples[k] * samples[k + 1] <= 0:
            return (min_ + k * step, min_ + (k + 1) * step)
    return (max_ - step, max_)
|
b25e15bbe18407c87b6ff2e5049696b1c4373aa0
| 76,669
|
from typing import Any
from typing import Union
def _func_identity(target_attr: Any, *_: Any) -> Union[float, int]:
"""Function for returning the identity of an integer or float."""
return target_attr
|
a0a07332efc5277d97faa2b67167681c36ecb386
| 76,670
|
def crop_array_with_roi(array, roi):
    """
    Crop a roi from an input array.

    :param array: numpy array, indexed [row, col]
    :param roi: list [x, y, w, h] — x/y are the top-left column/row
    :return: cropped array of shape (h, w)

    Fixes the original bounds checks, which (a) used strict '>' and so
    rejected a ROI exactly the size of the array, and (b) never verified
    that y+h / x+w actually stay inside the array.
    """
    x, y, w, h = (int(v) for v in roi)
    assert 0 <= y and y + h <= array.shape[0], "roi is not inside the original array"
    assert 0 <= x and x + w <= array.shape[1], "roi is not inside the original array"
    return array[y:y + h, x:x + w]
|
1b09744d78ffe83d7d6959acf8d1ca473098e724
| 76,671
|
import pathlib
from typing import Optional
def find_snapshot(snapshot_dir: pathlib.Path,
                  snapshot_prefix: str = '',
                  snapshot_root: Optional[pathlib.Path] = None):
    """ Locate the snapshot directory.

    :param Path snapshot_dir:
        The path to the snapshot directory or None to go fishing
    :param str snapshot_prefix:
        The model-specific snapshot prefix
    :param Path snapshot_root:
        The base directory where snapshots are stored
    :returns:
        The path to the snapshot directory or None if one can't be found
    """
    if snapshot_dir is None:
        return None
    snapshot_dir = pathlib.Path(snapshot_dir)
    if snapshot_dir.is_dir():
        return snapshot_dir
    # Without a root there is nowhere else to look.
    if snapshot_root is None:
        return None
    base_name = snapshot_dir.name.strip('-')
    # Try the bare name, then increasingly specific prefixed variants.
    for prefix in ('', snapshot_prefix, 'snapshot-' + snapshot_prefix):
        full_prefix = prefix + '-' if prefix else prefix
        candidate = snapshot_root / (full_prefix + base_name)
        if candidate.is_dir():
            return candidate
    return None
|
2297a2cec42660127bbef7cbda375b16dd9f010f
| 76,673
|
def _get_output_source_group(output, out_id):
"""Return output source group number for a specific data output."""
try:
return output[output['id'] == out_id].iloc[0, 1]
except:
print(f'ERROR: Cannot get output source group for output id {out_id}!')
raise
|
e126b3044960bdde18f5b271aa865a06a0277ca2
| 76,681
|
def format_list(data, wrap_every=3, sep=", ", newline="\n"):
    """Join elements with *sep*, substituting *newline* after every
    *wrap_every*-th element.  Returns "" for empty input."""
    if not data:
        return ""
    pieces = []
    for position, element in enumerate(data, start=1):
        pieces.append(element)
        pieces.append(newline if position % wrap_every == 0 else sep)
    # Drop the trailing separator/newline appended after the last element.
    return "".join(pieces[:-1])
|
df854b81033791274f4d0e260e7c50dad4c19ecc
| 76,683
|
def deg2tenths_of_arcminute(deg):
    """
    Return *deg* converted to tenths of arcminutes (i.e., arcminutes * 10).
    """
    # Same operation order as the original (10 * deg, then * 60) to keep
    # float results bit-identical.
    scaled = 10 * deg
    return scaled * 60
|
2f8ab77642d91eb4e061ad30727ed0098257501c
| 76,686
|
import json
import re
def default_cleaner(word):
    """
    The default cleaner used to get strings into shape.

    Uses the default config.json location (current working directory),
    which contains 'regex' replacement pairs and 'trouble' word mappings.
    Closely based on the optimus pipeline.

    Parameters
    ----------
    word : str
        a string description of some sorts

    Returns
    -------
    str
        a cleaned string

    Fix: the config file is opened via a context manager; the original
    ``json.load(open('config.json'))`` leaked the file handle.
    """
    with open('config.json') as config_file:
        config = json.load(config_file)
    w = str(word).lower()
    for regex, replacement in config['regex']:
        w = re.sub(regex, replacement, w)
    for key, value in config['trouble'].items():
        w = re.sub(key, value, w)
    return w
|
ce16d19ab7ba14048a6fb224647d791318b1db0d
| 76,687
|
def format_deadline(deadline):
    """
    Format *deadline* as HH:MM on a 24-hour clock; the sentinel 'EOD'
    maps to '17:00'.
    """
    if deadline == 'EOD':
        return '17:00'
    return '{:02}:{:02}'.format(deadline.hour, deadline.minute)
|
02fba889195f24b142ff035e67558671ad01502f
| 76,690
|
def _success(code=200, **body_data):
"""Return successfully"""
return {
"hark_ok": True,
**body_data,
}
|
28e2706566e0d514fe8761e594c8caf6fd15676f
| 76,692
|
def set_perms(tarinfo):
    """
    Set permissions for a file going into a tar.

    Regular files get 664, shell scripts (*.sh) 774, and everything
    else (directories etc.) 775 — all group-writable.

    :param tarinfo: the tarinfo for the file
    :return: the modified tarinfo object
    """
    if not tarinfo.isfile():
        octal = '775'
    elif tarinfo.name.endswith('.sh'):
        octal = '774'
    else:
        octal = '664'
    tarinfo.mode = int(octal, 8)  # octal string to numeric mode
    return tarinfo
|
c219b7c95df5f57ddf0cb064f4940211414e0a47
| 76,693
|
from typing import Tuple
def coord_to_address(s: Tuple[int, int], magnification: int) -> str:
    """Convert a coordinate to an address string.

    Args:
        s (tuple[int, int]): coordinate as an (x, y) tuple
        magnification (int): magnification factor

    Returns:
        str: an "x{X}_y{Y}_z{Z}" address
    """
    x, y = s[0], s[1]
    return "x{}_y{}_z{}".format(x, y, magnification)
|
5c158b4555f04867376f3d3a2b69ab6be3b6f93d
| 76,694
|
def get_uploader_image(base_images_project):
    """Returns the uploader base image in |base_images_project|."""
    return 'gcr.io/{}/uploader'.format(base_images_project)
|
33826af3151401c6e498f6908b2ffe03e43a077f
| 76,697
|
def process_rain_data(traffic_data, rain_data):
    """
    Prepare data to make plots regarding rainfall. Add a column storing rain information to the traffic DataFrame.
    :param DataFrame traffic_data: DataFrame containing traffic data (datetime-indexed — required by resample)
    :param DataFrame rain_data: DataFrame containing rain data with a 'rain' column
    :return: Processed Pandas DataFrame with added 'rain', 'Rain Level' and 'hour' columns
    """
    # Since the rain data is hourly, we have to down sample traffic data to per hour
    df = traffic_data.resample('1H').mean().dropna().copy()
    # Index-aligned assignment — assumes rain_data shares the hourly
    # timestamp index of the resampled traffic data; TODO confirm.
    df['rain'] = rain_data['rain']
    df = df.dropna() # Drop missed data rows
    def rain_grouper(value):
        """
        Group value into rainfall groups
        :param float value:
        :return: Group number in int
        """
        # NOTE(review): a negative value falls through every branch and
        # yields None — presumably rainfall is never negative; confirm.
        if value == 0:
            return 0
        elif 0 < value <= 1:
            return 1
        elif 1 < value <= 5:
            return 2
        elif 5 < value <= 10:
            return 3
        elif 10 < value <= 20:
            return 4
        elif value > 20:
            return 5
    df['Rain Level'] = df['rain'].apply(rain_grouper)
    # Hour of day (0-23) pulled from the DatetimeIndex, for per-hour plots.
    df['hour'] = df.index.hour
    return df
|
f670ede2b97d0fa60f533bd3e62ee37a1a120e1c
| 76,698
|
def moments_close_enough(t1, t2):
    """
    Return True if the two times are within 0.1 s of each other.
    Works around databases losing time precision.

    Bug fixed: the original compared the SIGNED difference, so whenever
    t1 was earlier than t2 the (negative) difference was always < 0.1
    and arbitrarily distant instants counted as "close".

    :param t1: Instant 1
    :param t2: Instant 2
    :return: Closeness boolean
    """
    return abs((t1 - t2).total_seconds()) < 0.1
|
af2c9a1dbf193c99bc9123cd5ec44bde07113ff8
| 76,700
|
def compute_edge_weight(hist_one, hist_two, weight_func):
    """
    Compute the edge weight between two histograms.

    Parameters
    ----------
    hist_one : sequence
        First histogram
    hist_two : sequence
        Second histogram
    weight_func : callable
        Distance (or metric) to evaluate on the pair of histograms — one of
        the metrics implemented in medpy.metric.histogram, or another valid
        callable.

    Returns
    -------
    float
        Distance or metric between the two histograms
    """
    return weight_func(hist_one, hist_two)
|
ca3f89f6142ba8bc8ad5b9aacb7ae196f8af782f
| 76,706
|
import requests
import yaml
def current_versions(string='all'):
    """Get current versions of hosted datasets from BTK API.

    Performs an HTTP GET against the public BlobToolKit search endpoint and
    returns a dict mapping assembly prefix -> version number.  Entries
    without an explicit 'version' field default to 1.  Returns an empty
    dict when the request fails (non-2xx response).
    """
    btk = "https://blobtoolkit.genomehubs.org/api/v1/search/%s" % string
    response = requests.get(btk)
    current = {}
    if response.ok:
        # The endpoint returns JSON; yaml.full_load parses it as a superset.
        data = yaml.full_load(response.text)
        for asm in data:
            if 'version' in asm:
                current.update({asm['prefix']: asm['version']})
            else:
                current.update({asm['prefix']: 1})
    return current
|
a38acc47e8eba468b780d6c4f720cfdc3ad5f6a0
| 76,711
|
def roundany(x, base):
    """
    Round *x* (int or float) to the closest multiple of *base*.

    Ties follow built-in round()'s banker's rounding.
    """
    multiples = round(x / base)
    return base * multiples
|
d9503b8c2923b3d6c1ddc0c8df6728696cd6cd34
| 76,712
|
def get_config(cfg):
    """
    Sets the hyper parameter for the optimizer and experiment using the config file
    Args:
        cfg: A YACS config object.
    Returns:
        dict: nested "train_params"/"data_params" settings copied out of cfg.
    """
    config_params = {
        "train_params": {
            # Adaptation / learning-rate schedule knobs from the SOLVER section
            "adapt_lambda": cfg.SOLVER.AD_LAMBDA,
            "adapt_lr": cfg.SOLVER.AD_LR,
            "lambda_init": cfg.SOLVER.INIT_LAMBDA,
            "nb_adapt_epochs": cfg.SOLVER.MAX_EPOCHS,
            "nb_init_epochs": cfg.SOLVER.MIN_EPOCHS,
            "init_lr": cfg.SOLVER.BASE_LR,
            "batch_size": cfg.SOLVER.TRAIN_BATCH_SIZE,
            "optimizer": {
                "type": cfg.SOLVER.TYPE,
                "optim_params": {
                    "momentum": cfg.SOLVER.MOMENTUM,
                    "weight_decay": cfg.SOLVER.WEIGHT_DECAY,
                    "nesterov": cfg.SOLVER.NESTEROV,
                },
            },
        },
        "data_params": {
            # "dataset_group": cfg.DATASET.NAME,
            # Domain-adaptation naming: e.g. SOURCE="mnist", TARGET="usps" -> "mnist2usps"
            "dataset_name": cfg.DATASET.SOURCE + "2" + cfg.DATASET.TARGET,
            "source": cfg.DATASET.SOURCE,
            "target": cfg.DATASET.TARGET,
            "size_type": cfg.DATASET.SIZE_TYPE,
            "weight_type": cfg.DATASET.WEIGHT_TYPE,
        },
    }
    return config_params
|
93be3e07140305bb998b8ec454fafc0171616432
| 76,715
|
def extract_unique_elements(lst: list, ordered=True) -> list:
    """Extract the unique elements of *lst* as a list; sorted by default.

    Args:
        lst (list): input list from which we want the unique elements
        ordered (bool): whether the output is sorted; True by default

    Returns:
        list: the unique values
    """
    unique = set(lst)
    return sorted(unique) if ordered else list(unique)
|
e0d988f11275eeb78fd6eb2a5b355764bcfe3cca
| 76,719
|
import torch
def _sort_batch_by_length(tensor, sequence_lengths):
    """
    Sorts input sequences by lengths. This is required by Pytorch
    `pack_padded_sequence`. Note: `pack_padded_sequence` has an option to
    sort sequences internally, but we do it by ourselves.

    Args:
        tensor: Input tensor to RNN [batch_size, len, dim].
        sequence_lengths: Lengths of input sequences (1-D tensor, one entry
            per batch element).
    Returns:
        sorted_tensor: Sorted input tensor ready for RNN [batch_size, len, dim].
        sorted_sequence_lengths: Sorted lengths.
        restoration_indices: Indices to recover the original order.
    """
    # Sort sequence lengths (descending, as pack_padded_sequence expects)
    sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
    # Sort sequences along the batch dimension using the same permutation
    sorted_tensor = tensor.index_select(0, permutation_index)
    # Find indices to recover the original order: build [0..N-1] on the same
    # device/layout as sequence_lengths, then invert the permutation by
    # sorting it and indexing the range with the inverse mapping.
    index_range = sequence_lengths.data.clone().copy_(torch.arange(0, len(sequence_lengths))).long()
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return sorted_tensor, sorted_sequence_lengths, restoration_indices
|
0fd0df95135e82b02ce85f29c366e01c3e67409b
| 76,724
|
def clamp(arrs, boolexpr):
    """
    Clamp numpy arrays to a boolean expression

    Parameters
    ----------
    arrs: Length-k list of [ndarray(N)]
        List of arrays
    boolexpr: ndarray(N, dtype=np.boolean)
        Boolean expression

    Returns
    -------
    Length-k list of [ndarray(M)], where M is
    the number of true evaluations in boolexpr
    """
    masked = []
    for arr in arrs:
        # Apply the same boolean mask to every array in turn.
        masked.append(arr[boolexpr])
    return masked
|
0d7cdc44d63ea1c074f457ab1c528a74631fcc5c
| 76,728
|
def extract_demonym(demonyms, gender):
    """
    Search through the list of demonyms and find the right one by gender

    :param demonyms: iterable of claim objects exposing ``getTarget()`` and
        ``qualifiers`` — presumably pywikibot Wikidata claims; TODO confirm
    :param gender: may be male (u'Q499327') or female (u'Q1775415')
    :return: demonym in Serbian language (empty string if none matches)
    """
    description = u''
    for demonym in demonyms:
        local_demonym = demonym.getTarget()  # monolingual-text value of the claim
        if local_demonym.language == u'sr':  # only consider Serbian-language demonyms
            demonym_qualifiers = demonym.qualifiers
            # P518 ("applies to part") qualifier is used here to carry gender.
            if 'P518' in demonym_qualifiers:
                demonym_gender = demonym_qualifiers['P518']
                if len(demonym_gender) > 1:
                    # NOTE(review): multiple gender qualifiers abort the whole
                    # process via exit() — deliberate? Consider raising instead.
                    exit()
                demonym_gender = demonym_gender[0].target.id
                if demonym_gender == gender:
                    # First gender-matching Serbian demonym wins.
                    description += local_demonym.text
                    break
    return description
|
1e35a61db65778f98cdbd814d0e9f223a232d675
| 76,732
|
def format_failure(failure):
    """Render an error or warning object as a single human-readable line."""
    template = "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}"
    fields = (
        failure.getLine(),
        failure.getColumn(),
        failure.getErrorId(),
        failure.getMessage(),
        failure.getCategoryAsString(),
        failure.getSeverity(),
    )
    return template.format(*fields)
|
dda395cf9c6c25f883d6308f010749f8425e7235
| 76,735
|
def guess_display_type(track_type):
    """
    Returns the possible display type to use for a given track type.

    :param str track_type: the type of the track
    :return: the type of the display to use for the given track type
        ("LinearBasicDisplay" for any unknown track type)
    :rtype: str
    """
    displays = {
        "AlignmentsTrack": "LinearAlignmentsDisplay",
        "VariantTrack": "LinearVariantDisplay",
        "ReferenceSequenceTrack": "LinearReferenceSequenceDisplay",
        "QuantitativeTrack": "LinearBasicDisplay",
        "FeatureTrack": "LinearBasicDisplay",
    }
    # dict.get with a default replaces the membership test + else branch.
    return displays.get(track_type, "LinearBasicDisplay")
|
0a660280c2a244253859517529ff348bd59402f6
| 76,746
|
def gen_ngrams(tokens: list[str], min_n: int, max_n: int) -> list[str]:
    """
    Create all ngrams from `tokens` where n is between `min_n`, `max_n`, inclusive.
    Each ngram is the underscore-joined run of `n` consecutive tokens.
    """
    ngrams = []
    for size in range(min_n, max_n + 1):
        for start in range(len(tokens) - size + 1):
            ngrams.append("_".join(tokens[start : start + size]))
    return ngrams
|
aa90c6af9a01e92a2dcbda9e29555a7d1b760337
| 76,755
|
def ret_start_point(pn: str, keyword: bytes):
    """
    Find the starting call in a trace file and return (call id, thread id).

    Scans for the first line containing both ``keyword`` and ``b"0x0"``
    (an offset-zero entry), e.g.::

        CALLID[3] TID[3756] IJ T2M 0x63621040->0x65cf6450(avformat-gp-57.dll!avformat_open_input+0x0)

    Matching is case sensitive. Raises if no such line exists.
    """
    with open(pn, 'rb') as trace:
        # Stream line by line instead of materialising the whole file.
        for line in trace:
            if keyword not in line or b"0x0" not in line:
                continue
            cid = int(line.split(b"CALLID[")[1].split(b"]")[0])
            tid = int(line.split(b"TID[")[1].split(b"]")[0])
            return cid, tid
    raise Exception("Cannot find the starting function from the trace file")
|
435fa2f5a65b93ca427d6afb0b1e817f44bfef10
| 76,758
|
def get_rect_ymin(data):
    """Find minimum y value from four (x,y) vertices."""
    return min(data[i][1] for i in range(4))
|
aebfbeab4bbb0bc15478ce37809a241d86a681a7
| 76,759
|
def logOut(command):
    """ Check if command is Quit (b | q | x | e). """
    # Normalize once instead of re-stripping/lowering for every comparison.
    return command.strip().lower() in {
        "b", "back", "q", "quit", "x", "exit", "e",
    }
|
dfd7a9b2d9cf28091ba6ad88a451d943a8eb1e1f
| 76,761
|
def state_to_id(state):
    """Convert a Brazilian state abbreviation to its IBGE database ID.

    Raises KeyError if the state is invalid. Case-insensitive."""
    ids_by_state = {
        'AC': 12, 'AL': 27, 'AP': 16, 'AM': 13, 'BA': 29,
        'CE': 23, 'DF': 53, 'ES': 32, 'GO': 52, 'MA': 21,
        'MT': 51, 'MS': 50, 'MG': 31, 'PA': 15, 'PB': 25,
        'PR': 41, 'PE': 26, 'PI': 22, 'RJ': 33, 'RN': 24,
        'RS': 43, 'RO': 11, 'RR': 14, 'SC': 42, 'SP': 35,
        'SE': 28, 'TO': 17,
    }
    return ids_by_state[state.upper()]
|
5d57e6578881caa5ad2ee2e58ea6f290771c3c58
| 76,762
|
def _GetPlotData(chart_series, anomaly_points, anomaly_segments):
"""Returns data to embed on the front-end for the chart.
Args:
chart_series: A series, i.e. a list of (index, value) pairs.
anomaly_points: A series which contains the list of points where the
anomalies were detected.
anomaly_segments: A list of series, each of which represents one segment,
which is a horizontal line across a range of values used in finding
an anomaly.
Returns:
A list of data series, in the format accepted by Flot, which can be
serialized as JSON and embedded on the page.
"""
data = [
{
'data': chart_series,
'color': '#666',
'lines': {'show': True},
'points': {'show': False},
},
{
'data': anomaly_points,
'color': '#f90',
'lines': {'show': False},
'points': {'show': True, 'radius': 4}
},
]
for series in anomaly_segments:
data.append({
'data': series,
'color': '#f90',
'lines': {'show': True},
'points': {'show': False},
})
return data
|
fb81c506a5682e2af006a84f64f35e99ed5d7718
| 76,778
|
def get_rel_parts(relations_gold):
    """Extract only discourse relation parts/spans of token ids by relation id from CoNLL16st corpus.

    rel_parts[14905] = {
        'Arg1': (879, 880, 881, 882, 883, 884, 885, 886),
        'Arg1Len': 46,
        'Arg2': (877, 889, 890, 891, 892, 893, 894),
        'Arg2Len': 36,
        'Connective': (878, 888),
        'ConnectiveLen': 6,
        'Punctuation': (),
        'PunctuationLen': 0,
        'PunctuationType': '',
        'DocID': 'wsj_1000',
        'ID': 14905,
        'TokenMin': 877,
        'TokenMax': 894,
        'TokenCount': 17,
    }
    """
    rel_parts = {}
    # dict.items() replaces the Python-2-only iteritems(), which raises
    # AttributeError on Python 3.
    for rel_id, gold in relations_gold.items():
        doc_id = gold['DocID']
        punct_type = gold['Punctuation']['PunctuationType']
        # short token lists from detailed/gold format to only token id
        arg1_list = tuple( t[2] for t in gold['Arg1']['TokenList'] )
        arg2_list = tuple( t[2] for t in gold['Arg2']['TokenList'] )
        conn_list = tuple( t[2] for t in gold['Connective']['TokenList'] )
        punct_list = tuple( t[2] for t in gold['Punctuation']['TokenList'] )
        all_list = sum([list(arg1_list), list(arg2_list), list(conn_list), list(punct_list)], [])
        # character lengths of parts
        arg1_len = sum(( (e - b)  for b, e in gold['Arg1']['CharacterSpanList'] ))
        arg2_len = sum(( (e - b)  for b, e in gold['Arg2']['CharacterSpanList'] ))
        conn_len = sum(( (e - b)  for b, e in gold['Connective']['CharacterSpanList'] ))
        punct_len = sum(( (e - b)  for b, e in gold['Punctuation']['CharacterSpanList'] ))
        # save relation parts
        rel = {
            'Arg1': arg1_list,
            'Arg1Len': arg1_len,
            'Arg2': arg2_list,
            'Arg2Len': arg2_len,
            'Connective': conn_list,
            'ConnectiveLen': conn_len,
            'Punctuation': punct_list,
            'PunctuationLen': punct_len,
            'PunctuationType': punct_type,
            'DocID': doc_id,
            'ID': rel_id,
            'TokenMin': min(all_list),
            'TokenMax': max(all_list),
            'TokenCount': len(all_list),
        }
        rel_parts[rel_id] = rel
    return rel_parts
|
dbad395977c123617b0c7f8a65efdd61ae3a0382
| 76,779
|
def similarity_to_connection(matrix, rows, columns, threshold):
    """ Convert similarity to connections

    Parameters
    ----------
    matrix : np.array
        Similarity matrix
    rows : list
        Components 0
    columns : list
        Components 1
    threshold : float
        Threshold for connections

    Returns
    -------
    connections : list (tuples)
        Connections between components
    """
    connections = []
    n_outer, n_inner = matrix.shape[0], matrix.shape[1]
    for i in range(n_outer):
        for j in range(n_inner):
            # A similarity below the threshold counts as a connection.
            if matrix[i, j] < threshold:
                connections.append([columns[j], rows[i]])
    return connections
|
b698a6161a22beab9e50ab99baf65ff121000b30
| 76,788
|
def hamming_distance(seq1, seq2):
    """
    Calculate the Hamming distance between two sequences
    of the same length.

    The Hamming distance corresponds to the number of characters
    that differ between these two sequences.

    Args:
        seq1 (str), seq2 (str): Sequences to compare.

    Returns:
        Hamming distance (int) of seq1 and seq2.

    Raises:
        ValueError: if the sequences do not have the same length.
    """
    # zip() would silently truncate the longer sequence, so validate first.
    if len(seq1) != len(seq2):
        raise ValueError("Sequences must have the same length")
    # Generator avoids materialising an intermediate list.
    return sum(char1 != char2 for char1, char2 in zip(seq1, seq2))
|
2e71d1276498a71cd6ca196d525560a7ba8e7046
| 76,789
|
def create_card(conn, card):
    """
    Create a new card into the card table

    :param conn: open sqlite3 connection
    :param card: (id, number, pin, balance) tuple of column values
    :return: rowid of the inserted card
    """
    sql = ''' INSERT INTO card(id,number,pin,balance)
              VALUES(?,?,?,?) '''
    cursor = conn.cursor()
    cursor.execute(sql, card)
    # Persist the insert before handing back the rowid.
    conn.commit()
    return cursor.lastrowid
|
ab1833aa1cb21dda182193a146cca740635231f8
| 76,793
|
def sum_diagonal(i_list: list) -> int:
    """
    Sum all the elements on the main diagonal of the matrix.

    :param i_list: The source list (list of rows)
    :return: The sum of the elements at position [k][k]
    """
    return sum(row[k] for k, row in enumerate(i_list))
|
c78fe5859bacc74971c209354911f2b639834162
| 76,798
|
def byte_align(pos):
    """Round ``pos`` up to the nearest multiple of 8 (``pos`` itself if
    already aligned). Raises ``ValueError`` if ``pos`` is negative.
    """
    if pos < 0:
        msg = "Expected positive integer, got {}"
        raise ValueError(msg.format(pos))
    remainder = pos % 8
    return pos if remainder == 0 else pos + (8 - remainder)
|
50f050da2c24f35e7c53b887a3b81c23458ddbef
| 76,803
|
def get_csr_position(index, shape, crow_indices, col_indices):
    """Return the position of index in CSR indices representation. If
    index is not in indices, return None.
    """
    row, col = index
    # Binary search within this row's slice of col_indices.
    lo = crow_indices[row]
    hi = crow_indices[row + 1] - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        entry = col_indices[mid]
        if entry == col:
            return mid
        if entry < col:
            lo = mid + 1
        else:
            hi = mid - 1
    # Column not present in this row.
    return None
|
baf115707a23963a253470ebc7a63cd49cd87278
| 76,805
|
import re
def my_replacements(text):
    """
    Clean review text: strip HTML markup, stray quote characters, and my
    recurring "see my blog for the full review" plugs.

    :param text:
        Text to process
    :return:
        Processed text
    """
    # (pattern, replacement) pairs, applied in order.
    substitutions = (
        (r'<(.*?)>', ' '),   # HTML tags encapsulated within <>
        (r'\n', ' '),        # newline characters
        (r' ', ' '),    # HTML non-breaking spaces
        (r'\"', ''),         # explicit double quotation marks
        (r"\'", ''),         # explicit single quotation marks
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    # Remove blog-plug phrases entirely.
    blog_plugs = ["For my full review", "For a full review", "check out my blog",
                  "Read my full review at my blog", "review can be found in my blog",
                  "A full review is available on my blog", "review is up on my blog",
                  "full review", "my blog"]
    for plug in blog_plugs:
        text = re.sub(plug, '', text)
    return text
|
0d38768f7ab617dac8825ae9a90d97af2eb883ba
| 76,809
|
def ChapmanKolmogorovTest(MSM_object, nsets, memberships=None, error_estimation=False, mlags=2):
    """Run the Chapman-Kolmogorov test on the given MSM to validate it.

    Thin wrapper that forwards all options to the model's own ``cktest``
    method and returns its result unchanged.
    """
    ck_result = MSM_object.cktest(
        nsets, memberships=memberships, err_est=error_estimation, mlags=mlags
    )
    return ck_result
|
23c0560c16f78ce0562f82ed642dea78f6c14289
| 76,813
|
import torch
def log_binom(n, k):
    """Return the log of the binomial coefficient C(n, k), computed via
    log-gamma: lgamma(n+1) - lgamma(k+1) - lgamma(n-k+1)."""
    log_n_factorial = torch.lgamma(n + 1)
    log_k_factorial = torch.lgamma(k + 1)
    log_nk_factorial = torch.lgamma((n - k) + 1)
    return log_n_factorial - (log_k_factorial + log_nk_factorial)
|
b9275e8043533790cef92dcf9e65226a3e84dc6f
| 76,816
|
def cf(device, field):
    """Get the value for the specified custom field name.

    This, combined with a prefetch_related(), results in much more
    efficient access of custom fields and their values. See:
    https://github.com/digitalocean/netbox/issues/3185

    Be warned that this treats empty values as non-existing fields.
    Returns None when no custom field with that name is present.
    """
    matching_values = (
        cfv.value
        for cfv in device.custom_field_values.all()
        if cfv.field.name == field
    )
    # First match wins; None when the field is absent.
    return next(matching_values, None)
|
05228fe2bc36d9f9e870bc5522e616d83ef93ba2
| 76,817
|
def get_moya_interface(context, obj):
    """Get a Moya context interface, from an object if available.

    Objects exposing ``__moyacontext__`` are asked for their interface;
    anything else is returned unchanged.
    """
    _missing = object()
    hook = getattr(obj, "__moyacontext__", _missing)
    if hook is _missing:
        return obj
    return hook(context)
|
1d4c1fb448c8e57b95aa4e0ed9a34204fa6f3d23
| 76,822
|
def selection(list):
    """Selection sort: sorts the list in place and returns it."""
    # NOTE(review): parameter name shadows the builtin `list`; kept as-is
    # for interface compatibility.
    size = len(list)
    if size < 2:
        return list
    for fill in range(size):
        # Find the smallest remaining element and swap it into position.
        smallest = fill
        for probe in range(fill + 1, size):
            if list[probe] < list[smallest]:
                smallest = probe
        list[fill], list[smallest] = list[smallest], list[fill]
    return list
|
2c8f3f68da28affed9ef5c761d0635f9c683c918
| 76,825
|
def layer_counts(layer):
    """counts the digits in a layer

    args: layer (a tuple of digits 0-9)
    returns: dictionary with keys 0-9, values are the frequency of the key
    """
    counts = {}
    for digit in range(10):
        counts[digit] = layer.count(digit)
    return counts
|
bf6096b4fb4e335b0b43aea566117f51d4fd2e60
| 76,827
|
import collections
import csv
def load_barcode_csv(barcode_csv):
    """ Load a csv file of (genome,barcode) rows.

    Args:
        barcode_csv: path to a CSV file whose rows are ``genome,barcode``.

    Returns:
        defaultdict(list) mapping genome -> list of barcodes.

    Raises:
        ValueError: if any row does not have exactly two columns.
    """
    bcs_per_genome = collections.defaultdict(list)
    # newline='' lets the csv module do its own newline handling, as the
    # csv docs require for file objects passed to csv.reader.
    with open(barcode_csv, 'r', newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            if len(row) != 2:
                raise ValueError("Bad barcode file: %s" % barcode_csv)
            (genome, barcode) = row
            bcs_per_genome[genome].append(barcode)
    return bcs_per_genome
|
a54f31e02f6883460179ec898f2a0f9d804462c2
| 76,828
|
def gas_driven_generator(onset, duration, amount):
    """
    Generates a function of time which will serve as the infall history
    in Msun yr^-1 onto the galaxy.

    Parameters
    ==========
    onset :: real number
        The time at which the starburst should start in Gyr
    duration :: real number
        The length of the starburst in Gyr
    amount :: real number
        The amount of "extra" gas to be added to the ISM over this time
        interval

    Returns
    =======
    The function describing the infall rate with time in Msun yr^-1
    """
    base_rate = 9.1
    # 1e-9 converts the total extra mass to a per-year rate over the burst.
    burst_rate = base_rate + (1.e-9 * amount) / duration
    burst_end = onset + duration
    def infall(t):
        # Elevated rate only inside the half-open burst window [onset, end).
        return burst_rate if onset <= t < burst_end else base_rate
    return infall
|
69da332c41be13ce51b5ceae7efb5bb6a38028be
| 76,829
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.